I'm creating an endpoint that allows a user to upload several files at the same time and store them in S3. Currently I'm able to achieve this using MultipartReader and s3manager, but only sequentially — one file at a time.
I'm trying to use goroutines to speed this up and have multiple files uploaded to S3 concurrently, but a data race error is causing trouble. I suspect *s3manager might not be goroutine-safe, even though the docs say it is. (The code works correctly if the go statement is replaced with a direct function call.)
Could implementing mutex locks possibly fix my error?
func uploadHandler(w http.ResponseWriter, r *http.Request) {
counter := 0
switch r.Method {
// GET to display the upload form.
case "GET":
err := templates.Execute(w, nil)
if err != nil {
log.Print(err)
}
// POST uploads each file and sends them to S3
case "POST":
c := make(chan string)
// grab the request.MultipartReader
reader, err := r.MultipartReader()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// copy each part to destination.
for {
part, err := reader.NextPart()
if err == io.EOF {
break
}
// if part.FileName() is empty, skip this iteration.
if part.FileName() == "" {
continue
}
counter++
go S3Upload(c, part)
}
for i := 0; i < counter; i++ {
fmt.Println(<-c)
}
// displaying a success message.
err = templates.Execute(w, "Upload successful.")
if err != nil {
log.Print(err)
}
default:
w.WriteHeader(http.StatusMethodNotAllowed)
}
}
func S3Upload(c chan string, part *multipart.Part) {
bucket := os.Getenv("BUCKET")
sess, err := session.NewSession(&aws.Config{
Region: aws.String(os.Getenv("REGION"))},
)
if err != nil {
c <- "error occured creating session"
return
}
uploader := s3manager.NewUploader(sess)
_, err = uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String(bucket),
Key: aws.String(part.FileName()),
Body: part,
})
if err != nil {
c <- "Error occurred attempting to upload to S3"
return
}
// successful upload
c <- "successful upload"
}