How to handle large file upload to Google Bucket?

Question

I have the following Golang code to upload a file to a Google bucket:

    package main

    import (
        "context"
        "fmt"
        "io"
        "net/http"
        "os"
        "time"

        "cloud.google.com/go/storage"
    )

    var serverPort = ":8444"
    var googleCredential = "./credential.json"
    var googleBucket = "g1-BUCKET-001"
    var googleFolder = "test_folder/"

    func uploadToBucket(w http.ResponseWriter, r *http.Request) {
        t1 := time.Now()
        fmt.Println("File Upload Endpoint Hit")

        // Parse our multipart form, 10 << 20 specifies a maximum
        // upload of 10 MB files.
        r.ParseMultipartForm(10 << 20)
        file, handler, err := r.FormFile("myFile")
        if err != nil {
            fmt.Fprintf(w, fmt.Sprintf("Error uploading file: %v", err))
            return
        }
        defer file.Close()
        fmt.Printf("Uploaded File: %+v\n", handler.Filename)
        fmt.Printf("File Size: %+v\n", handler.Size)
        fmt.Printf("MIME Header: %+v\n", handler.Header)

        // Upload file to bucket
        os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", googleCredential)
        ctx := context.Background()
        client, err := storage.NewClient(ctx)
        if err != nil {
            fmt.Fprintf(w, fmt.Sprintf("Error creating storage.NewClient: %v", err))
            return
        }
        defer client.Close()
        fmt.Println("Bucket client created.")

        // Set timeout
        ctx, cancel := context.WithTimeout(ctx, time.Second*7200)
        defer cancel()

        // Create bucket object for stream copy
        destFilePath := googleFolder + handler.Filename
        fmt.Printf("Target bucket: gs://" + googleBucket + "/.\n")
        fmt.Printf("Destination file path: " + destFilePath + ".\n")
        o := client.Bucket(googleBucket).Object(destFilePath)
        o = o.If(storage.Conditions{DoesNotExist: true})

        // Upload an object with storage.Writer.
        wc := o.NewWriter(ctx)
        if _, err = io.Copy(wc, file); err != nil {
            fmt.Fprintf(w, fmt.Sprintf("io.Copy error: %v", err))
            return
        }
        if err := wc.Close(); err != nil {
            fmt.Fprintf(w, fmt.Sprintf("Writer.Close() error: %v", err))
            return
        }
        fmt.Printf("%s uploaded to gs://%s/%s.", handler.Filename, googleBucket, googleFolder)

        t2 := time.Now()
        diff := t2.Sub(t1)
        fmt.Printf("Time start: %+v\n", t1)
        fmt.Printf("Time end: %+v\n", t2)
        fmt.Printf("Time diff: %+v\n", diff)

        // Return that we have successfully uploaded our file!
        fmt.Fprintf(w, "Successfully Uploaded File\n")
    }

    func setupRoutes() {
        http.HandleFunc("/upload", uploadToBucket)
        http.ListenAndServe(serverPort, nil)
    }

    func main() {
        setupRoutes()
    }

It works fine with files around 100MB. However, when the file reaches 1GB+, the wait time is so long that the user may think the code has stopped working and quit before it finishes. For now, all they get is this line at the end:

    fmt.Fprintf(w, "Successfully Uploaded File\n")

How can I implement a way to give the user some feedback, such as a completion bar?

Answer 1

Score: 1

Here's one option to add progress tracking to an arbitrary io.Writer such as you get from o.NewWriter(ctx). Using io.MultiWriter you can duplicate writes to multiple writers. One can be your o.NewWriter(ctx) and another can be an implementation of io.Writer that simply counts bytes and can compare against a total size.

You could then run a goroutine that could periodically update progress by writing to stdout (or something else).
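
For illustration, a byte-counting writer of that kind could look like the minimal, self-contained sketch below. It copies a fake in-memory payload instead of a real upload, and the countingWriter name is just a placeholder for this sketch, not a library type:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "sync/atomic"
        "time"
    )

    // countingWriter counts the bytes written through it so progress can be
    // compared against a known total. It never touches the data itself.
    type countingWriter struct {
        total   int64
        written int64
    }

    func (c *countingWriter) Write(b []byte) (int, error) {
        atomic.AddInt64(&c.written, int64(len(b)))
        return len(b), nil
    }

    // Progress returns a completion ratio in the range [0, 1].
    func (c *countingWriter) Progress() float64 {
        if c.total == 0 {
            return 0
        }
        return float64(atomic.LoadInt64(&c.written)) / float64(c.total)
    }

    func main() {
        src := bytes.Repeat([]byte("x"), 10<<20) // 10 MB of fake payload
        cw := &countingWriter{total: int64(len(src))}

        // Reporter goroutine: print the current percentage until the copy is done.
        done := make(chan struct{})
        go func() {
            ticker := time.NewTicker(200 * time.Millisecond)
            defer ticker.Stop()
            for {
                select {
                case <-done:
                    return
                case <-ticker.C:
                    fmt.Printf("\r%3.0f%%", cw.Progress()*100)
                }
            }
        }()

        // io.MultiWriter duplicates every write into the real destination
        // (io.Discard here, your storage writer in practice) and the counter.
        if _, err := io.Copy(io.MultiWriter(io.Discard, cw), bytes.NewReader(src)); err != nil {
            panic(err)
        }
        close(done)
        fmt.Printf("\r100%% done\n")
    }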

Take a look at this example, which uses this proof-of-concept implementation (github.com/farrellit/writeprogress):

    package main

    import (
        "fmt"
        "io"
        "os"

        "github.com/farrellit/writeprogress"
    )

    func main() {
        // Copy 1 MB of random data to /dev/null, tracking progress on the way.
        in, err := os.Open("/dev/urandom")
        if err != nil {
            panic(err)
        }
        out, err := os.OpenFile("/dev/null", os.O_WRONLY, 0)
        if err != nil {
            panic(err)
        }
        defer in.Close()
        defer out.Close()

        length := int64(1e6)
        wp := writeprogress.NewProgressWriter(uint64(length))

        // Watch reports progress through the callback; the returned channel is
        // used below to wait for the watcher to finish.
        d, _ := wp.Watch(func(p float64) { fmt.Printf("\r%2.0f%%", p*100) })

        // io.MultiWriter duplicates every write into both the destination and
        // the progress writer.
        if b, err := io.Copy(
            io.MultiWriter(out, wp),
            &io.LimitedReader{R: in, N: length},
        ); err != nil {
            panic(err)
        } else {
            <-d
            fmt.Printf("\n%d/%d %2.0f%%\n", b, length, wp.GetProgress()*100)
        }
    }

You can do the same thing with your o.NewWriter(ctx) and a file length determined by stat (or, in your handler, handler.Size). You could also use it in conjunction with something like https://github.com/schollz/progressbar if you want something prettier.
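
To connect this to the handler from the question, the io.Copy call could be wrapped roughly as below. This is only a fragment, not a drop-in patch: it assumes a countingWriter type like the sketch earlier in this answer, and handler.Size already supplies the total number of bytes:

    // Inside uploadToBucket, replacing the plain io.Copy(wc, file):
    wc := o.NewWriter(ctx)
    cw := &countingWriter{total: handler.Size} // total comes from the multipart header

    done := make(chan struct{})
    go func() {
        ticker := time.NewTicker(2 * time.Second)
        defer ticker.Stop()
        for {
            select {
            case <-done:
                return
            case <-ticker.C:
                fmt.Printf("uploading %s: %3.0f%%\n", handler.Filename, cw.Progress()*100)
            }
        }
    }()

    if _, err := io.Copy(io.MultiWriter(wc, cw), file); err != nil {
        fmt.Fprintf(w, "io.Copy error: %v", err)
        return
    }
    close(done)

Note that this only reports progress on the server side (stdout or a log); pushing the percentage back to the browser additionally requires flushing partial responses to the client, which is what the approach in the next answer does.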

Answer 2

Score: 0

Here is a working example that streams the response back after each part upload. Change the bucket, the file source, the target object name, and the content type to match your setup.

Curl localhost:8080 and you will receive a chunked response (a small Go client sketch that does the same is shown after the example). Then inject that logic into your code, with a JSON response or whatever format you prefer.

My previous issue came from using a chunkSize below 5 MiB in my tests.

    package main

    import (
        "bytes"
        "encoding/xml"
        "fmt"
        "io/ioutil"
        "log"
        "net/http"

        "golang.org/x/net/context"
        "golang.org/x/oauth2/google"
    )

    const (
        bucket      = "gib-multiregion-us"
        fileName    = "./20220909_ListePieces.csv"
        objectName  = "test-multipart.csv"
        contentType = "text/csv"

        // Min 5MiB with multiple of 256 KiB
        chunckSize = 5242880 + (256 * 1024 * 0)

        // API key values
        storageapi          = "storage.googleapis.com"
        uploadsExtension    = "uploads"
        partNumberExtension = "partNumber"
        uploadIdExtension   = "uploadId"
        etagKey             = "ETag"
    )

    var httpClient *http.Client

    func main() {
        ctx := context.Background()
        c, err := google.DefaultClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        httpClient = c
        http.HandleFunc("/", multipartUpload)
        http.ListenAndServe(":8080", nil)
    }

    func multipartUpload(w http.ResponseWriter, r *http.Request) {
        url := fmt.Sprintf("https://%s.%s/%s", bucket, storageapi, objectName)

        // init
        resp, err := httpClient.Post(fmt.Sprintf("%s?%s", url, uploadsExtension), contentType, nil)
        if err != nil {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Println("error during init: ", err)
            fmt.Fprintf(w, "error during init: %s\n", err)
            return
        }
        respbody, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Println("error during reading init resp: ", err)
            fmt.Fprintf(w, "error during reading init resp: %s\n", err)
            return
        }
        defer resp.Body.Close()

        var initMultiPart InitiateMultipartUploadResult
        err = xml.Unmarshal(respbody, &initMultiPart)
        if err != nil {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Println("error during unmarshalling init resp: ", err)
            fmt.Fprintf(w, "error during unmarshalling init resp: %s\n", err)
            return
        }
        uploadId := initMultiPart.UploadId

        // send content
        content, err := ioutil.ReadFile(fileName)
        if err != nil {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Println("error during reading input file: ", err)
            fmt.Fprintf(w, "error during reading input file: %s\n", err)
            return
        }

        // Prepare the multipart response
        flusher, ok := w.(http.Flusher)
        if !ok {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Println("Impossible to create the stream flusher: ", err)
            fmt.Fprintf(w, "Impossible to create the stream flusher: %s\n", err)
            return
        }
        w.Header().Set("Transfer-Encoding", "chunked")
        flusher.Flush()

        complMulti := CompleteMultipartUpload{}
        counter := 0
        for {
            if len(content) <= counter*chunckSize {
                break
            }
            var toSend []byte
            if len(content) >= (counter+1)*chunckSize {
                toSend = content[counter*chunckSize : (counter+1)*chunckSize]
            } else {
                toSend = content[counter*chunckSize:]
            }
            req, err := http.NewRequest(http.MethodPut, fmt.Sprintf("%s?%s=%d&%s=%s", url, partNumberExtension, counter+1, uploadIdExtension, uploadId), bytes.NewReader(toSend))
            if err != nil {
                w.WriteHeader(http.StatusInternalServerError)
                fmt.Println("error chunk req creation: ", err)
                fmt.Fprintf(w, "error chunk req creation: %s\n", err)
                return
            }
            resp, err = httpClient.Do(req)
            if err != nil {
                w.WriteHeader(http.StatusInternalServerError)
                fmt.Println("error chunk submission: ", err)
                fmt.Fprintf(w, "error chunk submission: %s\n", err)
                return
            }
            etag := resp.Header.Get(etagKey)
            p := Part{
                PartNumber: counter + 1,
                ETag:       etag,
            }
            fmt.Printf("upload is done at %d%%\n", 100*counter*chunckSize/len(content))
            fmt.Fprintf(w, "upload is done at %d%%\n", 100*counter*chunckSize/len(content))
            flusher.Flush()
            complMulti.Parts = append(complMulti.Parts, p)
            counter++
        }

        toSend, err := xml.Marshal(complMulti)
        if err != nil {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Println("error marshal complete multi: ", err)
            fmt.Fprintf(w, "error marshal complete multi: %s\n", err)
            return
        }

        // Complete transfer
        resp, err = httpClient.Post(fmt.Sprintf("%s?%s=%s", url, uploadIdExtension, uploadId), "application/xml", bytes.NewReader(toSend))
        if err != nil {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Println("error during send complete: ", err)
            fmt.Fprintf(w, "error during send complete: %s\n", err)
            return
        }
        if resp.StatusCode == http.StatusOK {
            fmt.Fprintf(w, "done")
            fmt.Println("done")
        } else {
            b, _ := ioutil.ReadAll(resp.Body)
            fmt.Printf("error %s with body %s\n", resp.Status, string(b))
            fmt.Fprintf(w, "error %s with body %s\n", resp.Status, string(b))
        }
    }

    type InitiateMultipartUploadResult struct {
        InitiateMultipartUploadResult xml.Name `xml:"InitiateMultipartUploadResult"`
        Bucket                        string   `xml:"Bucket"`
        Key                           string   `xml:"Key"`
        UploadId                      string   `xml:"UploadId"`
    }

    type CompleteMultipartUpload struct {
        completeMultipartUpload xml.Name `xml:"CompleteMultipartUpload"`
        Parts                   []Part   `xml:"Part"`
    }

    type Part struct {
        part       xml.Name `xml:"Part"`
        PartNumber int      `xml:"PartNumber"`
        ETag       string   `xml:"ETag"`
    }
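
For a quick check without curl, a small Go client along these lines (assuming the example server above is listening on localhost:8080) can read the chunked response and print each progress line as it arrives:

    package main

    import (
        "bufio"
        "fmt"
        "net/http"
    )

    func main() {
        // The handler above writes "upload is done at N%" lines and flushes
        // after every part, so the lines can be read here as they arrive.
        resp, err := http.Get("http://localhost:8080/")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        scanner := bufio.NewScanner(resp.Body)
        for scanner.Scan() {
            fmt.Println(scanner.Text())
        }
        if err := scanner.Err(); err != nil {
            panic(err)
        }
    }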
