aws-sdk-s3 1.167.0 → 1.208.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +257 -0
  3. data/VERSION +1 -1
  4. data/lib/aws-sdk-s3/bucket.rb +145 -39
  5. data/lib/aws-sdk-s3/bucket_acl.rb +7 -6
  6. data/lib/aws-sdk-s3/bucket_cors.rb +6 -5
  7. data/lib/aws-sdk-s3/bucket_lifecycle.rb +7 -2
  8. data/lib/aws-sdk-s3/bucket_lifecycle_configuration.rb +22 -2
  9. data/lib/aws-sdk-s3/bucket_logging.rb +2 -2
  10. data/lib/aws-sdk-s3/bucket_policy.rb +6 -5
  11. data/lib/aws-sdk-s3/bucket_request_payment.rb +3 -3
  12. data/lib/aws-sdk-s3/bucket_tagging.rb +3 -3
  13. data/lib/aws-sdk-s3/bucket_versioning.rb +42 -9
  14. data/lib/aws-sdk-s3/bucket_website.rb +3 -3
  15. data/lib/aws-sdk-s3/client.rb +4313 -1871
  16. data/lib/aws-sdk-s3/client_api.rb +619 -160
  17. data/lib/aws-sdk-s3/customizations/object.rb +76 -86
  18. data/lib/aws-sdk-s3/customizations.rb +4 -1
  19. data/lib/aws-sdk-s3/default_executor.rb +103 -0
  20. data/lib/aws-sdk-s3/encryption/client.rb +2 -2
  21. data/lib/aws-sdk-s3/encryption/default_cipher_provider.rb +2 -0
  22. data/lib/aws-sdk-s3/encryption/encrypt_handler.rb +2 -0
  23. data/lib/aws-sdk-s3/encryption/kms_cipher_provider.rb +2 -0
  24. data/lib/aws-sdk-s3/encryptionV2/client.rb +98 -23
  25. data/lib/aws-sdk-s3/encryptionV2/decrypt_handler.rb +7 -162
  26. data/lib/aws-sdk-s3/encryptionV2/decryption.rb +205 -0
  27. data/lib/aws-sdk-s3/encryptionV2/default_cipher_provider.rb +17 -0
  28. data/lib/aws-sdk-s3/encryptionV2/encrypt_handler.rb +2 -0
  29. data/lib/aws-sdk-s3/encryptionV2/io_encrypter.rb +2 -0
  30. data/lib/aws-sdk-s3/encryptionV2/kms_cipher_provider.rb +8 -0
  31. data/lib/aws-sdk-s3/encryptionV2/utils.rb +5 -0
  32. data/lib/aws-sdk-s3/encryptionV3/client.rb +885 -0
  33. data/lib/aws-sdk-s3/encryptionV3/decrypt_handler.rb +98 -0
  34. data/lib/aws-sdk-s3/encryptionV3/decryption.rb +244 -0
  35. data/lib/aws-sdk-s3/encryptionV3/default_cipher_provider.rb +159 -0
  36. data/lib/aws-sdk-s3/encryptionV3/default_key_provider.rb +35 -0
  37. data/lib/aws-sdk-s3/encryptionV3/encrypt_handler.rb +98 -0
  38. data/lib/aws-sdk-s3/encryptionV3/errors.rb +47 -0
  39. data/lib/aws-sdk-s3/encryptionV3/io_auth_decrypter.rb +60 -0
  40. data/lib/aws-sdk-s3/encryptionV3/io_decrypter.rb +35 -0
  41. data/lib/aws-sdk-s3/encryptionV3/io_encrypter.rb +84 -0
  42. data/lib/aws-sdk-s3/encryptionV3/key_provider.rb +28 -0
  43. data/lib/aws-sdk-s3/encryptionV3/kms_cipher_provider.rb +159 -0
  44. data/lib/aws-sdk-s3/encryptionV3/materials.rb +58 -0
  45. data/lib/aws-sdk-s3/encryptionV3/utils.rb +321 -0
  46. data/lib/aws-sdk-s3/encryption_v2.rb +1 -0
  47. data/lib/aws-sdk-s3/encryption_v3.rb +24 -0
  48. data/lib/aws-sdk-s3/endpoint_parameters.rb +30 -35
  49. data/lib/aws-sdk-s3/endpoint_provider.rb +572 -278
  50. data/lib/aws-sdk-s3/endpoints.rb +555 -1403
  51. data/lib/aws-sdk-s3/errors.rb +55 -0
  52. data/lib/aws-sdk-s3/file_downloader.rb +189 -143
  53. data/lib/aws-sdk-s3/file_uploader.rb +9 -13
  54. data/lib/aws-sdk-s3/legacy_signer.rb +2 -1
  55. data/lib/aws-sdk-s3/multipart_download_error.rb +8 -0
  56. data/lib/aws-sdk-s3/multipart_file_uploader.rb +105 -102
  57. data/lib/aws-sdk-s3/multipart_stream_uploader.rb +96 -107
  58. data/lib/aws-sdk-s3/multipart_upload.rb +83 -6
  59. data/lib/aws-sdk-s3/multipart_upload_error.rb +3 -4
  60. data/lib/aws-sdk-s3/multipart_upload_part.rb +50 -34
  61. data/lib/aws-sdk-s3/object.rb +357 -131
  62. data/lib/aws-sdk-s3/object_acl.rb +12 -6
  63. data/lib/aws-sdk-s3/object_multipart_copier.rb +2 -1
  64. data/lib/aws-sdk-s3/object_summary.rb +269 -96
  65. data/lib/aws-sdk-s3/object_version.rb +58 -13
  66. data/lib/aws-sdk-s3/plugins/checksum_algorithm.rb +31 -0
  67. data/lib/aws-sdk-s3/plugins/endpoints.rb +2 -205
  68. data/lib/aws-sdk-s3/plugins/express_session_auth.rb +11 -20
  69. data/lib/aws-sdk-s3/plugins/http_200_errors.rb +3 -3
  70. data/lib/aws-sdk-s3/plugins/md5s.rb +10 -71
  71. data/lib/aws-sdk-s3/plugins/streaming_retry.rb +5 -7
  72. data/lib/aws-sdk-s3/plugins/url_encoded_keys.rb +2 -1
  73. data/lib/aws-sdk-s3/presigner.rb +5 -5
  74. data/lib/aws-sdk-s3/resource.rb +41 -10
  75. data/lib/aws-sdk-s3/transfer_manager.rb +303 -0
  76. data/lib/aws-sdk-s3/types.rb +3758 -1264
  77. data/lib/aws-sdk-s3.rb +1 -1
  78. data/sig/bucket.rbs +27 -9
  79. data/sig/bucket_acl.rbs +1 -1
  80. data/sig/bucket_cors.rbs +1 -1
  81. data/sig/bucket_lifecycle.rbs +1 -1
  82. data/sig/bucket_lifecycle_configuration.rbs +1 -1
  83. data/sig/bucket_logging.rbs +1 -1
  84. data/sig/bucket_policy.rbs +1 -1
  85. data/sig/bucket_request_payment.rbs +1 -1
  86. data/sig/bucket_tagging.rbs +1 -1
  87. data/sig/bucket_versioning.rbs +3 -3
  88. data/sig/bucket_website.rbs +1 -1
  89. data/sig/client.rbs +279 -70
  90. data/sig/errors.rbs +10 -0
  91. data/sig/multipart_upload.rbs +12 -3
  92. data/sig/multipart_upload_part.rbs +5 -1
  93. data/sig/object.rbs +37 -16
  94. data/sig/object_acl.rbs +1 -1
  95. data/sig/object_summary.rbs +28 -16
  96. data/sig/object_version.rbs +9 -3
  97. data/sig/resource.rbs +15 -4
  98. data/sig/types.rbs +373 -66
  99. metadata +26 -10
  100. data/lib/aws-sdk-s3/plugins/skip_whole_multipart_get_checksums.rb +0 -31
data/lib/aws-sdk-s3/multipart_file_uploader.rb
@@ -7,34 +7,21 @@ module Aws
   module S3
     # @api private
     class MultipartFileUploader
-
       MIN_PART_SIZE = 5 * 1024 * 1024 # 5MB
-
-      FILE_TOO_SMALL = "unable to multipart upload files smaller than 5MB"
-
       MAX_PARTS = 10_000
-
-      THREAD_COUNT = 10
-
-      # @api private
-      CREATE_OPTIONS = Set.new(
-        Client.api.operation(:create_multipart_upload).input.shape.member_names
-      )
-
-      COMPLETE_OPTIONS = Set.new(
-        Client.api.operation(:complete_multipart_upload).input.shape.member_names
-      )
-
-      # @api private
-      UPLOAD_PART_OPTIONS = Set.new(
-        Client.api.operation(:upload_part).input.shape.member_names
+      CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
+      COMPLETE_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
+      UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)
+      CHECKSUM_KEYS = Set.new(
+        Client.api.operation(:upload_part).input.shape.members.map do |n, s|
+          n if s.location == 'header' && s.location_name.start_with?('x-amz-checksum-')
+        end.compact
       )
 
       # @option options [Client] :client
-      # @option options [Integer] :thread_count (THREAD_COUNT)
      def initialize(options = {})
        @client = options[:client] || Client.new
-        @thread_count = options[:thread_count] || THREAD_COUNT
+        @executor = options[:executor]
      end
 
      # @return [Client]
@@ -48,13 +35,12 @@ module Aws
       # It will be invoked with [bytes_read], [total_sizes]
       # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
       def upload(source, options = {})
-        if File.size(source) < MIN_PART_SIZE
-          raise ArgumentError, FILE_TOO_SMALL
-        else
-          upload_id = initiate_upload(options)
-          parts = upload_parts(upload_id, source, options)
-          complete_upload(upload_id, parts, options)
-        end
+        file_size = File.size(source)
+        raise ArgumentError, 'unable to multipart upload files smaller than 5MB' if file_size < MIN_PART_SIZE
+
+        upload_id = initiate_upload(options)
+        parts = upload_parts(upload_id, source, file_size, options)
+        complete_upload(upload_id, parts, file_size, options)
       end
 
       private
@@ -63,19 +49,21 @@ module Aws
         @client.create_multipart_upload(create_opts(options)).upload_id
       end
 
-      def complete_upload(upload_id, parts, options)
+      def complete_upload(upload_id, parts, file_size, options)
         @client.complete_multipart_upload(
-          **complete_opts(options).merge(
-            upload_id: upload_id,
-            multipart_upload: { parts: parts }
-          )
+          **complete_opts(options),
+          upload_id: upload_id,
+          multipart_upload: { parts: parts },
+          mpu_object_size: file_size
         )
+      rescue StandardError => e
+        abort_upload(upload_id, options, [e])
       end
 
-      def upload_parts(upload_id, source, options)
-        pending = PartList.new(compute_parts(upload_id, source, options))
+      def upload_parts(upload_id, source, file_size, options)
         completed = PartList.new
-        errors = upload_in_threads(pending, completed, options)
+        pending = PartList.new(compute_parts(upload_id, source, file_size, options))
+        errors = upload_with_executor(pending, completed, options)
         if errors.empty?
           completed.to_a.sort_by { |part| part[:part_number] }
         else
@@ -84,35 +72,30 @@ module Aws
       end
 
       def abort_upload(upload_id, options, errors)
-        @client.abort_multipart_upload(
-          bucket: options[:bucket],
-          key: options[:key],
-          upload_id: upload_id
-        )
+        @client.abort_multipart_upload(bucket: options[:bucket], key: options[:key], upload_id: upload_id)
         msg = "multipart upload failed: #{errors.map(&:message).join('; ')}"
         raise MultipartUploadError.new(msg, errors)
-      rescue MultipartUploadError => error
-        raise error
-      rescue => error
-        msg = "failed to abort multipart upload: #{error.message}. "\
-              "Multipart upload failed: #{errors.map(&:message).join('; ')}"
-        raise MultipartUploadError.new(msg, errors + [error])
+      rescue MultipartUploadError => e
+        raise e
+      rescue StandardError => e
+        msg = "failed to abort multipart upload: #{e.message}. " \
+              "Multipart upload failed: #{errors.map(&:message).join('; ')}"
+        raise MultipartUploadError.new(msg, errors + [e])
       end
 
-      def compute_parts(upload_id, source, options)
-        size = File.size(source)
-        default_part_size = compute_default_part_size(size)
+      def compute_parts(upload_id, source, file_size, options)
+        default_part_size = compute_default_part_size(file_size)
         offset = 0
         part_number = 1
         parts = []
-        while offset < size
+        while offset < file_size
           parts << upload_part_opts(options).merge(
             upload_id: upload_id,
             part_number: part_number,
             body: FilePart.new(
               source: source,
               offset: offset,
-              size: part_size(size, default_part_size, offset)
+              size: part_size(file_size, default_part_size, offset)
             )
           )
           part_number += 1
@@ -121,68 +104,79 @@ module Aws
         parts
       end
 
+      def checksum_key?(key)
+        CHECKSUM_KEYS.include?(key)
+      end
+
+      def has_checksum_key?(keys)
+        keys.any? { |key| checksum_key?(key) }
+      end
+
+      def checksum_not_required?(options)
+        @client.config.request_checksum_calculation == 'when_required' && !options[:checksum_algorithm]
+      end
+
       def create_opts(options)
-        CREATE_OPTIONS.inject({}) do |hash, key|
-          hash[key] = options[key] if options.key?(key)
-          hash
+        opts = {}
+        unless checksum_not_required?(options)
+          opts[:checksum_algorithm] = Aws::Plugins::ChecksumAlgorithm::DEFAULT_CHECKSUM
         end
+        opts[:checksum_type] = 'FULL_OBJECT' if has_checksum_key?(options.keys)
+        CREATE_OPTIONS.each_with_object(opts) { |k, h| h[k] = options[k] if options.key?(k) }
       end
 
       def complete_opts(options)
-        COMPLETE_OPTIONS.inject({}) do |hash, key|
-          hash[key] = options[key] if options.key?(key)
-          hash
-        end
+        opts = {}
+        opts[:checksum_type] = 'FULL_OBJECT' if has_checksum_key?(options.keys)
+        COMPLETE_OPTIONS.each_with_object(opts) { |k, h| h[k] = options[k] if options.key?(k) }
      end
 
      def upload_part_opts(options)
-        UPLOAD_PART_OPTIONS.inject({}) do |hash, key|
-          hash[key] = options[key] if options.key?(key)
-          hash
+        UPLOAD_PART_OPTIONS.each_with_object({}) do |key, hash|
+          # don't pass through checksum calculations
+          hash[key] = options[key] if options.key?(key) && !checksum_key?(key)
        end
      end
 
-      def upload_in_threads(pending, completed, options)
-        threads = []
-        if (callback = options[:progress_callback])
-          progress = MultipartProgress.new(pending, callback)
-        end
-        options.fetch(:thread_count, @thread_count).times do
-          thread = Thread.new do
-            begin
-              while part = pending.shift
-                if progress
-                  part[:on_chunk_sent] =
-                    proc do |_chunk, bytes, _total|
-                      progress.call(part[:part_number], bytes)
-                    end
-                end
-                resp = @client.upload_part(part)
-                part[:body].close
-                completed_part = {etag: resp.etag, part_number: part[:part_number]}
-
-                # get the requested checksum from the response
-                if part[:checksum_algorithm]
-                  k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
-                  completed_part[k] = resp[k]
-                end
-
-                completed.push(completed_part)
-              end
-              nil
-            rescue => error
-              # keep other threads from uploading other parts
-              pending.clear!
-              error
-            end
+      def upload_with_executor(pending, completed, options)
+        upload_attempts = 0
+        completion_queue = Queue.new
+        abort_upload = false
+        errors = []
+        progress = MultipartProgress.new(pending, options[:progress_callback])
+
+        while (part = pending.shift)
+          break if abort_upload
+
+          upload_attempts += 1
+          @executor.post(part) do |p|
+            update_progress(progress, p)
+            resp = @client.upload_part(p)
+            p[:body].close
+            completed_part = { etag: resp.etag, part_number: p[:part_number] }
+            apply_part_checksum(resp, completed_part)
+            completed.push(completed_part)
+          rescue StandardError => e
+            abort_upload = true
+            errors << e
+          ensure
+            completion_queue << :done
          end
-          threads << thread
        end
-        threads.map(&:value).compact
+
+        upload_attempts.times { completion_queue.pop }
+        errors
+      end
+
+      def apply_part_checksum(resp, part)
+        return unless (checksum = resp.context.params[:checksum_algorithm])
+
+        k = :"checksum_#{checksum.downcase}"
+        part[k] = resp.send(k)
      end
 
-      def compute_default_part_size(source_size)
-        [(source_size.to_f / MAX_PARTS).ceil, MIN_PART_SIZE].max.to_i
+      def compute_default_part_size(file_size)
+        [(file_size.to_f / MAX_PARTS).ceil, MIN_PART_SIZE].max.to_i
      end
 
      def part_size(total_size, part_size, offset)
@@ -193,9 +187,17 @@ module Aws
         end
       end
 
+      def update_progress(progress, part)
+        return unless progress.progress_callback
+
+        part[:on_chunk_sent] =
+          proc do |_chunk, bytes, _total|
+            progress.call(part[:part_number], bytes)
+          end
+      end
+
       # @api private
       class PartList
-
         def initialize(parts = [])
           @parts = parts
           @mutex = Mutex.new
@@ -224,7 +226,6 @@ module Aws
         def to_a
           @mutex.synchronize { @parts.dup }
         end
-
       end
 
       # @api private
@@ -235,6 +236,8 @@ module Aws
           @progress_callback = progress_callback
         end
 
+        attr_reader :progress_callback
+
         def call(part_number, bytes_read)
           # part numbers start at 1
           @bytes_sent[part_number - 1] = bytes_read
@@ -243,4 +246,4 @@ module Aws
       end
     end
   end
-end
+end
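
Note on the hunks above: MultipartFileUploader no longer manages its own threads (THREAD_COUNT and the :thread_count option are gone); each part is posted to an injected executor (options[:executor]), and the new data/lib/aws-sdk-s3/default_executor.rb in the file list presumably supplies the default. The diff only pins down the executor's contract: it must respond to post(*args) with a block and run that block later with the posted arguments; completion is signalled separately through the completion queue. A minimal illustrative object satisfying that contract (a sketch to read the diff by, not the gem's DefaultExecutor) could look like:

    # Illustrative sketch only -- NOT the gem's DefaultExecutor.
    # It implements the #post(*args, &block) contract that
    # upload_with_executor relies on: run the block later with the
    # arguments that were posted.
    class TinyExecutor
      def initialize(max_threads: 10)
        @queue = Queue.new
        @threads = Array.new(max_threads) do
          Thread.new do
            while (work = @queue.pop)
              args, block = work
              block.call(*args)
            end
          end
        end
      end

      # Mirrors @executor.post(part) { |p| ... } in the diff above.
      def post(*args, &block)
        @queue << [args, block]
      end

      def shutdown
        @threads.size.times { @queue << nil }
        @threads.each(&:join)
      end
    end

Because the uploader's posted block already rescues StandardError and always pushes :done from an ensure clause, the executor never has to propagate failures; it only has to run what it is given.
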
data/lib/aws-sdk-s3/multipart_stream_uploader.rb
@@ -9,33 +9,18 @@ module Aws
   module S3
     # @api private
     class MultipartStreamUploader
-      # api private
-      PART_SIZE = 5 * 1024 * 1024 # 5MB
 
-      # api private
-      THREAD_COUNT = 10
-
-      # api private
-      TEMPFILE_PREIX = 'aws-sdk-s3-upload_stream'.freeze
-
-      # @api private
-      CREATE_OPTIONS =
-        Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
-
-      # @api private
-      UPLOAD_PART_OPTIONS =
-        Set.new(Client.api.operation(:upload_part).input.shape.member_names)
-
-      # @api private
-      COMPLETE_UPLOAD_OPTIONS =
-        Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
+      DEFAULT_PART_SIZE = 5 * 1024 * 1024 # 5MB
+      CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
+      UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)
+      COMPLETE_UPLOAD_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
 
       # @option options [Client] :client
       def initialize(options = {})
         @client = options[:client] || Client.new
+        @executor = options[:executor]
         @tempfile = options[:tempfile]
-        @part_size = options[:part_size] || PART_SIZE
-        @thread_count = options[:thread_count] || THREAD_COUNT
+        @part_size = options[:part_size] || DEFAULT_PART_SIZE
       end
 
       # @return [Client]
@@ -43,7 +28,6 @@ module Aws
 
       # @option options [required,String] :bucket
       # @option options [required,String] :key
-      # @option options [Integer] :thread_count (THREAD_COUNT)
       # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
       def upload(options = {}, &block)
         Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
@@ -61,86 +45,80 @@ module Aws
 
       def complete_upload(upload_id, parts, options)
         @client.complete_multipart_upload(
-          **complete_opts(options).merge(
-            upload_id: upload_id,
-            multipart_upload: { parts: parts }
-          )
+          **complete_opts(options).merge(upload_id: upload_id, multipart_upload: { parts: parts })
         )
+      rescue StandardError => e
+        abort_upload(upload_id, options, [e])
       end
 
       def upload_parts(upload_id, options, &block)
-        completed = Queue.new
-        thread_errors = []
-        errors = begin
+        completed_parts = Queue.new
+        errors = []
+
+        begin
           IO.pipe do |read_pipe, write_pipe|
-            threads = upload_in_threads(
-              read_pipe, completed,
-              upload_part_opts(options).merge(upload_id: upload_id),
-              thread_errors)
-            begin
-              block.call(write_pipe)
-            ensure
-              # Ensure the pipe is closed to avoid https://github.com/jruby/jruby/issues/6111
-              write_pipe.close
+            upload_thread = Thread.new do
+              upload_with_executor(
+                read_pipe,
+                completed_parts,
+                errors,
+                upload_part_opts(options).merge(upload_id: upload_id)
+              )
             end
-            threads.map(&:value).compact
+
+            block.call(write_pipe)
+          ensure
+            # Ensure the pipe is closed to avoid https://github.com/jruby/jruby/issues/6111
+            write_pipe.close
+            upload_thread.join
          end
-        rescue => e
-          thread_errors + [e]
+        rescue StandardError => e
+          errors << e
        end
+        return ordered_parts(completed_parts) if errors.empty?
 
-        if errors.empty?
-          Array.new(completed.size) { completed.pop }.sort_by { |part| part[:part_number] }
-        else
-          abort_upload(upload_id, options, errors)
-        end
+        abort_upload(upload_id, options, errors)
      end
 
      def abort_upload(upload_id, options, errors)
-        @client.abort_multipart_upload(
-          bucket: options[:bucket],
-          key: options[:key],
-          upload_id: upload_id
-        )
+        @client.abort_multipart_upload(bucket: options[:bucket], key: options[:key], upload_id: upload_id)
        msg = "multipart upload failed: #{errors.map(&:message).join('; ')}"
        raise MultipartUploadError.new(msg, errors)
-      rescue MultipartUploadError => error
-        raise error
-      rescue => error
-        msg = "failed to abort multipart upload: #{error.message}. "\
+      rescue MultipartUploadError => e
+        raise e
+      rescue StandardError => e
+        msg = "failed to abort multipart upload: #{e.message}. "\
              "Multipart upload failed: #{errors.map(&:message).join('; ')}"
-        raise MultipartUploadError.new(msg, errors + [error])
+        raise MultipartUploadError.new(msg, errors + [e])
      end
 
      def create_opts(options)
-        CREATE_OPTIONS.inject({}) do |hash, key|
+        CREATE_OPTIONS.each_with_object({}) do |key, hash|
          hash[key] = options[key] if options.key?(key)
-          hash
        end
      end
 
      def upload_part_opts(options)
-        UPLOAD_PART_OPTIONS.inject({}) do |hash, key|
+        UPLOAD_PART_OPTIONS.each_with_object({}) do |key, hash|
          hash[key] = options[key] if options.key?(key)
-          hash
        end
      end
 
      def complete_opts(options)
-        COMPLETE_UPLOAD_OPTIONS.inject({}) do |hash, key|
+        COMPLETE_UPLOAD_OPTIONS.each_with_object({}) do |key, hash|
          hash[key] = options[key] if options.key?(key)
-          hash
        end
      end
 
      def read_to_part_body(read_pipe)
        return if read_pipe.closed?
-        temp_io = @tempfile ? Tempfile.new(TEMPFILE_PREIX) : StringIO.new(String.new)
+
+        temp_io = @tempfile ? Tempfile.new('aws-sdk-s3-upload_stream') : StringIO.new(String.new)
        temp_io.binmode
        bytes_copied = IO.copy_stream(read_pipe, temp_io, @part_size)
        temp_io.rewind
-        if bytes_copied == 0
-          if Tempfile === temp_io
+        if bytes_copied.zero?
+          if temp_io.is_a?(Tempfile)
            temp_io.close
            temp_io.unlink
          end
@@ -150,51 +128,62 @@ module Aws
         end
       end
 
-      def upload_in_threads(read_pipe, completed, options, thread_errors)
-        mutex = Mutex.new
+      def upload_with_executor(read_pipe, completed, errors, options)
+        completion_queue = Queue.new
+        queued_parts = 0
         part_number = 0
-        options.fetch(:thread_count, @thread_count).times.map do
-          thread = Thread.new do
-            begin
-              loop do
-                body, thread_part_number = mutex.synchronize do
-                  [read_to_part_body(read_pipe), part_number += 1]
-                end
-                break unless (body || thread_part_number == 1)
-                begin
-                  part = options.merge(
-                    body: body,
-                    part_number: thread_part_number,
-                  )
-                  resp = @client.upload_part(part)
-                  completed_part = {etag: resp.etag, part_number: part[:part_number]}
-
-                  # get the requested checksum from the response
-                  if part[:checksum_algorithm]
-                    k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
-                    completed_part[k] = resp[k]
-                  end
-                  completed.push(completed_part)
-                ensure
-                  if Tempfile === body
-                    body.close
-                    body.unlink
-                  elsif StringIO === body
-                    body.string.clear
-                  end
-                end
-              end
-              nil
-            rescue => error
-              # keep other threads from uploading other parts
-              mutex.synchronize do
-                thread_errors.push(error)
-                read_pipe.close_read unless read_pipe.closed?
-              end
-              error
+        mutex = Mutex.new
+        loop do
+          part_body, current_part_num = mutex.synchronize do
+            [read_to_part_body(read_pipe), part_number += 1]
+          end
+          break unless part_body || current_part_num == 1
+
+          queued_parts += 1
+          @executor.post(part_body, current_part_num, options) do |body, num, opts|
+            part = opts.merge(body: body, part_number: num)
+            resp = @client.upload_part(part)
+            completed_part = create_completed_part(resp, part)
+            completed.push(completed_part)
+          rescue StandardError => e
+            mutex.synchronize do
+              errors.push(e)
+              read_pipe.close_read unless read_pipe.closed?
            end
+          ensure
+            clear_body(body)
+            completion_queue << :done
          end
-          thread
+        end
+        queued_parts.times { completion_queue.pop }
+      end
+
+      def create_completed_part(resp, part)
+        completed_part = { etag: resp.etag, part_number: part[:part_number] }
+        return completed_part unless part[:checksum_algorithm]
+
+        # get the requested checksum from the response
+        k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
+        completed_part[k] = resp[k]
+        completed_part
+      end
+
+      def ordered_parts(parts)
+        sorted = []
+        until parts.empty?
+          part = parts.pop
+          index = sorted.bsearch_index { |p| p[:part_number] >= part[:part_number] } || sorted.size
+          sorted.insert(index, part)
+        end
+        sorted
+      end
+
+      def clear_body(body)
+        if body.is_a?(Tempfile)
+          body.close
+          body.unlink
+        elsif body.is_a?(StringIO)
+          body.string.clear
        end
      end
    end
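
Both rewritten uploaders coordinate shutdown the same way: every posted part pushes :done onto a Queue from an ensure clause, the producer pops that queue once per posted part, and errors are collected in an array rather than raised inside the worker, so the caller blocks until every part has finished whether it succeeded or failed. A standalone sketch of that pattern (plain Ruby for illustration, not gem code):

    # Completion-queue pattern used by upload_with_executor in both files above.
    completion_queue = Queue.new
    errors = []
    posted = 0

    (1..5).each do |part_number|
      posted += 1
      Thread.new do
        # simulate one failing part (upload_part in the real code)
        raise "part #{part_number} failed" if part_number == 3
      rescue StandardError => e
        errors << e                 # collect instead of raising in the worker
      ensure
        completion_queue << :done   # always signal, success or failure
      end
    end

    posted.times { completion_queue.pop } # block until every part has signalled
    puts "#{posted - errors.size} parts succeeded, #{errors.size} failed"

In the real code a non-empty errors array then triggers abort_upload, which calls abort_multipart_upload and raises MultipartUploadError wrapping the collected errors.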