aws-sdk-s3 1.109.0 → 1.156.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +352 -0
  3. data/VERSION +1 -1
  4. data/lib/aws-sdk-s3/access_grants_credentials.rb +57 -0
  5. data/lib/aws-sdk-s3/access_grants_credentials_provider.rb +241 -0
  6. data/lib/aws-sdk-s3/bucket.rb +585 -110
  7. data/lib/aws-sdk-s3/bucket_acl.rb +28 -6
  8. data/lib/aws-sdk-s3/bucket_cors.rb +34 -10
  9. data/lib/aws-sdk-s3/bucket_lifecycle.rb +34 -10
  10. data/lib/aws-sdk-s3/bucket_lifecycle_configuration.rb +34 -10
  11. data/lib/aws-sdk-s3/bucket_logging.rb +35 -6
  12. data/lib/aws-sdk-s3/bucket_notification.rb +12 -6
  13. data/lib/aws-sdk-s3/bucket_policy.rb +78 -10
  14. data/lib/aws-sdk-s3/bucket_region_cache.rb +9 -5
  15. data/lib/aws-sdk-s3/bucket_request_payment.rb +28 -6
  16. data/lib/aws-sdk-s3/bucket_tagging.rb +34 -10
  17. data/lib/aws-sdk-s3/bucket_versioning.rb +72 -14
  18. data/lib/aws-sdk-s3/bucket_website.rb +34 -10
  19. data/lib/aws-sdk-s3/client.rb +7900 -3252
  20. data/lib/aws-sdk-s3/client_api.rb +706 -228
  21. data/lib/aws-sdk-s3/customizations/bucket.rb +23 -47
  22. data/lib/aws-sdk-s3/customizations/errors.rb +40 -0
  23. data/lib/aws-sdk-s3/customizations/object.rb +97 -21
  24. data/lib/aws-sdk-s3/customizations/types/permanent_redirect.rb +26 -0
  25. data/lib/aws-sdk-s3/customizations.rb +10 -0
  26. data/lib/aws-sdk-s3/encryption/client.rb +6 -2
  27. data/lib/aws-sdk-s3/encryption/kms_cipher_provider.rb +13 -9
  28. data/lib/aws-sdk-s3/encryptionV2/client.rb +6 -2
  29. data/lib/aws-sdk-s3/encryptionV2/decrypt_handler.rb +1 -0
  30. data/lib/aws-sdk-s3/encryptionV2/kms_cipher_provider.rb +10 -6
  31. data/lib/aws-sdk-s3/endpoint_parameters.rb +186 -0
  32. data/lib/aws-sdk-s3/endpoint_provider.rb +592 -0
  33. data/lib/aws-sdk-s3/endpoints.rb +2689 -0
  34. data/lib/aws-sdk-s3/express_credentials.rb +55 -0
  35. data/lib/aws-sdk-s3/express_credentials_provider.rb +59 -0
  36. data/lib/aws-sdk-s3/file_downloader.rb +170 -45
  37. data/lib/aws-sdk-s3/file_uploader.rb +11 -4
  38. data/lib/aws-sdk-s3/multipart_file_uploader.rb +30 -11
  39. data/lib/aws-sdk-s3/multipart_stream_uploader.rb +46 -17
  40. data/lib/aws-sdk-s3/multipart_upload.rb +194 -19
  41. data/lib/aws-sdk-s3/multipart_upload_part.rb +280 -30
  42. data/lib/aws-sdk-s3/object.rb +1753 -266
  43. data/lib/aws-sdk-s3/object_acl.rb +49 -13
  44. data/lib/aws-sdk-s3/object_copier.rb +7 -5
  45. data/lib/aws-sdk-s3/object_multipart_copier.rb +46 -22
  46. data/lib/aws-sdk-s3/object_summary.rb +1497 -221
  47. data/lib/aws-sdk-s3/object_version.rb +383 -58
  48. data/lib/aws-sdk-s3/plugins/accelerate.rb +3 -50
  49. data/lib/aws-sdk-s3/plugins/access_grants.rb +114 -0
  50. data/lib/aws-sdk-s3/plugins/arn.rb +0 -184
  51. data/lib/aws-sdk-s3/plugins/bucket_dns.rb +3 -39
  52. data/lib/aws-sdk-s3/plugins/bucket_name_restrictions.rb +1 -6
  53. data/lib/aws-sdk-s3/plugins/dualstack.rb +1 -49
  54. data/lib/aws-sdk-s3/plugins/endpoints.rb +274 -0
  55. data/lib/aws-sdk-s3/plugins/expect_100_continue.rb +2 -1
  56. data/lib/aws-sdk-s3/plugins/express_session_auth.rb +97 -0
  57. data/lib/aws-sdk-s3/plugins/http_200_errors.rb +53 -16
  58. data/lib/aws-sdk-s3/plugins/iad_regional_endpoint.rb +6 -29
  59. data/lib/aws-sdk-s3/plugins/location_constraint.rb +3 -1
  60. data/lib/aws-sdk-s3/plugins/md5s.rb +6 -3
  61. data/lib/aws-sdk-s3/plugins/s3_signer.rb +42 -126
  62. data/lib/aws-sdk-s3/plugins/skip_whole_multipart_get_checksums.rb +31 -0
  63. data/lib/aws-sdk-s3/plugins/streaming_retry.rb +23 -2
  64. data/lib/aws-sdk-s3/presigned_post.rb +99 -78
  65. data/lib/aws-sdk-s3/presigner.rb +28 -37
  66. data/lib/aws-sdk-s3/resource.rb +89 -13
  67. data/lib/aws-sdk-s3/types.rb +6544 -4909
  68. data/lib/aws-sdk-s3.rb +5 -1
  69. data/sig/bucket.rbs +212 -0
  70. data/sig/bucket_acl.rbs +78 -0
  71. data/sig/bucket_cors.rbs +69 -0
  72. data/sig/bucket_lifecycle.rbs +88 -0
  73. data/sig/bucket_lifecycle_configuration.rbs +111 -0
  74. data/sig/bucket_logging.rbs +76 -0
  75. data/sig/bucket_notification.rbs +114 -0
  76. data/sig/bucket_policy.rbs +59 -0
  77. data/sig/bucket_request_payment.rbs +54 -0
  78. data/sig/bucket_tagging.rbs +65 -0
  79. data/sig/bucket_versioning.rbs +77 -0
  80. data/sig/bucket_website.rbs +93 -0
  81. data/sig/client.rbs +2381 -0
  82. data/sig/customizations/bucket.rbs +19 -0
  83. data/sig/customizations/object.rbs +38 -0
  84. data/sig/customizations/object_summary.rbs +35 -0
  85. data/sig/errors.rbs +34 -0
  86. data/sig/multipart_upload.rbs +110 -0
  87. data/sig/multipart_upload_part.rbs +105 -0
  88. data/sig/object.rbs +442 -0
  89. data/sig/object_acl.rbs +86 -0
  90. data/sig/object_summary.rbs +334 -0
  91. data/sig/object_version.rbs +137 -0
  92. data/sig/resource.rbs +127 -0
  93. data/sig/types.rbs +2568 -0
  94. data/sig/waiters.rbs +95 -0
  95. metadata +50 -16
  96. data/lib/aws-sdk-s3/arn/access_point_arn.rb +0 -69
  97. data/lib/aws-sdk-s3/arn/multi_region_access_point_arn.rb +0 -68
  98. data/lib/aws-sdk-s3/arn/object_lambda_arn.rb +0 -69
  99. data/lib/aws-sdk-s3/arn/outpost_access_point_arn.rb +0 -74
  100. data/lib/aws-sdk-s3/plugins/object_lambda_endpoint.rb +0 -25
data/lib/aws-sdk-s3/express_credentials.rb
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+require 'set'
+
+module Aws
+  module S3
+    # @api private
+    class ExpressCredentials
+      include CredentialProvider
+      include RefreshingCredentials
+
+      SYNC_EXPIRATION_LENGTH = 60 # 1 minute
+      ASYNC_EXPIRATION_LENGTH = 120 # 2 minutes
+
+      def initialize(options = {})
+        @client = options[:client]
+        @create_session_params = {}
+        options.each_pair do |key, value|
+          if self.class.create_session_options.include?(key)
+            @create_session_params[key] = value
+          end
+        end
+        @async_refresh = true
+        super
+      end
+
+      # @return [S3::Client]
+      attr_reader :client
+
+      private
+
+      def refresh
+        c = @client.create_session(@create_session_params).credentials
+        @credentials = Credentials.new(
+          c.access_key_id,
+          c.secret_access_key,
+          c.session_token
+        )
+        @expiration = c.expiration
+      end
+
+      class << self
+
+        # @api private
+        def create_session_options
+          @cso ||= begin
+            input = S3::Client.api.operation(:create_session).input
+            Set.new(input.shape.member_names)
+          end
+        end
+
+      end
+    end
+  end
+end

data/lib/aws-sdk-s3/express_credentials_provider.rb
@@ -0,0 +1,59 @@
+# frozen_string_literal: true
+
+module Aws
+  module S3
+    # @api private
+    def self.express_credentials_cache
+      @express_credentials_cache ||= LRUCache.new(max_entries: 100)
+    end
+
+    # Returns Credentials class for S3 Express. Accepts CreateSession
+    # params as options. See {Client#create_session} for details.
+    class ExpressCredentialsProvider
+      # @param [Hash] options
+      # @option options [Client] :client The S3 client used to create the
+      #   session.
+      # @option options [String] :session_mode (see: {Client#create_session})
+      # @option options [Boolean] :caching (true) When true, credentials will
+      #   be cached.
+      # @option options [Callable] :before_refresh Proc called before
+      #   credentials are refreshed.
+      def initialize(options = {})
+        @client = options.delete(:client)
+        @caching = options.delete(:caching) != false
+        @options = options
+        return unless @caching
+
+        @cache = Aws::S3.express_credentials_cache
+      end
+
+      def express_credentials_for(bucket)
+        if @caching
+          cached_credentials_for(bucket)
+        else
+          new_credentials_for(bucket)
+        end
+      end
+
+      attr_accessor :client
+
+      private
+
+      def cached_credentials_for(bucket)
+        if @cache.key?(bucket)
+          @cache[bucket]
+        else
+          @cache[bucket] = new_credentials_for(bucket)
+        end
+      end
+
+      def new_credentials_for(bucket)
+        ExpressCredentials.new(
+          bucket: bucket,
+          client: @client,
+          **@options
+        )
+      end
+    end
+  end
+end
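
These two classes back the new S3 Express One Zone (directory bucket) session auth added in this release (see plugins/express_session_auth.rb in the file list). A minimal usage sketch, assuming the client exposes an :express_credentials_provider option and using a placeholder directory bucket name:

    require 'aws-sdk-s3'

    # :session_mode and :caching are the options documented in the diff above
    provider = Aws::S3::ExpressCredentialsProvider.new(
      session_mode: 'ReadWrite',
      caching: true
    )

    s3 = Aws::S3::Client.new(
      region: 'us-west-2',
      express_credentials_provider: provider # assumed client option name
    )
    s3.put_object(bucket: 'my-bucket--usw2-az1--x-s3', key: 'hello.txt', body: 'hello')
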
data/lib/aws-sdk-s3/file_downloader.rb
@@ -32,39 +32,68 @@ module Aws
  }
  @params[:version_id] = options[:version_id] if options[:version_id]

- case @mode
- when 'auto' then multipart_download
- when 'single_request' then single_request
- when 'get_range'
- if @chunk_size
- resp = @client.head_object(@params)
- multithreaded_get_by_ranges(construct_chunks(resp.content_length))
+ # checksum_mode only supports the value "ENABLED"
+ # falsey values (false/nil) or "DISABLED" should be considered
+ # disabled and the api parameter should be unset.
+ if (checksum_mode = options.fetch(:checksum_mode, 'ENABLED'))
+ @params[:checksum_mode] = checksum_mode unless checksum_mode.upcase == 'DISABLED'
+ end
+ @on_checksum_validated = options[:on_checksum_validated]
+
+ @progress_callback = options[:progress_callback]
+
+ validate!
+
+ Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
+ case @mode
+ when 'auto' then multipart_download
+ when 'single_request' then single_request
+ when 'get_range'
+ if @chunk_size
+ resp = @client.head_object(@params)
+ multithreaded_get_by_ranges(resp.content_length)
+ else
+ msg = 'In :get_range mode, :chunk_size must be provided'
+ raise ArgumentError, msg
+ end
  else
- msg = 'In :get_range mode, :chunk_size must be provided'
+ msg = "Invalid mode #{@mode} provided, "\
+ 'mode should be :single_request, :get_range or :auto'
  raise ArgumentError, msg
  end
- else
- msg = "Invalid mode #{@mode} provided, "\
- 'mode should be :single_request, :get_range or :auto'
- raise ArgumentError, msg
  end
  end

  private

+ def validate!
+ if @on_checksum_validated && @params[:checksum_mode] != 'ENABLED'
+ raise ArgumentError, "You must set checksum_mode: 'ENABLED' " +
+ "when providing a on_checksum_validated callback"
+ end
+
+ if @on_checksum_validated && !@on_checksum_validated.respond_to?(:call)
+ raise ArgumentError, 'on_checksum_validated must be callable'
+ end
+ end
+
  def multipart_download
  resp = @client.head_object(@params.merge(part_number: 1))
  count = resp.parts_count
  if count.nil? || count <= 1
- resp.content_length < MIN_CHUNK_SIZE ?
- single_request :
- multithreaded_get_by_ranges(construct_chunks(resp.content_length))
+ if resp.content_length <= MIN_CHUNK_SIZE
+ single_request
+ else
+ multithreaded_get_by_ranges(resp.content_length)
+ end
  else
  # partNumber is an option
  resp = @client.head_object(@params)
- resp.content_length < MIN_CHUNK_SIZE ?
- single_request :
+ if resp.content_length <= MIN_CHUNK_SIZE
+ single_request
+ else
  compute_mode(resp.content_length, count)
+ end
  end
  end

@@ -72,9 +101,9 @@ module Aws
  chunk_size = compute_chunk(file_size)
  part_size = (file_size.to_f / count.to_f).ceil
  if chunk_size < part_size
- multithreaded_get_by_ranges(construct_chunks(file_size))
+ multithreaded_get_by_ranges(file_size)
  else
- multithreaded_get_by_parts(count)
+ multithreaded_get_by_parts(count, file_size)
  end
  end

@@ -82,10 +111,11 @@ module Aws
  offset = 0
  default_chunk_size = compute_chunk(file_size)
  chunks = []
- while offset <= file_size
+ while offset < file_size
  progress = offset + default_chunk_size
- chunks << "bytes=#{offset}-#{progress < file_size ? progress : file_size}"
- offset = progress + 1
+ progress = file_size if progress > file_size
+ chunks << "bytes=#{offset}-#{progress - 1}"
+ offset = progress
  end
  chunks
  end
@@ -94,12 +124,9 @@ module Aws
  if @chunk_size && @chunk_size > file_size
  raise ArgumentError, ":chunk_size shouldn't exceed total file size."
  else
- chunk_size = @chunk_size || [
- (file_size.to_f / MAX_PARTS).ceil,
- MIN_CHUNK_SIZE
+ @chunk_size || [
+ (file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE
  ].max.to_i
- chunk_size -= 1 if file_size % chunk_size == 1
- chunk_size
  end
  end

@@ -108,39 +135,137 @@ module Aws
  chunks.each_slice(@thread_count).to_a
  end

- def multithreaded_get_by_ranges(chunks)
- thread_batches(chunks, 'range')
+ def multithreaded_get_by_ranges(file_size)
+ offset = 0
+ default_chunk_size = compute_chunk(file_size)
+ chunks = []
+ part_number = 1 # parts start at 1
+ while offset < file_size
+ progress = offset + default_chunk_size
+ progress = file_size if progress > file_size
+ range = "bytes=#{offset}-#{progress - 1}"
+ chunks << Part.new(
+ part_number: part_number,
+ size: (progress-offset),
+ params: @params.merge(range: range)
+ )
+ part_number += 1
+ offset = progress
+ end
+ download_in_threads(PartList.new(chunks), file_size)
  end

- def multithreaded_get_by_parts(parts)
- thread_batches(parts, 'part_number')
+ def multithreaded_get_by_parts(n_parts, total_size)
+ parts = (1..n_parts).map do |part|
+ Part.new(part_number: part, params: @params.merge(part_number: part))
+ end
+ download_in_threads(PartList.new(parts), total_size)
  end

- def thread_batches(chunks, param)
- batches(chunks, param).each do |batch|
- threads = []
- batch.each do |chunk|
- threads << Thread.new do
- resp = @client.get_object(
- @params.merge(param.to_sym => chunk)
- )
- write(resp)
+ def download_in_threads(pending, total_size)
+ threads = []
+ if @progress_callback
+ progress = MultipartProgress.new(pending, total_size, @progress_callback)
+ end
+ @thread_count.times do
+ thread = Thread.new do
+ begin
+ while part = pending.shift
+ if progress
+ part.params[:on_chunk_received] =
+ proc do |_chunk, bytes, total|
+ progress.call(part.part_number, bytes, total)
+ end
+ end
+ resp = @client.get_object(part.params)
+ write(resp)
+ if @on_checksum_validated && resp.checksum_validated
+ @on_checksum_validated.call(resp.checksum_validated, resp)
+ end
+ end
+ nil
+ rescue => error
+ # keep other threads from downloading other parts
+ pending.clear!
+ raise error
  end
  end
- threads.each(&:join)
+ threads << thread
  end
+ threads.map(&:value).compact
  end

  def write(resp)
  range, _ = resp.content_range.split(' ').last.split('/')
  head, _ = range.split('-').map {|s| s.to_i}
- IO.write(@path, resp.body.read, head)
+ File.write(@path, resp.body.read, head)
  end

  def single_request
- @client.get_object(
- @params.merge(response_target: @path)
- )
+ params = @params.merge(response_target: @path)
+ params[:on_chunk_received] = single_part_progress if @progress_callback
+ resp = @client.get_object(params)
+
+ return resp unless @on_checksum_validated
+
+ if resp.checksum_validated
+ @on_checksum_validated.call(resp.checksum_validated, resp)
+ end
+
+ resp
+ end
+
+ def single_part_progress
+ proc do |_chunk, bytes_read, total_size|
+ @progress_callback.call([bytes_read], [total_size], total_size)
+ end
+ end
+
+ class Part < Struct.new(:part_number, :size, :params)
+ include Aws::Structure
+ end
+
+ # @api private
+ class PartList
+ include Enumerable
+ def initialize(parts = [])
+ @parts = parts
+ @mutex = Mutex.new
+ end
+
+ def shift
+ @mutex.synchronize { @parts.shift }
+ end
+
+ def size
+ @mutex.synchronize { @parts.size }
+ end
+
+ def clear!
+ @mutex.synchronize { @parts.clear }
+ end
+
+ def each(&block)
+ @mutex.synchronize { @parts.each(&block) }
+ end
+ end
+
+ # @api private
+ class MultipartProgress
+ def initialize(parts, total_size, progress_callback)
+ @bytes_received = Array.new(parts.size, 0)
+ @part_sizes = parts.map(&:size)
+ @total_size = total_size
+ @progress_callback = progress_callback
+ end
+
+ def call(part_number, bytes_received, total)
+ # part numbers start at 1
+ @bytes_received[part_number - 1] = bytes_received
+ # part size may not be known until we get the first response
+ @part_sizes[part_number - 1] ||= total
+ @progress_callback.call(@bytes_received, @part_sizes, @total_size)
+ end
  end
  end
  end
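
A hedged sketch of the new download hooks as they surface through Object#download_file (see customizations/object.rb in the file list); option names follow the FileDownloader diff above, and the bucket, key, and paths are placeholders:

    require 'aws-sdk-s3'

    obj = Aws::S3::Object.new('my-bucket', 'big-file.bin', region: 'us-east-1')
    obj.download_file(
      '/tmp/big-file.bin',
      # per-part progress: bytes and part_sizes are arrays indexed by part number
      progress_callback: lambda do |bytes, _part_sizes, total_size|
        puts format('%.1f%%', 100.0 * bytes.sum / total_size)
      end,
      # requires checksum_mode 'ENABLED', which is now the default
      on_checksum_validated: lambda do |algorithm, _resp|
        puts "checksum validated with #{algorithm}"
      end
    )
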
data/lib/aws-sdk-s3/file_uploader.rb
@@ -32,12 +32,19 @@ module Aws
  # @option options [Proc] :progress_callback
  # A Proc that will be called when each chunk of the upload is sent.
  # It will be invoked with [bytes_read], [total_sizes]
+ # @option options [Integer] :thread_count
+ # The thread count to use for multipart uploads. Ignored for
+ # objects smaller than the multipart threshold.
  # @return [void]
  def upload(source, options = {})
- if File.size(source) >= multipart_threshold
- MultipartFileUploader.new(@options).upload(source, options)
- else
- put_object(source, options)
+ Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
+ if File.size(source) >= multipart_threshold
+ MultipartFileUploader.new(@options).upload(source, options)
+ else
+ # remove multipart parameters not supported by put_object
+ options.delete(:thread_count)
+ put_object(source, options)
+ end
  end
  end

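The new :thread_count option reaches FileUploader through Object#upload_file; it only applies to multipart uploads and is removed before put_object for smaller files. A small sketch with placeholder names:

    obj = Aws::S3::Object.new('my-bucket', 'backup.tar', region: 'us-east-1')
    obj.upload_file('/tmp/backup.tar', thread_count: 15) # used only above the multipart threshold
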
data/lib/aws-sdk-s3/multipart_file_uploader.rb
@@ -21,6 +21,10 @@ module Aws
  Client.api.operation(:create_multipart_upload).input.shape.member_names
  )

+ COMPLETE_OPTIONS = Set.new(
+ Client.api.operation(:complete_multipart_upload).input.shape.member_names
+ )
+
  # @api private
  UPLOAD_PART_OPTIONS = Set.new(
  Client.api.operation(:upload_part).input.shape.member_names
@@ -42,7 +46,7 @@ module Aws
  # @option options [Proc] :progress_callback
  # A Proc that will be called when each chunk of the upload is sent.
  # It will be invoked with [bytes_read], [total_sizes]
- # @return [void]
+ # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
  def upload(source, options = {})
  if File.size(source) < MIN_PART_SIZE
  raise ArgumentError, FILE_TOO_SMALL
@@ -61,10 +65,10 @@ module Aws

  def complete_upload(upload_id, parts, options)
  @client.complete_multipart_upload(
- bucket: options[:bucket],
- key: options[:key],
- upload_id: upload_id,
- multipart_upload: { parts: parts }
+ **complete_opts(options).merge(
+ upload_id: upload_id,
+ multipart_upload: { parts: parts }
+ )
  )
  end

@@ -85,12 +89,13 @@ module Aws
  key: options[:key],
  upload_id: upload_id
  )
- msg = "multipart upload failed: #{errors.map(&:message).join("; ")}"
+ msg = "multipart upload failed: #{errors.map(&:message).join('; ')}"
  raise MultipartUploadError.new(msg, errors)
  rescue MultipartUploadError => error
  raise error
  rescue => error
- msg = "failed to abort multipart upload: #{error.message}"
+ msg = "failed to abort multipart upload: #{error.message}. "\
+ "Multipart upload failed: #{errors.map(&:message).join('; ')}"
  raise MultipartUploadError.new(msg, errors + [error])
  end

@@ -123,6 +128,13 @@ module Aws
  end
  end

+ def complete_opts(options)
+ COMPLETE_OPTIONS.inject({}) do |hash, key|
+ hash[key] = options[key] if options.key?(key)
+ hash
+ end
+ end
+
  def upload_part_opts(options)
  UPLOAD_PART_OPTIONS.inject({}) do |hash, key|
  hash[key] = options[key] if options.key?(key)
@@ -135,7 +147,7 @@ module Aws
  if (callback = options[:progress_callback])
  progress = MultipartProgress.new(pending, callback)
  end
- @thread_count.times do
+ options.fetch(:thread_count, @thread_count).times do
  thread = Thread.new do
  begin
  while part = pending.shift
@@ -147,7 +159,15 @@ module Aws
  end
  resp = @client.upload_part(part)
  part[:body].close
- completed.push(etag: resp.etag, part_number: part[:part_number])
+ completed_part = {etag: resp.etag, part_number: part[:part_number]}
+
+ # get the requested checksum from the response
+ if part[:checksum_algorithm]
+ k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
+ completed_part[k] = resp[k]
+ end
+
+ completed.push(completed_part)
  end
  nil
  rescue => error
@@ -156,7 +176,6 @@ module Aws
  error
  end
  end
- thread.abort_on_exception = true
  threads << thread
  end
  threads.map(&:value).compact
@@ -224,4 +243,4 @@ module Aws
  end
  end
  end
- end
+ end
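
With a flexible checksum requested, each completed part now carries its checksum_* member and the uploader returns the CompleteMultipartUpload response rather than void. A hedged sketch, assuming Object#upload_file still yields that response when given a block:

    obj = Aws::S3::Object.new('my-bucket', 'video.mp4', region: 'us-east-1')
    obj.upload_file('/tmp/video.mp4', checksum_algorithm: 'CRC32') do |resp|
      puts resp.checksum_crc32 # checksum of the completed object, when present
    end
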
data/lib/aws-sdk-s3/multipart_stream_uploader.rb
@@ -26,6 +26,10 @@ module Aws
  UPLOAD_PART_OPTIONS =
  Set.new(Client.api.operation(:upload_part).input.shape.member_names)

+ # @api private
+ COMPLETE_UPLOAD_OPTIONS =
+ Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
+
  # @option options [Client] :client
  def initialize(options = {})
  @client = options[:client] || Client.new
@@ -39,11 +43,14 @@ module Aws

  # @option options [required,String] :bucket
  # @option options [required,String] :key
- # @return [void]
+ # @option options [Integer] :thread_count (THREAD_COUNT)
+ # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
  def upload(options = {}, &block)
- upload_id = initiate_upload(options)
- parts = upload_parts(upload_id, options, &block)
- complete_upload(upload_id, parts, options)
+ Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
+ upload_id = initiate_upload(options)
+ parts = upload_parts(upload_id, options, &block)
+ complete_upload(upload_id, parts, options)
+ end
  end

  private
@@ -54,17 +61,22 @@ module Aws

  def complete_upload(upload_id, parts, options)
  @client.complete_multipart_upload(
- bucket: options[:bucket],
- key: options[:key],
- upload_id: upload_id,
- multipart_upload: { parts: parts })
+ **complete_opts(options).merge(
+ upload_id: upload_id,
+ multipart_upload: { parts: parts }
+ )
+ )
  end

  def upload_parts(upload_id, options, &block)
  completed = Queue.new
+ thread_errors = []
  errors = begin
  IO.pipe do |read_pipe, write_pipe|
- threads = upload_in_threads(read_pipe, completed, upload_part_opts(options).merge(upload_id: upload_id))
+ threads = upload_in_threads(
+ read_pipe, completed,
+ upload_part_opts(options).merge(upload_id: upload_id),
+ thread_errors)
  begin
  block.call(write_pipe)
  ensure
@@ -74,7 +86,7 @@ module Aws
  threads.map(&:value).compact
  end
  rescue => e
- [e]
+ thread_errors + [e]
  end

  if errors.empty?
@@ -90,12 +102,13 @@ module Aws
  key: options[:key],
  upload_id: upload_id
  )
- msg = "multipart upload failed: #{errors.map(&:message).join("; ")}"
+ msg = "multipart upload failed: #{errors.map(&:message).join('; ')}"
  raise MultipartUploadError.new(msg, errors)
  rescue MultipartUploadError => error
  raise error
  rescue => error
- msg = "failed to abort multipart upload: #{error.message}"
+ msg = "failed to abort multipart upload: #{error.message}. "\
+ "Multipart upload failed: #{errors.map(&:message).join('; ')}"
  raise MultipartUploadError.new(msg, errors + [error])
  end

@@ -113,6 +126,13 @@ module Aws
  end
  end

+ def complete_opts(options)
+ COMPLETE_UPLOAD_OPTIONS.inject({}) do |hash, key|
+ hash[key] = options[key] if options.key?(key)
+ hash
+ end
+ end
+
  def read_to_part_body(read_pipe)
  return if read_pipe.closed?
  temp_io = @tempfile ? Tempfile.new(TEMPFILE_PREIX) : StringIO.new(String.new)
@@ -130,10 +150,10 @@ module Aws
  end
  end

- def upload_in_threads(read_pipe, completed, options)
+ def upload_in_threads(read_pipe, completed, options, thread_errors)
  mutex = Mutex.new
  part_number = 0
- @thread_count.times.map do
+ options.fetch(:thread_count, @thread_count).times.map do
  thread = Thread.new do
  begin
  loop do
@@ -147,7 +167,14 @@ module Aws
  part_number: thread_part_number,
  )
  resp = @client.upload_part(part)
- completed << {etag: resp.etag, part_number: part[:part_number]}
+ completed_part = {etag: resp.etag, part_number: part[:part_number]}
+
+ # get the requested checksum from the response
+ if part[:checksum_algorithm]
+ k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
+ completed_part[k] = resp[k]
+ end
+ completed.push(completed_part)
  ensure
  if Tempfile === body
  body.close
@@ -160,11 +187,13 @@ module Aws
  nil
  rescue => error
  # keep other threads from uploading other parts
- mutex.synchronize { read_pipe.close_read unless read_pipe.closed? }
+ mutex.synchronize do
+ thread_errors.push(error)
+ read_pipe.close_read unless read_pipe.closed?
+ end
  error
  end
  end
- thread.abort_on_exception = true
  thread
  end
  end
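
A hedged sketch of the stream uploader through Object#upload_stream with the new :thread_count option; per-part errors from worker threads are now collected into the raised MultipartUploadError instead of aborting the process via abort_on_exception. generate_rows is a hypothetical helper producing the payload:

    obj = Aws::S3::Object.new('my-bucket', 'export.csv.gz', region: 'us-east-1')
    begin
      obj.upload_stream(thread_count: 8, tempfile: true) do |stream|
        stream << generate_rows # write the payload into the pipe
      end
    rescue Aws::S3::MultipartUploadError => e
      warn "upload failed: #{e.message}"
    end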