aws-sdk-s3 1.196.1 → 1.213.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +116 -0
  3. data/VERSION +1 -1
  4. data/lib/aws-sdk-s3/bucket.rb +17 -17
  5. data/lib/aws-sdk-s3/bucket_acl.rb +1 -1
  6. data/lib/aws-sdk-s3/bucket_versioning.rb +33 -0
  7. data/lib/aws-sdk-s3/client.rb +1271 -453
  8. data/lib/aws-sdk-s3/client_api.rb +115 -0
  9. data/lib/aws-sdk-s3/customizations/object.rb +39 -24
  10. data/lib/aws-sdk-s3/customizations.rb +3 -1
  11. data/lib/aws-sdk-s3/default_executor.rb +103 -0
  12. data/lib/aws-sdk-s3/encryption/client.rb +2 -2
  13. data/lib/aws-sdk-s3/encryption/default_cipher_provider.rb +2 -0
  14. data/lib/aws-sdk-s3/encryption/encrypt_handler.rb +2 -0
  15. data/lib/aws-sdk-s3/encryption/kms_cipher_provider.rb +2 -0
  16. data/lib/aws-sdk-s3/encryptionV2/client.rb +98 -23
  17. data/lib/aws-sdk-s3/encryptionV2/decrypt_handler.rb +7 -162
  18. data/lib/aws-sdk-s3/encryptionV2/decryption.rb +205 -0
  19. data/lib/aws-sdk-s3/encryptionV2/default_cipher_provider.rb +17 -0
  20. data/lib/aws-sdk-s3/encryptionV2/encrypt_handler.rb +2 -0
  21. data/lib/aws-sdk-s3/encryptionV2/io_encrypter.rb +2 -0
  22. data/lib/aws-sdk-s3/encryptionV2/kms_cipher_provider.rb +8 -0
  23. data/lib/aws-sdk-s3/encryptionV2/utils.rb +5 -0
  24. data/lib/aws-sdk-s3/encryptionV3/client.rb +885 -0
  25. data/lib/aws-sdk-s3/encryptionV3/decrypt_handler.rb +98 -0
  26. data/lib/aws-sdk-s3/encryptionV3/decryption.rb +244 -0
  27. data/lib/aws-sdk-s3/encryptionV3/default_cipher_provider.rb +159 -0
  28. data/lib/aws-sdk-s3/encryptionV3/default_key_provider.rb +35 -0
  29. data/lib/aws-sdk-s3/encryptionV3/encrypt_handler.rb +98 -0
  30. data/lib/aws-sdk-s3/encryptionV3/errors.rb +47 -0
  31. data/lib/aws-sdk-s3/encryptionV3/io_auth_decrypter.rb +60 -0
  32. data/lib/aws-sdk-s3/encryptionV3/io_decrypter.rb +35 -0
  33. data/lib/aws-sdk-s3/encryptionV3/io_encrypter.rb +84 -0
  34. data/lib/aws-sdk-s3/encryptionV3/key_provider.rb +28 -0
  35. data/lib/aws-sdk-s3/encryptionV3/kms_cipher_provider.rb +159 -0
  36. data/lib/aws-sdk-s3/encryptionV3/materials.rb +58 -0
  37. data/lib/aws-sdk-s3/encryptionV3/utils.rb +321 -0
  38. data/lib/aws-sdk-s3/encryption_v2.rb +1 -0
  39. data/lib/aws-sdk-s3/encryption_v3.rb +24 -0
  40. data/lib/aws-sdk-s3/endpoint_parameters.rb +17 -17
  41. data/lib/aws-sdk-s3/endpoint_provider.rb +241 -68
  42. data/lib/aws-sdk-s3/endpoints.rb +39 -0
  43. data/lib/aws-sdk-s3/errors.rb +11 -0
  44. data/lib/aws-sdk-s3/file_downloader.rb +192 -104
  45. data/lib/aws-sdk-s3/file_uploader.rb +17 -13
  46. data/lib/aws-sdk-s3/multipart_file_uploader.rb +82 -69
  47. data/lib/aws-sdk-s3/multipart_stream_uploader.rb +96 -107
  48. data/lib/aws-sdk-s3/multipart_upload.rb +12 -12
  49. data/lib/aws-sdk-s3/multipart_upload_part.rb +8 -8
  50. data/lib/aws-sdk-s3/object.rb +88 -59
  51. data/lib/aws-sdk-s3/object_acl.rb +5 -5
  52. data/lib/aws-sdk-s3/object_summary.rb +70 -41
  53. data/lib/aws-sdk-s3/object_version.rb +23 -25
  54. data/lib/aws-sdk-s3/plugins/checksum_algorithm.rb +18 -5
  55. data/lib/aws-sdk-s3/plugins/endpoints.rb +1 -1
  56. data/lib/aws-sdk-s3/plugins/http_200_errors.rb +58 -34
  57. data/lib/aws-sdk-s3/transfer_manager.rb +321 -0
  58. data/lib/aws-sdk-s3/types.rb +687 -330
  59. data/lib/aws-sdk-s3.rb +1 -1
  60. data/sig/bucket.rbs +1 -1
  61. data/sig/client.rbs +62 -12
  62. data/sig/errors.rbs +2 -0
  63. data/sig/multipart_upload.rbs +1 -1
  64. data/sig/object.rbs +7 -5
  65. data/sig/object_summary.rbs +7 -5
  66. data/sig/types.rbs +84 -14
  67. metadata +21 -3
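
Highlights of this range include the new Aws::S3::TransferManager (data/lib/aws-sdk-s3/transfer_manager.rb) backed by a pluggable executor (data/lib/aws-sdk-s3/default_executor.rb), a V3 client-side encryption module, and a refactor of the file downloader and uploader internals shown below. A minimal usage sketch of the transfer manager follows, assuming its upload_file/download_file methods mirror the keyword arguments of the existing Object#upload_file and Object#download_file helpers; verify the exact signatures against the 1.213.0 API docs before relying on them:

    require 'aws-sdk-s3'

    # Sketch only: assumes TransferManager#upload_file/#download_file take a
    # local path plus :bucket and :key, mirroring Object#upload_file and
    # Object#download_file. The bucket name below is a placeholder.
    tm = Aws::S3::TransferManager.new(client: Aws::S3::Client.new)
    tm.upload_file('/tmp/backup.tar.gz', bucket: 'amzn-s3-demo-bucket', key: 'backups/backup.tar.gz')
    tm.download_file('/tmp/restore.tar.gz', bucket: 'amzn-s3-demo-bucket', key: 'backups/backup.tar.gz')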
data/lib/aws-sdk-s3/endpoints.rb

@@ -337,6 +337,17 @@ module Aws::S3
       end
     end
 
+    class GetBucketAbac
+      def self.build(context)
+        Aws::S3::EndpointParameters.create(
+          context.config,
+          bucket: context.params[:bucket],
+          use_dual_stack: context[:use_dualstack_endpoint],
+          accelerate: context[:use_accelerate_endpoint],
+        )
+      end
+    end
+
     class GetBucketAccelerateConfiguration
       def self.build(context)
         Aws::S3::EndpointParameters.create(
@@ -878,6 +889,17 @@ module Aws::S3
       end
     end
 
+    class PutBucketAbac
+      def self.build(context)
+        Aws::S3::EndpointParameters.create(
+          context.config,
+          bucket: context.params[:bucket],
+          use_dual_stack: context[:use_dualstack_endpoint],
+          accelerate: context[:use_accelerate_endpoint],
+        )
+      end
+    end
+
     class PutBucketAccelerateConfiguration
       def self.build(context)
         Aws::S3::EndpointParameters.create(
@@ -1256,6 +1278,17 @@ module Aws::S3
       end
     end
 
+    class UpdateObjectEncryption
+      def self.build(context)
+        Aws::S3::EndpointParameters.create(
+          context.config,
+          bucket: context.params[:bucket],
+          use_dual_stack: context[:use_dualstack_endpoint],
+          accelerate: context[:use_accelerate_endpoint],
+        )
+      end
+    end
+
     class UploadPart
       def self.build(context)
         Aws::S3::EndpointParameters.create(
@@ -1348,6 +1381,8 @@ module Aws::S3
         DeleteObjects.build(context)
       when :delete_public_access_block
         DeletePublicAccessBlock.build(context)
+      when :get_bucket_abac
+        GetBucketAbac.build(context)
       when :get_bucket_accelerate_configuration
         GetBucketAccelerateConfiguration.build(context)
       when :get_bucket_acl
@@ -1440,6 +1475,8 @@ module Aws::S3
         ListObjectsV2.build(context)
       when :list_parts
         ListParts.build(context)
+      when :put_bucket_abac
+        PutBucketAbac.build(context)
       when :put_bucket_accelerate_configuration
         PutBucketAccelerateConfiguration.build(context)
       when :put_bucket_acl
@@ -1504,6 +1541,8 @@ module Aws::S3
         UpdateBucketMetadataInventoryTableConfiguration.build(context)
       when :update_bucket_metadata_journal_table_configuration
         UpdateBucketMetadataJournalTableConfiguration.build(context)
+      when :update_object_encryption
+        UpdateObjectEncryption.build(context)
       when :upload_part
         UploadPart.build(context)
       when :upload_part_copy
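
These builders wire three new client operations (get_bucket_abac, put_bucket_abac, and update_object_encryption) into endpoint resolution, each using only the bucket name plus the dual-stack and accelerate settings. A hedged sketch of calling the new bucket ABAC getter: :bucket is the only request field visible in this diff, so any additional parameters and the response members are assumptions to be checked against types.rb and client_api.rb in 1.213.0.

    s3 = Aws::S3::Client.new(region: 'us-east-1')

    # Sketch only: :bucket is the only parameter shown in this diff; the
    # bucket name is a placeholder and the response shape is not shown here.
    resp = s3.get_bucket_abac(bucket: 'amzn-s3-demo-bucket')
    puts resp.to_h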
data/lib/aws-sdk-s3/errors.rb

@@ -27,6 +27,7 @@ module Aws::S3
   # See {Seahorse::Client::RequestContext} for more information.
   #
   # ## Error Classes
+  # * {AccessDenied}
   # * {BucketAlreadyExists}
   # * {BucketAlreadyOwnedByYou}
   # * {EncryptionTypeMismatch}
@@ -47,6 +48,16 @@ module Aws::S3
 
     extend Aws::Errors::DynamicErrors
 
+    class AccessDenied < ServiceError
+
+      # @param [Seahorse::Client::RequestContext] context
+      # @param [String] message
+      # @param [Aws::S3::Types::AccessDenied] data
+      def initialize(context, message, data = Aws::EmptyStructure.new)
+        super(context, message, data)
+      end
+    end
+
     class BucketAlreadyExists < ServiceError
 
      # @param [Seahorse::Client::RequestContext] context
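
With AccessDenied modeled as a concrete error class, callers can rescue it by class rather than matching on the error code of a dynamically generated error:

    s3 = Aws::S3::Client.new

    begin
      s3.get_object(bucket: 'amzn-s3-demo-bucket', key: 'restricted/report.csv')
    rescue Aws::S3::Errors::AccessDenied => e
      # Declared explicitly in errors.rb as of this release, alongside
      # BucketAlreadyExists and the other modeled S3 errors.
      warn "access denied: #{e.message}"
    end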
data/lib/aws-sdk-s3/file_downloader.rb

@@ -8,176 +8,262 @@ module Aws
   module S3
     # @api private
     class FileDownloader
-
       MIN_CHUNK_SIZE = 5 * 1024 * 1024
       MAX_PARTS = 10_000
+      HEAD_OPTIONS = Set.new(Client.api.operation(:head_object).input.shape.member_names)
+      GET_OPTIONS = Set.new(Client.api.operation(:get_object).input.shape.member_names)
 
       def initialize(options = {})
         @client = options[:client] || Client.new
+        @executor = options[:executor]
       end
 
       # @return [Client]
       attr_reader :client
 
       def download(destination, options = {})
-        @path = destination
-        @mode = options.delete(:mode) || 'auto'
-        @thread_count = options.delete(:thread_count) || 10
-        @chunk_size = options.delete(:chunk_size)
-        @on_checksum_validated = options.delete(:on_checksum_validated)
-        @progress_callback = options.delete(:progress_callback)
-        @params = options
-        validate!
+        validate_destination!(destination)
+        opts = build_download_opts(destination, options)
+        validate_opts!(opts)
 
         Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
-          case @mode
-          when 'auto' then multipart_download
-          when 'single_request' then single_request
-          when 'get_range'
-            raise ArgumentError, 'In get_range mode, :chunk_size must be provided' unless @chunk_size
-
-            resp = @client.head_object(@params)
-            multithreaded_get_by_ranges(resp.content_length, resp.etag)
-          else
-            raise ArgumentError, "Invalid mode #{@mode} provided, :mode should be single_request, get_range or auto"
+          case opts[:mode]
+          when 'auto' then multipart_download(opts)
+          when 'single_request' then single_request(opts)
+          when 'get_range' then range_request(opts)
           end
         end
-        File.rename(@temp_path, @path) if @temp_path
+        File.rename(opts[:temp_path], destination) if opts[:temp_path]
       ensure
-        File.delete(@temp_path) if @temp_path && File.exist?(@temp_path)
+        cleanup_temp_file(opts)
       end
 
       private
 
-      def validate!
-        return unless @on_checksum_validated && !@on_checksum_validated.respond_to?(:call)
+      def build_download_opts(destination, opts)
+        {
+          destination: destination,
+          mode: opts.delete(:mode) || 'auto',
+          chunk_size: opts.delete(:chunk_size),
+          on_checksum_validated: opts.delete(:on_checksum_validated),
+          progress_callback: opts.delete(:progress_callback),
+          params: opts,
+          temp_path: nil
+        }
+      end
+
+      def cleanup_temp_file(opts)
+        return unless opts
+
+        temp_file = opts[:temp_path]
+        File.delete(temp_file) if temp_file && File.exist?(temp_file)
+      end
+
+      def download_with_executor(part_list, total_size, opts)
+        download_attempts = 0
+        completion_queue = Queue.new
+        abort_download = false
+        error = nil
+        progress = MultipartProgress.new(part_list, total_size, opts[:progress_callback])
+
+        while (part = part_list.shift)
+          break if abort_download
+
+          download_attempts += 1
+          @executor.post(part) do |p|
+            update_progress(progress, p)
+            resp = @client.get_object(p.params)
+            range = extract_range(resp.content_range)
+            validate_range(range, p.params[:range]) if p.params[:range]
+            write(resp.body, range, opts)
+
+            execute_checksum_callback(resp, opts)
+          rescue StandardError => e
+            abort_download = true
+            error = e
+          ensure
+            completion_queue << :done
+          end
+        end
+
+        download_attempts.times { completion_queue.pop }
+        raise error unless error.nil?
+      end
+
+      def handle_checksum_mode_option(option_key, opts)
+        return false unless option_key == :checksum_mode && opts[:checksum_mode] == 'DISABLED'
+
+        msg = ':checksum_mode option is deprecated. Checksums will be validated by default. ' \
+              'To disable checksum validation, set :response_checksum_validation to "when_required" on your S3 client.'
+        warn(msg)
+        true
+      end
+
+      def get_opts(opts)
+        GET_OPTIONS.each_with_object({}) do |k, h|
+          next if k == :checksum_mode
+
+          h[k] = opts[k] if opts.key?(k)
+        end
+      end
+
+      def head_opts(opts)
+        HEAD_OPTIONS.each_with_object({}) do |k, h|
+          next if handle_checksum_mode_option(k, opts)
 
-        raise ArgumentError, ':on_checksum_validated must be callable'
+          h[k] = opts[k] if opts.key?(k)
+        end
+      end
+
+      def compute_chunk(chunk_size, file_size)
+        raise ArgumentError, ":chunk_size shouldn't exceed total file size." if chunk_size && chunk_size > file_size
+
+        chunk_size || [(file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE].max.to_i
+      end
+
+      def compute_mode(file_size, total_parts, etag, opts)
+        chunk_size = compute_chunk(opts[:chunk_size], file_size)
+        part_size = (file_size.to_f / total_parts).ceil
+
+        resolve_temp_path(opts)
+        if chunk_size < part_size
+          multithreaded_get_by_ranges(file_size, etag, opts)
+        else
+          multithreaded_get_by_parts(total_parts, file_size, etag, opts)
+        end
       end
 
-      def multipart_download
-        resp = @client.head_object(@params.merge(part_number: 1))
+      def extract_range(value)
+        value.match(%r{bytes (?<range>\d+-\d+)/\d+})[:range]
+      end
+
+      def multipart_download(opts)
+        resp = @client.head_object(head_opts(opts[:params].merge(part_number: 1)))
         count = resp.parts_count
 
         if count.nil? || count <= 1
           if resp.content_length <= MIN_CHUNK_SIZE
-            single_request
+            single_request(opts)
           else
-            multithreaded_get_by_ranges(resp.content_length, resp.etag)
+            resolve_temp_path(opts)
+            multithreaded_get_by_ranges(resp.content_length, resp.etag, opts)
           end
         else
           # covers cases when given object is not uploaded via UploadPart API
-          resp = @client.head_object(@params) # partNumber is an option
+          resp = @client.head_object(head_opts(opts[:params])) # partNumber is an option
           if resp.content_length <= MIN_CHUNK_SIZE
-            single_request
+            single_request(opts)
           else
-            compute_mode(resp.content_length, count, resp.etag)
+            compute_mode(resp.content_length, count, resp.etag, opts)
           end
         end
       end
 
-      def compute_mode(file_size, count, etag)
-        chunk_size = compute_chunk(file_size)
-        part_size = (file_size.to_f / count).ceil
-        if chunk_size < part_size
-          multithreaded_get_by_ranges(file_size, etag)
-        else
-          multithreaded_get_by_parts(count, file_size, etag)
+      def multithreaded_get_by_parts(total_parts, file_size, etag, opts)
+        parts = (1..total_parts).map do |part|
+          params = get_opts(opts[:params].merge(part_number: part, if_match: etag))
+          Part.new(part_number: part, params: params)
         end
+        download_with_executor(PartList.new(parts), file_size, opts)
       end
 
-      def compute_chunk(file_size)
-        raise ArgumentError, ":chunk_size shouldn't exceed total file size." if @chunk_size && @chunk_size > file_size
-
-        @chunk_size || [(file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE].max.to_i
-      end
-
-      def multithreaded_get_by_ranges(file_size, etag)
+      def multithreaded_get_by_ranges(file_size, etag, opts)
         offset = 0
-        default_chunk_size = compute_chunk(file_size)
+        default_chunk_size = compute_chunk(opts[:chunk_size], file_size)
         chunks = []
         part_number = 1 # parts start at 1
         while offset < file_size
           progress = offset + default_chunk_size
           progress = file_size if progress > file_size
-          params = @params.merge(range: "bytes=#{offset}-#{progress - 1}", if_match: etag)
+          params = get_opts(opts[:params].merge(range: "bytes=#{offset}-#{progress - 1}", if_match: etag))
           chunks << Part.new(part_number: part_number, size: (progress - offset), params: params)
           part_number += 1
           offset = progress
         end
-        download_in_threads(PartList.new(chunks), file_size)
-      end
-
-      def multithreaded_get_by_parts(n_parts, total_size, etag)
-        parts = (1..n_parts).map do |part|
-          Part.new(part_number: part, params: @params.merge(part_number: part, if_match: etag))
-        end
-        download_in_threads(PartList.new(parts), total_size)
-      end
-
-      def download_in_threads(pending, total_size)
-        threads = []
-        progress = MultipartProgress.new(pending, total_size, @progress_callback) if @progress_callback
-        @temp_path = "#{@path}.s3tmp.#{SecureRandom.alphanumeric(8)}"
-        @thread_count.times do
-          thread = Thread.new do
-            begin
-              while (part = pending.shift)
-                if progress
-                  part.params[:on_chunk_received] =
-                    proc do |_chunk, bytes, total|
-                      progress.call(part.part_number, bytes, total)
-                    end
-                end
-                resp = @client.get_object(part.params)
-                range = extract_range(resp.content_range)
-                validate_range(range, part.params[:range]) if part.params[:range]
-                write(resp.body, range)
-                if @on_checksum_validated && resp.checksum_validated
-                  @on_checksum_validated.call(resp.checksum_validated, resp)
-                end
-              end
-              nil
-            rescue StandardError => e
-              pending.clear! # keep other threads from downloading other parts
-              raise e
-            end
-          end
-          threads << thread
-        end
-        threads.map(&:value).compact
+        download_with_executor(PartList.new(chunks), file_size, opts)
       end
 
-      def extract_range(value)
-        value.match(%r{bytes (?<range>\d+-\d+)/\d+})[:range]
+      def range_request(opts)
+        resp = @client.head_object(head_opts(opts[:params]))
+        resolve_temp_path(opts)
+        multithreaded_get_by_ranges(resp.content_length, resp.etag, opts)
      end
 
-      def validate_range(actual, expected)
-        return if actual == expected.match(/bytes=(?<range>\d+-\d+)/)[:range]
-
-        raise MultipartDownloadError, "multipart download failed: expected range of #{expected} but got #{actual}"
-      end
+      def resolve_temp_path(opts)
+        return if [File, Tempfile].include?(opts[:destination].class)
 
-      def write(body, range)
-        File.write(@temp_path, body.read, range.split('-').first.to_i)
+        opts[:temp_path] ||= "#{opts[:destination]}.s3tmp.#{SecureRandom.alphanumeric(8)}"
       end
 
-      def single_request
-        params = @params.merge(response_target: @path)
-        params[:on_chunk_received] = single_part_progress if @progress_callback
+      def single_request(opts)
+        params = get_opts(opts[:params]).merge(response_target: opts[:destination])
+        params[:on_chunk_received] = single_part_progress(opts) if opts[:progress_callback]
         resp = @client.get_object(params)
-        return resp unless @on_checksum_validated
+        return resp unless opts[:on_checksum_validated]
 
-        @on_checksum_validated.call(resp.checksum_validated, resp) if resp.checksum_validated
+        opts[:on_checksum_validated].call(resp.checksum_validated, resp) if resp.checksum_validated
         resp
       end
 
-      def single_part_progress
+      def single_part_progress(opts)
         proc do |_chunk, bytes_read, total_size|
-          @progress_callback.call([bytes_read], [total_size], total_size)
+          opts[:progress_callback].call([bytes_read], [total_size], total_size)
         end
       end
 
+      def update_progress(progress, part)
+        return unless progress.progress_callback
+
+        part.params[:on_chunk_received] =
+          proc do |_chunk, bytes, total|
+            progress.call(part.part_number, bytes, total)
+          end
+      end
+
+      def execute_checksum_callback(resp, opts)
+        return unless opts[:on_checksum_validated] && resp.checksum_validated
+
+        opts[:on_checksum_validated].call(resp.checksum_validated, resp)
+      end
+
+      def validate_destination!(destination)
+        valid_types = [String, Pathname, File, Tempfile]
+        return if valid_types.include?(destination.class)
+
+        raise ArgumentError, "Invalid destination, expected #{valid_types.join(', ')} but got: #{destination.class}"
+      end
+
+      def validate_opts!(opts)
+        if opts[:on_checksum_validated] && !opts[:on_checksum_validated].respond_to?(:call)
+          raise ArgumentError, ':on_checksum_validated must be callable'
+        end
+
+        valid_modes = %w[auto get_range single_request]
+        unless valid_modes.include?(opts[:mode])
+          msg = "Invalid mode #{opts[:mode]} provided, :mode should be single_request, get_range or auto"
+          raise ArgumentError, msg
+        end
+
+        if opts[:mode] == 'get_range' && opts[:chunk_size].nil?
+          raise ArgumentError, 'In get_range mode, :chunk_size must be provided'
+        end
+
+        if opts[:chunk_size] && opts[:chunk_size] <= 0
+          raise ArgumentError, ':chunk_size must be positive'
+        end
+      end
+
+      def validate_range(actual, expected)
+        return if actual == expected.match(/bytes=(?<range>\d+-\d+)/)[:range]
+
+        raise MultipartDownloadError, "multipart download failed: expected range of #{expected} but got #{actual}"
+      end
+
+      def write(body, range, opts)
+        path = opts[:temp_path] || opts[:destination]
+        File.write(path, body.read, range.split('-').first.to_i)
+      end
+
       # @api private
       class Part < Struct.new(:part_number, :size, :params)
         include Aws::Structure
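
The old download_in_threads loop is replaced by download_with_executor, which posts each part to an injected executor and then blocks on a completion queue until every posted part has signaled :done, so concurrency policy moves out of FileDownloader and into the executor. A minimal sketch of an object satisfying the contract this hunk relies on (only a post(part) { |p| ... } method that eventually runs the block); the shipped DefaultExecutor in data/lib/aws-sdk-s3/default_executor.rb is the real implementation, and this stand-in is illustration only since FileDownloader itself is @api private:

    # Hypothetical executor: one thread per posted part. Real code should
    # rely on the SDK's own DefaultExecutor, which adds pooling and shutdown.
    class InlineThreadExecutor
      def post(*args)
        Thread.new { yield(*args) }
      end
    end

    downloader = Aws::S3::FileDownloader.new(
      client: Aws::S3::Client.new,
      executor: InlineThreadExecutor.new
    )
    downloader.download('/tmp/large.bin', bucket: 'amzn-s3-demo-bucket', key: 'large.bin')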
@@ -217,6 +303,8 @@ module Aws
           @progress_callback = progress_callback
         end
 
+        attr_reader :progress_callback
+
         def call(part_number, bytes_received, total)
           # part numbers start at 1
           @bytes_received[part_number - 1] = bytes_received
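
From the caller's perspective the public options survive the refactor: :mode, :chunk_size, :on_checksum_validated, and :progress_callback are collected into an opts hash by build_download_opts and checked up front by validate_opts! (get_range still requires :chunk_size, which must now also be positive). For example, via Object#download_file:

    obj = Aws::S3::Object.new('amzn-s3-demo-bucket', 'videos/demo.mp4', client: Aws::S3::Client.new)

    # get_range mode requires :chunk_size, enforced by validate_opts! above.
    obj.download_file(
      '/tmp/demo.mp4',
      mode: 'get_range',
      chunk_size: 8 * 1024 * 1024,
      on_checksum_validated: ->(algorithm, _resp) { puts "checksum verified (#{algorithm})" }
    )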
data/lib/aws-sdk-s3/file_uploader.rb

@@ -7,23 +7,22 @@ module Aws
     # @api private
     class FileUploader
 
-      ONE_HUNDRED_MEGABYTES = 100 * 1024 * 1024
+      DEFAULT_MULTIPART_THRESHOLD = 100 * 1024 * 1024
 
       # @param [Hash] options
       # @option options [Client] :client
       # @option options [Integer] :multipart_threshold (104857600)
       def initialize(options = {})
-        @options = options
         @client = options[:client] || Client.new
-        @multipart_threshold = options[:multipart_threshold] ||
-          ONE_HUNDRED_MEGABYTES
+        @executor = options[:executor]
+        @http_chunk_size = options[:http_chunk_size]
+        @multipart_threshold = options[:multipart_threshold] || DEFAULT_MULTIPART_THRESHOLD
       end
 
       # @return [Client]
       attr_reader :client
 
-      # @return [Integer] Files larger than or equal to this in bytes are uploaded
-      #   using a {MultipartFileUploader}.
+      # @return [Integer] Files larger than or equal to this in bytes are uploaded using a {MultipartFileUploader}.
       attr_reader :multipart_threshold
 
       # @param [String, Pathname, File, Tempfile] source The file to upload.
@@ -38,11 +37,13 @@ module Aws
       # @return [void]
       def upload(source, options = {})
         Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
-          if File.size(source) >= multipart_threshold
-            MultipartFileUploader.new(@options).upload(source, options)
+          if File.size(source) >= @multipart_threshold
+            MultipartFileUploader.new(
+              client: @client,
+              executor: @executor,
+              http_chunk_size: @http_chunk_size
+            ).upload(source, options)
           else
-            # remove multipart parameters not supported by put_object
-            options.delete(:thread_count)
             put_object(source, options)
           end
         end
@@ -50,9 +51,9 @@ module Aws
 
       private
 
-      def open_file(source)
-        if String === source || Pathname === source
-          File.open(source, 'rb') { |file| yield(file) }
+      def open_file(source, &block)
+        if source.is_a?(String) || source.is_a?(Pathname)
+          File.open(source, 'rb', &block)
         else
           yield(source)
         end
@@ -63,7 +64,10 @@ module Aws
           options[:on_chunk_sent] = single_part_progress(callback)
         end
         open_file(source) do |file|
+          Thread.current[:net_http_override_body_stream_chunk] = @http_chunk_size if @http_chunk_size
           @client.put_object(options.merge(body: file))
+        ensure
+          Thread.current[:net_http_override_body_stream_chunk] = nil
         end
       end
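
FileUploader keeps the 100 MB multipart cutoff (renamed DEFAULT_MULTIPART_THRESHOLD) and now passes an explicit client, executor, and HTTP chunk size to MultipartFileUploader instead of forwarding its entire options hash. The threshold remains tunable per call through the public upload helpers, for example Object#upload_file:

    obj = Aws::S3::Object.new('amzn-s3-demo-bucket', 'backups/db.dump', client: Aws::S3::Client.new)

    # Files at or above the threshold go through MultipartFileUploader;
    # smaller files are sent with a single PutObject request.
    obj.upload_file('/tmp/db.dump', multipart_threshold: 200 * 1024 * 1024)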