aws-sdk-s3 1.196.0 → 1.197.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 52142f91568fe72f865055a683d9cf9f4a86c0d6446f4c7b2ab491a5b37f2cd9
- data.tar.gz: 06e34c3d319a400fe08d08b8aed7f49813f4cb99e6129f4eee6a2b8fdbc452a9
+ metadata.gz: 206194d0b6217c7fd01477cc0407b4d55080bdd269f14d33f3ae072fabfaa2ee
+ data.tar.gz: e269ddd5c94adc905a49ddf2f8fa80f1446684889d934aefd14d38136f81822b
  SHA512:
- metadata.gz: a73af036ed37f5ed060d993d681e747ec554176bb09d5f4bbb77201b60280c23f47a69e5757cc9fb8f33f3e0bf971bb0fd065376a93f897990d053a157385a97
- data.tar.gz: d0aac2333813cd782ec4f1d0699288fcc4ed720583883a17ae77f35ccd725b5f3471d931acc2c820928859665ee13c8c8efde428cb4e638840bdb4446bb2161c
+ metadata.gz: 2d27932019687706da5456a0c7dfa3e4c9bb81e3173105f6572e7a1afd4af96e553e94fedb19ca3c0052fcc4f3b297fcb14fe3719da037d9a9b333701fc35c24
+ data.tar.gz: bc7d64a072edfe9fc2154fec0834e6d27b19f2fbdca79b39571b1057cd73e5041abb113678d5c03414e664bd9422a8a714c37ec84015865de6566dc5b1a5909c
data/CHANGELOG.md CHANGED
@@ -1,6 +1,24 @@
  Unreleased Changes
  ------------------

+ 1.197.0 (2025-08-19)
+ ------------------
+
+ * Issue - When multipart stream uploader fails to complete multipart upload, it calls abort multipart upload.
+
+ * Issue - For `Aws::S3::Object` class, the following methods have been deprecated: `download_file`, `upload_file` and `upload_stream`. Use `Aws::S3::TransferManager` instead.
+
+ * Feature - Add `Aws::S3::TransferManager`, a S3 transfer utility that provides upload/download capabilities with automatic multipart handling, progress tracking, and handling of large files.
+
+ 1.196.1 (2025-08-05)
+ ------------------
+
+ * Issue - Add range validation to multipart download to ensure all parts are successfully processed.
+
+ * Issue - When multipart uploader fails to complete multipart upload, it calls abort multipart upload.
+
+ * Issue - Clean up partially downloaded file on multipart `download_file` failure while preserving existing file.
+
  1.196.0 (2025-08-04)
  ------------------

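The 1.197.0 entries above deprecate the `Aws::S3::Object` transfer helpers in favor of the new `Aws::S3::TransferManager`. As a rough, hypothetical migration sketch (not part of this diff; it assumes the new class is constructed with a `:client` and that its `upload_file`/`download_file` accept the same `bucket:`/`key:` keywords the deprecated helpers merge in):

```ruby
require 'aws-sdk-s3'

client = Aws::S3::Client.new

# Deprecated as of 1.197.0 (see the deprecation notice above):
obj = Aws::S3::Object.new('example-bucket', 'example-key', client: client)
obj.upload_file('/path/to/file')

# Assumed replacement using the new transfer utility:
tm = Aws::S3::TransferManager.new(client: client)
tm.upload_file('/path/to/file', bucket: 'example-bucket', key: 'example-key')
tm.download_file('/path/to/copy', bucket: 'example-bucket', key: 'example-key')
```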
data/VERSION CHANGED
@@ -1 +1 @@
- 1.196.0
+ 1.197.0
@@ -21735,7 +21735,7 @@ module Aws::S3
  tracer: tracer
  )
  context[:gem_name] = 'aws-sdk-s3'
- context[:gem_version] = '1.196.0'
+ context[:gem_version] = '1.197.0'
  Seahorse::Client::Request.new(handlers, context)
  end

@@ -398,14 +398,14 @@ module Aws
  end
  true
  end
+ deprecated(:upload_stream, use: 'Aws::S3::TransferManager#upload_stream', version: 'next major version')

  # Uploads a file from disk to the current object in S3.
  #
  # # small files are uploaded in a single API call
  # obj.upload_file('/path/to/file')
  #
- # Files larger than or equal to `:multipart_threshold` are uploaded
- # using the Amazon S3 multipart upload APIs.
+ # Files larger than or equal to `:multipart_threshold` are uploaded using the Amazon S3 multipart upload APIs.
  #
  # # large files are automatically split into parts
  # # and the parts are uploaded in parallel
@@ -421,47 +421,37 @@ module Aws
  # You can provide a callback to monitor progress of the upload:
  #
  # # bytes and totals are each an array with 1 entry per part
- # progress = Proc.new do |bytes, totals|
- # puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{totals[i]}"}.join(' ') + "Total: #{100.0 * bytes.sum / totals.sum }%" }
+ # progress = proc do |bytes, totals|
+ # puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{totals[i]}"}.join(' ') + "Total: #{100.0 * bytes.sum / totals.sum }%"
  # end
  # obj.upload_file('/path/to/file', progress_callback: progress)
  #
- # @param [String, Pathname, File, Tempfile] source A file on the local
- # file system that will be uploaded as this object. This can either be
- # a String or Pathname to the file, an open File object, or an open
- # Tempfile object. If you pass an open File or Tempfile object, then
- # you are responsible for closing it after the upload completes. When
- # using an open Tempfile, rewind it before uploading or else the object
+ # @param [String, Pathname, File, Tempfile] source A file on the local file system that will be uploaded as
+ # this object. This can either be a String or Pathname to the file, an open File object, or an open
+ # Tempfile object. If you pass an open File or Tempfile object, then you are responsible for closing it
+ # after the upload completes. When using an open Tempfile, rewind it before uploading or else the object
  # will be empty.
  #
  # @param [Hash] options
- # Additional options for {Client#put_object}
- # when file sizes below the multipart threshold. For files larger than
- # the multipart threshold, options for {Client#create_multipart_upload},
- # {Client#complete_multipart_upload},
- # and {Client#upload_part} can be provided.
+ # Additional options for {Client#put_object} when file sizes below the multipart threshold.
+ # For files larger than the multipart threshold, options for {Client#create_multipart_upload},
+ # {Client#complete_multipart_upload}, and {Client#upload_part} can be provided.
  #
- # @option options [Integer] :multipart_threshold (104857600) Files larger
- # than or equal to `:multipart_threshold` are uploaded using the S3
- # multipart APIs.
- # Default threshold is 100MB.
+ # @option options [Integer] :multipart_threshold (104857600) Files larger han or equal to
+ # `:multipart_threshold` are uploaded using the S3 multipart APIs. Default threshold is 100MB.
  #
- # @option options [Integer] :thread_count (10) The number of parallel
- # multipart uploads. This option is not used if the file is smaller than
- # `:multipart_threshold`.
+ # @option options [Integer] :thread_count (10) The number of parallel multipart uploads.
+ # This option is not used if the file is smaller than `:multipart_threshold`.
  #
  # @option options [Proc] :progress_callback
  # A Proc that will be called when each chunk of the upload is sent.
  # It will be invoked with [bytes_read], [total_sizes]
  #
- # @raise [MultipartUploadError] If an object is being uploaded in
- # parts, and the upload can not be completed, then the upload is
- # aborted and this error is raised. The raised error has a `#errors`
- # method that returns the failures that caused the upload to be
- # aborted.
+ # @raise [MultipartUploadError] If an object is being uploaded in parts, and the upload can not be completed,
+ # then the upload is aborted and this error is raised. The raised error has a `#errors` method that
+ # returns the failures that caused the upload to be aborted.
  #
- # @return [Boolean] Returns `true` when the object is uploaded
- # without any errors.
+ # @return [Boolean] Returns `true` when the object is uploaded without any errors.
  #
  # @see Client#put_object
  # @see Client#create_multipart_upload
@@ -469,26 +459,21 @@ module Aws
  # @see Client#upload_part
  def upload_file(source, options = {})
  uploading_options = options.dup
- uploader = FileUploader.new(
- multipart_threshold: uploading_options.delete(:multipart_threshold),
- client: client
- )
+ uploader = FileUploader.new(multipart_threshold: uploading_options.delete(:multipart_threshold), client: client)
  response = Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
- uploader.upload(
- source,
- uploading_options.merge(bucket: bucket_name, key: key)
- )
+ uploader.upload(source, uploading_options.merge(bucket: bucket_name, key: key))
  end
  yield response if block_given?
  true
  end
+ deprecated(:upload_file, use: 'Aws::S3::TransferManager#upload_file', version: 'next major version')

  # Downloads a file in S3 to a path on disk.
  #
  # # small files (< 5MB) are downloaded in a single API call
  # obj.download_file('/path/to/file')
  #
- # Files larger than 5MB are downloaded using multipart method
+ # Files larger than 5MB are downloaded using multipart method:
  #
  # # large files are split into parts
  # # and the parts are downloaded in parallel
@@ -498,67 +483,60 @@ module Aws
  #
  # # bytes and part_sizes are each an array with 1 entry per part
  # # part_sizes may not be known until the first bytes are retrieved
- # progress = Proc.new do |bytes, part_sizes, file_size|
- # puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{part_sizes[i]}"}.join(' ') + "Total: #{100.0 * bytes.sum / file_size}%" }
+ # progress = proc do |bytes, part_sizes, file_size|
+ # puts bytes.map.with_index { |b, i| "Part #{i + 1}: #{b} / #{part_sizes[i]}" }.join(' ') + "Total: #{100.0 * bytes.sum / file_size}%"
  # end
  # obj.download_file('/path/to/file', progress_callback: progress)
  #
  # @param [String] destination Where to download the file to.
  #
  # @param [Hash] options
- # Additional options for {Client#get_object} and #{Client#head_object}
- # may be provided.
+ # Additional options for {Client#get_object} and #{Client#head_object} may be provided.
  #
- # @option options [String] mode `auto`, `single_request`, `get_range`
- # `single_request` mode forces only 1 GET request is made in download,
- # `get_range` mode allows `chunk_size` parameter to configured in
- # customizing each range size in multipart_download,
- # By default, `auto` mode is enabled, which performs multipart_download
+ # @option options [String] :mode ("auto") `"auto"`, `"single_request"` or `"get_range"`
  #
- # @option options [Integer] chunk_size required in get_range mode.
+ # * `auto` mode is enabled by default, which performs `multipart_download`
+ # * `"single_request`" mode forces only 1 GET request is made in download
+ # * `"get_range"` mode requires `:chunk_size` parameter to configured in customizing each range size
  #
- # @option options [Integer] thread_count (10) Customize threads used in
- # the multipart download.
+ # @option options [Integer] :chunk_size required in `"get_range"` mode.
  #
- # @option options [String] version_id The object version id used to
- # retrieve the object. For more about object versioning, see:
- # https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html
+ # @option options [Integer] :thread_count (10) Customize threads used in the multipart download.
  #
- # @option options [String] checksum_mode (ENABLED) When `ENABLED` and
- # the object has a stored checksum, it will be used to validate the
- # download and will raise an `Aws::Errors::ChecksumError` if
- # checksum validation fails. You may provide a `on_checksum_validated`
- # callback if you need to verify that validation occurred and which
- # algorithm was used. To disable checksum validation, set
- # `checksum_mode` to "DISABLED".
+ # @option options [String] :version_id The object version id used to retrieve the object.
  #
- # @option options [Callable] on_checksum_validated Called each time a
- # request's checksum is validated with the checksum algorithm and the
- # response. For multipart downloads, this will be called for each
- # part that is downloaded and validated.
+ # @see https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html ObjectVersioning
+ #
+ # @option options [String] :checksum_mode ("ENABLED")
+ # When `"ENABLED"` and the object has a stored checksum, it will be used to validate the download and will
+ # raise an `Aws::Errors::ChecksumError` if checksum validation fails. You may provide a `on_checksum_validated`
+ # callback if you need to verify that validation occurred and which algorithm was used.
+ # To disable checksum validation, set `checksum_mode` to `"DISABLED"`.
+ #
+ # @option options [Callable] :on_checksum_validated
+ # Called each time a request's checksum is validated with the checksum algorithm and the
+ # response. For multipart downloads, this will be called for each part that is downloaded and validated.
  #
  # @option options [Proc] :progress_callback
- # A Proc that will be called when each chunk of the download is received.
- # It will be invoked with [bytes_read], [part_sizes], file_size.
- # When the object is downloaded as parts (rather than by ranges), the
- # part_sizes will not be known ahead of time and will be nil in the
- # callback until the first bytes in the part are received.
+ # A Proc that will be called when each chunk of the download is received. It will be invoked with
+ # `bytes_read`, `part_sizes`, `file_size`. When the object is downloaded as parts (rather than by ranges),
+ # the `part_sizes` will not be known ahead of time and will be `nil` in the callback until the first bytes
+ # in the part are received.
  #
- # @return [Boolean] Returns `true` when the file is downloaded without
- # any errors.
+ # @raise [MultipartDownloadError] Raised when an object validation fails outside of service errors.
+ #
+ # @return [Boolean] Returns `true` when the file is downloaded without any errors.
  #
  # @see Client#get_object
  # @see Client#head_object
  def download_file(destination, options = {})
  downloader = FileDownloader.new(client: client)
  Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
- downloader.download(
- destination,
- options.merge(bucket: bucket_name, key: key)
- )
+ downloader.download(destination, options.merge(bucket: bucket_name, key: key))
  end
  true
  end
+ deprecated(:download_file, use: 'Aws::S3::TransferManager#download_file', version: 'next major version')

  class Collection < Aws::Resources::Collection
  alias_method :delete, :batch_delete!
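For reference, a usage sketch built only from the `download_file` options documented in the hunk above (the bucket, key, and file paths are illustrative placeholders, not values from the diff):

```ruby
require 'aws-sdk-s3'

obj = Aws::S3::Object.new('example-bucket', 'example-key', client: Aws::S3::Client.new)

# Range-based multipart download; :chunk_size is required in "get_range" mode.
obj.download_file(
  '/tmp/example-key.bin',
  mode: 'get_range',
  chunk_size: 5 * 1024 * 1024,
  thread_count: 10,
  checksum_mode: 'ENABLED',
  on_checksum_validated: ->(algorithm, _resp) { puts "checksum validated via #{algorithm}" }
)
```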
@@ -10,6 +10,7 @@ module Aws
  autoload :FileUploader, 'aws-sdk-s3/file_uploader'
  autoload :FileDownloader, 'aws-sdk-s3/file_downloader'
  autoload :LegacySigner, 'aws-sdk-s3/legacy_signer'
+ autoload :MultipartDownloadError, 'aws-sdk-s3/multipart_download_error'
  autoload :MultipartFileUploader, 'aws-sdk-s3/multipart_file_uploader'
  autoload :MultipartStreamUploader, 'aws-sdk-s3/multipart_stream_uploader'
  autoload :MultipartUploadError, 'aws-sdk-s3/multipart_upload_error'
@@ -17,13 +18,13 @@ module Aws
  autoload :ObjectMultipartCopier, 'aws-sdk-s3/object_multipart_copier'
  autoload :PresignedPost, 'aws-sdk-s3/presigned_post'
  autoload :Presigner, 'aws-sdk-s3/presigner'
+ autoload :TransferManager, 'aws-sdk-s3/transfer_manager'

  # s3 express session auth
  autoload :ExpressCredentials, 'aws-sdk-s3/express_credentials'
  autoload :ExpressCredentialsProvider, 'aws-sdk-s3/express_credentials_provider'

  # s3 access grants auth
-
  autoload :AccessGrantsCredentials, 'aws-sdk-s3/access_grants_credentials'
  autoload :AccessGrantsCredentialsProvider, 'aws-sdk-s3/access_grants_credentials_provider'
  end
@@ -1,9 +1,8 @@
  # frozen_string_literal: true

  require 'pathname'
- require 'thread'
+ require 'securerandom'
  require 'set'
- require 'tmpdir'

  module Aws
  module S3
@@ -12,7 +11,6 @@ module Aws

  MIN_CHUNK_SIZE = 5 * 1024 * 1024
  MAX_PARTS = 10_000
- THREAD_COUNT = 10

  def initialize(options = {})
  @client = options[:client] || Client.new
@@ -23,17 +21,12 @@ module Aws

  def download(destination, options = {})
  @path = destination
- @mode = options[:mode] || 'auto'
- @thread_count = options[:thread_count] || THREAD_COUNT
- @chunk_size = options[:chunk_size]
- @params = {
- bucket: options[:bucket],
- key: options[:key]
- }
- @params[:version_id] = options[:version_id] if options[:version_id]
- @on_checksum_validated = options[:on_checksum_validated]
- @progress_callback = options[:progress_callback]
-
+ @mode = options.delete(:mode) || 'auto'
+ @thread_count = options.delete(:thread_count) || 10
+ @chunk_size = options.delete(:chunk_size)
+ @on_checksum_validated = options.delete(:on_checksum_validated)
+ @progress_callback = options.delete(:progress_callback)
+ @params = options
  validate!

  Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
@@ -41,32 +34,31 @@ module Aws
  when 'auto' then multipart_download
  when 'single_request' then single_request
  when 'get_range'
- if @chunk_size
- resp = @client.head_object(@params)
- multithreaded_get_by_ranges(resp.content_length, resp.etag)
- else
- msg = 'In :get_range mode, :chunk_size must be provided'
- raise ArgumentError, msg
- end
+ raise ArgumentError, 'In get_range mode, :chunk_size must be provided' unless @chunk_size
+
+ resp = @client.head_object(@params)
+ multithreaded_get_by_ranges(resp.content_length, resp.etag)
  else
- msg = "Invalid mode #{@mode} provided, "\
- 'mode should be :single_request, :get_range or :auto'
- raise ArgumentError, msg
+ raise ArgumentError, "Invalid mode #{@mode} provided, :mode should be single_request, get_range or auto"
  end
  end
+ File.rename(@temp_path, @path) if @temp_path
+ ensure
+ File.delete(@temp_path) if @temp_path && File.exist?(@temp_path)
  end

  private

  def validate!
- if @on_checksum_validated && !@on_checksum_validated.respond_to?(:call)
- raise ArgumentError, 'on_checksum_validated must be callable'
- end
+ return unless @on_checksum_validated && !@on_checksum_validated.respond_to?(:call)
+
+ raise ArgumentError, ':on_checksum_validated must be callable'
  end

  def multipart_download
  resp = @client.head_object(@params.merge(part_number: 1))
  count = resp.parts_count
+
  if count.nil? || count <= 1
  if resp.content_length <= MIN_CHUNK_SIZE
  single_request
@@ -74,8 +66,8 @@ module Aws
  multithreaded_get_by_ranges(resp.content_length, resp.etag)
  end
  else
- # partNumber is an option
- resp = @client.head_object(@params)
+ # covers cases when given object is not uploaded via UploadPart API
+ resp = @client.head_object(@params) # partNumber is an option
  if resp.content_length <= MIN_CHUNK_SIZE
  single_request
  else
@@ -86,7 +78,7 @@ module Aws

  def compute_mode(file_size, count, etag)
  chunk_size = compute_chunk(file_size)
- part_size = (file_size.to_f / count.to_f).ceil
+ part_size = (file_size.to_f / count).ceil
  if chunk_size < part_size
  multithreaded_get_by_ranges(file_size, etag)
  else
@@ -94,32 +86,10 @@ module Aws
  end
  end

- def construct_chunks(file_size)
- offset = 0
- default_chunk_size = compute_chunk(file_size)
- chunks = []
- while offset < file_size
- progress = offset + default_chunk_size
- progress = file_size if progress > file_size
- chunks << "bytes=#{offset}-#{progress - 1}"
- offset = progress
- end
- chunks
- end
-
  def compute_chunk(file_size)
- if @chunk_size && @chunk_size > file_size
- raise ArgumentError, ":chunk_size shouldn't exceed total file size."
- else
- @chunk_size || [
- (file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE
- ].max.to_i
- end
- end
+ raise ArgumentError, ":chunk_size shouldn't exceed total file size." if @chunk_size && @chunk_size > file_size

- def batches(chunks, mode)
- chunks = (1..chunks) if mode.eql? 'part_number'
- chunks.each_slice(@thread_count).to_a
+ @chunk_size || [(file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE].max.to_i
  end

  def multithreaded_get_by_ranges(file_size, etag)
@@ -130,12 +100,8 @@ module Aws
  while offset < file_size
  progress = offset + default_chunk_size
  progress = file_size if progress > file_size
- range = "bytes=#{offset}-#{progress - 1}"
- chunks << Part.new(
- part_number: part_number,
- size: (progress-offset),
- params: @params.merge(range: range, if_match: etag)
- )
+ params = @params.merge(range: "bytes=#{offset}-#{progress - 1}", if_match: etag)
+ chunks << Part.new(part_number: part_number, size: (progress - offset), params: params)
  part_number += 1
  offset = progress
  end
@@ -152,10 +118,11 @@ module Aws
  def download_in_threads(pending, total_size)
  threads = []
  progress = MultipartProgress.new(pending, total_size, @progress_callback) if @progress_callback
+ @temp_path = "#{@path}.s3tmp.#{SecureRandom.alphanumeric(8)}"
  @thread_count.times do
  thread = Thread.new do
  begin
- while part = pending.shift
+ while (part = pending.shift)
  if progress
  part.params[:on_chunk_received] =
  proc do |_chunk, bytes, total|
@@ -163,16 +130,17 @@ module Aws
  end
  end
  resp = @client.get_object(part.params)
- write(resp)
+ range = extract_range(resp.content_range)
+ validate_range(range, part.params[:range]) if part.params[:range]
+ write(resp.body, range)
  if @on_checksum_validated && resp.checksum_validated
  @on_checksum_validated.call(resp.checksum_validated, resp)
  end
  end
  nil
- rescue => error
- # keep other threads from downloading other parts
- pending.clear!
- raise error
+ rescue StandardError => e
+ pending.clear! # keep other threads from downloading other parts
+ raise e
  end
  end
  threads << thread
@@ -180,21 +148,27 @@ module Aws
  threads.map(&:value).compact
  end

- def write(resp)
- range, _ = resp.content_range.split(' ').last.split('/')
- head, _ = range.split('-').map {|s| s.to_i}
- File.write(@path, resp.body.read, head)
+ def extract_range(value)
+ value.match(%r{bytes (?<range>\d+-\d+)/\d+})[:range]
+ end
+
+ def validate_range(actual, expected)
+ return if actual == expected.match(/bytes=(?<range>\d+-\d+)/)[:range]
+
+ raise MultipartDownloadError, "multipart download failed: expected range of #{expected} but got #{actual}"
+ end
+
+ def write(body, range)
+ File.write(@temp_path, body.read, range.split('-').first.to_i)
  end

  def single_request
  params = @params.merge(response_target: @path)
  params[:on_chunk_received] = single_part_progress if @progress_callback
  resp = @client.get_object(params)
-
  return resp unless @on_checksum_validated
  @on_checksum_validated.call(resp.checksum_validated, resp) if resp.checksum_validated
-
  resp
  resp
  end
 
@@ -204,6 +178,7 @@ module Aws
  end
  end

+ # @api private
  class Part < Struct.new(:part_number, :size, :params)
  include Aws::Structure
  end
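The range validation added above works by comparing the `Content-Range` header of each part response against the `Range` that was requested; a mismatch raises the new `MultipartDownloadError`. A small standalone sketch of the same check (helper bodies copied from the diff; the sample byte ranges are made up):

```ruby
require 'aws-sdk-s3'

def extract_range(content_range)
  # "bytes 0-5242879/15728640" => "0-5242879"
  content_range.match(%r{bytes (?<range>\d+-\d+)/\d+})[:range]
end

def validate_range(actual, expected)
  return if actual == expected.match(/bytes=(?<range>\d+-\d+)/)[:range]

  raise Aws::S3::MultipartDownloadError,
        "multipart download failed: expected range of #{expected} but got #{actual}"
end

actual = extract_range('bytes 0-5242879/15728640') # from the GetObject Content-Range header
validate_range(actual, 'bytes=0-5242879')           # matches the requested Range, no error
# validate_range(actual, 'bytes=5242880-10485759')  # would raise Aws::S3::MultipartDownloadError
```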
@@ -7,7 +7,7 @@ module Aws
  # @api private
  class FileUploader

- ONE_HUNDRED_MEGABYTES = 100 * 1024 * 1024
+ DEFAULT_MULTIPART_THRESHOLD = 100 * 1024 * 1024

  # @param [Hash] options
  # @option options [Client] :client
@@ -15,15 +15,13 @@ module Aws
  def initialize(options = {})
  @options = options
  @client = options[:client] || Client.new
- @multipart_threshold = options[:multipart_threshold] ||
- ONE_HUNDRED_MEGABYTES
+ @multipart_threshold = options[:multipart_threshold] || DEFAULT_MULTIPART_THRESHOLD
  end

  # @return [Client]
  attr_reader :client

- # @return [Integer] Files larger than or equal to this in bytes are uploaded
- # using a {MultipartFileUploader}.
+ # @return [Integer] Files larger than or equal to this in bytes are uploaded using a {MultipartFileUploader}.
  attr_reader :multipart_threshold

  # @param [String, Pathname, File, Tempfile] source The file to upload.
@@ -0,0 +1,8 @@
+ # frozen_string_literal: true
+
+ module Aws
+ module S3
+ # Raised when multipart download fails to complete.
+ class MultipartDownloadError < StandardError; end
+ end
+ end
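As the `download_file` documentation above notes, validation failures outside of service errors now surface as this exception. A hypothetical handling sketch (bucket, key, and path are placeholders):

```ruby
require 'aws-sdk-s3'

obj = Aws::S3::Object.new('example-bucket', 'example-key', client: Aws::S3::Client.new)

begin
  obj.download_file('/tmp/example-key.bin')
rescue Aws::S3::MultipartDownloadError => e
  # Per the 1.196.1/1.197.0 changelog entries, the partially downloaded temp file
  # is cleaned up by the downloader; decide here whether to retry or give up.
  warn "multipart download failed: #{e.message}"
end
```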