aws-sdk-s3 1.199.1 → 1.202.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -907,6 +907,8 @@ module Aws::S3
907
907
  CopyObjectRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read"))
908
908
  CopyObjectRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp"))
909
909
  CopyObjectRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp"))
910
+ CopyObjectRequest.add_member(:if_match, Shapes::ShapeRef.new(shape: IfMatch, location: "header", location_name: "If-Match"))
911
+ CopyObjectRequest.add_member(:if_none_match, Shapes::ShapeRef.new(shape: IfNoneMatch, location: "header", location_name: "If-None-Match"))
910
912
  CopyObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam" => {"name" => "Key"}}))
911
913
  CopyObjectRequest.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-"))
912
914
  CopyObjectRequest.add_member(:metadata_directive, Shapes::ShapeRef.new(shape: MetadataDirective, location: "header", location_name: "x-amz-metadata-directive"))
@@ -358,8 +358,8 @@ module Aws
358
358
  # {Client#complete_multipart_upload},
359
359
  # and {Client#upload_part} can be provided.
360
360
  #
361
- # @option options [Integer] :thread_count (10) The number of parallel
362
- # multipart uploads
361
+ # @option options [Integer] :thread_count (10) The number of parallel multipart uploads.
362
+ # An additional thread is used internally for task coordination.
363
363
  #
364
364
  # @option options [Boolean] :tempfile (false) Normally read data is stored
365
365
  # in memory when building the parts in order to complete the underlying
@@ -383,19 +383,18 @@ module Aws
383
383
  # @see Client#complete_multipart_upload
384
384
  # @see Client#upload_part
385
385
  def upload_stream(options = {}, &block)
386
- uploading_options = options.dup
386
+ upload_opts = options.merge(bucket: bucket_name, key: key)
387
+ executor = DefaultExecutor.new(max_threads: upload_opts.delete(:thread_count))
387
388
  uploader = MultipartStreamUploader.new(
388
389
  client: client,
389
- thread_count: uploading_options.delete(:thread_count),
390
- tempfile: uploading_options.delete(:tempfile),
391
- part_size: uploading_options.delete(:part_size)
390
+ executor: executor,
391
+ tempfile: upload_opts.delete(:tempfile),
392
+ part_size: upload_opts.delete(:part_size)
392
393
  )
393
394
  Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
394
- uploader.upload(
395
- uploading_options.merge(bucket: bucket_name, key: key),
396
- &block
397
- )
395
+ uploader.upload(upload_opts, &block)
398
396
  end
397
+ executor.shutdown
399
398
  true
400
399
  end
401
400
  deprecated(:upload_stream, use: 'Aws::S3::TransferManager#upload_stream', version: 'next major version')
@@ -458,12 +457,18 @@ module Aws
458
457
  # @see Client#complete_multipart_upload
459
458
  # @see Client#upload_part
460
459
  def upload_file(source, options = {})
461
- uploading_options = options.dup
462
- uploader = FileUploader.new(multipart_threshold: uploading_options.delete(:multipart_threshold), client: client)
460
+ upload_opts = options.merge(bucket: bucket_name, key: key)
461
+ executor = DefaultExecutor.new(max_threads: upload_opts.delete(:thread_count))
462
+ uploader = FileUploader.new(
463
+ client: client,
464
+ executor: executor,
465
+ multipart_threshold: upload_opts.delete(:multipart_threshold)
466
+ )
463
467
  response = Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
464
- uploader.upload(source, uploading_options.merge(bucket: bucket_name, key: key))
468
+ uploader.upload(source, upload_opts)
465
469
  end
466
470
  yield response if block_given?
471
+ executor.shutdown
467
472
  true
468
473
  end
469
474
  deprecated(:upload_file, use: 'Aws::S3::TransferManager#upload_file', version: 'next major version')
@@ -512,10 +517,6 @@ module Aws
512
517
  #
513
518
  # @option options [Integer] :thread_count (10) Customize threads used in the multipart download.
514
519
  #
515
- # @option options [String] :version_id The object version id used to retrieve the object.
516
- #
517
- # @see https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html ObjectVersioning
518
- #
519
520
  # @option options [String] :checksum_mode ("ENABLED")
520
521
  # When `"ENABLED"` and the object has a stored checksum, it will be used to validate the download and will
521
522
  # raise an `Aws::Errors::ChecksumError` if checksum validation fails. You may provide a `on_checksum_validated`
@@ -539,10 +540,13 @@ module Aws
539
540
  # @see Client#get_object
540
541
  # @see Client#head_object
541
542
  def download_file(destination, options = {})
542
- downloader = FileDownloader.new(client: client)
543
+ download_opts = options.merge(bucket: bucket_name, key: key)
544
+ executor = DefaultExecutor.new(max_threads: download_opts.delete(:thread_count))
545
+ downloader = FileDownloader.new(client: client, executor: executor)
543
546
  Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
544
- downloader.download(destination, options.merge(bucket: bucket_name, key: key))
547
+ downloader.download(destination, download_opts)
545
548
  end
549
+ executor.shutdown
546
550
  true
547
551
  end
548
552
  deprecated(:download_file, use: 'Aws::S3::TransferManager#download_file', version: 'next major version')
@@ -7,6 +7,7 @@ module Aws
7
7
  autoload :Encryption, 'aws-sdk-s3/encryption'
8
8
  autoload :EncryptionV2, 'aws-sdk-s3/encryption_v2'
9
9
  autoload :FilePart, 'aws-sdk-s3/file_part'
10
+ autoload :DefaultExecutor, 'aws-sdk-s3/default_executor'
10
11
  autoload :FileUploader, 'aws-sdk-s3/file_uploader'
11
12
  autoload :FileDownloader, 'aws-sdk-s3/file_downloader'
12
13
  autoload :LegacySigner, 'aws-sdk-s3/legacy_signer'
@@ -0,0 +1,103 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Aws
4
+ module S3
5
+ # @api private
6
+ class DefaultExecutor
7
+ DEFAULT_MAX_THREADS = 10
8
+ RUNNING = :running
9
+ SHUTTING_DOWN = :shutting_down
10
+ SHUTDOWN = :shutdown
11
+
12
+ def initialize(options = {})
13
+ @max_threads = options[:max_threads] || DEFAULT_MAX_THREADS
14
+ @state = RUNNING
15
+ @queue = Queue.new
16
+ @pool = []
17
+ @mutex = Mutex.new
18
+ end
19
+
20
+ # Submits a task for execution.
21
+ # @param [Object] args Variable number of arguments to pass to the block
22
+ # @param [Proc] block The block to be executed
23
+ # @return [Boolean] Returns true if the task was submitted successfully
24
+ def post(*args, &block)
25
+ @mutex.synchronize do
26
+ raise 'Executor has been shutdown and is no longer accepting tasks' unless @state == RUNNING
27
+
28
+ @queue << [args, block]
29
+ ensure_worker_available
30
+ end
31
+ true
32
+ end
33
+
34
+ # Immediately terminates all worker threads and clears pending tasks.
35
+ # This is a forceful shutdown that doesn't wait for running tasks to complete.
36
+ #
37
+ # @return [Boolean] true when termination is complete
38
+ def kill
39
+ @mutex.synchronize do
40
+ @state = SHUTDOWN
41
+ @pool.each(&:kill)
42
+ @pool.clear
43
+ @queue.clear
44
+ end
45
+ true
46
+ end
47
+
48
+ # Gracefully shuts down the executor, optionally with a timeout.
49
+ # Stops accepting new tasks and waits for running tasks to complete.
50
+ #
51
+ # @param timeout [Numeric, nil] Maximum time in seconds to wait for shutdown.
52
+ # If nil, waits indefinitely. If timeout expires, remaining threads are killed.
53
+ # @return [Boolean] true when shutdown is complete
54
+ def shutdown(timeout = nil)
55
+ @mutex.synchronize do
56
+ return true if @state == SHUTDOWN
57
+
58
+ @state = SHUTTING_DOWN
59
+ @pool.size.times { @queue << :shutdown }
60
+ end
61
+
62
+ if timeout
63
+ deadline = Time.now + timeout
64
+ @pool.each do |thread|
65
+ remaining = deadline - Time.now
66
+ break if remaining <= 0
67
+
68
+ thread.join([remaining, 0].max)
69
+ end
70
+ @pool.select(&:alive?).each(&:kill)
71
+ else
72
+ @pool.each(&:join)
73
+ end
74
+
75
+ @mutex.synchronize do
76
+ @pool.clear
77
+ @state = SHUTDOWN
78
+ end
79
+ true
80
+ end
81
+
82
+ private
83
+
84
+ def ensure_worker_available
85
+ return unless @state == RUNNING
86
+
87
+ @pool.select!(&:alive?)
88
+ @pool << spawn_worker if @pool.size < @max_threads
89
+ end
90
+
91
+ def spawn_worker
92
+ Thread.new do
93
+ while (job = @queue.shift)
94
+ break if job == :shutdown
95
+
96
+ args, block = job
97
+ block.call(*args)
98
+ end
99
+ end
100
+ end
101
+ end
102
+ end
103
+ end
@@ -13,87 +13,87 @@ module Aws::S3
13
13
  # @!attribute bucket
14
14
  # The S3 bucket used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 bucket.
15
15
  #
16
- # @return [String]
16
+ # @return [string]
17
17
  #
18
18
  # @!attribute region
19
19
  # The AWS region used to dispatch the request.
20
20
  #
21
- # @return [String]
21
+ # @return [string]
22
22
  #
23
23
  # @!attribute use_fips
24
24
  # When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.
25
25
  #
26
- # @return [Boolean]
26
+ # @return [boolean]
27
27
  #
28
28
  # @!attribute use_dual_stack
29
29
  # When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.
30
30
  #
31
- # @return [Boolean]
31
+ # @return [boolean]
32
32
  #
33
33
  # @!attribute endpoint
34
34
  # Override the endpoint used to send this request
35
35
  #
36
- # @return [String]
36
+ # @return [string]
37
37
  #
38
38
  # @!attribute force_path_style
39
39
  # When true, force a path-style endpoint to be used where the bucket name is part of the path.
40
40
  #
41
- # @return [Boolean]
41
+ # @return [boolean]
42
42
  #
43
43
  # @!attribute accelerate
44
44
  # When true, use S3 Accelerate. NOTE: Not all regions support S3 accelerate.
45
45
  #
46
- # @return [Boolean]
46
+ # @return [boolean]
47
47
  #
48
48
  # @!attribute use_global_endpoint
49
49
  # Whether the global endpoint should be used, rather than the regional endpoint for us-east-1.
50
50
  #
51
- # @return [Boolean]
51
+ # @return [boolean]
52
52
  #
53
53
  # @!attribute use_object_lambda_endpoint
54
54
  # Internal parameter to use object lambda endpoint for an operation (eg: WriteGetObjectResponse)
55
55
  #
56
- # @return [Boolean]
56
+ # @return [boolean]
57
57
  #
58
58
  # @!attribute key
59
59
  # The S3 Key used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 Key.
60
60
  #
61
- # @return [String]
61
+ # @return [string]
62
62
  #
63
63
  # @!attribute prefix
64
64
  # The S3 Prefix used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 Prefix.
65
65
  #
66
- # @return [String]
66
+ # @return [string]
67
67
  #
68
68
  # @!attribute copy_source
69
69
  # The Copy Source used for Copy Object request. This is an optional parameter that will be set automatically for operations that are scoped to Copy Source.
70
70
  #
71
- # @return [String]
71
+ # @return [string]
72
72
  #
73
73
  # @!attribute disable_access_points
74
74
  # Internal parameter to disable Access Point Buckets
75
75
  #
76
- # @return [Boolean]
76
+ # @return [boolean]
77
77
  #
78
78
  # @!attribute disable_multi_region_access_points
79
79
  # Whether multi-region access points (MRAP) should be disabled.
80
80
  #
81
- # @return [Boolean]
81
+ # @return [boolean]
82
82
  #
83
83
  # @!attribute use_arn_region
84
84
  # When an Access Point ARN is provided and this flag is enabled, the SDK MUST use the ARN&#39;s region when constructing the endpoint instead of the client&#39;s configured region.
85
85
  #
86
- # @return [Boolean]
86
+ # @return [boolean]
87
87
  #
88
88
  # @!attribute use_s3_express_control_endpoint
89
89
  # Internal parameter to indicate whether S3Express operation should use control plane, (ex. CreateBucket)
90
90
  #
91
- # @return [Boolean]
91
+ # @return [boolean]
92
92
  #
93
93
  # @!attribute disable_s3_express_session_auth
94
94
  # Parameter to indicate whether S3Express session auth should be disabled
95
95
  #
96
- # @return [Boolean]
96
+ # @return [boolean]
97
97
  #
98
98
  EndpointParameters = Struct.new(
99
99
  :bucket,
@@ -8,184 +8,245 @@ module Aws
8
8
  module S3
9
9
  # @api private
10
10
  class FileDownloader
11
-
12
11
  MIN_CHUNK_SIZE = 5 * 1024 * 1024
13
12
  MAX_PARTS = 10_000
13
+ HEAD_OPTIONS = Set.new(Client.api.operation(:head_object).input.shape.member_names)
14
+ GET_OPTIONS = Set.new(Client.api.operation(:get_object).input.shape.member_names)
14
15
 
15
16
  def initialize(options = {})
16
17
  @client = options[:client] || Client.new
18
+ @executor = options[:executor]
17
19
  end
18
20
 
19
21
  # @return [Client]
20
22
  attr_reader :client
21
23
 
22
24
  def download(destination, options = {})
23
- valid_types = [String, Pathname, File, Tempfile]
24
- unless valid_types.include?(destination.class)
25
- raise ArgumentError, "Invalid destination, expected #{valid_types.join(', ')} but got: #{destination.class}"
26
- end
27
-
28
- @destination = destination
29
- @mode = options.delete(:mode) || 'auto'
30
- @thread_count = options.delete(:thread_count) || 10
31
- @chunk_size = options.delete(:chunk_size)
32
- @on_checksum_validated = options.delete(:on_checksum_validated)
33
- @progress_callback = options.delete(:progress_callback)
34
- @params = options
35
- validate!
25
+ validate_destination!(destination)
26
+ opts = build_download_opts(destination, options)
27
+ validate_opts!(opts)
36
28
 
37
29
  Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
38
- case @mode
39
- when 'auto' then multipart_download
40
- when 'single_request' then single_request
41
- when 'get_range'
42
- raise ArgumentError, 'In get_range mode, :chunk_size must be provided' unless @chunk_size
43
-
44
- resp = @client.head_object(@params)
45
- multithreaded_get_by_ranges(resp.content_length, resp.etag)
46
- else
47
- raise ArgumentError, "Invalid mode #{@mode} provided, :mode should be single_request, get_range or auto"
30
+ case opts[:mode]
31
+ when 'auto' then multipart_download(opts)
32
+ when 'single_request' then single_request(opts)
33
+ when 'get_range' then range_request(opts)
48
34
  end
49
35
  end
50
- File.rename(@temp_path, @destination) if @temp_path
36
+ File.rename(opts[:temp_path], destination) if opts[:temp_path]
51
37
  ensure
52
- File.delete(@temp_path) if @temp_path && File.exist?(@temp_path)
38
+ cleanup_temp_file(opts)
53
39
  end
54
40
 
55
41
  private
56
42
 
57
- def validate!
58
- return unless @on_checksum_validated && !@on_checksum_validated.respond_to?(:call)
43
+ def build_download_opts(destination, opts)
44
+ {
45
+ destination: destination,
46
+ mode: opts.delete(:mode) || 'auto',
47
+ chunk_size: opts.delete(:chunk_size),
48
+ on_checksum_validated: opts.delete(:on_checksum_validated),
49
+ progress_callback: opts.delete(:progress_callback),
50
+ params: opts,
51
+ temp_path: nil
52
+ }
53
+ end
54
+
55
+ def cleanup_temp_file(opts)
56
+ return unless opts
57
+
58
+ temp_file = opts[:temp_path]
59
+ File.delete(temp_file) if temp_file && File.exist?(temp_file)
60
+ end
61
+
62
+ def download_with_executor(part_list, total_size, opts)
63
+ download_attempts = 0
64
+ completion_queue = Queue.new
65
+ abort_download = false
66
+ error = nil
67
+ progress = MultipartProgress.new(part_list, total_size, opts[:progress_callback])
68
+
69
+ while (part = part_list.shift)
70
+ break if abort_download
71
+
72
+ download_attempts += 1
73
+ @executor.post(part) do |p|
74
+ update_progress(progress, p)
75
+ resp = @client.get_object(p.params)
76
+ range = extract_range(resp.content_range)
77
+ validate_range(range, p.params[:range]) if p.params[:range]
78
+ write(resp.body, range, opts)
79
+
80
+ execute_checksum_callback(resp, opts)
81
+ rescue StandardError => e
82
+ abort_download = true
83
+ error = e
84
+ ensure
85
+ completion_queue << :done
86
+ end
87
+ end
88
+
89
+ download_attempts.times { completion_queue.pop }
90
+ raise error unless error.nil?
91
+ end
92
+
93
+ def get_opts(opts)
94
+ GET_OPTIONS.each_with_object({}) { |k, h| h[k] = opts[k] if opts.key?(k) }
95
+ end
96
+
97
+ def head_opts(opts)
98
+ HEAD_OPTIONS.each_with_object({}) { |k, h| h[k] = opts[k] if opts.key?(k) }
99
+ end
100
+
101
+ def compute_chunk(chunk_size, file_size)
102
+ raise ArgumentError, ":chunk_size shouldn't exceed total file size." if chunk_size && chunk_size > file_size
59
103
 
60
- raise ArgumentError, ':on_checksum_validated must be callable'
104
+ chunk_size || [(file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE].max.to_i
61
105
  end
62
106
 
63
- def multipart_download
64
- resp = @client.head_object(@params.merge(part_number: 1))
107
+ def compute_mode(file_size, total_parts, etag, opts)
108
+ chunk_size = compute_chunk(opts[:chunk_size], file_size)
109
+ part_size = (file_size.to_f / total_parts).ceil
110
+
111
+ resolve_temp_path(opts)
112
+ if chunk_size < part_size
113
+ multithreaded_get_by_ranges(file_size, etag, opts)
114
+ else
115
+ multithreaded_get_by_parts(total_parts, file_size, etag, opts)
116
+ end
117
+ end
118
+
119
+ def extract_range(value)
120
+ value.match(%r{bytes (?<range>\d+-\d+)/\d+})[:range]
121
+ end
122
+
123
+ def multipart_download(opts)
124
+ resp = @client.head_object(head_opts(opts[:params].merge(part_number: 1)))
65
125
  count = resp.parts_count
66
126
 
67
127
  if count.nil? || count <= 1
68
128
  if resp.content_length <= MIN_CHUNK_SIZE
69
- single_request
129
+ single_request(opts)
70
130
  else
71
- multithreaded_get_by_ranges(resp.content_length, resp.etag)
131
+ resolve_temp_path(opts)
132
+ multithreaded_get_by_ranges(resp.content_length, resp.etag, opts)
72
133
  end
73
134
  else
74
135
  # covers cases when given object is not uploaded via UploadPart API
75
- resp = @client.head_object(@params) # partNumber is an option
136
+ resp = @client.head_object(head_opts(opts[:params])) # partNumber is an option
76
137
  if resp.content_length <= MIN_CHUNK_SIZE
77
- single_request
138
+ single_request(opts)
78
139
  else
79
- compute_mode(resp.content_length, count, resp.etag)
140
+ compute_mode(resp.content_length, count, resp.etag, opts)
80
141
  end
81
142
  end
82
143
  end
83
144
 
84
- def compute_mode(file_size, count, etag)
85
- chunk_size = compute_chunk(file_size)
86
- part_size = (file_size.to_f / count).ceil
87
- if chunk_size < part_size
88
- multithreaded_get_by_ranges(file_size, etag)
89
- else
90
- multithreaded_get_by_parts(count, file_size, etag)
145
+ def multithreaded_get_by_parts(total_parts, file_size, etag, opts)
146
+ parts = (1..total_parts).map do |part|
147
+ params = get_opts(opts[:params].merge(part_number: part, if_match: etag))
148
+ Part.new(part_number: part, params: params)
91
149
  end
150
+ download_with_executor(PartList.new(parts), file_size, opts)
92
151
  end
93
152
 
94
- def compute_chunk(file_size)
95
- raise ArgumentError, ":chunk_size shouldn't exceed total file size." if @chunk_size && @chunk_size > file_size
96
-
97
- @chunk_size || [(file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE].max.to_i
98
- end
99
-
100
- def multithreaded_get_by_ranges(file_size, etag)
153
+ def multithreaded_get_by_ranges(file_size, etag, opts)
101
154
  offset = 0
102
- default_chunk_size = compute_chunk(file_size)
155
+ default_chunk_size = compute_chunk(opts[:chunk_size], file_size)
103
156
  chunks = []
104
157
  part_number = 1 # parts start at 1
105
158
  while offset < file_size
106
159
  progress = offset + default_chunk_size
107
160
  progress = file_size if progress > file_size
108
- params = @params.merge(range: "bytes=#{offset}-#{progress - 1}", if_match: etag)
161
+ params = get_opts(opts[:params].merge(range: "bytes=#{offset}-#{progress - 1}", if_match: etag))
109
162
  chunks << Part.new(part_number: part_number, size: (progress - offset), params: params)
110
163
  part_number += 1
111
164
  offset = progress
112
165
  end
113
- download_in_threads(PartList.new(chunks), file_size)
114
- end
115
-
116
- def multithreaded_get_by_parts(n_parts, total_size, etag)
117
- parts = (1..n_parts).map do |part|
118
- Part.new(part_number: part, params: @params.merge(part_number: part, if_match: etag))
119
- end
120
- download_in_threads(PartList.new(parts), total_size)
121
- end
122
-
123
- def download_in_threads(pending, total_size)
124
- threads = []
125
- progress = MultipartProgress.new(pending, total_size, @progress_callback) if @progress_callback
126
- unless [File, Tempfile].include?(@destination.class)
127
- @temp_path = "#{@destination}.s3tmp.#{SecureRandom.alphanumeric(8)}"
128
- end
129
- @thread_count.times do
130
- thread = Thread.new do
131
- begin
132
- while (part = pending.shift)
133
- if progress
134
- part.params[:on_chunk_received] =
135
- proc do |_chunk, bytes, total|
136
- progress.call(part.part_number, bytes, total)
137
- end
138
- end
139
- resp = @client.get_object(part.params)
140
- range = extract_range(resp.content_range)
141
- validate_range(range, part.params[:range]) if part.params[:range]
142
- write(resp.body, range)
143
- if @on_checksum_validated && resp.checksum_validated
144
- @on_checksum_validated.call(resp.checksum_validated, resp)
145
- end
146
- end
147
- nil
148
- rescue StandardError => e
149
- pending.clear! # keep other threads from downloading other parts
150
- raise e
151
- end
152
- end
153
- threads << thread
154
- end
155
- threads.map(&:value).compact
166
+ download_with_executor(PartList.new(chunks), file_size, opts)
156
167
  end
157
168
 
158
- def extract_range(value)
159
- value.match(%r{bytes (?<range>\d+-\d+)/\d+})[:range]
169
+ def range_request(opts)
170
+ resp = @client.head_object(head_opts(opts[:params]))
171
+ resolve_temp_path(opts)
172
+ multithreaded_get_by_ranges(resp.content_length, resp.etag, opts)
160
173
  end
161
174
 
162
- def validate_range(actual, expected)
163
- return if actual == expected.match(/bytes=(?<range>\d+-\d+)/)[:range]
164
-
165
- raise MultipartDownloadError, "multipart download failed: expected range of #{expected} but got #{actual}"
166
- end
175
+ def resolve_temp_path(opts)
176
+ return if [File, Tempfile].include?(opts[:destination].class)
167
177
 
168
- def write(body, range)
169
- path = @temp_path || @destination
170
- File.write(path, body.read, range.split('-').first.to_i)
178
+ opts[:temp_path] ||= "#{opts[:destination]}.s3tmp.#{SecureRandom.alphanumeric(8)}"
171
179
  end
172
180
 
173
- def single_request
174
- params = @params.merge(response_target: @destination)
175
- params[:on_chunk_received] = single_part_progress if @progress_callback
181
+ def single_request(opts)
182
+ params = get_opts(opts[:params]).merge(response_target: opts[:destination])
183
+ params[:on_chunk_received] = single_part_progress(opts) if opts[:progress_callback]
176
184
  resp = @client.get_object(params)
177
- return resp unless @on_checksum_validated
185
+ return resp unless opts[:on_checksum_validated]
178
186
 
179
- @on_checksum_validated.call(resp.checksum_validated, resp) if resp.checksum_validated
187
+ opts[:on_checksum_validated].call(resp.checksum_validated, resp) if resp.checksum_validated
180
188
  resp
181
189
  end
182
190
 
183
- def single_part_progress
191
+ def single_part_progress(opts)
184
192
  proc do |_chunk, bytes_read, total_size|
185
- @progress_callback.call([bytes_read], [total_size], total_size)
193
+ opts[:progress_callback].call([bytes_read], [total_size], total_size)
186
194
  end
187
195
  end
188
196
 
197
+ def update_progress(progress, part)
198
+ return unless progress.progress_callback
199
+
200
+ part.params[:on_chunk_received] =
201
+ proc do |_chunk, bytes, total|
202
+ progress.call(part.part_number, bytes, total)
203
+ end
204
+ end
205
+
206
+ def execute_checksum_callback(resp, opts)
207
+ return unless opts[:on_checksum_validated] && resp.checksum_validated
208
+
209
+ opts[:on_checksum_validated].call(resp.checksum_validated, resp)
210
+ end
211
+
212
+ def validate_destination!(destination)
213
+ valid_types = [String, Pathname, File, Tempfile]
214
+ return if valid_types.include?(destination.class)
215
+
216
+ raise ArgumentError, "Invalid destination, expected #{valid_types.join(', ')} but got: #{destination.class}"
217
+ end
218
+
219
+ def validate_opts!(opts)
220
+ if opts[:on_checksum_validated] && !opts[:on_checksum_validated].respond_to?(:call)
221
+ raise ArgumentError, ':on_checksum_validated must be callable'
222
+ end
223
+
224
+ valid_modes = %w[auto get_range single_request]
225
+ unless valid_modes.include?(opts[:mode])
226
+ msg = "Invalid mode #{opts[:mode]} provided, :mode should be single_request, get_range or auto"
227
+ raise ArgumentError, msg
228
+ end
229
+
230
+ if opts[:mode] == 'get_range' && opts[:chunk_size].nil?
231
+ raise ArgumentError, 'In get_range mode, :chunk_size must be provided'
232
+ end
233
+
234
+ if opts[:chunk_size] && opts[:chunk_size] <= 0
235
+ raise ArgumentError, ':chunk_size must be positive'
236
+ end
237
+ end
238
+
239
+ def validate_range(actual, expected)
240
+ return if actual == expected.match(/bytes=(?<range>\d+-\d+)/)[:range]
241
+
242
+ raise MultipartDownloadError, "multipart download failed: expected range of #{expected} but got #{actual}"
243
+ end
244
+
245
+ def write(body, range, opts)
246
+ path = opts[:temp_path] || opts[:destination]
247
+ File.write(path, body.read, range.split('-').first.to_i)
248
+ end
249
+
189
250
  # @api private
190
251
  class Part < Struct.new(:part_number, :size, :params)
191
252
  include Aws::Structure
@@ -225,6 +286,8 @@ module Aws
225
286
  @progress_callback = progress_callback
226
287
  end
227
288
 
289
+ attr_reader :progress_callback
290
+
228
291
  def call(part_number, bytes_received, total)
229
292
  # part numbers start at 1
230
293
  @bytes_received[part_number - 1] = bytes_received