aws-sdk-s3 1.199.1 → 1.203.1

This diff shows the contents of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
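The central change across these files is that FileDownloader, FileUploader, and MultipartFileUploader no longer spin up their own threads via a :thread_count option; each part is instead handed to an injected :executor through `@executor.post(part) { |p| ... }`, with a Queue used to wait for completion. Below is a hypothetical minimal executor that satisfies the call shape visible in this diff; the class name and its thread-pool internals are assumptions for illustration, not part of the gem.

class SimpleThreadPoolExecutor
  def initialize(max_threads: 10)
    @queue = Queue.new
    @threads = Array.new(max_threads) do
      Thread.new do
        # Each worker pops jobs until it sees the nil sentinel pushed by #shutdown.
        while (job = @queue.pop)
          args, block = job
          begin
            block.call(*args)
          rescue StandardError
            # Ignore: the blocks posted by the SDK code in this diff rescue their own errors.
          end
        end
      end
    end
  end

  # Matches the call shape used in this diff: executor.post(part) { |p| ... }
  def post(*args, &block)
    @queue << [args, block]
  end

  # Drain remaining jobs, then stop the workers.
  def shutdown
    @threads.size.times { @queue << nil }
    @threads.each(&:join)
  end
end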
@@ -8,184 +8,262 @@ module Aws
  module S3
  # @api private
  class FileDownloader
-
  MIN_CHUNK_SIZE = 5 * 1024 * 1024
  MAX_PARTS = 10_000
+ HEAD_OPTIONS = Set.new(Client.api.operation(:head_object).input.shape.member_names)
+ GET_OPTIONS = Set.new(Client.api.operation(:get_object).input.shape.member_names)
 
  def initialize(options = {})
  @client = options[:client] || Client.new
+ @executor = options[:executor]
  end
 
  # @return [Client]
  attr_reader :client
 
  def download(destination, options = {})
- valid_types = [String, Pathname, File, Tempfile]
- unless valid_types.include?(destination.class)
- raise ArgumentError, "Invalid destination, expected #{valid_types.join(', ')} but got: #{destination.class}"
- end
-
- @destination = destination
- @mode = options.delete(:mode) || 'auto'
- @thread_count = options.delete(:thread_count) || 10
- @chunk_size = options.delete(:chunk_size)
- @on_checksum_validated = options.delete(:on_checksum_validated)
- @progress_callback = options.delete(:progress_callback)
- @params = options
- validate!
+ validate_destination!(destination)
+ opts = build_download_opts(destination, options)
+ validate_opts!(opts)
 
  Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
- case @mode
- when 'auto' then multipart_download
- when 'single_request' then single_request
- when 'get_range'
- raise ArgumentError, 'In get_range mode, :chunk_size must be provided' unless @chunk_size
-
- resp = @client.head_object(@params)
- multithreaded_get_by_ranges(resp.content_length, resp.etag)
- else
- raise ArgumentError, "Invalid mode #{@mode} provided, :mode should be single_request, get_range or auto"
+ case opts[:mode]
+ when 'auto' then multipart_download(opts)
+ when 'single_request' then single_request(opts)
+ when 'get_range' then range_request(opts)
  end
  end
- File.rename(@temp_path, @destination) if @temp_path
+ File.rename(opts[:temp_path], destination) if opts[:temp_path]
  ensure
- File.delete(@temp_path) if @temp_path && File.exist?(@temp_path)
+ cleanup_temp_file(opts)
  end
 
  private
 
- def validate!
- return unless @on_checksum_validated && !@on_checksum_validated.respond_to?(:call)
+ def build_download_opts(destination, opts)
+ {
+ destination: destination,
+ mode: opts.delete(:mode) || 'auto',
+ chunk_size: opts.delete(:chunk_size),
+ on_checksum_validated: opts.delete(:on_checksum_validated),
+ progress_callback: opts.delete(:progress_callback),
+ params: opts,
+ temp_path: nil
+ }
+ end
 
- raise ArgumentError, ':on_checksum_validated must be callable'
+ def cleanup_temp_file(opts)
+ return unless opts
+
+ temp_file = opts[:temp_path]
+ File.delete(temp_file) if temp_file && File.exist?(temp_file)
  end
 
- def multipart_download
- resp = @client.head_object(@params.merge(part_number: 1))
+ def download_with_executor(part_list, total_size, opts)
+ download_attempts = 0
+ completion_queue = Queue.new
+ abort_download = false
+ error = nil
+ progress = MultipartProgress.new(part_list, total_size, opts[:progress_callback])
+
+ while (part = part_list.shift)
+ break if abort_download
+
+ download_attempts += 1
+ @executor.post(part) do |p|
+ update_progress(progress, p)
+ resp = @client.get_object(p.params)
+ range = extract_range(resp.content_range)
+ validate_range(range, p.params[:range]) if p.params[:range]
+ write(resp.body, range, opts)
+
+ execute_checksum_callback(resp, opts)
+ rescue StandardError => e
+ abort_download = true
+ error = e
+ ensure
+ completion_queue << :done
+ end
+ end
+
+ download_attempts.times { completion_queue.pop }
+ raise error unless error.nil?
+ end
+
+ def handle_checksum_mode_option(option_key, opts)
+ return false unless option_key == :checksum_mode && opts[:checksum_mode] == 'DISABLED'
+
+ msg = ':checksum_mode option is deprecated. Checksums will be validated by default. ' \
+ 'To disable checksum validation, set :response_checksum_validation to "when_required" on your S3 client.'
+ warn(msg)
+ true
+ end
+
+ def get_opts(opts)
+ GET_OPTIONS.each_with_object({}) do |k, h|
+ next if k == :checksum_mode
+
+ h[k] = opts[k] if opts.key?(k)
+ end
+ end
+
+ def head_opts(opts)
+ HEAD_OPTIONS.each_with_object({}) do |k, h|
+ next if handle_checksum_mode_option(k, opts)
+
+ h[k] = opts[k] if opts.key?(k)
+ end
+ end
+
+ def compute_chunk(chunk_size, file_size)
+ raise ArgumentError, ":chunk_size shouldn't exceed total file size." if chunk_size && chunk_size > file_size
+
+ chunk_size || [(file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE].max.to_i
+ end
+
+ def compute_mode(file_size, total_parts, etag, opts)
+ chunk_size = compute_chunk(opts[:chunk_size], file_size)
+ part_size = (file_size.to_f / total_parts).ceil
+
+ resolve_temp_path(opts)
+ if chunk_size < part_size
+ multithreaded_get_by_ranges(file_size, etag, opts)
+ else
+ multithreaded_get_by_parts(total_parts, file_size, etag, opts)
+ end
+ end
+
+ def extract_range(value)
+ value.match(%r{bytes (?<range>\d+-\d+)/\d+})[:range]
+ end
+
+ def multipart_download(opts)
+ resp = @client.head_object(head_opts(opts[:params].merge(part_number: 1)))
  count = resp.parts_count
 
  if count.nil? || count <= 1
  if resp.content_length <= MIN_CHUNK_SIZE
- single_request
+ single_request(opts)
  else
- multithreaded_get_by_ranges(resp.content_length, resp.etag)
+ resolve_temp_path(opts)
+ multithreaded_get_by_ranges(resp.content_length, resp.etag, opts)
  end
  else
  # covers cases when given object is not uploaded via UploadPart API
- resp = @client.head_object(@params) # partNumber is an option
+ resp = @client.head_object(head_opts(opts[:params])) # partNumber is an option
  if resp.content_length <= MIN_CHUNK_SIZE
- single_request
+ single_request(opts)
  else
- compute_mode(resp.content_length, count, resp.etag)
+ compute_mode(resp.content_length, count, resp.etag, opts)
  end
  end
  end
 
- def compute_mode(file_size, count, etag)
- chunk_size = compute_chunk(file_size)
- part_size = (file_size.to_f / count).ceil
- if chunk_size < part_size
- multithreaded_get_by_ranges(file_size, etag)
- else
- multithreaded_get_by_parts(count, file_size, etag)
+ def multithreaded_get_by_parts(total_parts, file_size, etag, opts)
+ parts = (1..total_parts).map do |part|
+ params = get_opts(opts[:params].merge(part_number: part, if_match: etag))
+ Part.new(part_number: part, params: params)
  end
+ download_with_executor(PartList.new(parts), file_size, opts)
  end
 
- def compute_chunk(file_size)
- raise ArgumentError, ":chunk_size shouldn't exceed total file size." if @chunk_size && @chunk_size > file_size
-
- @chunk_size || [(file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE].max.to_i
- end
-
- def multithreaded_get_by_ranges(file_size, etag)
+ def multithreaded_get_by_ranges(file_size, etag, opts)
  offset = 0
- default_chunk_size = compute_chunk(file_size)
+ default_chunk_size = compute_chunk(opts[:chunk_size], file_size)
  chunks = []
  part_number = 1 # parts start at 1
  while offset < file_size
  progress = offset + default_chunk_size
  progress = file_size if progress > file_size
- params = @params.merge(range: "bytes=#{offset}-#{progress - 1}", if_match: etag)
+ params = get_opts(opts[:params].merge(range: "bytes=#{offset}-#{progress - 1}", if_match: etag))
  chunks << Part.new(part_number: part_number, size: (progress - offset), params: params)
  part_number += 1
  offset = progress
  end
- download_in_threads(PartList.new(chunks), file_size)
- end
-
- def multithreaded_get_by_parts(n_parts, total_size, etag)
- parts = (1..n_parts).map do |part|
- Part.new(part_number: part, params: @params.merge(part_number: part, if_match: etag))
- end
- download_in_threads(PartList.new(parts), total_size)
- end
-
- def download_in_threads(pending, total_size)
- threads = []
- progress = MultipartProgress.new(pending, total_size, @progress_callback) if @progress_callback
- unless [File, Tempfile].include?(@destination.class)
- @temp_path = "#{@destination}.s3tmp.#{SecureRandom.alphanumeric(8)}"
- end
- @thread_count.times do
- thread = Thread.new do
- begin
- while (part = pending.shift)
- if progress
- part.params[:on_chunk_received] =
- proc do |_chunk, bytes, total|
- progress.call(part.part_number, bytes, total)
- end
- end
- resp = @client.get_object(part.params)
- range = extract_range(resp.content_range)
- validate_range(range, part.params[:range]) if part.params[:range]
- write(resp.body, range)
- if @on_checksum_validated && resp.checksum_validated
- @on_checksum_validated.call(resp.checksum_validated, resp)
- end
- end
- nil
- rescue StandardError => e
- pending.clear! # keep other threads from downloading other parts
- raise e
- end
- end
- threads << thread
- end
- threads.map(&:value).compact
+ download_with_executor(PartList.new(chunks), file_size, opts)
  end
 
- def extract_range(value)
- value.match(%r{bytes (?<range>\d+-\d+)/\d+})[:range]
+ def range_request(opts)
+ resp = @client.head_object(head_opts(opts[:params]))
+ resolve_temp_path(opts)
+ multithreaded_get_by_ranges(resp.content_length, resp.etag, opts)
  end
 
- def validate_range(actual, expected)
- return if actual == expected.match(/bytes=(?<range>\d+-\d+)/)[:range]
-
- raise MultipartDownloadError, "multipart download failed: expected range of #{expected} but got #{actual}"
- end
+ def resolve_temp_path(opts)
+ return if [File, Tempfile].include?(opts[:destination].class)
 
- def write(body, range)
- path = @temp_path || @destination
- File.write(path, body.read, range.split('-').first.to_i)
+ opts[:temp_path] ||= "#{opts[:destination]}.s3tmp.#{SecureRandom.alphanumeric(8)}"
  end
 
- def single_request
- params = @params.merge(response_target: @destination)
- params[:on_chunk_received] = single_part_progress if @progress_callback
+ def single_request(opts)
+ params = get_opts(opts[:params]).merge(response_target: opts[:destination])
+ params[:on_chunk_received] = single_part_progress(opts) if opts[:progress_callback]
  resp = @client.get_object(params)
- return resp unless @on_checksum_validated
+ return resp unless opts[:on_checksum_validated]
 
- @on_checksum_validated.call(resp.checksum_validated, resp) if resp.checksum_validated
+ opts[:on_checksum_validated].call(resp.checksum_validated, resp) if resp.checksum_validated
  resp
  end
 
- def single_part_progress
+ def single_part_progress(opts)
  proc do |_chunk, bytes_read, total_size|
- @progress_callback.call([bytes_read], [total_size], total_size)
+ opts[:progress_callback].call([bytes_read], [total_size], total_size)
+ end
+ end
+
+ def update_progress(progress, part)
+ return unless progress.progress_callback
+
+ part.params[:on_chunk_received] =
+ proc do |_chunk, bytes, total|
+ progress.call(part.part_number, bytes, total)
+ end
+ end
+
+ def execute_checksum_callback(resp, opts)
+ return unless opts[:on_checksum_validated] && resp.checksum_validated
+
+ opts[:on_checksum_validated].call(resp.checksum_validated, resp)
+ end
+
+ def validate_destination!(destination)
+ valid_types = [String, Pathname, File, Tempfile]
+ return if valid_types.include?(destination.class)
+
+ raise ArgumentError, "Invalid destination, expected #{valid_types.join(', ')} but got: #{destination.class}"
+ end
+
+ def validate_opts!(opts)
+ if opts[:on_checksum_validated] && !opts[:on_checksum_validated].respond_to?(:call)
+ raise ArgumentError, ':on_checksum_validated must be callable'
+ end
+
+ valid_modes = %w[auto get_range single_request]
+ unless valid_modes.include?(opts[:mode])
+ msg = "Invalid mode #{opts[:mode]} provided, :mode should be single_request, get_range or auto"
+ raise ArgumentError, msg
+ end
+
+ if opts[:mode] == 'get_range' && opts[:chunk_size].nil?
+ raise ArgumentError, 'In get_range mode, :chunk_size must be provided'
+ end
+
+ if opts[:chunk_size] && opts[:chunk_size] <= 0
+ raise ArgumentError, ':chunk_size must be positive'
  end
  end
 
+ def validate_range(actual, expected)
+ return if actual == expected.match(/bytes=(?<range>\d+-\d+)/)[:range]
+
+ raise MultipartDownloadError, "multipart download failed: expected range of #{expected} but got #{actual}"
+ end
+
+ def write(body, range, opts)
+ path = opts[:temp_path] || opts[:destination]
+ File.write(path, body.read, range.split('-').first.to_i)
+ end
+
  # @api private
  class Part < Struct.new(:part_number, :size, :params)
  include Aws::Structure
@@ -225,6 +303,8 @@ module Aws
  @progress_callback = progress_callback
  end
 
+ attr_reader :progress_callback
+
  def call(part_number, bytes_received, total)
  # part numbers start at 1
  @bytes_received[part_number - 1] = bytes_received
@@ -13,8 +13,8 @@ module Aws
  # @option options [Client] :client
  # @option options [Integer] :multipart_threshold (104857600)
  def initialize(options = {})
- @options = options
  @client = options[:client] || Client.new
+ @executor = options[:executor]
  @multipart_threshold = options[:multipart_threshold] || DEFAULT_MULTIPART_THRESHOLD
  end
 
@@ -36,11 +36,9 @@ module Aws
  # @return [void]
  def upload(source, options = {})
  Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
- if File.size(source) >= multipart_threshold
- MultipartFileUploader.new(@options).upload(source, options)
+ if File.size(source) >= @multipart_threshold
+ MultipartFileUploader.new(client: @client, executor: @executor).upload(source, options)
  else
- # remove multipart parameters not supported by put_object
- options.delete(:thread_count)
  put_object(source, options)
  end
  end
@@ -48,9 +46,9 @@ module Aws
 
  private
 
- def open_file(source)
- if String === source || Pathname === source
- File.open(source, 'rb') { |file| yield(file) }
+ def open_file(source, &block)
+ if source.is_a?(String) || source.is_a?(Pathname)
+ File.open(source, 'rb', &block)
  else
  yield(source)
  end
@@ -7,10 +7,8 @@ module Aws
  module S3
  # @api private
  class MultipartFileUploader
-
  MIN_PART_SIZE = 5 * 1024 * 1024 # 5MB
  MAX_PARTS = 10_000
- DEFAULT_THREAD_COUNT = 10
  CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
  COMPLETE_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
  UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)
@@ -21,10 +19,9 @@ module Aws
  )
 
  # @option options [Client] :client
- # @option options [Integer] :thread_count (DEFAULT_THREAD_COUNT)
  def initialize(options = {})
  @client = options[:client] || Client.new
- @thread_count = options[:thread_count] || DEFAULT_THREAD_COUNT
+ @executor = options[:executor]
  end
 
  # @return [Client]
@@ -38,11 +35,12 @@ module Aws
  # It will be invoked with [bytes_read], [total_sizes]
  # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
  def upload(source, options = {})
- raise ArgumentError, 'unable to multipart upload files smaller than 5MB' if File.size(source) < MIN_PART_SIZE
+ file_size = File.size(source)
+ raise ArgumentError, 'unable to multipart upload files smaller than 5MB' if file_size < MIN_PART_SIZE
 
  upload_id = initiate_upload(options)
- parts = upload_parts(upload_id, source, options)
- complete_upload(upload_id, parts, source, options)
+ parts = upload_parts(upload_id, source, file_size, options)
+ complete_upload(upload_id, parts, file_size, options)
  end
 
  private
@@ -51,22 +49,21 @@ module Aws
  @client.create_multipart_upload(create_opts(options)).upload_id
  end
 
- def complete_upload(upload_id, parts, source, options)
+ def complete_upload(upload_id, parts, file_size, options)
  @client.complete_multipart_upload(
- **complete_opts(options).merge(
- upload_id: upload_id,
- multipart_upload: { parts: parts },
- mpu_object_size: File.size(source)
- )
+ **complete_opts(options),
+ upload_id: upload_id,
+ multipart_upload: { parts: parts },
+ mpu_object_size: file_size
  )
  rescue StandardError => e
  abort_upload(upload_id, options, [e])
  end
 
- def upload_parts(upload_id, source, options)
+ def upload_parts(upload_id, source, file_size, options)
  completed = PartList.new
- pending = PartList.new(compute_parts(upload_id, source, options))
- errors = upload_in_threads(pending, completed, options)
+ pending = PartList.new(compute_parts(upload_id, source, file_size, options))
+ errors = upload_with_executor(pending, completed, options)
  if errors.empty?
  completed.to_a.sort_by { |part| part[:part_number] }
  else
@@ -81,22 +78,25 @@ module Aws
  rescue MultipartUploadError => e
  raise e
  rescue StandardError => e
- msg = "failed to abort multipart upload: #{e.message}. "\
- "Multipart upload failed: #{errors.map(&:message).join('; ')}"
+ msg = "failed to abort multipart upload: #{e.message}. " \
+ "Multipart upload failed: #{errors.map(&:message).join('; ')}"
  raise MultipartUploadError.new(msg, errors + [e])
  end
 
- def compute_parts(upload_id, source, options)
- size = File.size(source)
- default_part_size = compute_default_part_size(size)
+ def compute_parts(upload_id, source, file_size, options)
+ default_part_size = compute_default_part_size(file_size)
  offset = 0
  part_number = 1
  parts = []
- while offset < size
+ while offset < file_size
  parts << upload_part_opts(options).merge(
  upload_id: upload_id,
  part_number: part_number,
- body: FilePart.new(source: source, offset: offset, size: part_size(size, default_part_size, offset))
+ body: FilePart.new(
+ source: source,
+ offset: offset,
+ size: part_size(file_size, default_part_size, offset)
+ )
  )
  part_number += 1
  offset += default_part_size
@@ -112,20 +112,23 @@ module Aws
  keys.any? { |key| checksum_key?(key) }
  end
 
+ def checksum_not_required?(options)
+ @client.config.request_checksum_calculation == 'when_required' && !options[:checksum_algorithm]
+ end
+
  def create_opts(options)
- opts = { checksum_algorithm: Aws::Plugins::ChecksumAlgorithm::DEFAULT_CHECKSUM }
- opts[:checksum_type] = 'FULL_OBJECT' if has_checksum_key?(options.keys)
- CREATE_OPTIONS.each_with_object(opts) do |key, hash|
- hash[key] = options[key] if options.key?(key)
+ opts = {}
+ unless checksum_not_required?(options)
+ opts[:checksum_algorithm] = Aws::Plugins::ChecksumAlgorithm::DEFAULT_CHECKSUM
  end
+ opts[:checksum_type] = 'FULL_OBJECT' if has_checksum_key?(options.keys)
+ CREATE_OPTIONS.each_with_object(opts) { |k, h| h[k] = options[k] if options.key?(k) }
  end
 
  def complete_opts(options)
  opts = {}
  opts[:checksum_type] = 'FULL_OBJECT' if has_checksum_key?(options.keys)
- COMPLETE_OPTIONS.each_with_object(opts) do |key, hash|
- hash[key] = options[key] if options.key?(key)
- end
+ COMPLETE_OPTIONS.each_with_object(opts) { |k, h| h[k] = options[k] if options.key?(k) }
  end
 
  def upload_part_opts(options)
@@ -135,43 +138,45 @@ module Aws
  end
  end
 
- def upload_in_threads(pending, completed, options)
- threads = []
- if (callback = options[:progress_callback])
- progress = MultipartProgress.new(pending, callback)
- end
- options.fetch(:thread_count, @thread_count).times do
- thread = Thread.new do
- begin
- while (part = pending.shift)
- if progress
- part[:on_chunk_sent] =
- proc do |_chunk, bytes, _total|
- progress.call(part[:part_number], bytes)
- end
- end
- resp = @client.upload_part(part)
- part[:body].close
- completed_part = { etag: resp.etag, part_number: part[:part_number] }
- algorithm = resp.context.params[:checksum_algorithm]
- k = "checksum_#{algorithm.downcase}".to_sym
- completed_part[k] = resp.send(k)
- completed.push(completed_part)
- end
- nil
- rescue StandardError => e
- # keep other threads from uploading other parts
- pending.clear!
- e
- end
+ def upload_with_executor(pending, completed, options)
+ upload_attempts = 0
+ completion_queue = Queue.new
+ abort_upload = false
+ errors = []
+ progress = MultipartProgress.new(pending, options[:progress_callback])
+
+ while (part = pending.shift)
+ break if abort_upload
+
+ upload_attempts += 1
+ @executor.post(part) do |p|
+ update_progress(progress, p)
+ resp = @client.upload_part(p)
+ p[:body].close
+ completed_part = { etag: resp.etag, part_number: p[:part_number] }
+ apply_part_checksum(resp, completed_part)
+ completed.push(completed_part)
+ rescue StandardError => e
+ abort_upload = true
+ errors << e
+ ensure
+ completion_queue << :done
  end
- threads << thread
  end
- threads.map(&:value).compact
+
+ upload_attempts.times { completion_queue.pop }
+ errors
  end
 
- def compute_default_part_size(source_size)
- [(source_size.to_f / MAX_PARTS).ceil, MIN_PART_SIZE].max.to_i
+ def apply_part_checksum(resp, part)
+ return unless (checksum = resp.context.params[:checksum_algorithm])
+
+ k = :"checksum_#{checksum.downcase}"
+ part[k] = resp.send(k)
+ end
+
+ def compute_default_part_size(file_size)
+ [(file_size.to_f / MAX_PARTS).ceil, MIN_PART_SIZE].max.to_i
  end
 
  def part_size(total_size, part_size, offset)
@@ -182,6 +187,15 @@ module Aws
  end
  end
 
+ def update_progress(progress, part)
+ return unless progress.progress_callback
+
+ part[:on_chunk_sent] =
+ proc do |_chunk, bytes, _total|
+ progress.call(part[:part_number], bytes)
+ end
+ end
+
  # @api private
  class PartList
  def initialize(parts = [])
@@ -222,6 +236,8 @@ module Aws
  @progress_callback = progress_callback
  end
 
+ attr_reader :progress_callback
+
  def call(part_number, bytes_read)
  # part numbers start at 1
  @bytes_sent[part_number - 1] = bytes_read
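For context, a hedged usage sketch of the new constructor option follows. Only the :client and :executor keywords and the upload/download signatures are taken from the diff above; the bucket, key, and file paths are placeholders, SimpleThreadPoolExecutor comes from the sketch near the top of this page, and these helpers are internal (FileDownloader is marked @api private) and are normally driven through the higher-level upload_file / download_file customizations rather than constructed directly.

require 'aws-sdk-s3'

client   = Aws::S3::Client.new
executor = SimpleThreadPoolExecutor.new(max_threads: 8) # hypothetical executor from the sketch above

# Uploader: falls back to put_object below @multipart_threshold, otherwise
# delegates to MultipartFileUploader, which posts each part to the executor.
uploader = Aws::S3::FileUploader.new(client: client, executor: executor)
uploader.upload('/tmp/big-file.bin', bucket: 'my-bucket', key: 'big-file.bin')

# Downloader: :mode and :chunk_size mirror build_download_opts in the diff.
downloader = Aws::S3::FileDownloader.new(client: client, executor: executor)
downloader.download('/tmp/big-file-copy.bin', bucket: 'my-bucket', key: 'big-file.bin')

executor.shutdown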