aws-sdk-s3 1.198.0 → 1.202.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
@@ -8,184 +8,245 @@ module Aws
   module S3
     # @api private
     class FileDownloader
-
       MIN_CHUNK_SIZE = 5 * 1024 * 1024
       MAX_PARTS = 10_000
+      HEAD_OPTIONS = Set.new(Client.api.operation(:head_object).input.shape.member_names)
+      GET_OPTIONS = Set.new(Client.api.operation(:get_object).input.shape.member_names)
 
       def initialize(options = {})
         @client = options[:client] || Client.new
+        @executor = options[:executor]
       end
 
       # @return [Client]
       attr_reader :client
 
       def download(destination, options = {})
-        valid_types = [String, Pathname, File, Tempfile]
-        unless valid_types.include?(destination.class)
-          raise ArgumentError, "Invalid destination, expected #{valid_types.join(', ')} but got: #{destination.class}"
-        end
-
-        @destination = destination
-        @mode = options.delete(:mode) || 'auto'
-        @thread_count = options.delete(:thread_count) || 10
-        @chunk_size = options.delete(:chunk_size)
-        @on_checksum_validated = options.delete(:on_checksum_validated)
-        @progress_callback = options.delete(:progress_callback)
-        @params = options
-        validate!
+        validate_destination!(destination)
+        opts = build_download_opts(destination, options)
+        validate_opts!(opts)
 
         Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
-          case @mode
-          when 'auto' then multipart_download
-          when 'single_request' then single_request
-          when 'get_range'
-            raise ArgumentError, 'In get_range mode, :chunk_size must be provided' unless @chunk_size
-
-            resp = @client.head_object(@params)
-            multithreaded_get_by_ranges(resp.content_length, resp.etag)
-          else
-            raise ArgumentError, "Invalid mode #{@mode} provided, :mode should be single_request, get_range or auto"
+          case opts[:mode]
+          when 'auto' then multipart_download(opts)
+          when 'single_request' then single_request(opts)
+          when 'get_range' then range_request(opts)
           end
         end
-        File.rename(@temp_path, @destination) if @temp_path
+        File.rename(opts[:temp_path], destination) if opts[:temp_path]
       ensure
-        File.delete(@temp_path) if @temp_path && File.exist?(@temp_path)
+        cleanup_temp_file(opts)
       end
 
       private
 
-      def validate!
-        return unless @on_checksum_validated && !@on_checksum_validated.respond_to?(:call)
+      def build_download_opts(destination, opts)
+        {
+          destination: destination,
+          mode: opts.delete(:mode) || 'auto',
+          chunk_size: opts.delete(:chunk_size),
+          on_checksum_validated: opts.delete(:on_checksum_validated),
+          progress_callback: opts.delete(:progress_callback),
+          params: opts,
+          temp_path: nil
+        }
+      end
+
+      def cleanup_temp_file(opts)
+        return unless opts
+
+        temp_file = opts[:temp_path]
+        File.delete(temp_file) if temp_file && File.exist?(temp_file)
+      end
+
+      def download_with_executor(part_list, total_size, opts)
+        download_attempts = 0
+        completion_queue = Queue.new
+        abort_download = false
+        error = nil
+        progress = MultipartProgress.new(part_list, total_size, opts[:progress_callback])
+
+        while (part = part_list.shift)
+          break if abort_download
+
+          download_attempts += 1
+          @executor.post(part) do |p|
+            update_progress(progress, p)
+            resp = @client.get_object(p.params)
+            range = extract_range(resp.content_range)
+            validate_range(range, p.params[:range]) if p.params[:range]
+            write(resp.body, range, opts)
+
+            execute_checksum_callback(resp, opts)
+          rescue StandardError => e
+            abort_download = true
+            error = e
+          ensure
+            completion_queue << :done
+          end
+        end
+
+        download_attempts.times { completion_queue.pop }
+        raise error unless error.nil?
+      end
+
+      def get_opts(opts)
+        GET_OPTIONS.each_with_object({}) { |k, h| h[k] = opts[k] if opts.key?(k) }
+      end
+
+      def head_opts(opts)
+        HEAD_OPTIONS.each_with_object({}) { |k, h| h[k] = opts[k] if opts.key?(k) }
+      end
+
+      def compute_chunk(chunk_size, file_size)
+        raise ArgumentError, ":chunk_size shouldn't exceed total file size." if chunk_size && chunk_size > file_size
 
-        raise ArgumentError, ':on_checksum_validated must be callable'
+        chunk_size || [(file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE].max.to_i
       end
 
-      def multipart_download
-        resp = @client.head_object(@params.merge(part_number: 1))
+      def compute_mode(file_size, total_parts, etag, opts)
+        chunk_size = compute_chunk(opts[:chunk_size], file_size)
+        part_size = (file_size.to_f / total_parts).ceil
+
+        resolve_temp_path(opts)
+        if chunk_size < part_size
+          multithreaded_get_by_ranges(file_size, etag, opts)
+        else
+          multithreaded_get_by_parts(total_parts, file_size, etag, opts)
+        end
+      end
+
+      def extract_range(value)
+        value.match(%r{bytes (?<range>\d+-\d+)/\d+})[:range]
+      end
+
+      def multipart_download(opts)
+        resp = @client.head_object(head_opts(opts[:params].merge(part_number: 1)))
         count = resp.parts_count
 
         if count.nil? || count <= 1
           if resp.content_length <= MIN_CHUNK_SIZE
-            single_request
+            single_request(opts)
           else
-            multithreaded_get_by_ranges(resp.content_length, resp.etag)
+            resolve_temp_path(opts)
+            multithreaded_get_by_ranges(resp.content_length, resp.etag, opts)
           end
         else
           # covers cases when given object is not uploaded via UploadPart API
-          resp = @client.head_object(@params) # partNumber is an option
+          resp = @client.head_object(head_opts(opts[:params])) # partNumber is an option
          if resp.content_length <= MIN_CHUNK_SIZE
-            single_request
+            single_request(opts)
          else
-            compute_mode(resp.content_length, count, resp.etag)
+            compute_mode(resp.content_length, count, resp.etag, opts)
          end
        end
      end
 
-      def compute_mode(file_size, count, etag)
-        chunk_size = compute_chunk(file_size)
-        part_size = (file_size.to_f / count).ceil
-        if chunk_size < part_size
-          multithreaded_get_by_ranges(file_size, etag)
-        else
-          multithreaded_get_by_parts(count, file_size, etag)
+      def multithreaded_get_by_parts(total_parts, file_size, etag, opts)
+        parts = (1..total_parts).map do |part|
+          params = get_opts(opts[:params].merge(part_number: part, if_match: etag))
+          Part.new(part_number: part, params: params)
         end
+        download_with_executor(PartList.new(parts), file_size, opts)
       end
 
-      def compute_chunk(file_size)
-        raise ArgumentError, ":chunk_size shouldn't exceed total file size." if @chunk_size && @chunk_size > file_size
-
-        @chunk_size || [(file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE].max.to_i
-      end
-
-      def multithreaded_get_by_ranges(file_size, etag)
+      def multithreaded_get_by_ranges(file_size, etag, opts)
         offset = 0
-        default_chunk_size = compute_chunk(file_size)
+        default_chunk_size = compute_chunk(opts[:chunk_size], file_size)
         chunks = []
         part_number = 1 # parts start at 1
         while offset < file_size
           progress = offset + default_chunk_size
           progress = file_size if progress > file_size
-          params = @params.merge(range: "bytes=#{offset}-#{progress - 1}", if_match: etag)
+          params = get_opts(opts[:params].merge(range: "bytes=#{offset}-#{progress - 1}", if_match: etag))
           chunks << Part.new(part_number: part_number, size: (progress - offset), params: params)
           part_number += 1
           offset = progress
         end
-        download_in_threads(PartList.new(chunks), file_size)
-      end
-
-      def multithreaded_get_by_parts(n_parts, total_size, etag)
-        parts = (1..n_parts).map do |part|
-          Part.new(part_number: part, params: @params.merge(part_number: part, if_match: etag))
-        end
-        download_in_threads(PartList.new(parts), total_size)
-      end
-
-      def download_in_threads(pending, total_size)
-        threads = []
-        progress = MultipartProgress.new(pending, total_size, @progress_callback) if @progress_callback
-        unless [File, Tempfile].include?(@destination.class)
-          @temp_path = "#{@destination}.s3tmp.#{SecureRandom.alphanumeric(8)}"
-        end
-        @thread_count.times do
-          thread = Thread.new do
-            begin
-              while (part = pending.shift)
-                if progress
-                  part.params[:on_chunk_received] =
-                    proc do |_chunk, bytes, total|
-                      progress.call(part.part_number, bytes, total)
-                    end
-                end
-                resp = @client.get_object(part.params)
-                range = extract_range(resp.content_range)
-                validate_range(range, part.params[:range]) if part.params[:range]
-                write(resp.body, range)
-                if @on_checksum_validated && resp.checksum_validated
-                  @on_checksum_validated.call(resp.checksum_validated, resp)
-                end
-              end
-              nil
-            rescue StandardError => e
-              pending.clear! # keep other threads from downloading other parts
-              raise e
-            end
-          end
-          threads << thread
-        end
-        threads.map(&:value).compact
+        download_with_executor(PartList.new(chunks), file_size, opts)
       end
 
-      def extract_range(value)
-        value.match(%r{bytes (?<range>\d+-\d+)/\d+})[:range]
+      def range_request(opts)
+        resp = @client.head_object(head_opts(opts[:params]))
+        resolve_temp_path(opts)
+        multithreaded_get_by_ranges(resp.content_length, resp.etag, opts)
       end
 
-      def validate_range(actual, expected)
-        return if actual == expected.match(/bytes=(?<range>\d+-\d+)/)[:range]
-
-        raise MultipartDownloadError, "multipart download failed: expected range of #{expected} but got #{actual}"
-      end
+      def resolve_temp_path(opts)
+        return if [File, Tempfile].include?(opts[:destination].class)
 
-      def write(body, range)
-        path = @temp_path || @destination
-        File.write(path, body.read, range.split('-').first.to_i)
+        opts[:temp_path] ||= "#{opts[:destination]}.s3tmp.#{SecureRandom.alphanumeric(8)}"
       end
 
-      def single_request
-        params = @params.merge(response_target: @destination)
-        params[:on_chunk_received] = single_part_progress if @progress_callback
+      def single_request(opts)
+        params = get_opts(opts[:params]).merge(response_target: opts[:destination])
+        params[:on_chunk_received] = single_part_progress(opts) if opts[:progress_callback]
         resp = @client.get_object(params)
-        return resp unless @on_checksum_validated
+        return resp unless opts[:on_checksum_validated]
 
-        @on_checksum_validated.call(resp.checksum_validated, resp) if resp.checksum_validated
+        opts[:on_checksum_validated].call(resp.checksum_validated, resp) if resp.checksum_validated
         resp
       end
 
-      def single_part_progress
+      def single_part_progress(opts)
         proc do |_chunk, bytes_read, total_size|
-          @progress_callback.call([bytes_read], [total_size], total_size)
+          opts[:progress_callback].call([bytes_read], [total_size], total_size)
         end
       end
 
+      def update_progress(progress, part)
+        return unless progress.progress_callback
+
+        part.params[:on_chunk_received] =
+          proc do |_chunk, bytes, total|
+            progress.call(part.part_number, bytes, total)
+          end
+      end
+
+      def execute_checksum_callback(resp, opts)
+        return unless opts[:on_checksum_validated] && resp.checksum_validated
+
+        opts[:on_checksum_validated].call(resp.checksum_validated, resp)
+      end
+
+      def validate_destination!(destination)
+        valid_types = [String, Pathname, File, Tempfile]
+        return if valid_types.include?(destination.class)
+
+        raise ArgumentError, "Invalid destination, expected #{valid_types.join(', ')} but got: #{destination.class}"
+      end
+
+      def validate_opts!(opts)
+        if opts[:on_checksum_validated] && !opts[:on_checksum_validated].respond_to?(:call)
+          raise ArgumentError, ':on_checksum_validated must be callable'
+        end
+
+        valid_modes = %w[auto get_range single_request]
+        unless valid_modes.include?(opts[:mode])
+          msg = "Invalid mode #{opts[:mode]} provided, :mode should be single_request, get_range or auto"
+          raise ArgumentError, msg
+        end
+
+        if opts[:mode] == 'get_range' && opts[:chunk_size].nil?
+          raise ArgumentError, 'In get_range mode, :chunk_size must be provided'
+        end
+
+        if opts[:chunk_size] && opts[:chunk_size] <= 0
+          raise ArgumentError, ':chunk_size must be positive'
+        end
+      end
+
+      def validate_range(actual, expected)
+        return if actual == expected.match(/bytes=(?<range>\d+-\d+)/)[:range]
+
+        raise MultipartDownloadError, "multipart download failed: expected range of #{expected} but got #{actual}"
+      end
+
+      def write(body, range, opts)
+        path = opts[:temp_path] || opts[:destination]
+        File.write(path, body.read, range.split('-').first.to_i)
+      end
+
       # @api private
       class Part < Struct.new(:part_number, :size, :params)
         include Aws::Structure
@@ -225,6 +286,8 @@ module Aws
           @progress_callback = progress_callback
         end
 
+        attr_reader :progress_callback
+
         def call(part_number, bytes_received, total)
           # part numbers start at 1
           @bytes_received[part_number - 1] = bytes_received
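
Aside (not part of the diff): FileDownloader now takes its concurrency from an :executor supplied at construction time instead of a per-call :thread_count. The refactored code only ever calls executor.post(part) { |p| ... } and synchronizes on its own completion queue, so any object with a compatible post method should work. A minimal, hypothetical thread-per-task sketch follows (the class name, bucket, key, and paths are invented for illustration):

    class NaiveExecutor
      # Run the given block with its arguments on a fresh thread; the downloader
      # above does its own error collection and completion signalling.
      def post(*args)
        Thread.new { yield(*args) }
      end
    end

    downloader = Aws::S3::FileDownloader.new(client: Aws::S3::Client.new, executor: NaiveExecutor.new)
    downloader.download('/tmp/report.csv', bucket: 'my-bucket', key: 'reports/report.csv')

FileDownloader remains marked # @api private, so this only illustrates the interface the new code expects.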
@@ -13,8 +13,8 @@ module Aws
       # @option options [Client] :client
       # @option options [Integer] :multipart_threshold (104857600)
       def initialize(options = {})
-        @options = options
         @client = options[:client] || Client.new
+        @executor = options[:executor]
         @multipart_threshold = options[:multipart_threshold] || DEFAULT_MULTIPART_THRESHOLD
       end
 
@@ -36,11 +36,9 @@ module Aws
       # @return [void]
       def upload(source, options = {})
         Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
-          if File.size(source) >= multipart_threshold
-            MultipartFileUploader.new(@options).upload(source, options)
+          if File.size(source) >= @multipart_threshold
+            MultipartFileUploader.new(client: @client, executor: @executor).upload(source, options)
           else
-            # remove multipart parameters not supported by put_object
-            options.delete(:thread_count)
             put_object(source, options)
           end
         end
@@ -48,9 +46,9 @@ module Aws
 
       private
 
-      def open_file(source)
-        if String === source || Pathname === source
-          File.open(source, 'rb') { |file| yield(file) }
+      def open_file(source, &block)
+        if source.is_a?(String) || source.is_a?(Pathname)
+          File.open(source, 'rb', &block)
         else
           yield(source)
         end
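
Aside (not part of the diff): FileUploader likewise drops @options and :thread_count handling and forwards its client and executor directly into MultipartFileUploader.new(client: @client, executor: @executor). A hypothetical usage sketch (names and paths invented), reusing the NaiveExecutor idea from above:

    uploader = Aws::S3::FileUploader.new(
      client: Aws::S3::Client.new,
      executor: NaiveExecutor.new,             # any object responding to #post
      multipart_threshold: 100 * 1024 * 1024   # optional; falls back to DEFAULT_MULTIPART_THRESHOLD
    )
    uploader.upload('/tmp/backup.tar.gz', bucket: 'my-bucket', key: 'backups/backup.tar.gz')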
@@ -7,10 +7,8 @@ module Aws
   module S3
     # @api private
     class MultipartFileUploader
-
       MIN_PART_SIZE = 5 * 1024 * 1024 # 5MB
       MAX_PARTS = 10_000
-      DEFAULT_THREAD_COUNT = 10
       CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
       COMPLETE_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
       UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)
@@ -21,10 +19,9 @@ module Aws
       )
 
       # @option options [Client] :client
-      # @option options [Integer] :thread_count (DEFAULT_THREAD_COUNT)
       def initialize(options = {})
         @client = options[:client] || Client.new
-        @thread_count = options[:thread_count] || DEFAULT_THREAD_COUNT
+        @executor = options[:executor]
       end
 
       # @return [Client]
@@ -38,11 +35,12 @@ module Aws
       # It will be invoked with [bytes_read], [total_sizes]
       # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
       def upload(source, options = {})
-        raise ArgumentError, 'unable to multipart upload files smaller than 5MB' if File.size(source) < MIN_PART_SIZE
+        file_size = File.size(source)
+        raise ArgumentError, 'unable to multipart upload files smaller than 5MB' if file_size < MIN_PART_SIZE
 
         upload_id = initiate_upload(options)
-        parts = upload_parts(upload_id, source, options)
-        complete_upload(upload_id, parts, source, options)
+        parts = upload_parts(upload_id, source, file_size, options)
+        complete_upload(upload_id, parts, file_size, options)
       end
 
       private
@@ -51,22 +49,21 @@ module Aws
         @client.create_multipart_upload(create_opts(options)).upload_id
       end
 
-      def complete_upload(upload_id, parts, source, options)
+      def complete_upload(upload_id, parts, file_size, options)
         @client.complete_multipart_upload(
-          **complete_opts(options).merge(
-            upload_id: upload_id,
-            multipart_upload: { parts: parts },
-            mpu_object_size: File.size(source)
-          )
+          **complete_opts(options),
+          upload_id: upload_id,
+          multipart_upload: { parts: parts },
+          mpu_object_size: file_size
         )
       rescue StandardError => e
         abort_upload(upload_id, options, [e])
       end
 
-      def upload_parts(upload_id, source, options)
+      def upload_parts(upload_id, source, file_size, options)
         completed = PartList.new
-        pending = PartList.new(compute_parts(upload_id, source, options))
-        errors = upload_in_threads(pending, completed, options)
+        pending = PartList.new(compute_parts(upload_id, source, file_size, options))
+        errors = upload_with_executor(pending, completed, options)
         if errors.empty?
           completed.to_a.sort_by { |part| part[:part_number] }
         else
@@ -81,22 +78,25 @@ module Aws
       rescue MultipartUploadError => e
         raise e
       rescue StandardError => e
-        msg = "failed to abort multipart upload: #{e.message}. "\
-          "Multipart upload failed: #{errors.map(&:message).join('; ')}"
+        msg = "failed to abort multipart upload: #{e.message}. " \
+              "Multipart upload failed: #{errors.map(&:message).join('; ')}"
         raise MultipartUploadError.new(msg, errors + [e])
       end
 
-      def compute_parts(upload_id, source, options)
-        size = File.size(source)
-        default_part_size = compute_default_part_size(size)
+      def compute_parts(upload_id, source, file_size, options)
+        default_part_size = compute_default_part_size(file_size)
         offset = 0
         part_number = 1
         parts = []
-        while offset < size
+        while offset < file_size
           parts << upload_part_opts(options).merge(
             upload_id: upload_id,
             part_number: part_number,
-            body: FilePart.new(source: source, offset: offset, size: part_size(size, default_part_size, offset))
+            body: FilePart.new(
+              source: source,
+              offset: offset,
+              size: part_size(file_size, default_part_size, offset)
+            )
           )
           part_number += 1
           offset += default_part_size
@@ -112,20 +112,23 @@ module Aws
         keys.any? { |key| checksum_key?(key) }
       end
 
+      def checksum_not_required?(options)
+        @client.config.request_checksum_calculation == 'when_required' && !options[:checksum_algorithm]
+      end
+
       def create_opts(options)
-        opts = { checksum_algorithm: Aws::Plugins::ChecksumAlgorithm::DEFAULT_CHECKSUM }
-        opts[:checksum_type] = 'FULL_OBJECT' if has_checksum_key?(options.keys)
-        CREATE_OPTIONS.each_with_object(opts) do |key, hash|
-          hash[key] = options[key] if options.key?(key)
+        opts = {}
+        unless checksum_not_required?(options)
+          opts[:checksum_algorithm] = Aws::Plugins::ChecksumAlgorithm::DEFAULT_CHECKSUM
         end
+        opts[:checksum_type] = 'FULL_OBJECT' if has_checksum_key?(options.keys)
+        CREATE_OPTIONS.each_with_object(opts) { |k, h| h[k] = options[k] if options.key?(k) }
       end
 
       def complete_opts(options)
         opts = {}
         opts[:checksum_type] = 'FULL_OBJECT' if has_checksum_key?(options.keys)
-        COMPLETE_OPTIONS.each_with_object(opts) do |key, hash|
-          hash[key] = options[key] if options.key?(key)
-        end
+        COMPLETE_OPTIONS.each_with_object(opts) { |k, h| h[k] = options[k] if options.key?(k) }
       end
 
       def upload_part_opts(options)
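
Aside (not part of the diff): the new checksum_not_required? helper changes when a default checksum algorithm is injected. If the client is configured with request_checksum_calculation: 'when_required' and the caller passes no :checksum_algorithm, create_opts now leaves :checksum_algorithm unset; with 'when_supported', the previous defaulting to Aws::Plugins::ChecksumAlgorithm::DEFAULT_CHECKSUM still applies. A hypothetical illustration:

    client = Aws::S3::Client.new(request_checksum_calculation: 'when_required')
    # With no :checksum_algorithm in the upload options, CreateMultipartUpload is sent
    # without a defaulted checksum algorithm; passing an explicit checksum_algorithm,
    # or configuring 'when_supported', still includes one.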
@@ -135,43 +138,45 @@ module Aws
         end
       end
 
-      def upload_in_threads(pending, completed, options)
-        threads = []
-        if (callback = options[:progress_callback])
-          progress = MultipartProgress.new(pending, callback)
-        end
-        options.fetch(:thread_count, @thread_count).times do
-          thread = Thread.new do
-            begin
-              while (part = pending.shift)
-                if progress
-                  part[:on_chunk_sent] =
-                    proc do |_chunk, bytes, _total|
-                      progress.call(part[:part_number], bytes)
-                    end
-                end
-                resp = @client.upload_part(part)
-                part[:body].close
-                completed_part = { etag: resp.etag, part_number: part[:part_number] }
-                algorithm = resp.context.params[:checksum_algorithm]
-                k = "checksum_#{algorithm.downcase}".to_sym
-                completed_part[k] = resp.send(k)
-                completed.push(completed_part)
-              end
-              nil
-            rescue StandardError => e
-              # keep other threads from uploading other parts
-              pending.clear!
-              e
-            end
+      def upload_with_executor(pending, completed, options)
+        upload_attempts = 0
+        completion_queue = Queue.new
+        abort_upload = false
+        errors = []
+        progress = MultipartProgress.new(pending, options[:progress_callback])
+
+        while (part = pending.shift)
+          break if abort_upload
+
+          upload_attempts += 1
+          @executor.post(part) do |p|
+            update_progress(progress, p)
+            resp = @client.upload_part(p)
+            p[:body].close
+            completed_part = { etag: resp.etag, part_number: p[:part_number] }
+            apply_part_checksum(resp, completed_part)
+            completed.push(completed_part)
+          rescue StandardError => e
+            abort_upload = true
+            errors << e
+          ensure
+            completion_queue << :done
           end
-          threads << thread
         end
-        threads.map(&:value).compact
+
+        upload_attempts.times { completion_queue.pop }
+        errors
       end
 
-      def compute_default_part_size(source_size)
-        [(source_size.to_f / MAX_PARTS).ceil, MIN_PART_SIZE].max.to_i
+      def apply_part_checksum(resp, part)
+        return unless (checksum = resp.context.params[:checksum_algorithm])
+
+        k = :"checksum_#{checksum.downcase}"
+        part[k] = resp.send(k)
+      end
+
+      def compute_default_part_size(file_size)
+        [(file_size.to_f / MAX_PARTS).ceil, MIN_PART_SIZE].max.to_i
       end
 
       def part_size(total_size, part_size, offset)
@@ -182,6 +187,15 @@ module Aws
         end
       end
 
+      def update_progress(progress, part)
+        return unless progress.progress_callback
+
+        part[:on_chunk_sent] =
+          proc do |_chunk, bytes, _total|
+            progress.call(part[:part_number], bytes)
+          end
+      end
+
       # @api private
       class PartList
         def initialize(parts = [])
@@ -222,6 +236,8 @@ module Aws
           @progress_callback = progress_callback
         end
 
+        attr_reader :progress_callback
+
         def call(part_number, bytes_read)
           # part numbers start at 1
           @bytes_sent[part_number - 1] = bytes_read