aws-sdk-s3 1.196.1 → 1.213.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +116 -0
  3. data/VERSION +1 -1
  4. data/lib/aws-sdk-s3/bucket.rb +17 -17
  5. data/lib/aws-sdk-s3/bucket_acl.rb +1 -1
  6. data/lib/aws-sdk-s3/bucket_versioning.rb +33 -0
  7. data/lib/aws-sdk-s3/client.rb +1271 -453
  8. data/lib/aws-sdk-s3/client_api.rb +115 -0
  9. data/lib/aws-sdk-s3/customizations/object.rb +39 -24
  10. data/lib/aws-sdk-s3/customizations.rb +3 -1
  11. data/lib/aws-sdk-s3/default_executor.rb +103 -0
  12. data/lib/aws-sdk-s3/encryption/client.rb +2 -2
  13. data/lib/aws-sdk-s3/encryption/default_cipher_provider.rb +2 -0
  14. data/lib/aws-sdk-s3/encryption/encrypt_handler.rb +2 -0
  15. data/lib/aws-sdk-s3/encryption/kms_cipher_provider.rb +2 -0
  16. data/lib/aws-sdk-s3/encryptionV2/client.rb +98 -23
  17. data/lib/aws-sdk-s3/encryptionV2/decrypt_handler.rb +7 -162
  18. data/lib/aws-sdk-s3/encryptionV2/decryption.rb +205 -0
  19. data/lib/aws-sdk-s3/encryptionV2/default_cipher_provider.rb +17 -0
  20. data/lib/aws-sdk-s3/encryptionV2/encrypt_handler.rb +2 -0
  21. data/lib/aws-sdk-s3/encryptionV2/io_encrypter.rb +2 -0
  22. data/lib/aws-sdk-s3/encryptionV2/kms_cipher_provider.rb +8 -0
  23. data/lib/aws-sdk-s3/encryptionV2/utils.rb +5 -0
  24. data/lib/aws-sdk-s3/encryptionV3/client.rb +885 -0
  25. data/lib/aws-sdk-s3/encryptionV3/decrypt_handler.rb +98 -0
  26. data/lib/aws-sdk-s3/encryptionV3/decryption.rb +244 -0
  27. data/lib/aws-sdk-s3/encryptionV3/default_cipher_provider.rb +159 -0
  28. data/lib/aws-sdk-s3/encryptionV3/default_key_provider.rb +35 -0
  29. data/lib/aws-sdk-s3/encryptionV3/encrypt_handler.rb +98 -0
  30. data/lib/aws-sdk-s3/encryptionV3/errors.rb +47 -0
  31. data/lib/aws-sdk-s3/encryptionV3/io_auth_decrypter.rb +60 -0
  32. data/lib/aws-sdk-s3/encryptionV3/io_decrypter.rb +35 -0
  33. data/lib/aws-sdk-s3/encryptionV3/io_encrypter.rb +84 -0
  34. data/lib/aws-sdk-s3/encryptionV3/key_provider.rb +28 -0
  35. data/lib/aws-sdk-s3/encryptionV3/kms_cipher_provider.rb +159 -0
  36. data/lib/aws-sdk-s3/encryptionV3/materials.rb +58 -0
  37. data/lib/aws-sdk-s3/encryptionV3/utils.rb +321 -0
  38. data/lib/aws-sdk-s3/encryption_v2.rb +1 -0
  39. data/lib/aws-sdk-s3/encryption_v3.rb +24 -0
  40. data/lib/aws-sdk-s3/endpoint_parameters.rb +17 -17
  41. data/lib/aws-sdk-s3/endpoint_provider.rb +241 -68
  42. data/lib/aws-sdk-s3/endpoints.rb +39 -0
  43. data/lib/aws-sdk-s3/errors.rb +11 -0
  44. data/lib/aws-sdk-s3/file_downloader.rb +192 -104
  45. data/lib/aws-sdk-s3/file_uploader.rb +17 -13
  46. data/lib/aws-sdk-s3/multipart_file_uploader.rb +82 -69
  47. data/lib/aws-sdk-s3/multipart_stream_uploader.rb +96 -107
  48. data/lib/aws-sdk-s3/multipart_upload.rb +12 -12
  49. data/lib/aws-sdk-s3/multipart_upload_part.rb +8 -8
  50. data/lib/aws-sdk-s3/object.rb +88 -59
  51. data/lib/aws-sdk-s3/object_acl.rb +5 -5
  52. data/lib/aws-sdk-s3/object_summary.rb +70 -41
  53. data/lib/aws-sdk-s3/object_version.rb +23 -25
  54. data/lib/aws-sdk-s3/plugins/checksum_algorithm.rb +18 -5
  55. data/lib/aws-sdk-s3/plugins/endpoints.rb +1 -1
  56. data/lib/aws-sdk-s3/plugins/http_200_errors.rb +58 -34
  57. data/lib/aws-sdk-s3/transfer_manager.rb +321 -0
  58. data/lib/aws-sdk-s3/types.rb +687 -330
  59. data/lib/aws-sdk-s3.rb +1 -1
  60. data/sig/bucket.rbs +1 -1
  61. data/sig/client.rbs +62 -12
  62. data/sig/errors.rbs +2 -0
  63. data/sig/multipart_upload.rbs +1 -1
  64. data/sig/object.rbs +7 -5
  65. data/sig/object_summary.rbs +7 -5
  66. data/sig/types.rbs +84 -14
  67. metadata +21 -3
@@ -7,19 +7,11 @@ module Aws
7
7
  module S3
8
8
  # @api private
9
9
  class MultipartFileUploader
10
-
11
10
  MIN_PART_SIZE = 5 * 1024 * 1024 # 5MB
12
-
13
11
  MAX_PARTS = 10_000
14
-
15
- THREAD_COUNT = 10
16
-
17
12
  CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
18
-
19
13
  COMPLETE_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
20
-
21
14
  UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)
22
-
23
15
  CHECKSUM_KEYS = Set.new(
24
16
  Client.api.operation(:upload_part).input.shape.members.map do |n, s|
25
17
  n if s.location == 'header' && s.location_name.start_with?('x-amz-checksum-')
@@ -27,10 +19,10 @@ module Aws
27
19
  )
28
20
 
29
21
  # @option options [Client] :client
30
- # @option options [Integer] :thread_count (THREAD_COUNT)
31
22
  def initialize(options = {})
32
23
  @client = options[:client] || Client.new
33
- @thread_count = options[:thread_count] || THREAD_COUNT
24
+ @executor = options[:executor]
25
+ @http_chunk_size = options[:http_chunk_size]
34
26
  end
35
27
 
36
28
  # @return [Client]
@@ -44,11 +36,12 @@ module Aws
44
36
  # It will be invoked with [bytes_read], [total_sizes]
45
37
  # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
46
38
  def upload(source, options = {})
47
- raise ArgumentError, 'unable to multipart upload files smaller than 5MB' if File.size(source) < MIN_PART_SIZE
39
+ file_size = File.size(source)
40
+ raise ArgumentError, 'unable to multipart upload files smaller than 5MB' if file_size < MIN_PART_SIZE
48
41
 
49
42
  upload_id = initiate_upload(options)
50
- parts = upload_parts(upload_id, source, options)
51
- complete_upload(upload_id, parts, source, options)
43
+ parts = upload_parts(upload_id, source, file_size, options)
44
+ complete_upload(upload_id, parts, file_size, options)
52
45
  end
53
46
 
54
47
  private
@@ -57,22 +50,21 @@ module Aws
57
50
  @client.create_multipart_upload(create_opts(options)).upload_id
58
51
  end
59
52
 
60
- def complete_upload(upload_id, parts, source, options)
53
+ def complete_upload(upload_id, parts, file_size, options)
61
54
  @client.complete_multipart_upload(
62
- **complete_opts(options).merge(
63
- upload_id: upload_id,
64
- multipart_upload: { parts: parts },
65
- mpu_object_size: File.size(source)
66
- )
55
+ **complete_opts(options),
56
+ upload_id: upload_id,
57
+ multipart_upload: { parts: parts },
58
+ mpu_object_size: file_size
67
59
  )
68
60
  rescue StandardError => e
69
61
  abort_upload(upload_id, options, [e])
70
62
  end
71
63
 
72
- def upload_parts(upload_id, source, options)
64
+ def upload_parts(upload_id, source, file_size, options)
73
65
  completed = PartList.new
74
- pending = PartList.new(compute_parts(upload_id, source, options))
75
- errors = upload_in_threads(pending, completed, options)
66
+ pending = PartList.new(compute_parts(upload_id, source, file_size, options))
67
+ errors = upload_with_executor(pending, completed, options)
76
68
  if errors.empty?
77
69
  completed.to_a.sort_by { |part| part[:part_number] }
78
70
  else
@@ -87,22 +79,25 @@ module Aws
87
79
  rescue MultipartUploadError => e
88
80
  raise e
89
81
  rescue StandardError => e
90
- msg = "failed to abort multipart upload: #{e.message}. "\
91
- "Multipart upload failed: #{errors.map(&:message).join('; ')}"
82
+ msg = "failed to abort multipart upload: #{e&.message}. " \
83
+ "Multipart upload failed: #{errors.map(&:message).join('; ')}"
92
84
  raise MultipartUploadError.new(msg, errors + [e])
93
85
  end
94
86
 
95
- def compute_parts(upload_id, source, options)
96
- size = File.size(source)
97
- default_part_size = compute_default_part_size(size)
87
+ def compute_parts(upload_id, source, file_size, options)
88
+ default_part_size = compute_default_part_size(file_size)
98
89
  offset = 0
99
90
  part_number = 1
100
91
  parts = []
101
- while offset < size
92
+ while offset < file_size
102
93
  parts << upload_part_opts(options).merge(
103
94
  upload_id: upload_id,
104
95
  part_number: part_number,
105
- body: FilePart.new(source: source, offset: offset, size: part_size(size, default_part_size, offset))
96
+ body: FilePart.new(
97
+ source: source,
98
+ offset: offset,
99
+ size: part_size(file_size, default_part_size, offset)
100
+ )
106
101
  )
107
102
  part_number += 1
108
103
  offset += default_part_size
@@ -118,20 +113,23 @@ module Aws
118
113
  keys.any? { |key| checksum_key?(key) }
119
114
  end
120
115
 
116
+ def checksum_not_required?(options)
117
+ @client.config.request_checksum_calculation == 'when_required' && !options[:checksum_algorithm]
118
+ end
119
+
121
120
  def create_opts(options)
122
- opts = { checksum_algorithm: Aws::Plugins::ChecksumAlgorithm::DEFAULT_CHECKSUM }
123
- opts[:checksum_type] = 'FULL_OBJECT' if has_checksum_key?(options.keys)
124
- CREATE_OPTIONS.each_with_object(opts) do |key, hash|
125
- hash[key] = options[key] if options.key?(key)
121
+ opts = {}
122
+ unless checksum_not_required?(options)
123
+ opts[:checksum_algorithm] = Aws::Plugins::ChecksumAlgorithm::DEFAULT_CHECKSUM
126
124
  end
125
+ opts[:checksum_type] = 'FULL_OBJECT' if has_checksum_key?(options.keys)
126
+ CREATE_OPTIONS.each_with_object(opts) { |k, h| h[k] = options[k] if options.key?(k) }
127
127
  end
128
128
 
129
129
  def complete_opts(options)
130
130
  opts = {}
131
131
  opts[:checksum_type] = 'FULL_OBJECT' if has_checksum_key?(options.keys)
132
- COMPLETE_OPTIONS.each_with_object(opts) do |key, hash|
133
- hash[key] = options[key] if options.key?(key)
134
- end
132
+ COMPLETE_OPTIONS.each_with_object(opts) { |k, h| h[k] = options[k] if options.key?(k) }
135
133
  end
136
134
 
137
135
  def upload_part_opts(options)
@@ -141,43 +139,47 @@ module Aws
141
139
  end
142
140
  end
143
141
 
144
- def upload_in_threads(pending, completed, options)
145
- threads = []
146
- if (callback = options[:progress_callback])
147
- progress = MultipartProgress.new(pending, callback)
148
- end
149
- options.fetch(:thread_count, @thread_count).times do
150
- thread = Thread.new do
151
- begin
152
- while (part = pending.shift)
153
- if progress
154
- part[:on_chunk_sent] =
155
- proc do |_chunk, bytes, _total|
156
- progress.call(part[:part_number], bytes)
157
- end
158
- end
159
- resp = @client.upload_part(part)
160
- part[:body].close
161
- completed_part = { etag: resp.etag, part_number: part[:part_number] }
162
- algorithm = resp.context.params[:checksum_algorithm]
163
- k = "checksum_#{algorithm.downcase}".to_sym
164
- completed_part[k] = resp.send(k)
165
- completed.push(completed_part)
166
- end
167
- nil
168
- rescue StandardError => e
169
- # keep other threads from uploading other parts
170
- pending.clear!
171
- e
172
- end
142
+ def upload_with_executor(pending, completed, options)
143
+ upload_attempts = 0
144
+ completion_queue = Queue.new
145
+ abort_upload = false
146
+ errors = []
147
+ progress = MultipartProgress.new(pending, options[:progress_callback])
148
+
149
+ while (part = pending.shift)
150
+ break if abort_upload
151
+
152
+ upload_attempts += 1
153
+ @executor.post(part) do |p|
154
+ Thread.current[:net_http_override_body_stream_chunk] = @http_chunk_size if @http_chunk_size
155
+ update_progress(progress, p)
156
+ resp = @client.upload_part(p)
157
+ p[:body].close
158
+ completed_part = { etag: resp.etag, part_number: p[:part_number] }
159
+ apply_part_checksum(resp, completed_part)
160
+ completed.push(completed_part)
161
+ rescue StandardError => e
162
+ abort_upload = true
163
+ errors << e
164
+ ensure
165
+ Thread.current[:net_http_override_body_stream_chunk] = nil if @http_chunk_size
166
+ completion_queue << :done
173
167
  end
174
- threads << thread
175
168
  end
176
- threads.map(&:value).compact
169
+
170
+ upload_attempts.times { completion_queue.pop }
171
+ errors
172
+ end
173
+
174
+ def apply_part_checksum(resp, part)
175
+ return unless (checksum = resp.context.params[:checksum_algorithm])
176
+
177
+ k = :"checksum_#{checksum.downcase}"
178
+ part[k] = resp.send(k)
177
179
  end
178
180
 
179
- def compute_default_part_size(source_size)
180
- [(source_size.to_f / MAX_PARTS).ceil, MIN_PART_SIZE].max.to_i
181
+ def compute_default_part_size(file_size)
182
+ [(file_size.to_f / MAX_PARTS).ceil, MIN_PART_SIZE].max.to_i
181
183
  end
182
184
 
183
185
  def part_size(total_size, part_size, offset)
@@ -188,6 +190,15 @@ module Aws
188
190
  end
189
191
  end
190
192
 
193
+ def update_progress(progress, part)
194
+ return unless progress.progress_callback
195
+
196
+ part[:on_chunk_sent] =
197
+ proc do |_chunk, bytes, _total|
198
+ progress.call(part[:part_number], bytes)
199
+ end
200
+ end
201
+
191
202
  # @api private
192
203
  class PartList
193
204
  def initialize(parts = [])
@@ -228,6 +239,8 @@ module Aws
228
239
  @progress_callback = progress_callback
229
240
  end
230
241
 
242
+ attr_reader :progress_callback
243
+
231
244
  def call(part_number, bytes_read)
232
245
  # part numbers start at 1
233
246
  @bytes_sent[part_number - 1] = bytes_read
@@ -9,33 +9,18 @@ module Aws
9
9
  module S3
10
10
  # @api private
11
11
  class MultipartStreamUploader
12
- # api private
13
- PART_SIZE = 5 * 1024 * 1024 # 5MB
14
12
 
15
- # api private
16
- THREAD_COUNT = 10
17
-
18
- # api private
19
- TEMPFILE_PREIX = 'aws-sdk-s3-upload_stream'.freeze
20
-
21
- # @api private
22
- CREATE_OPTIONS =
23
- Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
24
-
25
- # @api private
26
- UPLOAD_PART_OPTIONS =
27
- Set.new(Client.api.operation(:upload_part).input.shape.member_names)
28
-
29
- # @api private
30
- COMPLETE_UPLOAD_OPTIONS =
31
- Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
13
+ DEFAULT_PART_SIZE = 5 * 1024 * 1024 # 5MB
14
+ CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
15
+ UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)
16
+ COMPLETE_UPLOAD_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
32
17
 
33
18
  # @option options [Client] :client
34
19
  def initialize(options = {})
35
20
  @client = options[:client] || Client.new
21
+ @executor = options[:executor]
36
22
  @tempfile = options[:tempfile]
37
- @part_size = options[:part_size] || PART_SIZE
38
- @thread_count = options[:thread_count] || THREAD_COUNT
23
+ @part_size = options[:part_size] || DEFAULT_PART_SIZE
39
24
  end
40
25
 
41
26
  # @return [Client]
@@ -43,7 +28,6 @@ module Aws
43
28
 
44
29
  # @option options [required,String] :bucket
45
30
  # @option options [required,String] :key
46
- # @option options [Integer] :thread_count (THREAD_COUNT)
47
31
  # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
48
32
  def upload(options = {}, &block)
49
33
  Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
@@ -61,86 +45,80 @@ module Aws
61
45
 
62
46
  def complete_upload(upload_id, parts, options)
63
47
  @client.complete_multipart_upload(
64
- **complete_opts(options).merge(
65
- upload_id: upload_id,
66
- multipart_upload: { parts: parts }
67
- )
48
+ **complete_opts(options).merge(upload_id: upload_id, multipart_upload: { parts: parts })
68
49
  )
50
+ rescue StandardError => e
51
+ abort_upload(upload_id, options, [e])
69
52
  end
70
53
 
71
54
  def upload_parts(upload_id, options, &block)
72
- completed = Queue.new
73
- thread_errors = []
74
- errors = begin
55
+ completed_parts = Queue.new
56
+ errors = []
57
+
58
+ begin
75
59
  IO.pipe do |read_pipe, write_pipe|
76
- threads = upload_in_threads(
77
- read_pipe, completed,
78
- upload_part_opts(options).merge(upload_id: upload_id),
79
- thread_errors)
80
- begin
81
- block.call(write_pipe)
82
- ensure
83
- # Ensure the pipe is closed to avoid https://github.com/jruby/jruby/issues/6111
84
- write_pipe.close
60
+ upload_thread = Thread.new do
61
+ upload_with_executor(
62
+ read_pipe,
63
+ completed_parts,
64
+ errors,
65
+ upload_part_opts(options).merge(upload_id: upload_id)
66
+ )
85
67
  end
86
- threads.map(&:value).compact
68
+
69
+ block.call(write_pipe)
70
+ ensure
71
+ # Ensure the pipe is closed to avoid https://github.com/jruby/jruby/issues/6111
72
+ write_pipe.close
73
+ upload_thread.join
87
74
  end
88
- rescue => e
89
- thread_errors + [e]
75
+ rescue StandardError => e
76
+ errors << e
90
77
  end
78
+ return ordered_parts(completed_parts) if errors.empty?
91
79
 
92
- if errors.empty?
93
- Array.new(completed.size) { completed.pop }.sort_by { |part| part[:part_number] }
94
- else
95
- abort_upload(upload_id, options, errors)
96
- end
80
+ abort_upload(upload_id, options, errors)
97
81
  end
98
82
 
99
83
  def abort_upload(upload_id, options, errors)
100
- @client.abort_multipart_upload(
101
- bucket: options[:bucket],
102
- key: options[:key],
103
- upload_id: upload_id
104
- )
84
+ @client.abort_multipart_upload(bucket: options[:bucket], key: options[:key], upload_id: upload_id)
105
85
  msg = "multipart upload failed: #{errors.map(&:message).join('; ')}"
106
86
  raise MultipartUploadError.new(msg, errors)
107
- rescue MultipartUploadError => error
108
- raise error
109
- rescue => error
110
- msg = "failed to abort multipart upload: #{error.message}. "\
87
+ rescue MultipartUploadError => e
88
+ raise e
89
+ rescue StandardError => e
90
+ msg = "failed to abort multipart upload: #{e.message}. "\
111
91
  "Multipart upload failed: #{errors.map(&:message).join('; ')}"
112
- raise MultipartUploadError.new(msg, errors + [error])
92
+ raise MultipartUploadError.new(msg, errors + [e])
113
93
  end
114
94
 
115
95
  def create_opts(options)
116
- CREATE_OPTIONS.inject({}) do |hash, key|
96
+ CREATE_OPTIONS.each_with_object({}) do |key, hash|
117
97
  hash[key] = options[key] if options.key?(key)
118
- hash
119
98
  end
120
99
  end
121
100
 
122
101
  def upload_part_opts(options)
123
- UPLOAD_PART_OPTIONS.inject({}) do |hash, key|
102
+ UPLOAD_PART_OPTIONS.each_with_object({}) do |key, hash|
124
103
  hash[key] = options[key] if options.key?(key)
125
- hash
126
104
  end
127
105
  end
128
106
 
129
107
  def complete_opts(options)
130
- COMPLETE_UPLOAD_OPTIONS.inject({}) do |hash, key|
108
+ COMPLETE_UPLOAD_OPTIONS.each_with_object({}) do |key, hash|
131
109
  hash[key] = options[key] if options.key?(key)
132
- hash
133
110
  end
134
111
  end
135
112
 
136
113
  def read_to_part_body(read_pipe)
137
114
  return if read_pipe.closed?
138
- temp_io = @tempfile ? Tempfile.new(TEMPFILE_PREIX) : StringIO.new(String.new)
115
+
116
+ temp_io = @tempfile ? Tempfile.new('aws-sdk-s3-upload_stream') : StringIO.new(String.new)
139
117
  temp_io.binmode
140
118
  bytes_copied = IO.copy_stream(read_pipe, temp_io, @part_size)
141
119
  temp_io.rewind
142
- if bytes_copied == 0
143
- if Tempfile === temp_io
120
+ if bytes_copied.zero?
121
+ if temp_io.is_a?(Tempfile)
144
122
  temp_io.close
145
123
  temp_io.unlink
146
124
  end
@@ -150,51 +128,62 @@ module Aws
150
128
  end
151
129
  end
152
130
 
153
- def upload_in_threads(read_pipe, completed, options, thread_errors)
154
- mutex = Mutex.new
131
+ def upload_with_executor(read_pipe, completed, errors, options)
132
+ completion_queue = Queue.new
133
+ queued_parts = 0
155
134
  part_number = 0
156
- options.fetch(:thread_count, @thread_count).times.map do
157
- thread = Thread.new do
158
- begin
159
- loop do
160
- body, thread_part_number = mutex.synchronize do
161
- [read_to_part_body(read_pipe), part_number += 1]
162
- end
163
- break unless (body || thread_part_number == 1)
164
- begin
165
- part = options.merge(
166
- body: body,
167
- part_number: thread_part_number,
168
- )
169
- resp = @client.upload_part(part)
170
- completed_part = {etag: resp.etag, part_number: part[:part_number]}
171
-
172
- # get the requested checksum from the response
173
- if part[:checksum_algorithm]
174
- k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
175
- completed_part[k] = resp[k]
176
- end
177
- completed.push(completed_part)
178
- ensure
179
- if Tempfile === body
180
- body.close
181
- body.unlink
182
- elsif StringIO === body
183
- body.string.clear
184
- end
185
- end
186
- end
187
- nil
188
- rescue => error
189
- # keep other threads from uploading other parts
190
- mutex.synchronize do
191
- thread_errors.push(error)
192
- read_pipe.close_read unless read_pipe.closed?
193
- end
194
- error
135
+ mutex = Mutex.new
136
+ loop do
137
+ part_body, current_part_num = mutex.synchronize do
138
+ [read_to_part_body(read_pipe), part_number += 1]
139
+ end
140
+ break unless part_body || current_part_num == 1
141
+
142
+ queued_parts += 1
143
+ @executor.post(part_body, current_part_num, options) do |body, num, opts|
144
+ part = opts.merge(body: body, part_number: num)
145
+ resp = @client.upload_part(part)
146
+ completed_part = create_completed_part(resp, part)
147
+ completed.push(completed_part)
148
+ rescue StandardError => e
149
+ mutex.synchronize do
150
+ errors.push(e)
151
+ read_pipe.close_read unless read_pipe.closed?
195
152
  end
153
+ ensure
154
+ clear_body(body)
155
+ completion_queue << :done
196
156
  end
197
- thread
157
+ end
158
+ queued_parts.times { completion_queue.pop }
159
+ end
160
+
161
+ def create_completed_part(resp, part)
162
+ completed_part = { etag: resp.etag, part_number: part[:part_number] }
163
+ return completed_part unless part[:checksum_algorithm]
164
+
165
+ # get the requested checksum from the response
166
+ k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
167
+ completed_part[k] = resp[k]
168
+ completed_part
169
+ end
170
+
171
+ def ordered_parts(parts)
172
+ sorted = []
173
+ until parts.empty?
174
+ part = parts.pop
175
+ index = sorted.bsearch_index { |p| p[:part_number] >= part[:part_number] } || sorted.size
176
+ sorted.insert(index, part)
177
+ end
178
+ sorted
179
+ end
180
+
181
+ def clear_body(body)
182
+ if body.is_a?(Tempfile)
183
+ body.close
184
+ body.unlink
185
+ elsif body.is_a?(StringIO)
186
+ body.string.clear
198
187
  end
199
188
  end
200
189
  end
@@ -260,10 +260,10 @@ module Aws::S3
260
260
  # Confirms that the requester knows that they will be charged for the
261
261
  # request. Bucket owners need not specify this parameter in their
262
262
  # requests. If either the source or destination S3 bucket has Requester
263
- # Pays enabled, the requester will pay for corresponding charges to copy
264
- # the object. For information about downloading objects from Requester
265
- # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
266
- # in the *Amazon S3 User Guide*.
263
+ # Pays enabled, the requester will pay for the corresponding charges.
264
+ # For information about downloading objects from Requester Pays buckets,
265
+ # see [Downloading Objects in Requester Pays Buckets][1] in the *Amazon
266
+ # S3 User Guide*.
267
267
  #
268
268
  # <note markdown="1"> This functionality is not supported for directory buckets.
269
269
  #
@@ -404,10 +404,10 @@ module Aws::S3
404
404
  # Confirms that the requester knows that they will be charged for the
405
405
  # request. Bucket owners need not specify this parameter in their
406
406
  # requests. If either the source or destination S3 bucket has Requester
407
- # Pays enabled, the requester will pay for corresponding charges to copy
408
- # the object. For information about downloading objects from Requester
409
- # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
410
- # in the *Amazon S3 User Guide*.
407
+ # Pays enabled, the requester will pay for the corresponding charges.
408
+ # For information about downloading objects from Requester Pays buckets,
409
+ # see [Downloading Objects in Requester Pays Buckets][1] in the *Amazon
410
+ # S3 User Guide*.
411
411
  #
412
412
  # <note markdown="1"> This functionality is not supported for directory buckets.
413
413
  #
@@ -553,10 +553,10 @@ module Aws::S3
553
553
  # Confirms that the requester knows that they will be charged for the
554
554
  # request. Bucket owners need not specify this parameter in their
555
555
  # requests. If either the source or destination S3 bucket has Requester
556
- # Pays enabled, the requester will pay for corresponding charges to copy
557
- # the object. For information about downloading objects from Requester
558
- # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
559
- # in the *Amazon S3 User Guide*.
556
+ # Pays enabled, the requester will pay for the corresponding charges.
557
+ # For information about downloading objects from Requester Pays buckets,
558
+ # see [Downloading Objects in Requester Pays Buckets][1] in the *Amazon
559
+ # S3 User Guide*.
560
560
  #
561
561
  # <note markdown="1"> This functionality is not supported for directory buckets.
562
562
  #
@@ -475,10 +475,10 @@ module Aws::S3
475
475
  # Confirms that the requester knows that they will be charged for the
476
476
  # request. Bucket owners need not specify this parameter in their
477
477
  # requests. If either the source or destination S3 bucket has Requester
478
- # Pays enabled, the requester will pay for corresponding charges to copy
479
- # the object. For information about downloading objects from Requester
480
- # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
481
- # in the *Amazon S3 User Guide*.
478
+ # Pays enabled, the requester will pay for the corresponding charges.
479
+ # For information about downloading objects from Requester Pays buckets,
480
+ # see [Downloading Objects in Requester Pays Buckets][1] in the *Amazon
481
+ # S3 User Guide*.
482
482
  #
483
483
  # <note markdown="1"> This functionality is not supported for directory buckets.
484
484
  #
@@ -642,10 +642,10 @@ module Aws::S3
642
642
  # Confirms that the requester knows that they will be charged for the
643
643
  # request. Bucket owners need not specify this parameter in their
644
644
  # requests. If either the source or destination S3 bucket has Requester
645
- # Pays enabled, the requester will pay for corresponding charges to copy
646
- # the object. For information about downloading objects from Requester
647
- # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
648
- # in the *Amazon S3 User Guide*.
645
+ # Pays enabled, the requester will pay for the corresponding charges.
646
+ # For information about downloading objects from Requester Pays buckets,
647
+ # see [Downloading Objects in Requester Pays Buckets][1] in the *Amazon
648
+ # S3 User Guide*.
649
649
  #
650
650
  # <note markdown="1"> This functionality is not supported for directory buckets.
651
651
  #