aws-sdk-s3 1.70.0 → 1.75.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 5a991d91a05dd60c0952369cfe5c4ca7c62b18c01b26afa41d0bc19ca4571c86
- data.tar.gz: 03d8fdd2a61fc25246e8ef5c54a61091e6e28d7dceb2070ec2b176b1a491a709
+ metadata.gz: 75f8d275e892e31dfb80d004f49fb465fc4388e92f20e2827089d5dc76e0d821
+ data.tar.gz: 1f84cba631bb2e6a81d07ea671fec120d6bf33dcbfe6e427de26444c36627cf8
  SHA512:
- metadata.gz: 1b53eacdfcfe2bdcd3dcc1d8b3719664ea6647a3156399c403f3fb5733ac1b43820e663095f98a0284235b69d0ec6ed40314f41da3647810989824357f891d42
- data.tar.gz: b28160c528d2554dd6c5c8d1f4bc2dbbddc5d91cf08d7df81de8a87400225f7d87b75d89e974d07e38af82bdd41aa1d421d9b7383d31007e76196efcb4d991f3
+ metadata.gz: f034970098a597fd9d6eeb87a8a82c1ed30ff918ca292b1a0a3f380f08e5b8f504ec619b1763312720f1b28e1df6ba7b56c0bac19dc09c9923bc21aa8be189d1
+ data.tar.gz: 0bb581e24c1e3d0fa6306c574146f7ef1e85ff949911d7917c0f4cf8cf85b364cf8b0210ed2ba54a85ec5838a463368ede8f978846e123f79f78e92e3fffd4c9
@@ -68,6 +68,6 @@ require_relative 'aws-sdk-s3/event_streams'
  # @service
  module Aws::S3

- GEM_VERSION = '1.70.0'
+ GEM_VERSION = '1.75.0'

  end
@@ -44,6 +44,7 @@ require 'aws-sdk-s3/plugins/sse_cpk.rb'
  require 'aws-sdk-s3/plugins/url_encoded_keys.rb'
  require 'aws-sdk-s3/plugins/s3_signer.rb'
  require 'aws-sdk-s3/plugins/bucket_name_restrictions.rb'
+ require 'aws-sdk-s3/plugins/streaming_retry.rb'
  require 'aws-sdk-core/plugins/event_stream_configuration.rb'

  Aws::Plugins::GlobalConfiguration.add_identifier(:s3)
@@ -106,6 +107,7 @@ module Aws::S3
  add_plugin(Aws::S3::Plugins::UrlEncodedKeys)
  add_plugin(Aws::S3::Plugins::S3Signer)
  add_plugin(Aws::S3::Plugins::BucketNameRestrictions)
+ add_plugin(Aws::S3::Plugins::StreamingRetry)
  add_plugin(Aws::Plugins::EventStreamConfiguration)

  # @overload initialize(options)
@@ -333,7 +335,7 @@ module Aws::S3
  # @option options [Boolean] :use_accelerate_endpoint (false)
  # When set to `true`, accelerated bucket endpoints will be used
  # for all object operations. You must first enable accelerate for
- # each bucket. [Go here for more information](http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
+ # each bucket. [Go here for more information](http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
  #
  # @option options [Boolean] :use_dualstack_endpoint (false)
  # When set to `true`, IPv6-compatible bucket endpoints will be used
@@ -4587,8 +4589,10 @@ module Aws::S3
  #
  # @example Streaming data to a block
  # # WARNING: yielding data to a block disables retries of networking errors
+ # # However truncation of the body will be retried automatically using a range request
  # File.open('/path/to/file', 'wb') do |file|
- # s3.get_object(bucket: 'bucket-name', key: 'object-key') do |chunk|
+ # s3.get_object(bucket: 'bucket-name', key: 'object-key') do |chunk, headers|
+ # # headers['content-length']
  # file.write(chunk)
  # end
  # end
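
For context on the documentation change above, a minimal sketch of the new block form follows. It assumes a configured client and uses placeholder region, bucket, key, and path values; the two-argument block (`chunk, headers`) is what 1.75.0 documents, and truncated bodies are resumed with a range request by the new StreamingRetry plugin shown later in this diff.

    require 'aws-sdk-s3'

    # Placeholder region, bucket, key, and file path.
    s3 = Aws::S3::Client.new(region: 'us-east-1')

    File.open('/path/to/file', 'wb') do |file|
      s3.get_object(bucket: 'bucket-name', key: 'object-key') do |chunk, headers|
        # headers is the response header hash, e.g. headers['content-length'].
        file.write(chunk)
      end
    end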
@@ -11670,7 +11674,7 @@ module Aws::S3
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-s3'
- context[:gem_version] = '1.70.0'
+ context[:gem_version] = '1.75.0'
  Seahorse::Client::Request.new(handlers, context)
  end
@@ -296,6 +296,14 @@ module Aws
  # etag = response.etag
  # end
  #
+ # You can provide a callback to monitor progress of the upload:
+ #
+ # # bytes and totals are each an array with 1 entry per part
+ # progress = Proc.new do |bytes, totals|
+ # puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{totals[i]}"}.join(' ') + "Total: #{100.0 * bytes.sum / totals.sum }%" }
+ # end
+ # obj.upload_file('/path/to/file')
+ #
  # @param [String, Pathname, File, Tempfile] source A file on the local
  # file system that will be uploaded as this object. This can either be
  # a String or Pathname to the file, an open File object, or an open
@@ -312,6 +320,10 @@ module Aws
  # multipart uploads. This option is not used if the file is smaller than
  # `:multipart_threshold`.
  #
+ # @option options [Proc] :progress_callback
+ # A Proc that will be called when each chunk of the upload is sent.
+ # It will be invoked with [bytes_read], [total_sizes]
+ #
  # @raise [MultipartUploadError] If an object is being uploaded in
  # parts, and the upload can not be completed, then the upload is
  # aborted and this error is raised. The raised error has a `#errors`
@@ -320,7 +332,6 @@ module Aws
  #
  # @return [Boolean] Returns `true` when the object is uploaded
  # without any errors.
- #
  def upload_file(source, options = {})
  uploading_options = options.dup
  uploader = FileUploader.new(
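
The new `:progress_callback` documented above is wired through the uploader hunks further down. Note that the added doc example defines `progress` but never passes it to `upload_file`; a hedged usage sketch that does, with placeholder region, bucket, key, and path:

    require 'aws-sdk-s3'

    # Placeholder region, bucket, and key.
    obj = Aws::S3::Resource.new(region: 'us-east-1')
                           .bucket('bucket-name')
                           .object('object-key')

    # bytes and totals each carry one entry per part (a single entry
    # for uploads below the multipart threshold).
    progress = proc do |bytes, totals|
      puts format('%.1f%% uploaded', 100.0 * bytes.sum / totals.sum)
    end

    obj.upload_file('/path/to/file', progress_callback: progress)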
@@ -10,6 +10,7 @@ module Aws
  @cipher = cipher
  # Ensure that IO is reset between retries
  @io = io.tap { |io| io.truncate(0) if io.respond_to?(:truncate) }
+ @cipher_buffer = String.new
  end

  # @return [#write]
@@ -17,7 +18,11 @@ module Aws
  def write(chunk)
  # decrypt and write
- @io.write(@cipher.update(chunk))
+ if @cipher.method(:update).arity == 1
+ @io.write(@cipher.update(chunk))
+ else
+ @io.write(@cipher.update(chunk, @cipher_buffer))
+ end
  end

  def finalize
@@ -52,8 +52,12 @@ module Aws
  def encrypt_to_tempfile(cipher, io)
  encrypted = Tempfile.new(self.object_id.to_s)
  encrypted.binmode
- while chunk = io.read(ONE_MEGABYTE)
- encrypted.write(cipher.update(chunk))
+ while chunk = io.read(ONE_MEGABYTE, read_buffer ||= String.new)
+ if cipher.method(:update).arity == 1
+ encrypted.write(cipher.update(chunk))
+ else
+ encrypted.write(cipher.update(chunk, cipher_buffer ||= String.new))
+ end
  end
  encrypted.write(cipher.final)
  encrypted.write(cipher.auth_tag)
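
Both the decrypter and the tempfile encrypter above now reuse string buffers when the cipher's `update` accepts an output-buffer argument; the arity check keeps older OpenSSL bindings (single-argument `update`) working. A rough standalone sketch of that pattern, assuming a recent Ruby openssl where `OpenSSL::Cipher#update(data, buffer)` is available:

    require 'openssl'

    cipher = OpenSSL::Cipher.new('aes-256-gcm').encrypt
    cipher.key = OpenSSL::Random.random_bytes(32)
    cipher.iv  = OpenSSL::Random.random_bytes(12)

    buffer = String.new
    chunk  = 'example plaintext chunk'

    # Newer bindings overwrite `buffer` in place, avoiding a fresh
    # String allocation per chunk; older ones only accept the data.
    ciphertext =
      if cipher.method(:update).arity == 1
        cipher.update(chunk)
      else
        cipher.update(chunk, buffer)
      end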
@@ -29,6 +29,9 @@ module Aws
  # @param [String, Pathname, File, Tempfile] source The file to upload.
  # @option options [required, String] :bucket The bucket to upload to.
  # @option options [required, String] :key The key for the object.
+ # @option options [Proc] :progress_callback
+ # A Proc that will be called when each chunk of the upload is sent.
+ # It will be invoked with [bytes_read], [total_sizes]
  # @return [void]
  def upload(source, options = {})
  if File.size(source) >= multipart_threshold
@@ -49,11 +52,19 @@ module Aws
  end

  def put_object(source, options)
+ if (callback = options.delete(:progress_callback))
+ options[:on_chunk_sent] = single_part_progress(callback)
+ end
  open_file(source) do |file|
  @client.put_object(options.merge(body: file))
  end
  end

+ def single_part_progress(progress_callback)
+ proc do |_chunk, bytes_read, total_size|
+ progress_callback.call([bytes_read], [total_size])
+ end
+ end
  end
  end
  end
@@ -39,6 +39,9 @@ module Aws
  # @param [String, Pathname, File, Tempfile] source The file to upload.
  # @option options [required, String] :bucket The bucket to upload to.
  # @option options [required, String] :key The key for the object.
+ # @option options [Proc] :progress_callback
+ # A Proc that will be called when each chunk of the upload is sent.
+ # It will be invoked with [bytes_read], [total_sizes]
  # @return [void]
  def upload(source, options = {})
  if File.size(source) < MIN_PART_SIZE
@@ -68,7 +71,7 @@ module Aws
  def upload_parts(upload_id, source, options)
  pending = PartList.new(compute_parts(upload_id, source, options))
  completed = PartList.new
- errors = upload_in_threads(pending, completed)
+ errors = upload_in_threads(pending, completed, options)
  if errors.empty?
  completed.to_a.sort_by { |part| part[:part_number] }
  else
@@ -127,12 +130,21 @@ module Aws
  end
  end

- def upload_in_threads(pending, completed)
+ def upload_in_threads(pending, completed, options)
  threads = []
+ if (callback = options[:progress_callback])
+ progress = MultipartProgress.new(pending, callback)
+ end
  @thread_count.times do
  thread = Thread.new do
  begin
  while part = pending.shift
+ if progress
+ part[:on_chunk_sent] =
+ proc do |_chunk, bytes, _total|
+ progress.call(part[:part_number], bytes)
+ end
+ end
  resp = @client.upload_part(part)
  part[:body].close
  completed.push(etag: resp.etag, part_number: part[:part_number])
@@ -182,11 +194,34 @@ module Aws
  @mutex.synchronize { @parts.clear }
  end

+ def size
+ @mutex.synchronize { @parts.size }
+ end
+
+ def part_sizes
+ @mutex.synchronize { @parts.map { |p| p[:body].size } }
+ end
+
  def to_a
  @mutex.synchronize { @parts.dup }
  end

  end
+
+ # @api private
+ class MultipartProgress
+ def initialize(parts, progress_callback)
+ @bytes_sent = Array.new(parts.size, 0)
+ @total_sizes = parts.part_sizes
+ @progress_callback = progress_callback
+ end
+
+ def call(part_number, bytes_read)
+ # part numbers start at 1
+ @bytes_sent[part_number - 1] = bytes_read
+ @progress_callback.call(@bytes_sent, @total_sizes)
+ end
+ end
  end
  end
  end
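
MultipartProgress above republishes the whole per-part state on every `on_chunk_sent` tick, so a callback sees cumulative counts rather than deltas. A small hedged sketch of a per-part report (this proc would be handed to `upload_file` as `progress_callback:`):

    # bytes_sent[i] is the cumulative count for part i + 1 (part numbers
    # start at 1); total_sizes[i] is that part's size in bytes.
    progress = proc do |bytes_sent, total_sizes|
      report = bytes_sent.each_with_index.map do |sent, i|
        "part #{i + 1}: #{sent}/#{total_sizes[i]}"
      end
      puts report.join(' | ')
    end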
@@ -3,36 +3,46 @@
  module Aws
  module S3
  module Plugins
-
  # Provides support for using `Aws::S3::Client` with Amazon S3 Transfer
  # Acceleration.
  #
  # Go here for more information about transfer acceleration:
  # [http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html](http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
  class Accelerate < Seahorse::Client::Plugin
-
- option(:use_accelerate_endpoint,
+ option(
+ :use_accelerate_endpoint,
  default: false,
  doc_type: 'Boolean',
  docstring: <<-DOCS)
When set to `true`, accelerated bucket endpoints will be used
for all object operations. You must first enable accelerate for
- each bucket. [Go here for more information](http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
+ each bucket. [Go here for more information](http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
  DOCS

  def add_handlers(handlers, config)
  operations = config.api.operation_names - [
- :create_bucket, :list_buckets, :delete_bucket,
+ :create_bucket, :list_buckets, :delete_bucket
  ]
- handlers.add(OptionHandler, step: :initialize, operations: operations)
- handlers.add(AccelerateHandler, step: :build, priority: 0, operations: operations)
+ # Need 2 handlers so that the context can be set for other plugins
+ # and to remove :use_accelerate_endpoint from the params.
+ handlers.add(
+ OptionHandler, step: :initialize, operations: operations
+ )
+ handlers.add(
+ AccelerateHandler, step: :build, priority: 0, operations: operations
+ )
  end

  # @api private
  class OptionHandler < Seahorse::Client::Handler
  def call(context)
- accelerate = context.params.delete(:use_accelerate_endpoint)
- accelerate = context.config.use_accelerate_endpoint if accelerate.nil?
+ # Support client configuration and per-operation configuration
+ if context.params.is_a?(Hash)
+ accelerate = context.params.delete(:use_accelerate_endpoint)
+ end
+ if accelerate.nil?
+ accelerate = context.config.use_accelerate_endpoint
+ end
  context[:use_accelerate_endpoint] = accelerate
  @handler.call(context)
  end
@@ -40,39 +50,24 @@ each bucket. [Go here for more information](http://docs.aws.amazon.com/AmazonS3
  # @api private
  class AccelerateHandler < Seahorse::Client::Handler
-
  def call(context)
  if context[:use_accelerate_endpoint]
- if context[:use_dualstack_endpoint]
- use_combined_accelerate_dualstack_endpoint(context)
- else
- use_accelerate_endpoint(context)
- end
+ dualstack = !!context[:use_dualstack_endpoint]
+ use_accelerate_endpoint(context, dualstack)
  end
  @handler.call(context)
  end

  private

- def use_accelerate_endpoint(context)
+ def use_accelerate_endpoint(context, dualstack)
  bucket_name = context.params[:bucket]
  validate_bucket_name!(bucket_name)
  endpoint = URI.parse(context.http_request.endpoint.to_s)
  endpoint.scheme = 'https'
  endpoint.port = 443
- endpoint.host = "#{bucket_name}.s3-accelerate.amazonaws.com"
- context.http_request.endpoint = endpoint.to_s
- # s3 accelerate endpoint doesn't work with 'expect' header
- context.http_request.headers.delete('expect')
- end
-
- def use_combined_accelerate_dualstack_endpoint(context)
- bucket_name = context.params[:bucket]
- validate_bucket_name!(bucket_name)
- endpoint = URI.parse(context.http_request.endpoint.to_s)
- endpoint.scheme = 'https'
- endpoint.port = 443
- endpoint.host = "#{bucket_name}.s3-accelerate.dualstack.amazonaws.com"
+ endpoint.host = "#{bucket_name}.s3-accelerate"\
+ "#{'.dualstack' if dualstack}.amazonaws.com"
  context.http_request.endpoint = endpoint.to_s
  # s3 accelerate endpoint doesn't work with 'expect' header
  context.http_request.headers.delete('expect')
@@ -80,17 +75,11 @@ each bucket. [Go here for more information](http://docs.aws.amazon.com/AmazonS3
  def validate_bucket_name!(bucket_name)
  unless BucketDns.dns_compatible?(bucket_name, _ssl = true)
- msg = 'unable to use `accelerate: true` on buckets with '\
- 'non-DNS compatible names'
- raise ArgumentError, msg
- end
- if bucket_name.include?('.')
- msg = 'unable to use `accelerate: true` on buckets with dots'\
- "in their name: #{bucket_name.inspect}"
- raise ArgumentError, msg
+ raise ArgumentError,
+ 'Unable to use `use_accelerate_endpoint: true` on buckets '\
+ 'with non-DNS compatible names.'
  end
  end
-
  end
  end
  end
@@ -22,7 +22,9 @@ for all operations.
  # @api private
  class OptionHandler < Seahorse::Client::Handler
  def call(context)
- dualstack = context.params.delete(:use_dualstack_endpoint)
+ if context.params.is_a?(Hash)
+ dualstack = context.params.delete(:use_dualstack_endpoint)
+ end
  dualstack = context.config.use_dualstack_endpoint if dualstack.nil?
  context[:use_dualstack_endpoint] = dualstack
  @handler.call(context)
@@ -40,7 +40,8 @@ module Aws
  end
  end

- handler(Handler,
+ handler(
+ Handler,
  step: :sign,
  operations: [
  :complete_multipart_upload,
@@ -20,7 +20,7 @@ This should only be disabled for local testing.
  class Handler < Seahorse::Client::Handler

  def call(context)
- compute_key_md5(context)
+ compute_key_md5(context) if context.params.is_a?(Hash)
  @handler.call(context)
  end
@@ -0,0 +1,118 @@
+ # frozen_string_literal: true
+
+ require 'forwardable'
+
+ module Aws
+ module S3
+ module Plugins
+
+ # A wrapper around BlockIO that adds no-ops for truncate and rewind
+ # @api private
+ class RetryableBlockIO
+ extend Forwardable
+ def_delegators :@block_io, :write, :read, :size
+
+ def initialize(block_io)
+ @block_io = block_io
+ end
+
+ def truncate(_integer); end
+
+ def rewind; end
+ end
+
+ # A wrapper around ManagedFile that adds no-ops for truncate and rewind
+ # @api private
+ class RetryableManagedFile
+ extend Forwardable
+ def_delegators :@file, :write, :read, :size, :open?, :close
+
+ def initialize(managed_file)
+ @file = managed_file
+ end
+
+ def truncate(_integer); end
+
+ def rewind; end
+ end
+
+ # This handler works with the ResponseTarget plugin to provide smart
+ # retries of S3 streaming operations that support the range parameter
+ # (currently only: get_object). When a 200 OK with a TruncatedBodyError
+ # is received this handler will add a range header that excludes the
+ # data that has already been processed (written to file or sent to
+ # the target Proc).
+ # It is important to not write data to the custom target in the case of
+ # a non-success response. We do not want to write an XML error
+ # message to someone's file or pass it to a user's Proc.
+ # @api private
+ class StreamingRetry < Seahorse::Client::Plugin
+
+ class Handler < Seahorse::Client::Handler
+
+ def call(context)
+ target = context.params[:response_target] || context[:response_target]
+
+ # retry is only supported when range is NOT set on the initial request
+ if supported_target?(target) && !context.params[:range]
+ add_event_listeners(context, target)
+ end
+ @handler.call(context)
+ end
+
+ private
+
+ def add_event_listeners(context, target)
+ context.http_response.on_headers(200..299) do
+ case context.http_response.body
+ when Seahorse::Client::BlockIO then
+ context.http_response.body = RetryableBlockIO.new(context.http_response.body)
+ when Seahorse::Client::ManagedFile then
+ context.http_response.body = RetryableManagedFile.new(context.http_response.body)
+ end
+ end
+
+ context.http_response.on_headers(400..599) do
+ context.http_response.body = StringIO.new # something to write the error to
+ end
+
+ context.http_response.on_success(200..299) do
+ body = context.http_response.body
+ if body.is_a?(RetryableManagedFile) && body.open?
+ body.close
+ end
+ end
+
+ context.http_response.on_error do |error|
+ if retryable_body?(context) && truncated_body?(error)
+ context.http_request.headers[:range] = "bytes=#{context.http_response.body.size}-"
+ end
+ end
+ end
+
+ def truncated_body?(error)
+ error.is_a?(Seahorse::Client::NetworkingError) &&
+ error.original_error.is_a?(
+ Seahorse::Client::NetHttp::Handler::TruncatedBodyError
+ )
+ end
+
+ def retryable_body?(context)
+ context.http_response.body.is_a?(RetryableBlockIO) ||
+ context.http_response.body.is_a?(RetryableManagedFile)
+ end
+
+ def supported_target?(target)
+ case target
+ when Proc, String, Pathname then true
+ else false
+ end
+ end
+ end
+
+ handler(Handler, step: :sign, operations: [:get_object], priority: 10)
+
+ end
+ end
+ end
+ end
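
In practice this plugin only engages when the response target is a Proc, String, or Pathname and the request has no explicit `:range`, so an ordinary download to a path benefits without any opt-in. A hedged sketch of the scenario it covers, with placeholder region, bucket, key, and path:

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new(region: 'us-east-1')

    # If the body arrives truncated (a TruncatedBodyError), the retry is
    # sent with `Range: bytes=<bytes already written>-` so the download
    # resumes instead of restarting, and 4xx/5xx error bodies are
    # diverted to a StringIO rather than the target file.
    s3.get_object(
      bucket: 'bucket-name',
      key: 'object-key',
      response_target: '/path/to/file'
    )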
@@ -185,35 +185,58 @@ module Aws
  # the post will expire. Defaults to one hour from creation of the
  # presigned post. May not exceed one week from creation time.
  # @option options [String] :key See {PresignedPost#key}.
- # @option options [String] :key_starts_with See {PresignedPost#key_starts_with}.
+ # @option options [String] :key_starts_with
+ # See {PresignedPost#key_starts_with}.
  # @option options [String] :acl See {PresignedPost#acl}.
- # @option options [String] :acl_starts_with See {PresignedPost#acl_starts_with}.
- # @option options [String] :cache_control See {PresignedPost#cache_control}.
- # @option options [String] :cache_control_starts_with See {PresignedPost#cache_control_starts_with}.
+ # @option options [String] :acl_starts_with
+ # See {PresignedPost#acl_starts_with}.
+ # @option options [String] :cache_control
+ # See {PresignedPost#cache_control}.
+ # @option options [String] :cache_control_starts_with
+ # See {PresignedPost#cache_control_starts_with}.
  # @option options [String] :content_type See {PresignedPost#content_type}.
- # @option options [String] :content_type_starts_with See {PresignedPost#content_type_starts_with}.
- # @option options [String] :content_disposition See {PresignedPost#content_disposition}.
- # @option options [String] :content_disposition_starts_with See {PresignedPost#content_disposition_starts_with}.
- # @option options [String] :content_encoding See {PresignedPost#content_encoding}.
- # @option options [String] :content_encoding_starts_with See {PresignedPost#content_encoding_starts_with}.
+ # @option options [String] :content_type_starts_with
+ # See {PresignedPost#content_type_starts_with}.
+ # @option options [String] :content_disposition
+ # See {PresignedPost#content_disposition}.
+ # @option options [String] :content_disposition_starts_with
+ # See {PresignedPost#content_disposition_starts_with}.
+ # @option options [String] :content_encoding
+ # See {PresignedPost#content_encoding}.
+ # @option options [String] :content_encoding_starts_with
+ # See {PresignedPost#content_encoding_starts_with}.
  # @option options [String] :expires See {PresignedPost#expires}.
- # @option options [String] :expires_starts_with See {PresignedPost#expires_starts_with}.
- # @option options [Range<Integer>] :content_length_range See {PresignedPost#content_length_range}.
- # @option options [String] :success_action_redirect See {PresignedPost#success_action_redirect}.
- # @option options [String] :success_action_redirect_starts_with See {PresignedPost#success_action_redirect_starts_with}.
- # @option options [String] :success_action_status See {PresignedPost#success_action_status}.
- # @option options [String] :storage_class See {PresignedPost#storage_class}.
- # @option options [String] :website_redirect_location See {PresignedPost#website_redirect_location}.
- # @option options [Hash<String,String>] :metadata See {PresignedPost#metadata}.
- # @option options [Hash<String,String>] :metadata_starts_with See {PresignedPost#metadata_starts_with}.
- # @option options [String] :server_side_encryption See {PresignedPost#server_side_encryption}.
- # @option options [String] :server_side_encryption_aws_kms_key_id See {PresignedPost#server_side_encryption_aws_kms_key_id}.
- # @option options [String] :server_side_encryption_customer_algorithm See {PresignedPost#server_side_encryption_customer_algorithm}.
- # @option options [String] :server_side_encryption_customer_key See {PresignedPost#server_side_encryption_customer_key}.
+ # @option options [String] :expires_starts_with
+ # See {PresignedPost#expires_starts_with}.
+ # @option options [Range<Integer>] :content_length_range
+ # See {PresignedPost#content_length_range}.
+ # @option options [String] :success_action_redirect
+ # See {PresignedPost#success_action_redirect}.
+ # @option options [String] :success_action_redirect_starts_with
+ # See {PresignedPost#success_action_redirect_starts_with}.
+ # @option options [String] :success_action_status
+ # See {PresignedPost#success_action_status}.
+ # @option options [String] :storage_class
+ # See {PresignedPost#storage_class}.
+ # @option options [String] :website_redirect_location
+ # See {PresignedPost#website_redirect_location}.
+ # @option options [Hash<String,String>] :metadata
+ # See {PresignedPost#metadata}.
+ # @option options [Hash<String,String>] :metadata_starts_with
+ # See {PresignedPost#metadata_starts_with}.
+ # @option options [String] :server_side_encryption
+ # See {PresignedPost#server_side_encryption}.
+ # @option options [String] :server_side_encryption_aws_kms_key_id
+ # See {PresignedPost#server_side_encryption_aws_kms_key_id}.
+ # @option options [String] :server_side_encryption_customer_algorithm
+ # See {PresignedPost#server_side_encryption_customer_algorithm}.
+ # @option options [String] :server_side_encryption_customer_key
+ # See {PresignedPost#server_side_encryption_customer_key}.
  def initialize(credentials, bucket_region, bucket_name, options = {})
  @credentials = credentials.credentials
  @bucket_region = bucket_region
  @bucket_name = bucket_name
+ @accelerate = !!options.delete(:use_accelerate_endpoint)
  @url = options.delete(:url) || bucket_url
  @fields = {}
  @key_set = false
@@ -272,7 +295,7 @@ module Aws
  # @!group Fields

- # The key to use for the uploaded object. Use can use `${filename}`
+ # The key to use for the uploaded object. You can use `${filename}`
  # as a variable in the key. This will be replaced with the name
  # of the file as provided by the user.
  #
@@ -507,7 +530,10 @@ module Aws
  # (KMS) master encryption key to use for the object.
  # @param [String] value
  # @return [self]
- define_field(:server_side_encryption_aws_kms_key_id, 'x-amz-server-side-encryption-aws-kms-key-id')
+ define_field(
+ :server_side_encryption_aws_kms_key_id,
+ 'x-amz-server-side-encryption-aws-kms-key-id'
+ )

  # @!endgroup
@@ -520,7 +546,10 @@ module Aws
  # @param [String] value
  # @see #server_side_encryption_customer_key
  # @return [self]
- define_field(:server_side_encryption_customer_algorithm, 'x-amz-server-side-encryption-customer-algorithm')
+ define_field(
+ :server_side_encryption_customer_algorithm,
+ 'x-amz-server-side-encryption-customer-algorithm'
+ )

  # Specifies the customer-provided encryption key for Amazon S3 to use
  # in encrypting data. This value is used to store the object and then
@@ -582,10 +611,14 @@ module Aws
  def bucket_url
  url = Aws::Partitions::EndpointProvider.resolve(@bucket_region, 's3')
  url = URI.parse(url)
- if Plugins::BucketDns.dns_compatible?(@bucket_name, true)
- url.host = @bucket_name + '.' + url.host
+ if Plugins::BucketDns.dns_compatible?(@bucket_name, _ssl = true)
+ if @accelerate
+ url.host = "#{@bucket_name}.s3-accelerate.amazonaws.com"
+ else
+ url.host = "#{@bucket_name}.#{url.host}"
+ end
  else
- url.path = '/' + @bucket_name
+ url.path = "/#{@bucket_name}"
  end
  if @bucket_region == 'us-east-1'
  # keep legacy behavior by default
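
PresignedPost now accepts `:use_accelerate_endpoint` and, for DNS-compatible bucket names, points the form URL at the bucket's `s3-accelerate` host. A hedged sketch, assuming the `Bucket#presigned_post` helper forwards the option, with placeholder region and bucket names:

    require 'aws-sdk-s3'

    bucket = Aws::S3::Resource.new(region: 'us-west-2')
                              .bucket('example-accelerated-bucket')

    # Transfer acceleration must already be enabled on the bucket.
    post = bucket.presigned_post(
      key: 'uploads/${filename}',
      use_accelerate_endpoint: true,
      content_length_range: 0..(10 * 1024 * 1024)
    )

    puts post.url # e.g. https://example-accelerated-bucket.s3-accelerate.amazonaws.com
    post.fields.each { |name, value| puts "#{name}: #{value}" }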
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: aws-sdk-s3
  version: !ruby/object:Gem::Version
- version: 1.70.0
+ version: 1.75.0
  platform: ruby
  authors:
  - Amazon Web Services
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-06-23 00:00:00.000000000 Z
+ date: 2020-07-21 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: aws-sdk-kms
@@ -47,7 +47,7 @@ dependencies:
  version: '3'
  - - ">="
  - !ruby/object:Gem::Version
- version: 3.99.0
+ version: 3.104.1
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
@@ -57,7 +57,7 @@ dependencies:
  version: '3'
  - - ">="
  - !ruby/object:Gem::Version
- version: 3.99.0
+ version: 3.104.1
  description: Official AWS Ruby gem for Amazon Simple Storage Service (Amazon S3).
  This gem is part of the AWS SDK for Ruby.
  email:
@@ -148,6 +148,7 @@ files:
  - lib/aws-sdk-s3/plugins/s3_host_id.rb
  - lib/aws-sdk-s3/plugins/s3_signer.rb
  - lib/aws-sdk-s3/plugins/sse_cpk.rb
+ - lib/aws-sdk-s3/plugins/streaming_retry.rb
  - lib/aws-sdk-s3/plugins/url_encoded_keys.rb
  - lib/aws-sdk-s3/presigned_post.rb
  - lib/aws-sdk-s3/presigner.rb