aws-sdk-s3 1.173.0 → 1.175.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -785,6 +785,7 @@ module Aws::S3
  CompleteMultipartUploadRequest.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256"))
  CompleteMultipartUploadRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
  CompleteMultipartUploadRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
+ CompleteMultipartUploadRequest.add_member(:if_match, Shapes::ShapeRef.new(shape: IfMatch, location: "header", location_name: "If-Match"))
  CompleteMultipartUploadRequest.add_member(:if_none_match, Shapes::ShapeRef.new(shape: IfNoneMatch, location: "header", location_name: "If-None-Match"))
  CompleteMultipartUploadRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm"))
  CompleteMultipartUploadRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key"))
@@ -2363,6 +2364,7 @@ module Aws::S3
  PutObjectRequest.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1"))
  PutObjectRequest.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256"))
  PutObjectRequest.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "Expires"))
+ PutObjectRequest.add_member(:if_match, Shapes::ShapeRef.new(shape: IfMatch, location: "header", location_name: "If-Match"))
  PutObjectRequest.add_member(:if_none_match, Shapes::ShapeRef.new(shape: IfNoneMatch, location: "header", location_name: "If-None-Match"))
  PutObjectRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control"))
  PutObjectRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read"))
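Both CompleteMultipartUpload and PutObject now carry an If-Match header, enabling conditional writes keyed on an object's ETag. A minimal sketch of a conditional overwrite with the new :if_match parameter follows; the bucket and key names are placeholders, and error classes are the SDK's dynamically generated ones for the documented response codes.

    require "aws-sdk-s3"

    s3 = Aws::S3::Client.new

    # Read the current ETag, then overwrite only if it has not changed.
    head = s3.head_object(bucket: "example-bucket", key: "report.csv")

    begin
      s3.put_object(
        bucket: "example-bucket",
        key: "report.csv",
        body: "updated contents",
        if_match: head.etag
      )
    rescue Aws::S3::Errors::PreconditionFailed
      # 412: the object was modified by another writer since we read its ETag.
      warn "Object changed; not overwriting."
    end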
@@ -94,6 +94,24 @@ module Aws::S3
  end
  return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
  end
+ if (s3express_availability_zone_id = Aws::Endpoints::Matchers.substring(bucket, 6, 19, true)) && (s3express_availability_zone_delim = Aws::Endpoints::Matchers.substring(bucket, 19, 21, true)) && Aws::Endpoints::Matchers.string_equals?(s3express_availability_zone_delim, "--")
+ if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-fips-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
+ if (s3express_availability_zone_id = Aws::Endpoints::Matchers.substring(bucket, 6, 20, true)) && (s3express_availability_zone_delim = Aws::Endpoints::Matchers.substring(bucket, 20, 22, true)) && Aws::Endpoints::Matchers.string_equals?(s3express_availability_zone_delim, "--")
+ if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-fips-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
+ if (s3express_availability_zone_id = Aws::Endpoints::Matchers.substring(bucket, 6, 26, true)) && (s3express_availability_zone_delim = Aws::Endpoints::Matchers.substring(bucket, 26, 28, true)) && Aws::Endpoints::Matchers.string_equals?(s3express_availability_zone_delim, "--")
+ if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-fips-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
  raise ArgumentError, "Unrecognized S3Express bucket name format."
  end
  if (s3express_availability_zone_id = Aws::Endpoints::Matchers.substring(bucket, 6, 14, true)) && (s3express_availability_zone_delim = Aws::Endpoints::Matchers.substring(bucket, 14, 16, true)) && Aws::Endpoints::Matchers.string_equals?(s3express_availability_zone_delim, "--")
@@ -108,6 +126,24 @@ module Aws::S3
  end
  return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
  end
+ if (s3express_availability_zone_id = Aws::Endpoints::Matchers.substring(bucket, 6, 19, true)) && (s3express_availability_zone_delim = Aws::Endpoints::Matchers.substring(bucket, 19, 21, true)) && Aws::Endpoints::Matchers.string_equals?(s3express_availability_zone_delim, "--")
+ if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-fips-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
+ if (s3express_availability_zone_id = Aws::Endpoints::Matchers.substring(bucket, 6, 20, true)) && (s3express_availability_zone_delim = Aws::Endpoints::Matchers.substring(bucket, 20, 22, true)) && Aws::Endpoints::Matchers.string_equals?(s3express_availability_zone_delim, "--")
+ if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-fips-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
+ if (s3express_availability_zone_id = Aws::Endpoints::Matchers.substring(bucket, 6, 26, true)) && (s3express_availability_zone_delim = Aws::Endpoints::Matchers.substring(bucket, 26, 28, true)) && Aws::Endpoints::Matchers.string_equals?(s3express_availability_zone_delim, "--")
+ if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-fips-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
+ return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
+ end
  raise ArgumentError, "Unrecognized S3Express bucket name format."
  end
  raise ArgumentError, "S3Express bucket name is not a valid virtual hostable name."
@@ -307,6 +307,7 @@ module Aws::S3
  # checksum_sha256: "ChecksumSHA256",
  # request_payer: "requester", # accepts requester
  # expected_bucket_owner: "AccountId",
+ # if_match: "IfMatch",
  # if_none_match: "IfNoneMatch",
  # sse_customer_algorithm: "SSECustomerAlgorithm",
  # sse_customer_key: "SSECustomerKey",
@@ -375,6 +376,26 @@ module Aws::S3
  # The account ID of the expected bucket owner. If the account ID that
  # you provide does not match the actual owner of the bucket, the request
  # fails with the HTTP status code `403 Forbidden` (access denied).
+ # @option options [String] :if_match
+ # Uploads the object only if the ETag (entity tag) value provided during
+ # the WRITE operation matches the ETag of the object in S3. If the ETag
+ # values do not match, the operation returns a `412 Precondition Failed`
+ # error.
+ #
+ # If a conflicting operation occurs during the upload S3 returns a `409
+ # ConditionalRequestConflict` response. On a 409 failure you should
+ # fetch the object's ETag, re-initiate the multipart upload with
+ # `CreateMultipartUpload`, and re-upload each part.
+ #
+ # Expects the ETag value as a string.
+ #
+ # For more information about conditional requests, see [RFC 7232][1], or
+ # [Conditional requests][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://tools.ietf.org/html/rfc7232
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html
  # @option options [String] :if_none_match
  # Uploads the object only if the object key name does not already exist
  # in the bucket specified. Otherwise, Amazon S3 returns a `412
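A hedged sketch of the conditional multipart flow these docs describe: complete the upload only if the existing object still has the expected ETag, and treat 409 as the cue to start over. Bucket, key, and the single-part body are placeholders; the error classes correspond to the 412 and 409 codes named in the documentation.

    s3 = Aws::S3::Client.new
    etag_to_replace = s3.head_object(bucket: "example-bucket", key: "big.bin").etag

    mpu  = s3.create_multipart_upload(bucket: "example-bucket", key: "big.bin")
    part = s3.upload_part(
      bucket: "example-bucket", key: "big.bin",
      upload_id: mpu.upload_id, part_number: 1, body: "new contents"
    )

    begin
      s3.complete_multipart_upload(
        bucket: "example-bucket", key: "big.bin",
        upload_id: mpu.upload_id,
        multipart_upload: { parts: [{ etag: part.etag, part_number: 1 }] },
        if_match: etag_to_replace
      )
    rescue Aws::S3::Errors::PreconditionFailed
      # 412: the object no longer carries the expected ETag.
    rescue Aws::S3::Errors::ConditionalRequestConflict
      # 409: per the docs above, fetch the current ETag, re-initiate the
      # multipart upload with create_multipart_upload, and re-upload the parts.
    end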
@@ -66,7 +66,9 @@ module Aws::S3
  # providing object expiration information. The value of the `rule-id` is
  # URL-encoded.
  #
- # <note markdown="1"> This functionality is not supported for directory buckets.
+ # <note markdown="1"> Object expiration information is not returned in directory buckets and
+ # this header returns the value "`NotImplemented`" in all responses
+ # for directory buckets.
  #
  # </note>
  #
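In other words, for directory buckets the expiration field on a write response is now documented to carry the literal string "NotImplemented" rather than lifecycle details. A quick, illustrative check (the directory bucket name is a placeholder):

    resp = Aws::S3::Client.new.put_object(
      bucket: "my-bucket--usw2-az1--x-s3",
      key: "data.json",
      body: "{}"
    )
    # Expected to print "NotImplemented" for directory buckets, per the note above.
    puts resp.expiration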
@@ -2368,6 +2370,7 @@ module Aws::S3
  # checksum_sha1: "ChecksumSHA1",
  # checksum_sha256: "ChecksumSHA256",
  # expires: Time.now,
+ # if_match: "IfMatch",
  # if_none_match: "IfNoneMatch",
  # grant_full_control: "GrantFullControl",
  # grant_read: "GrantRead",
@@ -2592,6 +2595,25 @@ module Aws::S3
  #
  #
  # [1]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3
+ # @option options [String] :if_match
+ # Uploads the object only if the ETag (entity tag) value provided during
+ # the WRITE operation matches the ETag of the object in S3. If the ETag
+ # values do not match, the operation returns a `412 Precondition Failed`
+ # error.
+ #
+ # If a conflicting operation occurs during the upload S3 returns a `409
+ # ConditionalRequestConflict` response. On a 409 failure you should
+ # fetch the object's ETag and retry the upload.
+ #
+ # Expects the ETag value as a string.
+ #
+ # For more information about conditional requests, see [RFC 7232][1], or
+ # [Conditional requests][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://tools.ietf.org/html/rfc7232
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html
  # @option options [String] :if_none_match
  # Uploads the object only if the object key name does not already exist
  # in the bucket specified. Otherwise, Amazon S3 returns a `412
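For PutObject the documented recovery on a 409 is simpler: re-read the ETag and retry the upload. One possible retry pattern, sketched under the assumption of a small, non-streaming body (names and the retry budget are placeholders):

    # Retry a conditional put on 409 ConditionalRequestConflict by refreshing
    # the ETag, as the docs above suggest.
    def conditional_put(s3, bucket:, key:, body:, attempts: 3)
      etag = s3.head_object(bucket: bucket, key: key).etag
      attempts.times do
        begin
          return s3.put_object(bucket: bucket, key: key, body: body, if_match: etag)
        rescue Aws::S3::Errors::ConditionalRequestConflict
          # A concurrent write raced with ours; refresh the ETag and try again.
          etag = s3.head_object(bucket: bucket, key: key).etag
        end
      end
      raise "gave up after #{attempts} conflicting writes"
    end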
@@ -2018,6 +2018,7 @@ module Aws::S3
  # checksum_sha1: "ChecksumSHA1",
  # checksum_sha256: "ChecksumSHA256",
  # expires: Time.now,
+ # if_match: "IfMatch",
  # if_none_match: "IfNoneMatch",
  # grant_full_control: "GrantFullControl",
  # grant_read: "GrantRead",
@@ -2242,6 +2243,25 @@ module Aws::S3
  #
  #
  # [1]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3
+ # @option options [String] :if_match
+ # Uploads the object only if the ETag (entity tag) value provided during
+ # the WRITE operation matches the ETag of the object in S3. If the ETag
+ # values do not match, the operation returns a `412 Precondition Failed`
+ # error.
+ #
+ # If a conflicting operation occurs during the upload S3 returns a `409
+ # ConditionalRequestConflict` response. On a 409 failure you should
+ # fetch the object's ETag and retry the upload.
+ #
+ # Expects the ETag value as a string.
+ #
+ # For more information about conditional requests, see [RFC 7232][1], or
+ # [Conditional requests][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://tools.ietf.org/html/rfc7232
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html
  # @option options [String] :if_none_match
  # Uploads the object only if the object key name does not already exist
  # in the bucket specified. Otherwise, Amazon S3 returns a `412
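This second copy of the :if_match docs lives on the resource-style put, so the same option should also be reachable through the resource interface. A brief sketch, assuming bucket and key placeholders:

    # Resource-style equivalent of the conditional overwrite.
    obj = Aws::S3::Object.new("example-bucket", "report.csv")
    obj.put(body: "updated contents", if_match: obj.etag)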
@@ -43,11 +43,11 @@ module Aws::S3
  # create_bucket_configuration: {
  # location_constraint: "af-south-1", # accepts af-south-1, ap-east-1, ap-northeast-1, ap-northeast-2, ap-northeast-3, ap-south-1, ap-south-2, ap-southeast-1, ap-southeast-2, ap-southeast-3, ca-central-1, cn-north-1, cn-northwest-1, EU, eu-central-1, eu-north-1, eu-south-1, eu-south-2, eu-west-1, eu-west-2, eu-west-3, me-south-1, sa-east-1, us-east-2, us-gov-east-1, us-gov-west-1, us-west-1, us-west-2
  # location: {
- # type: "AvailabilityZone", # accepts AvailabilityZone
+ # type: "AvailabilityZone", # accepts AvailabilityZone, LocalZone
  # name: "LocationNameAsString",
  # },
  # bucket: {
- # data_redundancy: "SingleAvailabilityZone", # accepts SingleAvailabilityZone
+ # data_redundancy: "SingleAvailabilityZone", # accepts SingleAvailabilityZone, SingleLocalZone
  # type: "Directory", # accepts Directory
  # },
  # },
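The expanded enums (LocalZone and SingleLocalZone) allow directory buckets to be created in Local Zones. A hedged sketch of such a call, where the zone ID and bucket name are illustrative placeholders rather than verified values:

    # Create a directory bucket in a Local Zone; zone_id and the bucket name
    # are placeholders for illustration only.
    zone_id = "usw2-lax1-az1"
    Aws::S3::Client.new.create_bucket(
      bucket: "my-data--#{zone_id}--x-s3",
      create_bucket_configuration: {
        location: { type: "LocalZone", name: zone_id },
        bucket: { data_redundancy: "SingleLocalZone", type: "Directory" }
      }
    )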
@@ -75,13 +75,14 @@ module Aws::S3
  #
  # <b>Directory buckets </b> - When you use this operation with a
  # directory bucket, you must use path-style requests in the format
- # `https://s3express-control.region_code.amazonaws.com/bucket-name `.
+ # `https://s3express-control.region-code.amazonaws.com/bucket-name `.
  # Virtual-hosted-style requests aren't supported. Directory bucket
- # names must be unique in the chosen Availability Zone. Bucket names
- # must also follow the format ` bucket_base_name--az_id--x-s3` (for
- # example, ` DOC-EXAMPLE-BUCKET--usw2-az1--x-s3`). For information about
- # bucket naming restrictions, see [Directory bucket naming rules][2] in
- # the *Amazon S3 User Guide*
+ # names must be unique in the chosen Zone (Availability Zone or Local
+ # Zone). Bucket names must also follow the format `
+ # bucket-base-name--zone-id--x-s3` (for example, `
+ # DOC-EXAMPLE-BUCKET--usw2-az1--x-s3`). For information about bucket
+ # naming restrictions, see [Directory bucket naming rules][2] in the
+ # *Amazon S3 User Guide*
  #
  #
  #