aws-sdk-s3 1.107.0 → 1.111.0

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -27,6 +27,7 @@ require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
27
27
  require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
28
28
  require 'aws-sdk-core/plugins/transfer_encoding.rb'
29
29
  require 'aws-sdk-core/plugins/http_checksum.rb'
30
+ require 'aws-sdk-core/plugins/defaults_mode.rb'
30
31
  require 'aws-sdk-core/plugins/protocols/rest_xml.rb'
31
32
  require 'aws-sdk-s3/plugins/accelerate.rb'
32
33
  require 'aws-sdk-s3/plugins/arn.rb'
@@ -91,6 +92,7 @@ module Aws::S3
91
92
  add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
92
93
  add_plugin(Aws::Plugins::TransferEncoding)
93
94
  add_plugin(Aws::Plugins::HttpChecksum)
95
+ add_plugin(Aws::Plugins::DefaultsMode)
94
96
  add_plugin(Aws::Plugins::Protocols::RestXml)
95
97
  add_plugin(Aws::S3::Plugins::Accelerate)
96
98
  add_plugin(Aws::S3::Plugins::ARN)
@@ -155,7 +157,9 @@ module Aws::S3
155
157
  # * EC2/ECS IMDS instance profile - When used by default, the timeouts
156
158
  # are very aggressive. Construct and pass an instance of
157
159
  # `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
158
- # enable retries and extended timeouts.
160
+ # enable retries and extended timeouts. Instance profile credential
161
+ # fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED']
162
+ # to true.
159
163
  #
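The aggressive IMDS defaults mentioned above can be worked around in two ways: skip instance profile credentials entirely with the environment variable, or construct the provider yourself to opt into retries and longer timeouts. A minimal sketch (the retry and timeout values are arbitrary assumptions, not recommendations):

    require 'aws-sdk-s3'

    # Option 1: disable instance profile credential fetching (documented above).
    ENV['AWS_EC2_METADATA_DISABLED'] = 'true'

    # Option 2: build the provider explicitly to enable retries and longer timeouts.
    credentials = Aws::InstanceProfileCredentials.new(
      retries: 3,            # retry IMDS calls instead of failing fast
      http_open_timeout: 2,  # seconds
      http_read_timeout: 2
    )
    s3 = Aws::S3::Client.new(credentials: credentials)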
160
164
  # @option options [required, String] :region
161
165
  # The AWS region to connect to. The configured `:region` is
@@ -216,6 +220,10 @@ module Aws::S3
216
220
  # Used only in `standard` and adaptive retry modes. Specifies whether to apply
217
221
  # a clock skew correction and retry requests with skewed client clocks.
218
222
  #
223
+ # @option options [String] :defaults_mode ("legacy")
224
+ # See {Aws::DefaultsModeConfiguration} for a list of the
225
+ # accepted modes and the configuration defaults that are included.
226
+ #
219
227
  # @option options [Boolean] :disable_host_prefix_injection (false)
220
228
  # Set to true to disable SDK automatically adding host prefix
221
229
  # to default service endpoint when available.
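For illustration, the `:defaults_mode` option introduced above is an ordinary constructor option; a sketch of opting out of the `"legacy"` default (`"standard"` is one of the modes listed in `Aws::DefaultsModeConfiguration`):

    require 'aws-sdk-s3'

    # Use the "standard" defaults instead of the backwards-compatible "legacy" set.
    s3 = Aws::S3::Client.new(
      region: 'us-east-1',
      defaults_mode: 'standard'
    )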
@@ -379,7 +387,7 @@ module Aws::S3
379
387
  # seconds to wait when opening an HTTP session before raising a
380
388
  # `Timeout::Error`.
381
389
  #
382
- # @option options [Integer] :http_read_timeout (60) The default
390
+ # @option options [Float] :http_read_timeout (60) The default
383
391
  # number of seconds to wait for response data. This value can
384
392
  # safely be set per-request on the session.
385
393
  #
@@ -395,6 +403,9 @@ module Aws::S3
395
403
  # disables this behaviour. This value can safely be set per
396
404
  # request on the session.
397
405
  #
406
+ # @option options [Float] :ssl_timeout (nil) Sets the SSL timeout
407
+ # in seconds.
408
+ #
398
409
  # @option options [Boolean] :http_wire_trace (false) When `true`,
399
410
  # HTTP debug output will be sent to the `:logger`.
400
411
  #
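Both timeout options documented above are plain constructor options as well; a sketch with arbitrary example values (note that `:http_read_timeout` is now documented as a Float):

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new(
      http_open_timeout: 15,   # seconds to open the connection
      http_read_timeout: 60.5, # Float is accepted per the updated docs
      ssl_timeout: 10          # new option; nil (no explicit limit) by default
    )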
@@ -896,11 +907,28 @@ module Aws::S3
896
907
  # Control List (ACL) Overview][10] and [Managing ACLs Using the REST
897
908
  # API][11].
898
909
  #
910
+ # If the bucket that you're copying objects to uses the bucket owner
911
+ # enforced setting for S3 Object Ownership, ACLs are disabled and no
912
+ # longer affect permissions. Buckets that use this setting only accept
913
+ # PUT requests that don't specify an ACL or PUT requests that specify
914
+ # bucket owner full control ACLs, such as the
915
+ # `bucket-owner-full-control` canned ACL or an equivalent form of this
916
+ # ACL expressed in the XML format.
917
+ #
918
+ # For more information, see [ Controlling ownership of objects and
919
+ # disabling ACLs][12] in the *Amazon S3 User Guide*.
920
+ #
921
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for Object
922
+ # Ownership, all objects written to the bucket by any account will be
923
+ # owned by the bucket owner.
924
+ #
925
+ # </note>
926
+ #
899
927
  # **Storage Class Options**
900
928
  #
901
929
  # You can use the `CopyObject` action to change the storage class of an
902
930
  # object that is already stored in Amazon S3 using the `StorageClass`
903
- # parameter. For more information, see [Storage Classes][12] in the
931
+ # parameter. For more information, see [Storage Classes][13] in the
904
932
  # *Amazon S3 User Guide*.
905
933
  #
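A `copy_object` call can exercise both changes described above: send the `bucket-owner-full-control` canned ACL (the only ACL a bucket-owner-enforced bucket accepts besides none) and place the copy in the new `GLACIER_IR` storage class. A sketch with placeholder bucket and key names:

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new
    s3.copy_object(
      bucket: 'destination-bucket',
      key: 'archived/report.csv',
      copy_source: 'source-bucket/report.csv',
      acl: 'bucket-owner-full-control', # accepted even when bucket ACLs are disabled
      storage_class: 'GLACIER_IR'       # Glacier Instant Retrieval, added in this release
    )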
906
934
  # **Versioning**
@@ -921,15 +949,15 @@ module Aws::S3
921
949
  #
922
950
  # If the source object's storage class is GLACIER, you must restore a
923
951
  # copy of this object before you can use it as a source object for the
924
- # copy operation. For more information, see [RestoreObject][13].
952
+ # copy operation. For more information, see [RestoreObject][14].
925
953
  #
926
954
  # The following operations are related to `CopyObject`\:
927
955
  #
928
- # * [PutObject][14]
956
+ # * [PutObject][15]
929
957
  #
930
- # * [GetObject][15]
958
+ # * [GetObject][16]
931
959
  #
932
- # For more information, see [Copying Objects][16].
960
+ # For more information, see [Copying Objects][17].
933
961
  #
934
962
  #
935
963
  #
@@ -944,11 +972,12 @@ module Aws::S3
944
972
  # [9]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
945
973
  # [10]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
946
974
  # [11]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html
947
- # [12]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
948
- # [13]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
949
- # [14]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
950
- # [15]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
951
- # [16]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html
975
+ # [12]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
976
+ # [13]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
977
+ # [14]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
978
+ # [15]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
979
+ # [16]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
980
+ # [17]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html
952
981
  #
953
982
  # @option params [String] :acl
954
983
  # The canned ACL to apply to the object.
@@ -1270,7 +1299,7 @@ module Aws::S3
1270
1299
  # metadata_directive: "COPY", # accepts COPY, REPLACE
1271
1300
  # tagging_directive: "COPY", # accepts COPY, REPLACE
1272
1301
  # server_side_encryption: "AES256", # accepts AES256, aws:kms
1273
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
1302
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
1274
1303
  # website_redirect_location: "WebsiteRedirectLocation",
1275
1304
  # sse_customer_algorithm: "SSECustomerAlgorithm",
1276
1305
  # sse_customer_key: "SSECustomerKey",
@@ -1343,22 +1372,33 @@ module Aws::S3
1343
1372
  #
1344
1373
  # </note>
1345
1374
  #
1375
+ # **Access control lists (ACLs)**
1376
+ #
1346
1377
  # When creating a bucket using this operation, you can optionally
1347
- # specify the accounts or groups that should be granted specific
1348
- # permissions on the bucket. There are two ways to grant the appropriate
1349
- # permissions using the request headers.
1378
+ # configure the bucket ACL to specify the accounts or groups that should
1379
+ # be granted specific permissions on the bucket.
1380
+ #
1381
+ # If your CreateBucket request sets bucket owner enforced for S3 Object
1382
+ # Ownership and specifies a bucket ACL that provides access to an
1383
+ # external Amazon Web Services account, your request fails with a `400`
1384
+ # error and returns the `InvalidBucketAclWithObjectOwnership` error
1385
+ # code. For more information, see [Controlling object ownership][5] in
1386
+ # the *Amazon S3 User Guide*.
1387
+ #
1388
+ # There are two ways to grant the appropriate permissions using the
1389
+ # request headers.
1350
1390
  #
1351
1391
  # * Specify a canned ACL using the `x-amz-acl` request header. Amazon S3
1352
1392
  # supports a set of predefined ACLs, known as *canned ACLs*. Each
1353
1393
  # canned ACL has a predefined set of grantees and permissions. For
1354
- # more information, see [Canned ACL][5].
1394
+ # more information, see [Canned ACL][6].
1355
1395
  #
1356
1396
  # * Specify access permissions explicitly using the `x-amz-grant-read`,
1357
1397
  # `x-amz-grant-write`, `x-amz-grant-read-acp`,
1358
1398
  # `x-amz-grant-write-acp`, and `x-amz-grant-full-control` headers.
1359
1399
  # These headers map to the set of permissions Amazon S3 supports in an
1360
1400
  # ACL. For more information, see [Access control list (ACL)
1361
- # overview][6].
1401
+ # overview][7].
1362
1402
  #
1363
1403
  # You specify each grantee as a type=value pair, where the type is one
1364
1404
  # of the following:
@@ -1391,7 +1431,7 @@ module Aws::S3
1391
1431
  # * South America (São Paulo)
1392
1432
  #
1393
1433
  # For a list of all the Amazon S3 supported Regions and endpoints,
1394
- # see [Regions and Endpoints][7] in the Amazon Web Services General
1434
+ # see [Regions and Endpoints][8] in the Amazon Web Services General
1395
1435
  # Reference.
1396
1436
  #
1397
1437
  # </note>
@@ -1409,22 +1449,29 @@ module Aws::S3
1409
1449
  #
1410
1450
  # **Permissions**
1411
1451
  #
1412
- # If your `CreateBucket` request specifies ACL permissions and the ACL
1413
- # is public-read, public-read-write, authenticated-read, or if you
1414
- # specify access permissions explicitly through any other ACL, both
1415
- # `s3:CreateBucket` and `s3:PutBucketAcl` permissions are needed. If the
1416
- # ACL the `CreateBucket` request is private, only `s3:CreateBucket`
1417
- # permission is needed.
1452
+ # In addition to `s3:CreateBucket`, the following permissions are
1453
+ # required when your CreateBucket request includes specific headers:
1454
+ #
1455
+ # * **ACLs** - If your `CreateBucket` request specifies ACL permissions
1456
+ # and the ACL is public-read, public-read-write, authenticated-read,
1457
+ # or if you specify access permissions explicitly through any other
1458
+ # ACL, both `s3:CreateBucket` and `s3:PutBucketAcl` permissions are
1459
+ # needed. If the ACL for the `CreateBucket` request is private or doesn't
1460
+ # specify any ACLs, only `s3:CreateBucket` permission is needed.
1461
+ #
1462
+ # * **Object Lock** - If `ObjectLockEnabledForBucket` is set to true in
1463
+ # your `CreateBucket` request, `s3:PutBucketObjectLockConfiguration`
1464
+ # and `s3:PutBucketVersioning` permissions are required.
1418
1465
  #
1419
- # If `ObjectLockEnabledForBucket` is set to true in your `CreateBucket`
1420
- # request, `s3:PutBucketObjectLockConfiguration` and
1421
- # `s3:PutBucketVersioning` permissions are required.
1466
+ # * **S3 Object Ownership** - If your CreateBucket request includes the
1467
+ # `x-amz-object-ownership` header, `s3:PutBucketOwnershipControls`
1468
+ # permission is required.
1422
1469
  #
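The permission requirements above attach to specific request parameters; a sketch of a request that needs all of them (the comments only restate the documentation, and the bucket name is a placeholder):

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new
    # Requires s3:CreateBucket, plus s3:PutBucketAcl (public ACL) and
    # s3:PutBucketObjectLockConfiguration + s3:PutBucketVersioning (Object Lock).
    s3.create_bucket(
      bucket: 'examplebucket',
      acl: 'public-read',
      object_lock_enabled_for_bucket: true
    )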
1423
1470
  # The following operations are related to `CreateBucket`\:
1424
1471
  #
1425
- # * [PutObject][8]
1472
+ # * [PutObject][9]
1426
1473
  #
1427
- # * [DeleteBucket][9]
1474
+ # * [DeleteBucket][10]
1428
1475
  #
1429
1476
  #
1430
1477
  #
@@ -1432,11 +1479,12 @@ module Aws::S3
1432
1479
  # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html
1433
1480
  # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
1434
1481
  # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
1435
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
1436
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
1437
- # [7]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
1438
- # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
1439
- # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
1482
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
1483
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
1484
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html
1485
+ # [8]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
1486
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
1487
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
1440
1488
  #
1441
1489
  # @option params [String] :acl
1442
1490
  # The canned ACL to apply to the bucket.
@@ -1470,6 +1518,24 @@ module Aws::S3
1470
1518
  # Specifies whether you want S3 Object Lock to be enabled for the new
1471
1519
  # bucket.
1472
1520
  #
1521
+ # @option params [String] :object_ownership
1522
+ # The container element for object ownership for a bucket's ownership
1523
+ # controls.
1524
+ #
1525
+ # BucketOwnerPreferred - Objects uploaded to the bucket change ownership
1526
+ # to the bucket owner if the objects are uploaded with the
1527
+ # `bucket-owner-full-control` canned ACL.
1528
+ #
1529
+ # ObjectWriter - The uploading account will own the object if the object
1530
+ # is uploaded with the `bucket-owner-full-control` canned ACL.
1531
+ #
1532
+ # BucketOwnerEnforced - Access control lists (ACLs) are disabled and no
1533
+ # longer affect permissions. The bucket owner automatically owns and has
1534
+ # full control over every object in the bucket. The bucket only accepts
1535
+ # PUT requests that don't specify an ACL or bucket owner full control
1536
+ # ACLs, such as the `bucket-owner-full-control` canned ACL or an
1537
+ # equivalent form of this ACL expressed in the XML format.
1538
+ #
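A sketch of the new `object_ownership` parameter in a `create_bucket` request (the bucket name is a placeholder):

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new
    # ACLs are disabled on this bucket; the bucket owner owns every object.
    s3.create_bucket(
      bucket: 'examplebucket',
      object_ownership: 'BucketOwnerEnforced'
    )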
1473
1539
  # @return [Types::CreateBucketOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1474
1540
  #
1475
1541
  # * {Types::CreateBucketOutput#location #location} => String
@@ -1518,6 +1584,7 @@ module Aws::S3
1518
1584
  # grant_write: "GrantWrite",
1519
1585
  # grant_write_acp: "GrantWriteACP",
1520
1586
  # object_lock_enabled_for_bucket: false,
1587
+ # object_ownership: "BucketOwnerPreferred", # accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced
1521
1588
  # })
1522
1589
  #
1523
1590
  # @example Response structure
@@ -1996,7 +2063,7 @@ module Aws::S3
1996
2063
  # "MetadataKey" => "MetadataValue",
1997
2064
  # },
1998
2065
  # server_side_encryption: "AES256", # accepts AES256, aws:kms
1999
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
2066
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
2000
2067
  # website_redirect_location: "WebsiteRedirectLocation",
2001
2068
  # sse_customer_algorithm: "SSECustomerAlgorithm",
2002
2069
  # sse_customer_key: "SSECustomerKey",
@@ -2265,18 +2332,17 @@ module Aws::S3
2265
2332
  # storage costs by automatically moving data to the most cost-effective
2266
2333
  # storage access tier, without performance impact or operational
2267
2334
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
2268
- # two low latency and high throughput access tiers. For data that can be
2269
- # accessed asynchronously, you can choose to activate automatic
2270
- # archiving capabilities within the S3 Intelligent-Tiering storage
2271
- # class.
2335
+ # three low latency and high throughput access tiers. To get the lowest
2336
+ # storage cost on data that can be accessed in minutes to hours, you can
2337
+ # choose to activate additional archiving capabilities.
2272
2338
  #
2273
2339
  # The S3 Intelligent-Tiering storage class is the ideal storage class
2274
2340
  # for data with unknown, changing, or unpredictable access patterns,
2275
2341
  # independent of object size or retention period. If the size of an
2276
- # object is less than 128 KB, it is not eligible for auto-tiering.
2277
- # Smaller objects can be stored, but they are always charged at the
2278
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
2279
- # class.
2342
+ # object is less than 128 KB, it is not monitored and not eligible for
2343
+ # auto-tiering. Smaller objects can be stored, but they are always
2344
+ # charged at the Frequent Access tier rates in the S3
2345
+ # Intelligent-Tiering storage class.
2280
2346
  #
2281
2347
  # For more information, see [Storage class for automatically optimizing
2282
2348
  # frequently and infrequently accessed objects][1].
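The optional archive tiers mentioned above are enabled per bucket through `put_bucket_intelligent_tiering_configuration`; a rough sketch (the configuration id and day thresholds are arbitrary assumptions):

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new
    s3.put_bucket_intelligent_tiering_configuration(
      bucket: 'examplebucket',
      id: 'archive-tiering',
      intelligent_tiering_configuration: {
        id: 'archive-tiering',
        status: 'Enabled',
        tierings: [
          { days: 90,  access_tier: 'ARCHIVE_ACCESS' },
          { days: 180, access_tier: 'DEEP_ARCHIVE_ACCESS' }
        ]
      }
    )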
@@ -3024,35 +3090,35 @@ module Aws::S3
3024
3090
  # * {Types::DeleteObjectTaggingOutput#version_id #version_id} => String
3025
3091
  #
3026
3092
  #
3027
- # @example Example: To remove tag set from an object
3093
+ # @example Example: To remove tag set from an object version
3028
3094
  #
3029
- # # The following example removes tag set associated with the specified object. If the bucket is versioning enabled, the
3030
- # # operation removes tag set from the latest object version.
3095
+ # # The following example removes tag set associated with the specified object version. The request specifies both the
3096
+ # # object key and object version.
3031
3097
  #
3032
3098
  # resp = client.delete_object_tagging({
3033
3099
  # bucket: "examplebucket",
3034
3100
  # key: "HappyFace.jpg",
3101
+ # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
3035
3102
  # })
3036
3103
  #
3037
3104
  # resp.to_h outputs the following:
3038
3105
  # {
3039
- # version_id: "null",
3106
+ # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
3040
3107
  # }
3041
3108
  #
3042
- # @example Example: To remove tag set from an object version
3109
+ # @example Example: To remove tag set from an object
3043
3110
  #
3044
- # # The following example removes tag set associated with the specified object version. The request specifies both the
3045
- # # object key and object version.
3111
+ # # The following example removes tag set associated with the specified object. If the bucket is versioning enabled, the
3112
+ # # operation removes tag set from the latest object version.
3046
3113
  #
3047
3114
  # resp = client.delete_object_tagging({
3048
3115
  # bucket: "examplebucket",
3049
3116
  # key: "HappyFace.jpg",
3050
- # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
3051
3117
  # })
3052
3118
  #
3053
3119
  # resp.to_h outputs the following:
3054
3120
  # {
3055
- # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
3121
+ # version_id: "null",
3056
3122
  # }
3057
3123
  #
3058
3124
  # @example Request syntax with placeholder values
@@ -3437,15 +3503,24 @@ module Aws::S3
3437
3503
  # can return the ACL of the bucket without using an authorization
3438
3504
  # header.
3439
3505
  #
3506
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for S3 Object
3507
+ # Ownership, requests to read ACLs are still supported and return the
3508
+ # `bucket-owner-full-control` ACL with the owner being the account that
3509
+ # created the bucket. For more information, see [ Controlling object
3510
+ # ownership and disabling ACLs][1] in the *Amazon S3 User Guide*.
3511
+ #
3512
+ # </note>
3513
+ #
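In other words, reading the ACL of a bucket-owner-enforced bucket still works and simply reports full control for the owner; a short sketch (the bucket name is a placeholder):

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new
    resp = s3.get_bucket_acl(bucket: 'examplebucket')
    # With ACLs disabled, the only grant is FULL_CONTROL for the bucket owner.
    resp.grants.map(&:permission) #=> ["FULL_CONTROL"]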
3440
3514
  # **Related Resources**
3441
3515
  #
3442
- # * [ListObjects][1]
3516
+ # * [ListObjects][2]
3443
3517
  #
3444
3518
  # ^
3445
3519
  #
3446
3520
  #
3447
3521
  #
3448
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
3522
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
3523
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
3449
3524
  #
3450
3525
  # @option params [required, String] :bucket
3451
3526
  # Specifies the S3 bucket whose ACL is being requested.
@@ -3731,18 +3806,17 @@ module Aws::S3
3731
3806
  # storage costs by automatically moving data to the most cost-effective
3732
3807
  # storage access tier, without performance impact or operational
3733
3808
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
3734
- # two low latency and high throughput access tiers. For data that can be
3735
- # accessed asynchronously, you can choose to activate automatic
3736
- # archiving capabilities within the S3 Intelligent-Tiering storage
3737
- # class.
3809
+ # three low latency and high throughput access tiers. To get the lowest
3810
+ # storage cost on data that can be accessed in minutes to hours, you can
3811
+ # choose to activate additional archiving capabilities.
3738
3812
  #
3739
3813
  # The S3 Intelligent-Tiering storage class is the ideal storage class
3740
3814
  # for data with unknown, changing, or unpredictable access patterns,
3741
3815
  # independent of object size or retention period. If the size of an
3742
- # object is less than 128 KB, it is not eligible for auto-tiering.
3743
- # Smaller objects can be stored, but they are always charged at the
3744
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
3745
- # class.
3816
+ # object is less than 128 KB, it is not monitored and not eligible for
3817
+ # auto-tiering. Smaller objects can be stored, but they are always
3818
+ # charged at the Frequent Access tier rates in the S3
3819
+ # Intelligent-Tiering storage class.
3746
3820
  #
3747
3821
  # For more information, see [Storage class for automatically optimizing
3748
3822
  # frequently and infrequently accessed objects][1].
@@ -3981,9 +4055,9 @@ module Aws::S3
3981
4055
  # resp.rules[0].status #=> String, one of "Enabled", "Disabled"
3982
4056
  # resp.rules[0].transition.date #=> Time
3983
4057
  # resp.rules[0].transition.days #=> Integer
3984
- # resp.rules[0].transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
4058
+ # resp.rules[0].transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
3985
4059
  # resp.rules[0].noncurrent_version_transition.noncurrent_days #=> Integer
3986
- # resp.rules[0].noncurrent_version_transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
4060
+ # resp.rules[0].noncurrent_version_transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
3987
4061
  # resp.rules[0].noncurrent_version_transition.newer_noncurrent_versions #=> Integer
3988
4062
  # resp.rules[0].noncurrent_version_expiration.noncurrent_days #=> Integer
3989
4063
  # resp.rules[0].noncurrent_version_expiration.newer_noncurrent_versions #=> Integer
@@ -4116,10 +4190,10 @@ module Aws::S3
4116
4190
  # resp.rules[0].transitions #=> Array
4117
4191
  # resp.rules[0].transitions[0].date #=> Time
4118
4192
  # resp.rules[0].transitions[0].days #=> Integer
4119
- # resp.rules[0].transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
4193
+ # resp.rules[0].transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
4120
4194
  # resp.rules[0].noncurrent_version_transitions #=> Array
4121
4195
  # resp.rules[0].noncurrent_version_transitions[0].noncurrent_days #=> Integer
4122
- # resp.rules[0].noncurrent_version_transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
4196
+ # resp.rules[0].noncurrent_version_transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
4123
4197
  # resp.rules[0].noncurrent_version_transitions[0].newer_noncurrent_versions #=> Integer
4124
4198
  # resp.rules[0].noncurrent_version_expiration.noncurrent_days #=> Integer
4125
4199
  # resp.rules[0].noncurrent_version_expiration.newer_noncurrent_versions #=> Integer
@@ -4425,18 +4499,18 @@ module Aws::S3
4425
4499
  #
4426
4500
  # resp.topic_configuration.id #=> String
4427
4501
  # resp.topic_configuration.events #=> Array
4428
- # resp.topic_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4429
- # resp.topic_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4502
+ # resp.topic_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4503
+ # resp.topic_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4430
4504
  # resp.topic_configuration.topic #=> String
4431
4505
  # resp.queue_configuration.id #=> String
4432
- # resp.queue_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4506
+ # resp.queue_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4433
4507
  # resp.queue_configuration.events #=> Array
4434
- # resp.queue_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4508
+ # resp.queue_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4435
4509
  # resp.queue_configuration.queue #=> String
4436
4510
  # resp.cloud_function_configuration.id #=> String
4437
- # resp.cloud_function_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4511
+ # resp.cloud_function_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4438
4512
  # resp.cloud_function_configuration.events #=> Array
4439
- # resp.cloud_function_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4513
+ # resp.cloud_function_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4440
4514
  # resp.cloud_function_configuration.cloud_function #=> String
4441
4515
  # resp.cloud_function_configuration.invocation_role #=> String
4442
4516
  #
@@ -4490,6 +4564,7 @@ module Aws::S3
4490
4564
  # * {Types::NotificationConfiguration#topic_configurations #topic_configurations} => Array&lt;Types::TopicConfiguration&gt;
4491
4565
  # * {Types::NotificationConfiguration#queue_configurations #queue_configurations} => Array&lt;Types::QueueConfiguration&gt;
4492
4566
  # * {Types::NotificationConfiguration#lambda_function_configurations #lambda_function_configurations} => Array&lt;Types::LambdaFunctionConfiguration&gt;
4567
+ # * {Types::NotificationConfiguration#event_bridge_configuration #event_bridge_configuration} => Types::EventBridgeConfiguration
4493
4568
  #
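The new member mirrors a request-side setting: EventBridge delivery is turned on by including an empty `event_bridge_configuration` element when writing the notification configuration. A sketch, assuming the matching request parameter added in the same API update:

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new
    # Presence of the element enables delivery of S3 events to Amazon EventBridge.
    s3.put_bucket_notification_configuration(
      bucket: 'examplebucket',
      notification_configuration: { event_bridge_configuration: {} }
    )

    resp = s3.get_bucket_notification_configuration(bucket: 'examplebucket')
    resp.event_bridge_configuration #=> Types::EventBridgeConfiguration when enabled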
4494
4569
  # @example Request syntax with placeholder values
4495
4570
  #
@@ -4504,7 +4579,7 @@ module Aws::S3
4504
4579
  # resp.topic_configurations[0].id #=> String
4505
4580
  # resp.topic_configurations[0].topic_arn #=> String
4506
4581
  # resp.topic_configurations[0].events #=> Array
4507
- # resp.topic_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4582
+ # resp.topic_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4508
4583
  # resp.topic_configurations[0].filter.key.filter_rules #=> Array
4509
4584
  # resp.topic_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix"
4510
4585
  # resp.topic_configurations[0].filter.key.filter_rules[0].value #=> String
@@ -4512,7 +4587,7 @@ module Aws::S3
4512
4587
  # resp.queue_configurations[0].id #=> String
4513
4588
  # resp.queue_configurations[0].queue_arn #=> String
4514
4589
  # resp.queue_configurations[0].events #=> Array
4515
- # resp.queue_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4590
+ # resp.queue_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4516
4591
  # resp.queue_configurations[0].filter.key.filter_rules #=> Array
4517
4592
  # resp.queue_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix"
4518
4593
  # resp.queue_configurations[0].filter.key.filter_rules[0].value #=> String
@@ -4520,7 +4595,7 @@ module Aws::S3
4520
4595
  # resp.lambda_function_configurations[0].id #=> String
4521
4596
  # resp.lambda_function_configurations[0].lambda_function_arn #=> String
4522
4597
  # resp.lambda_function_configurations[0].events #=> Array
4523
- # resp.lambda_function_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4598
+ # resp.lambda_function_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4524
4599
  # resp.lambda_function_configurations[0].filter.key.filter_rules #=> Array
4525
4600
  # resp.lambda_function_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix"
4526
4601
  # resp.lambda_function_configurations[0].filter.key.filter_rules[0].value #=> String
@@ -4537,7 +4612,7 @@ module Aws::S3
4537
4612
  # Retrieves `OwnershipControls` for an Amazon S3 bucket. To use this
4538
4613
  # operation, you must have the `s3:GetBucketOwnershipControls`
4539
4614
  # permission. For more information about Amazon S3 permissions, see
4540
- # [Specifying Permissions in a Policy][1].
4615
+ # [Specifying permissions in a policy][1].
4541
4616
  #
4542
4617
  # For information about Amazon S3 Object Ownership, see [Using Object
4543
4618
  # Ownership][2].
@@ -4550,8 +4625,8 @@ module Aws::S3
4550
4625
  #
4551
4626
  #
4552
4627
  #
4553
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
4554
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
4628
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html
4629
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
4555
4630
  #
4556
4631
  # @option params [required, String] :bucket
4557
4632
  # The name of the Amazon S3 bucket whose `OwnershipControls` you want to
@@ -4576,7 +4651,7 @@ module Aws::S3
4576
4651
  # @example Response structure
4577
4652
  #
4578
4653
  # resp.ownership_controls.rules #=> Array
4579
- # resp.ownership_controls.rules[0].object_ownership #=> String, one of "BucketOwnerPreferred", "ObjectWriter"
4654
+ # resp.ownership_controls.rules[0].object_ownership #=> String, one of "BucketOwnerPreferred", "ObjectWriter", "BucketOwnerEnforced"
4580
4655
  #
4581
4656
  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls AWS API Documentation
4582
4657
  #
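The new `BucketOwnerEnforced` value can also be written back with `put_bucket_ownership_controls`; a sketch of disabling ACLs on an existing bucket (the bucket name is a placeholder):

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new
    s3.put_bucket_ownership_controls(
      bucket: 'examplebucket',
      ownership_controls: {
        rules: [{ object_ownership: 'BucketOwnerEnforced' }]
      }
    )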
@@ -4825,7 +4900,7 @@ module Aws::S3
4825
4900
  # resp.replication_configuration.rules[0].existing_object_replication.status #=> String, one of "Enabled", "Disabled"
4826
4901
  # resp.replication_configuration.rules[0].destination.bucket #=> String
4827
4902
  # resp.replication_configuration.rules[0].destination.account #=> String
4828
- # resp.replication_configuration.rules[0].destination.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
4903
+ # resp.replication_configuration.rules[0].destination.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
4829
4904
  # resp.replication_configuration.rules[0].destination.access_control_translation.owner #=> String, one of "Destination"
4830
4905
  # resp.replication_configuration.rules[0].destination.encryption_configuration.replica_kms_key_id #=> String
4831
4906
  # resp.replication_configuration.rules[0].destination.replication_time.status #=> String, one of "Enabled", "Disabled"
@@ -5468,49 +5543,49 @@ module Aws::S3
5468
5543
  # * {Types::GetObjectOutput#object_lock_legal_hold_status #object_lock_legal_hold_status} => String
5469
5544
  #
5470
5545
  #
5471
- # @example Example: To retrieve an object
5546
+ # @example Example: To retrieve a byte range of an object
5472
5547
  #
5473
- # # The following example retrieves an object for an S3 bucket.
5548
+ # # The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a
5549
+ # # specific byte range.
5474
5550
  #
5475
5551
  # resp = client.get_object({
5476
5552
  # bucket: "examplebucket",
5477
- # key: "HappyFace.jpg",
5553
+ # key: "SampleFile.txt",
5554
+ # range: "bytes=0-9",
5478
5555
  # })
5479
5556
  #
5480
5557
  # resp.to_h outputs the following:
5481
5558
  # {
5482
5559
  # accept_ranges: "bytes",
5483
- # content_length: 3191,
5484
- # content_type: "image/jpeg",
5485
- # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
5486
- # last_modified: Time.parse("Thu, 15 Dec 2016 01:19:41 GMT"),
5560
+ # content_length: 10,
5561
+ # content_range: "bytes 0-9/43",
5562
+ # content_type: "text/plain",
5563
+ # etag: "\"0d94420ffd0bc68cd3d152506b97a9cc\"",
5564
+ # last_modified: Time.parse("Thu, 09 Oct 2014 22:57:28 GMT"),
5487
5565
  # metadata: {
5488
5566
  # },
5489
- # tag_count: 2,
5490
5567
  # version_id: "null",
5491
5568
  # }
5492
5569
  #
5493
- # @example Example: To retrieve a byte range of an object
5570
+ # @example Example: To retrieve an object
5494
5571
  #
5495
- # # The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a
5496
- # # specific byte range.
5572
+ # # The following example retrieves an object for an S3 bucket.
5497
5573
  #
5498
5574
  # resp = client.get_object({
5499
5575
  # bucket: "examplebucket",
5500
- # key: "SampleFile.txt",
5501
- # range: "bytes=0-9",
5576
+ # key: "HappyFace.jpg",
5502
5577
  # })
5503
5578
  #
5504
5579
  # resp.to_h outputs the following:
5505
5580
  # {
5506
5581
  # accept_ranges: "bytes",
5507
- # content_length: 10,
5508
- # content_range: "bytes 0-9/43",
5509
- # content_type: "text/plain",
5510
- # etag: "\"0d94420ffd0bc68cd3d152506b97a9cc\"",
5511
- # last_modified: Time.parse("Thu, 09 Oct 2014 22:57:28 GMT"),
5582
+ # content_length: 3191,
5583
+ # content_type: "image/jpeg",
5584
+ # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
5585
+ # last_modified: Time.parse("Thu, 15 Dec 2016 01:19:41 GMT"),
5512
5586
  # metadata: {
5513
5587
  # },
5588
+ # tag_count: 2,
5514
5589
  # version_id: "null",
5515
5590
  # }
5516
5591
  #
@@ -5596,7 +5671,7 @@ module Aws::S3
5596
5671
  # resp.sse_customer_key_md5 #=> String
5597
5672
  # resp.ssekms_key_id #=> String
5598
5673
  # resp.bucket_key_enabled #=> Boolean
5599
- # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
5674
+ # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
5600
5675
  # resp.request_charged #=> String, one of "requester"
5601
5676
  # resp.replication_status #=> String, one of "COMPLETE", "PENDING", "FAILED", "REPLICA"
5602
5677
  # resp.parts_count #=> Integer
@@ -5625,19 +5700,28 @@ module Aws::S3
5625
5700
  # an object. To return ACL information about a different version, use
5626
5701
  # the versionId subresource.
5627
5702
  #
5703
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for S3 Object
5704
+ # Ownership, requests to read ACLs are still supported and return the
5705
+ # `bucket-owner-full-control` ACL with the owner being the account that
5706
+ # created the bucket. For more information, see [ Controlling object
5707
+ # ownership and disabling ACLs][1] in the *Amazon S3 User Guide*.
5708
+ #
5709
+ # </note>
5710
+ #
5628
5711
  # The following operations are related to `GetObjectAcl`\:
5629
5712
  #
5630
- # * [GetObject][1]
5713
+ # * [GetObject][2]
5631
5714
  #
5632
- # * [DeleteObject][2]
5715
+ # * [DeleteObject][3]
5633
5716
  #
5634
- # * [PutObject][3]
5717
+ # * [PutObject][4]
5635
5718
  #
5636
5719
  #
5637
5720
  #
5638
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
5639
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
5640
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
5721
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
5722
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
5723
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
5724
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
5641
5725
  #
5642
5726
  # @option params [required, String] :bucket
5643
5727
  # The bucket name that contains the object for which to get the ACL
@@ -6521,18 +6605,8 @@ module Aws::S3
6521
6605
  # The object key.
6522
6606
  #
6523
6607
  # @option params [String] :range
6524
- # Downloads the specified range bytes of an object. For more information
6525
- # about the HTTP Range header, see
6526
- # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35][1].
6527
- #
6528
- # <note markdown="1"> Amazon S3 doesn't support retrieving multiple ranges of data per
6529
- # `GET` request.
6530
- #
6531
- # </note>
6532
- #
6533
- #
6534
- #
6535
- # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
6608
+ # Because `HeadObject` returns only the metadata for an object, this
6609
+ # parameter has no effect.
6536
6610
  #
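Since the range parameter is ignored here, a byte-range read has to go through `get_object` instead; a minimal sketch (bucket and key are placeholders):

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new
    # head_object returns metadata only; any :range value is ignored.
    head = s3.head_object(bucket: 'examplebucket', key: 'SampleFile.txt')
    head.content_length #=> total object size in bytes

    # get_object honors the Range header for partial reads.
    part = s3.get_object(bucket: 'examplebucket', key: 'SampleFile.txt', range: 'bytes=0-9')
    part.body.read #=> first ten bytes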
6537
6611
  # @option params [String] :version_id
6538
6612
  # VersionId used to reference a specific version of the object.
@@ -6677,7 +6751,7 @@ module Aws::S3
6677
6751
  # resp.sse_customer_key_md5 #=> String
6678
6752
  # resp.ssekms_key_id #=> String
6679
6753
  # resp.bucket_key_enabled #=> Boolean
6680
- # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
6754
+ # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
6681
6755
  # resp.request_charged #=> String, one of "requester"
6682
6756
  # resp.replication_status #=> String, one of "COMPLETE", "PENDING", "FAILED", "REPLICA"
6683
6757
  # resp.parts_count #=> Integer
@@ -6804,18 +6878,17 @@ module Aws::S3
6804
6878
  # storage costs by automatically moving data to the most cost-effective
6805
6879
  # storage access tier, without performance impact or operational
6806
6880
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
6807
- # two low latency and high throughput access tiers. For data that can be
6808
- # accessed asynchronously, you can choose to activate automatic
6809
- # archiving capabilities within the S3 Intelligent-Tiering storage
6810
- # class.
6881
+ # three low latency and high throughput access tiers. To get the lowest
6882
+ # storage cost on data that can be accessed in minutes to hours, you can
6883
+ # choose to activate additional archiving capabilities.
6811
6884
  #
6812
6885
  # The S3 Intelligent-Tiering storage class is the ideal storage class
6813
6886
  # for data with unknown, changing, or unpredictable access patterns,
6814
6887
  # independent of object size or retention period. If the size of an
6815
- # object is less than 128 KB, it is not eligible for auto-tiering.
6816
- # Smaller objects can be stored, but they are always charged at the
6817
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
6818
- # class.
6888
+ # object is less than 128 KB, it is not monitored and not eligible for
6889
+ # auto-tiering. Smaller objects can be stored, but they are always
6890
+ # charged at the Frequent Access tier rates in the S3
6891
+ # Intelligent-Tiering storage class.
6819
6892
  #
6820
6893
  # For more information, see [Storage class for automatically optimizing
6821
6894
  # frequently and infrequently accessed objects][1].
@@ -7285,97 +7358,97 @@ module Aws::S3
7285
7358
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
7286
7359
  #
7287
7360
  #
7288
- # @example Example: To list in-progress multipart uploads on a bucket
7361
+ # @example Example: List next set of multipart uploads when previous result is truncated
7289
7362
  #
7290
- # # The following example lists in-progress multipart uploads on a specific bucket.
7363
+ # # The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next
7364
+ # # set of multipart uploads.
7291
7365
  #
7292
7366
  # resp = client.list_multipart_uploads({
7293
7367
  # bucket: "examplebucket",
7368
+ # key_marker: "nextkeyfrompreviousresponse",
7369
+ # max_uploads: 2,
7370
+ # upload_id_marker: "valuefrompreviousresponse",
7294
7371
  # })
7295
7372
  #
7296
7373
  # resp.to_h outputs the following:
7297
7374
  # {
7375
+ # bucket: "acl1",
7376
+ # is_truncated: true,
7377
+ # key_marker: "",
7378
+ # max_uploads: 2,
7379
+ # next_key_marker: "someobjectkey",
7380
+ # next_upload_id_marker: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
7381
+ # upload_id_marker: "",
7298
7382
  # uploads: [
7299
7383
  # {
7300
7384
  # initiated: Time.parse("2014-05-01T05:40:58.000Z"),
7301
7385
  # initiator: {
7302
- # display_name: "display-name",
7386
+ # display_name: "ownder-display-name",
7303
7387
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7304
7388
  # },
7305
7389
  # key: "JavaFile",
7306
7390
  # owner: {
7307
- # display_name: "display-name",
7308
- # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7391
+ # display_name: "mohanataws",
7392
+ # id: "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7309
7393
  # },
7310
7394
  # storage_class: "STANDARD",
7311
- # upload_id: "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--",
7395
+ # upload_id: "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--",
7312
7396
  # },
7313
7397
  # {
7314
7398
  # initiated: Time.parse("2014-05-01T05:41:27.000Z"),
7315
7399
  # initiator: {
7316
- # display_name: "display-name",
7400
+ # display_name: "ownder-display-name",
7317
7401
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7318
7402
  # },
7319
7403
  # key: "JavaFile",
7320
7404
  # owner: {
7321
- # display_name: "display-name",
7405
+ # display_name: "ownder-display-name",
7322
7406
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7323
7407
  # },
7324
7408
  # storage_class: "STANDARD",
7325
- # upload_id: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
7409
+ # upload_id: "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
7326
7410
  # },
7327
7411
  # ],
7328
7412
  # }
7329
7413
  #
7330
- # @example Example: List next set of multipart uploads when previous result is truncated
7414
+ # @example Example: To list in-progress multipart uploads on a bucket
7331
7415
  #
7332
- # # The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next
7333
- # # setup of multipart uploads.
7416
+ # # The following example lists in-progress multipart uploads on a specific bucket.
7334
7417
  #
7335
7418
  # resp = client.list_multipart_uploads({
7336
7419
  # bucket: "examplebucket",
7337
- # key_marker: "nextkeyfrompreviousresponse",
7338
- # max_uploads: 2,
7339
- # upload_id_marker: "valuefrompreviousresponse",
7340
7420
  # })
7341
7421
  #
7342
7422
  # resp.to_h outputs the following:
7343
7423
  # {
7344
- # bucket: "acl1",
7345
- # is_truncated: true,
7346
- # key_marker: "",
7347
- # max_uploads: 2,
7348
- # next_key_marker: "someobjectkey",
7349
- # next_upload_id_marker: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
7350
- # upload_id_marker: "",
7351
7424
  # uploads: [
7352
7425
  # {
7353
7426
  # initiated: Time.parse("2014-05-01T05:40:58.000Z"),
7354
7427
  # initiator: {
7355
- # display_name: "ownder-display-name",
7428
+ # display_name: "display-name",
7356
7429
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7357
7430
  # },
7358
7431
  # key: "JavaFile",
7359
7432
  # owner: {
7360
- # display_name: "mohanataws",
7361
- # id: "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7433
+ # display_name: "display-name",
7434
+ # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7362
7435
  # },
7363
7436
  # storage_class: "STANDARD",
7364
- # upload_id: "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--",
7437
+ # upload_id: "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--",
7365
7438
  # },
7366
7439
  # {
7367
7440
  # initiated: Time.parse("2014-05-01T05:41:27.000Z"),
7368
7441
  # initiator: {
7369
- # display_name: "ownder-display-name",
7442
+ # display_name: "display-name",
7370
7443
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7371
7444
  # },
7372
7445
  # key: "JavaFile",
7373
7446
  # owner: {
7374
- # display_name: "ownder-display-name",
7447
+ # display_name: "display-name",
7375
7448
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7376
7449
  # },
7377
7450
  # storage_class: "STANDARD",
7378
- # upload_id: "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
7451
+ # upload_id: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
7379
7452
  # },
7380
7453
  # ],
7381
7454
  # }
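The 1.111.0 docs swap the pagination example for a plain listing, but the markers shown above (`key_marker`, `upload_id_marker`, `next_key_marker`, `next_upload_id_marker`, `is_truncated`) still drive pagination. A minimal sketch, using a placeholder bucket name, of walking truncated `list_multipart_uploads` results:

```ruby
require 'aws-sdk-s3'

# Sketch: page through in-progress multipart uploads by feeding the next_*
# markers from each truncated response into the following request.
client = Aws::S3::Client.new
params = { bucket: "examplebucket", max_uploads: 2 } # placeholder bucket
loop do
  resp = client.list_multipart_uploads(params)
  resp.uploads.each { |upload| puts "#{upload.key} #{upload.upload_id}" }
  break unless resp.is_truncated
  params[:key_marker]       = resp.next_key_marker
  params[:upload_id_marker] = resp.next_upload_id_marker
end
```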
@@ -7408,7 +7481,7 @@ module Aws::S3
7408
7481
  # resp.uploads[0].upload_id #=> String
7409
7482
  # resp.uploads[0].key #=> String
7410
7483
  # resp.uploads[0].initiated #=> Time
7411
- # resp.uploads[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
7484
+ # resp.uploads[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
7412
7485
  # resp.uploads[0].owner.display_name #=> String
7413
7486
  # resp.uploads[0].owner.id #=> String
7414
7487
  # resp.uploads[0].initiator.id #=> String
@@ -7788,7 +7861,7 @@ module Aws::S3
7788
7861
  # resp.contents[0].last_modified #=> Time
7789
7862
  # resp.contents[0].etag #=> String
7790
7863
  # resp.contents[0].size #=> Integer
7791
- # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS"
7864
+ # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
7792
7865
  # resp.contents[0].owner.display_name #=> String
7793
7866
  # resp.contents[0].owner.id #=> String
7794
7867
  # resp.name #=> String
@@ -7993,7 +8066,7 @@ module Aws::S3
7993
8066
  # resp.contents[0].last_modified #=> Time
7994
8067
  # resp.contents[0].etag #=> String
7995
8068
  # resp.contents[0].size #=> Integer
7996
- # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS"
8069
+ # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
7997
8070
  # resp.contents[0].owner.display_name #=> String
7998
8071
  # resp.contents[0].owner.id #=> String
7999
8072
  # resp.name #=> String
@@ -8202,7 +8275,7 @@ module Aws::S3
8202
8275
  # resp.initiator.display_name #=> String
8203
8276
  # resp.owner.display_name #=> String
8204
8277
  # resp.owner.id #=> String
8205
- # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
8278
+ # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
8206
8279
  # resp.request_charged #=> String, one of "requester"
8207
8280
  #
8208
8281
  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts AWS API Documentation
@@ -8313,6 +8386,14 @@ module Aws::S3
8313
8386
  # you have an existing application that updates a bucket ACL using the
8314
8387
  # request body, then you can continue to use that approach.
8315
8388
  #
8389
+ # If your bucket uses the bucket owner enforced setting for S3 Object
8390
+ # Ownership, ACLs are disabled and no longer affect permissions. You
8391
+ # must use policies to grant access to your bucket and the objects in
8392
+ # it. Requests to set ACLs or update ACLs fail and return the
8393
+ # `AccessControlListNotSupported` error code. Requests to read ACLs are
8394
+ # still supported. For more information, see [Controlling object
8395
+ # ownership][2] in the *Amazon S3 User Guide*.
8396
+ #
8316
8397
  # **Access Permissions**
8317
8398
  #
8318
8399
  # You can set access permissions using one of the following methods:
@@ -8322,7 +8403,7 @@ module Aws::S3
8322
8403
  # canned ACL has a predefined set of grantees and permissions. Specify
8323
8404
  # the canned ACL name as the value of `x-amz-acl`. If you use this
8324
8405
  # header, you cannot use other access control-specific headers in your
8325
- # request. For more information, see [Canned ACL][2].
8406
+ # request. For more information, see [Canned ACL][3].
8326
8407
  #
8327
8408
  # * Specify access permissions explicitly with the `x-amz-grant-read`,
8328
8409
  # `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and
@@ -8332,7 +8413,7 @@ module Aws::S3
8332
8413
  # permission. If you use these ACL-specific headers, you cannot use
8333
8414
  # the `x-amz-acl` header to set a canned ACL. These parameters map to
8334
8415
  # the set of permissions that Amazon S3 supports in an ACL. For more
8335
- # information, see [Access Control List (ACL) Overview][3].
8416
+ # information, see [Access Control List (ACL) Overview][4].
8336
8417
  #
8337
8418
  # You specify each grantee as a type=value pair, where the type is one
8338
8419
  # of the following:
@@ -8365,7 +8446,7 @@ module Aws::S3
8365
8446
  # * South America (São Paulo)
8366
8447
  #
8367
8448
  # For a list of all the Amazon S3 supported Regions and endpoints,
8368
- # see [Regions and Endpoints][4] in the Amazon Web Services General
8449
+ # see [Regions and Endpoints][5] in the Amazon Web Services General
8369
8450
  # Reference.
8370
8451
  #
8371
8452
  # </note>
@@ -8428,28 +8509,29 @@ module Aws::S3
8428
8509
  # * South America (São Paulo)
8429
8510
  #
8430
8511
  # For a list of all the Amazon S3 supported Regions and endpoints, see
8431
- # [Regions and Endpoints][4] in the Amazon Web Services General
8512
+ # [Regions and Endpoints][5] in the Amazon Web Services General
8432
8513
  # Reference.
8433
8514
  #
8434
8515
  # </note>
8435
8516
  #
8436
8517
  # **Related Resources**
8437
8518
  #
8438
- # * [CreateBucket][5]
8519
+ # * [CreateBucket][6]
8439
8520
  #
8440
- # * [DeleteBucket][6]
8521
+ # * [DeleteBucket][7]
8441
8522
  #
8442
- # * [GetObjectAcl][7]
8523
+ # * [GetObjectAcl][8]
8443
8524
  #
8444
8525
  #
8445
8526
  #
8446
8527
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
8447
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
8448
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
8449
- # [4]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
8450
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
8451
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
8452
- # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
8528
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
8529
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
8530
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
8531
+ # [5]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
8532
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
8533
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
8534
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
8453
8535
  #
8454
8536
  # @option params [String] :acl
8455
8537
  # The canned ACL to apply to the bucket.
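Because the added note above says ACL requests against a bucket owner enforced bucket fail with `AccessControlListNotSupported`, access has to be granted through policies instead. A hedged sketch of granting read access with `put_bucket_policy` rather than an ACL; the account ID, bucket name, and statement are placeholders, not taken from this diff:

```ruby
require 'aws-sdk-s3'
require 'json'

# Sketch: grant access with a bucket policy instead of an ACL when the
# bucket owner enforced setting is active. All identifiers are placeholders.
client = Aws::S3::Client.new
policy = {
  "Version" => "2012-10-17",
  "Statement" => [{
    "Effect"    => "Allow",
    "Principal" => { "AWS" => "arn:aws:iam::111122223333:root" },
    "Action"    => ["s3:GetObject"],
    "Resource"  => "arn:aws:s3:::examplebucket/*"
  }]
}
client.put_bucket_policy(bucket: "examplebucket", policy: policy.to_json)
```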
@@ -8949,18 +9031,17 @@ module Aws::S3
8949
9031
  # storage costs by automatically moving data to the most cost-effective
8950
9032
  # storage access tier, without performance impact or operational
8951
9033
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
8952
- # two low latency and high throughput access tiers. For data that can be
8953
- # accessed asynchronously, you can choose to activate automatic
8954
- # archiving capabilities within the S3 Intelligent-Tiering storage
8955
- # class.
9034
+ # three low latency and high throughput access tiers. To get the lowest
9035
+ # storage cost on data that can be accessed in minutes to hours, you can
9036
+ # choose to activate additional archiving capabilities.
8956
9037
  #
8957
9038
  # The S3 Intelligent-Tiering storage class is the ideal storage class
8958
9039
  # for data with unknown, changing, or unpredictable access patterns,
8959
9040
  # independent of object size or retention period. If the size of an
8960
- # object is less than 128 KB, it is not eligible for auto-tiering.
8961
- # Smaller objects can be stored, but they are always charged at the
8962
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
8963
- # class.
9041
+ # object is less than 128 KB, it is not monitored and not eligible for
9042
+ # auto-tiering. Smaller objects can be stored, but they are always
9043
+ # charged at the Frequent Access tier rates in the S3
9044
+ # Intelligent-Tiering storage class.
8964
9045
  #
8965
9046
  # For more information, see [Storage class for automatically optimizing
8966
9047
  # frequently and infrequently accessed objects][1].
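The reworded text above mentions activating additional archiving capabilities for data that can be accessed in minutes to hours. A hedged sketch of opting a bucket into the archive tiers with `put_bucket_intelligent_tiering_configuration`; the configuration id and day thresholds are illustrative and not taken from this diff:

```ruby
require 'aws-sdk-s3'

# Sketch: enable the optional archive access tiers for Intelligent-Tiering.
# Configuration id and day thresholds are placeholders.
client = Aws::S3::Client.new
client.put_bucket_intelligent_tiering_configuration(
  bucket: "examplebucket",
  id: "ExampleArchiveConfig",
  intelligent_tiering_configuration: {
    id: "ExampleArchiveConfig",
    status: "Enabled",
    tierings: [
      { days: 90,  access_tier: "ARCHIVE_ACCESS" },
      { days: 180, access_tier: "DEEP_ARCHIVE_ACCESS" }
    ]
  }
)
```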
@@ -9297,11 +9378,11 @@ module Aws::S3
9297
9378
  # transition: {
9298
9379
  # date: Time.now,
9299
9380
  # days: 1,
9300
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9381
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9301
9382
  # },
9302
9383
  # noncurrent_version_transition: {
9303
9384
  # noncurrent_days: 1,
9304
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9385
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9305
9386
  # newer_noncurrent_versions: 1,
9306
9387
  # },
9307
9388
  # noncurrent_version_expiration: {
@@ -9487,13 +9568,13 @@ module Aws::S3
9487
9568
  # {
9488
9569
  # date: Time.now,
9489
9570
  # days: 1,
9490
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9571
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9491
9572
  # },
9492
9573
  # ],
9493
9574
  # noncurrent_version_transitions: [
9494
9575
  # {
9495
9576
  # noncurrent_days: 1,
9496
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9577
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9497
9578
  # newer_noncurrent_versions: 1,
9498
9579
  # },
9499
9580
  # ],
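Both lifecycle hunks above add `GLACIER_IR` to the accepted transition storage classes. A minimal sketch of a rule that uses it; the rule id, prefix, and day count are placeholders:

```ruby
require 'aws-sdk-s3'

# Sketch: transition objects under a prefix to the new GLACIER_IR storage
# class after 30 days. Rule id, prefix, and day count are placeholders.
client = Aws::S3::Client.new
client.put_bucket_lifecycle_configuration(
  bucket: "examplebucket",
  lifecycle_configuration: {
    rules: [
      {
        id: "ArchiveToGlacierIR",
        status: "Enabled",
        filter: { prefix: "logs/" },
        transitions: [{ days: 30, storage_class: "GLACIER_IR" }]
      }
    ]
  }
)
```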
@@ -9529,6 +9610,12 @@ module Aws::S3
9529
9610
  # The `Permissions` request element specifies the kind of access the
9530
9611
  # grantee has to the logs.
9531
9612
  #
9613
+ # If the target bucket for log delivery uses the bucket owner enforced
9614
+ # setting for S3 Object Ownership, you can't use the `Grantee` request
9615
+ # element to grant access to others. Permissions can only be granted
9616
+ # using policies. For more information, see [Permissions for server
9617
+ # access log delivery][1] in the *Amazon S3 User Guide*.
9618
+ #
9532
9619
  # **Grantee Values**
9533
9620
  #
9534
9621
  # You can specify the person (grantee) to whom you're assigning access
@@ -9563,29 +9650,30 @@ module Aws::S3
9563
9650
  # />`
9564
9651
  #
9565
9652
  # For more information about server access logging, see [Server Access
9566
- # Logging][1].
9653
+ # Logging][2] in the *Amazon S3 User Guide*.
9567
9654
  #
9568
- # For more information about creating a bucket, see [CreateBucket][2].
9655
+ # For more information about creating a bucket, see [CreateBucket][3].
9569
9656
  # For more information about returning the logging status of a bucket,
9570
- # see [GetBucketLogging][3].
9657
+ # see [GetBucketLogging][4].
9571
9658
  #
9572
9659
  # The following operations are related to `PutBucketLogging`\:
9573
9660
  #
9574
- # * [PutObject][4]
9661
+ # * [PutObject][5]
9575
9662
  #
9576
- # * [DeleteBucket][5]
9663
+ # * [DeleteBucket][6]
9577
9664
  #
9578
- # * [CreateBucket][2]
9665
+ # * [CreateBucket][3]
9579
9666
  #
9580
- # * [GetBucketLogging][3]
9667
+ # * [GetBucketLogging][4]
9581
9668
  #
9582
9669
  #
9583
9670
  #
9584
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html
9585
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
9586
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html
9587
- # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
9588
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
9671
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
9672
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html
9673
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
9674
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html
9675
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
9676
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
9589
9677
  #
9590
9678
  # @option params [required, String] :bucket
9591
9679
  # The name of the bucket for which to set the logging parameters.
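The added note above says a log target bucket that uses the bucket owner enforced setting cannot receive ACL-style `Grantee` grants. A hedged sketch of enabling server access logging with only a target bucket and prefix, leaving delivery permissions to policies as the linked guide describes; bucket names and prefix are placeholders:

```ruby
require 'aws-sdk-s3'

# Sketch: enable server access logging without Grantee elements, as required
# when the target bucket uses bucket owner enforced. Names are placeholders.
client = Aws::S3::Client.new
client.put_bucket_logging(
  bucket: "examplebucket",
  bucket_logging_status: {
    logging_enabled: {
      target_bucket: "examplelogbucket",
      target_prefix: "access-logs/"
    }
  }
)
```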
@@ -9800,20 +9888,20 @@ module Aws::S3
9800
9888
  # notification_configuration: { # required
9801
9889
  # topic_configuration: {
9802
9890
  # id: "NotificationId",
9803
- # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9804
- # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9891
+ # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9892
+ # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9805
9893
  # topic: "TopicArn",
9806
9894
  # },
9807
9895
  # queue_configuration: {
9808
9896
  # id: "NotificationId",
9809
- # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9810
- # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9897
+ # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9898
+ # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9811
9899
  # queue: "QueueArn",
9812
9900
  # },
9813
9901
  # cloud_function_configuration: {
9814
9902
  # id: "NotificationId",
9815
- # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9816
- # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9903
+ # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9904
+ # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9817
9905
  # cloud_function: "CloudFunction",
9818
9906
  # invocation_role: "CloudFunctionInvocationRole",
9819
9907
  # },
@@ -9910,6 +9998,10 @@ module Aws::S3
9910
9998
  # a different account, the request will fail with an HTTP `403 (Access
9911
9999
  # Denied)` error.
9912
10000
  #
10001
+ # @option params [Boolean] :skip_destination_validation
10002
+ # Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations.
10003
+ # True or false value.
10004
+ #
9913
10005
  # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
9914
10006
  #
9915
10007
  #
@@ -9940,7 +10032,7 @@ module Aws::S3
9940
10032
  # {
9941
10033
  # id: "NotificationId",
9942
10034
  # topic_arn: "TopicArn", # required
9943
- # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
10035
+ # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9944
10036
  # filter: {
9945
10037
  # key: {
9946
10038
  # filter_rules: [
@@ -9957,7 +10049,7 @@ module Aws::S3
9957
10049
  # {
9958
10050
  # id: "NotificationId",
9959
10051
  # queue_arn: "QueueArn", # required
9960
- # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
10052
+ # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9961
10053
  # filter: {
9962
10054
  # key: {
9963
10055
  # filter_rules: [
@@ -9974,7 +10066,7 @@ module Aws::S3
9974
10066
  # {
9975
10067
  # id: "NotificationId",
9976
10068
  # lambda_function_arn: "LambdaFunctionArn", # required
9977
- # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
10069
+ # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9978
10070
  # filter: {
9979
10071
  # key: {
9980
10072
  # filter_rules: [
@@ -9987,8 +10079,11 @@ module Aws::S3
9987
10079
  # },
9988
10080
  # },
9989
10081
  # ],
10082
+ # event_bridge_configuration: {
10083
+ # },
9990
10084
  # },
9991
10085
  # expected_bucket_owner: "AccountId",
10086
+ # skip_destination_validation: false,
9992
10087
  # })
9993
10088
  #
9994
10089
  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration AWS API Documentation
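Pulling the additions in this hunk together — the expanded event list, the empty `event_bridge_configuration` element, and the new `skip_destination_validation` flag — a minimal sketch with a placeholder bucket and queue ARN:

```ruby
require 'aws-sdk-s3'

# Sketch: use the event types, EventBridge element, and validation flag
# introduced in this release. The queue ARN and bucket are placeholders.
client = Aws::S3::Client.new
client.put_bucket_notification_configuration(
  bucket: "examplebucket",
  skip_destination_validation: true,
  notification_configuration: {
    queue_configurations: [
      {
        id: "TaggingAndExpirationEvents",
        queue_arn: "arn:aws:sqs:us-west-2:111122223333:example-queue",
        events: ["s3:ObjectTagging:*", "s3:LifecycleExpiration:*"]
      }
    ],
    event_bridge_configuration: {}
  }
)
```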
@@ -10003,10 +10098,10 @@ module Aws::S3
10003
10098
  # Creates or modifies `OwnershipControls` for an Amazon S3 bucket. To
10004
10099
  # use this operation, you must have the `s3:PutBucketOwnershipControls`
10005
10100
  # permission. For more information about Amazon S3 permissions, see
10006
- # [Specifying Permissions in a Policy][1].
10101
+ # [Specifying permissions in a policy][1].
10007
10102
  #
10008
- # For information about Amazon S3 Object Ownership, see [Using Object
10009
- # Ownership][2].
10103
+ # For information about Amazon S3 Object Ownership, see [Using object
10104
+ # ownership][2].
10010
10105
  #
10011
10106
  # The following operations are related to `PutBucketOwnershipControls`\:
10012
10107
  #
@@ -10016,8 +10111,8 @@ module Aws::S3
10016
10111
  #
10017
10112
  #
10018
10113
  #
10019
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
10020
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
10114
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html
10115
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html
10021
10116
  #
10022
10117
  # @option params [required, String] :bucket
10023
10118
  # The name of the Amazon S3 bucket whose `OwnershipControls` you want to
@@ -10036,8 +10131,8 @@ module Aws::S3
10036
10131
  # Denied)` error.
10037
10132
  #
10038
10133
  # @option params [required, Types::OwnershipControls] :ownership_controls
10039
- # The `OwnershipControls` (BucketOwnerPreferred or ObjectWriter) that
10040
- # you want to apply to this Amazon S3 bucket.
10134
+ # The `OwnershipControls` (BucketOwnerEnforced, BucketOwnerPreferred, or
10135
+ # ObjectWriter) that you want to apply to this Amazon S3 bucket.
10041
10136
  #
10042
10137
  # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
10043
10138
  #
@@ -10050,7 +10145,7 @@ module Aws::S3
10050
10145
  # ownership_controls: { # required
10051
10146
  # rules: [ # required
10052
10147
  # {
10053
- # object_ownership: "BucketOwnerPreferred", # required, accepts BucketOwnerPreferred, ObjectWriter
10148
+ # object_ownership: "BucketOwnerPreferred", # required, accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced
10054
10149
  # },
10055
10150
  # ],
10056
10151
  # },
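With `BucketOwnerEnforced` now accepted, applying it looks like the request syntax above with the new value substituted; the bucket name is a placeholder:

```ruby
require 'aws-sdk-s3'

# Sketch: apply the new BucketOwnerEnforced ownership setting, which
# disables ACLs on the bucket. Bucket name is a placeholder.
client = Aws::S3::Client.new
client.put_bucket_ownership_controls(
  bucket: "examplebucket",
  ownership_controls: {
    rules: [{ object_ownership: "BucketOwnerEnforced" }]
  }
)
```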
@@ -10323,7 +10418,7 @@ module Aws::S3
10323
10418
  # destination: { # required
10324
10419
  # bucket: "BucketName", # required
10325
10420
  # account: "AccountId",
10326
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
10421
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
10327
10422
  # access_control_translation: {
10328
10423
  # owner: "Destination", # required, accepts Destination
10329
10424
  # },
@@ -10912,13 +11007,33 @@ module Aws::S3
10912
11007
  # information, see [Access Control List (ACL) Overview][4] and [Managing
10913
11008
  # ACLs Using the REST API][5].
10914
11009
  #
11010
+ # If the bucket that you're uploading objects to uses the bucket owner
11011
+ # enforced setting for S3 Object Ownership, ACLs are disabled and no
11012
+ # longer affect permissions. Buckets that use this setting only accept
11013
+ # PUT requests that don't specify an ACL or PUT requests that specify
11014
+ # bucket owner full control ACLs, such as the
11015
+ # `bucket-owner-full-control` canned ACL or an equivalent form of this
11016
+ # ACL expressed in the XML format. PUT requests that contain other ACLs
11017
+ # (for example, custom grants to certain Amazon Web Services accounts)
11018
+ # fail and return a `400` error with the error code
11019
+ # `AccessControlListNotSupported`.
11020
+ #
11021
+ # For more information, see [ Controlling ownership of objects and
11022
+ # disabling ACLs][6] in the *Amazon S3 User Guide*.
11023
+ #
11024
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for Object
11025
+ # Ownership, all objects written to the bucket by any account will be
11026
+ # owned by the bucket owner.
11027
+ #
11028
+ # </note>
11029
+ #
10915
11030
  # **Storage Class Options**
10916
11031
  #
10917
11032
  # By default, Amazon S3 uses the STANDARD Storage Class to store newly
10918
11033
  # created objects. The STANDARD storage class provides high durability
10919
11034
  # and high availability. Depending on performance needs, you can specify
10920
11035
  # a different Storage Class. Amazon S3 on Outposts only uses the
10921
- # OUTPOSTS Storage Class. For more information, see [Storage Classes][6]
11036
+ # OUTPOSTS Storage Class. For more information, see [Storage Classes][7]
10922
11037
  # in the *Amazon S3 User Guide*.
10923
11038
  #
10924
11039
  # **Versioning**
@@ -10930,14 +11045,14 @@ module Aws::S3
10930
11045
  # object simultaneously, it stores all of the objects.
10931
11046
  #
10932
11047
  # For more information about versioning, see [Adding Objects to
10933
- # Versioning Enabled Buckets][7]. For information about returning the
10934
- # versioning state of a bucket, see [GetBucketVersioning][8].
11048
+ # Versioning Enabled Buckets][8]. For information about returning the
11049
+ # versioning state of a bucket, see [GetBucketVersioning][9].
10935
11050
  #
10936
11051
  # **Related Resources**
10937
11052
  #
10938
- # * [CopyObject][9]
11053
+ # * [CopyObject][10]
10939
11054
  #
10940
- # * [DeleteObject][10]
11055
+ # * [DeleteObject][11]
10941
11056
  #
10942
11057
  #
10943
11058
  #
@@ -10946,11 +11061,12 @@ module Aws::S3
10946
11061
  # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
10947
11062
  # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
10948
11063
  # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html
10949
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
10950
- # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html
10951
- # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
10952
- # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
10953
- # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
11064
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
11065
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
11066
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html
11067
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
11068
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
11069
+ # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
10954
11070
  #
10955
11071
  # @option params [String] :acl
10956
11072
  # The canned ACL to apply to the object. For more information, see
@@ -11220,6 +11336,26 @@ module Aws::S3
11220
11336
  # * {Types::PutObjectOutput#request_charged #request_charged} => String
11221
11337
  #
11222
11338
  #
11339
+ # @example Example: To upload an object (specify optional headers)
11340
+ #
11341
+ # # The following example uploads an object. The request specifies optional request headers to direct S3 to use a specific
11342
+ # # storage class and use server-side encryption.
11343
+ #
11344
+ # resp = client.put_object({
11345
+ # body: "HappyFace.jpg",
11346
+ # bucket: "examplebucket",
11347
+ # key: "HappyFace.jpg",
11348
+ # server_side_encryption: "AES256",
11349
+ # storage_class: "STANDARD_IA",
11350
+ # })
11351
+ #
11352
+ # resp.to_h outputs the following:
11353
+ # {
11354
+ # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11355
+ # server_side_encryption: "AES256",
11356
+ # version_id: "CG612hodqujkf8FaaNfp8U..FIhLROcp",
11357
+ # }
11358
+ #
11223
11359
  # @example Example: To upload an object and specify server-side encryption and object tags
11224
11360
  #
11225
11361
  # # The following example uploads an object. The request specifies the optional server-side encryption option. The request
@@ -11240,22 +11376,20 @@ module Aws::S3
11240
11376
  # version_id: "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt",
11241
11377
  # }
11242
11378
  #
11243
- # @example Example: To upload an object and specify optional tags
11379
+ # @example Example: To create an object.
11244
11380
  #
11245
- # # The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore
11246
- # # S3 returns version ID of the newly created object.
11381
+ # # The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.
11247
11382
  #
11248
11383
  # resp = client.put_object({
11249
- # body: "c:\\HappyFace.jpg",
11384
+ # body: "filetoupload",
11250
11385
  # bucket: "examplebucket",
11251
- # key: "HappyFace.jpg",
11252
- # tagging: "key1=value1&key2=value2",
11386
+ # key: "objectkey",
11253
11387
  # })
11254
11388
  #
11255
11389
  # resp.to_h outputs the following:
11256
11390
  # {
11257
11391
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11258
- # version_id: "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a",
11392
+ # version_id: "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ",
11259
11393
  # }
11260
11394
  #
11261
11395
  # @example Example: To upload object and specify user-defined metadata
@@ -11279,22 +11413,6 @@ module Aws::S3
11279
11413
  # version_id: "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0",
11280
11414
  # }
11281
11415
  #
11282
- # @example Example: To create an object.
11283
- #
11284
- # # The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.
11285
- #
11286
- # resp = client.put_object({
11287
- # body: "filetoupload",
11288
- # bucket: "examplebucket",
11289
- # key: "objectkey",
11290
- # })
11291
- #
11292
- # resp.to_h outputs the following:
11293
- # {
11294
- # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11295
- # version_id: "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ",
11296
- # }
11297
- #
11298
11416
  # @example Example: To upload an object and specify canned ACL.
11299
11417
  #
11300
11418
  # # The following example uploads an object. The request specifies optional canned ACL (access control list) to all READ
@@ -11313,41 +11431,39 @@ module Aws::S3
11313
11431
  # version_id: "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr",
11314
11432
  # }
11315
11433
  #
11316
- # @example Example: To upload an object (specify optional headers)
11434
+ # @example Example: To upload an object
11317
11435
  #
11318
- # # The following example uploads an object. The request specifies optional request headers to directs S3 to use specific
11319
- # # storage class and use server-side encryption.
11436
+ # # The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file
11437
+ # # syntax. S3 returns VersionId of the newly created object.
11320
11438
  #
11321
11439
  # resp = client.put_object({
11322
11440
  # body: "HappyFace.jpg",
11323
11441
  # bucket: "examplebucket",
11324
11442
  # key: "HappyFace.jpg",
11325
- # server_side_encryption: "AES256",
11326
- # storage_class: "STANDARD_IA",
11327
11443
  # })
11328
11444
  #
11329
11445
  # resp.to_h outputs the following:
11330
11446
  # {
11331
11447
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11332
- # server_side_encryption: "AES256",
11333
- # version_id: "CG612hodqujkf8FaaNfp8U..FIhLROcp",
11448
+ # version_id: "tpf3zF08nBplQK1XLOefGskR7mGDwcDk",
11334
11449
  # }
11335
11450
  #
11336
- # @example Example: To upload an object
11451
+ # @example Example: To upload an object and specify optional tags
11337
11452
  #
11338
- # # The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file
11339
- # # syntax. S3 returns VersionId of the newly created object.
11453
+ # # The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore
11454
+ # # S3 returns version ID of the newly created object.
11340
11455
  #
11341
11456
  # resp = client.put_object({
11342
- # body: "HappyFace.jpg",
11457
+ # body: "c:\\HappyFace.jpg",
11343
11458
  # bucket: "examplebucket",
11344
11459
  # key: "HappyFace.jpg",
11460
+ # tagging: "key1=value1&key2=value2",
11345
11461
  # })
11346
11462
  #
11347
11463
  # resp.to_h outputs the following:
11348
11464
  # {
11349
11465
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11350
- # version_id: "tpf3zF08nBplQK1XLOefGskR7mGDwcDk",
11466
+ # version_id: "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a",
11351
11467
  # }
11352
11468
  #
11353
11469
  # @example Streaming a file from disk
@@ -11379,7 +11495,7 @@ module Aws::S3
11379
11495
  # "MetadataKey" => "MetadataValue",
11380
11496
  # },
11381
11497
  # server_side_encryption: "AES256", # accepts AES256, aws:kms
11382
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
11498
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
11383
11499
  # website_redirect_location: "WebsiteRedirectLocation",
11384
11500
  # sse_customer_algorithm: "SSECustomerAlgorithm",
11385
11501
  # sse_customer_key: "SSECustomerKey",
@@ -11432,6 +11548,14 @@ module Aws::S3
11432
11548
  # information, see [Access Control List (ACL) Overview][2] in the
11433
11549
  # *Amazon S3 User Guide*.
11434
11550
  #
11551
+ # If your bucket uses the bucket owner enforced setting for S3 Object
11552
+ # Ownership, ACLs are disabled and no longer affect permissions. You
11553
+ # must use policies to grant access to your bucket and the objects in
11554
+ # it. Requests to set ACLs or update ACLs fail and return the
11555
+ # `AccessControlListNotSupported` error code. Requests to read ACLs are
11556
+ # still supported. For more information, see [Controlling object
11557
+ # ownership][3] in the *Amazon S3 User Guide*.
11558
+ #
11435
11559
  # **Access Permissions**
11436
11560
  #
11437
11561
  # You can set access permissions using one of the following methods:
@@ -11441,7 +11565,7 @@ module Aws::S3
11441
11565
  # ACL has a predefined set of grantees and permissions. Specify the
11442
11566
  # canned ACL name as the value of `x-amz-acl`. If you use this header,
11443
11567
  # you cannot use other access control-specific headers in your
11444
- # request. For more information, see [Canned ACL][3].
11568
+ # request. For more information, see [Canned ACL][4].
11445
11569
  #
11446
11570
  # * Specify access permissions explicitly with the `x-amz-grant-read`,
11447
11571
  # `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and
@@ -11484,7 +11608,7 @@ module Aws::S3
11484
11608
  # * South America (São Paulo)
11485
11609
  #
11486
11610
  # For a list of all the Amazon S3 supported Regions and endpoints,
11487
- # see [Regions and Endpoints][4] in the Amazon Web Services General
11611
+ # see [Regions and Endpoints][5] in the Amazon Web Services General
11488
11612
  # Reference.
11489
11613
  #
11490
11614
  # </note>
@@ -11545,7 +11669,7 @@ module Aws::S3
11545
11669
  # * South America (São Paulo)
11546
11670
  #
11547
11671
  # For a list of all the Amazon S3 supported Regions and endpoints, see
11548
- # [Regions and Endpoints][4] in the Amazon Web Services General
11672
+ # [Regions and Endpoints][5] in the Amazon Web Services General
11549
11673
  # Reference.
11550
11674
  #
11551
11675
  # </note>
@@ -11558,18 +11682,19 @@ module Aws::S3
11558
11682
  #
11559
11683
  # **Related Resources**
11560
11684
  #
11561
- # * [CopyObject][5]
11685
+ # * [CopyObject][6]
11562
11686
  #
11563
- # * [GetObject][6]
11687
+ # * [GetObject][7]
11564
11688
  #
11565
11689
  #
11566
11690
  #
11567
11691
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions
11568
11692
  # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
11569
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
11570
- # [4]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
11571
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
11572
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
11693
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
11694
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
11695
+ # [5]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
11696
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
11697
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
11573
11698
  #
11574
11699
  # @option params [String] :acl
11575
11700
  # The canned ACL to apply to the object. For more information, see
@@ -12703,7 +12828,7 @@ module Aws::S3
12703
12828
  # value: "MetadataValue",
12704
12829
  # },
12705
12830
  # ],
12706
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
12831
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
12707
12832
  # },
12708
12833
  # },
12709
12834
  # },
@@ -12736,10 +12861,11 @@ module Aws::S3
12736
12861
  # This action is not supported by Amazon S3 on Outposts.
12737
12862
  #
12738
12863
  # For more information about Amazon S3 Select, see [Selecting Content
12739
- # from Objects][1] in the *Amazon S3 User Guide*.
12864
+ # from Objects][1] and [SELECT Command][2] in the *Amazon S3 User
12865
+ # Guide*.
12740
12866
  #
12741
12867
  # For more information about using SQL with Amazon S3 Select, see [ SQL
12742
- # Reference for Amazon S3 Select and S3 Glacier Select][2] in the
12868
+ # Reference for Amazon S3 Select and S3 Glacier Select][3] in the
12743
12869
  # *Amazon S3 User Guide*.
12744
12870
  #
12745
12871
  #
@@ -12748,7 +12874,7 @@ module Aws::S3
12748
12874
  #
12749
12875
  # You must have `s3:GetObject` permission for this operation. Amazon S3
12750
12876
  # Select does not support anonymous access. For more information about
12751
- # permissions, see [Specifying Permissions in a Policy][3] in the
12877
+ # permissions, see [Specifying Permissions in a Policy][4] in the
12752
12878
  # *Amazon S3 User Guide*.
12753
12879
  #
12754
12880
  #
@@ -12775,70 +12901,71 @@ module Aws::S3
12775
12901
  #
12776
12902
  # For objects that are encrypted with customer-provided encryption
12777
12903
  # keys (SSE-C), you must use HTTPS, and you must use the headers that
12778
- # are documented in the [GetObject][4]. For more information about
12904
+ # are documented in the [GetObject][5]. For more information about
12779
12905
  # SSE-C, see [Server-Side Encryption (Using Customer-Provided
12780
- # Encryption Keys)][5] in the *Amazon S3 User Guide*.
12906
+ # Encryption Keys)][6] in the *Amazon S3 User Guide*.
12781
12907
  #
12782
12908
  # For objects that are encrypted with Amazon S3 managed encryption
12783
12909
  # keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS),
12784
12910
  # server-side encryption is handled transparently, so you don't need
12785
12911
  # to specify anything. For more information about server-side
12786
12912
  # encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using
12787
- # Server-Side Encryption][6] in the *Amazon S3 User Guide*.
12913
+ # Server-Side Encryption][7] in the *Amazon S3 User Guide*.
12788
12914
  #
12789
12915
  # **Working with the Response Body**
12790
12916
  #
12791
12917
  # Given the response size is unknown, Amazon S3 Select streams the
12792
12918
  # response as a series of messages and includes a `Transfer-Encoding`
12793
12919
  # header with `chunked` as its value in the response. For more
12794
- # information, see [Appendix: SelectObjectContent Response][7].
12920
+ # information, see [Appendix: SelectObjectContent Response][8].
12795
12921
  #
12796
12922
  #
12797
12923
  #
12798
12924
  # **GetObject Support**
12799
12925
  #
12800
12926
  # The `SelectObjectContent` action does not support the following
12801
- # `GetObject` functionality. For more information, see [GetObject][4].
12927
+ # `GetObject` functionality. For more information, see [GetObject][5].
12802
12928
  #
12803
12929
  # * `Range`\: Although you can specify a scan range for an Amazon S3
12804
- # Select request (see [SelectObjectContentRequest - ScanRange][8] in
12930
+ # Select request (see [SelectObjectContentRequest - ScanRange][9] in
12805
12931
  # the request parameters), you cannot specify the range of bytes of an
12806
12932
  # object to return.
12807
12933
  #
12808
12934
  # * GLACIER, DEEP\_ARCHIVE and REDUCED\_REDUNDANCY storage classes: You
12809
12935
  # cannot specify the GLACIER, DEEP\_ARCHIVE, or `REDUCED_REDUNDANCY`
12810
12936
  # storage classes. For more information, about storage classes see
12811
- # [Storage Classes][9] in the *Amazon S3 User Guide*.
12937
+ # [Storage Classes][10] in the *Amazon S3 User Guide*.
12812
12938
  #
12813
12939
  #
12814
12940
  #
12815
12941
  # **Special Errors**
12816
12942
  #
12817
12943
  # For a list of special errors for this operation, see [List of SELECT
12818
- # Object Content Error Codes][10]
12944
+ # Object Content Error Codes][11]
12819
12945
  #
12820
12946
  # **Related Resources**
12821
12947
  #
12822
- # * [GetObject][4]
12948
+ # * [GetObject][5]
12823
12949
  #
12824
- # * [GetBucketLifecycleConfiguration][11]
12950
+ # * [GetBucketLifecycleConfiguration][12]
12825
12951
  #
12826
- # * [PutBucketLifecycleConfiguration][12]
12952
+ # * [PutBucketLifecycleConfiguration][13]
12827
12953
  #
12828
12954
  #
12829
12955
  #
12830
12956
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html
12831
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html
12832
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
12833
- # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
12834
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
12835
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
12836
- # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html
12837
- # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange
12838
- # [9]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro
12839
- # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList
12840
- # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
12841
- # [12]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
12957
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html
12958
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html
12959
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
12960
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
12961
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
12962
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
12963
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html
12964
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange
12965
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro
12966
+ # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList
12967
+ # [12]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
12968
+ # [13]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
12842
12969
  #
12843
12970
  # @option params [required, String] :bucket
12844
12971
  # The S3 bucket.
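Because the response is streamed as chunked event messages, the Ruby client is normally called with a block that registers event handlers. A hedged sketch, assuming a CSV object with a header row and placeholder bucket, key, and SQL expression:

```ruby
require 'aws-sdk-s3'

# Sketch: run an S3 Select query and print the streamed record payloads.
# Bucket, key, and the SQL expression are placeholders; the object is
# assumed to be a CSV file with a header row.
client = Aws::S3::Client.new
client.select_object_content(
  bucket: "examplebucket",
  key: "data.csv",
  expression_type: "SQL",
  expression: "SELECT s.name FROM S3Object s WHERE s.city = 'Seattle'",
  input_serialization: { csv: { file_header_info: "USE" } },
  output_serialization: { csv: {} }
) do |stream|
  stream.on_records_event do |event|
    print event.payload.read
  end
end
```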
@@ -13691,45 +13818,45 @@ module Aws::S3
13691
13818
  # * {Types::UploadPartCopyOutput#request_charged #request_charged} => String
13692
13819
  #
13693
13820
  #
13694
- # @example Example: To upload a part by copying data from an existing object as data source
13821
+ # @example Example: To upload a part by copying byte range from an existing object as data source
13695
13822
  #
13696
- # # The following example uploads a part of a multipart upload by copying data from an existing object as data source.
13823
+ # # The following example uploads a part of a multipart upload by copying a specified byte range from an existing object as
13824
+ # # data source.
13697
13825
  #
13698
13826
  # resp = client.upload_part_copy({
13699
13827
  # bucket: "examplebucket",
13700
13828
  # copy_source: "/bucketname/sourceobjectkey",
13829
+ # copy_source_range: "bytes=1-100000",
13701
13830
  # key: "examplelargeobject",
13702
- # part_number: 1,
13831
+ # part_number: 2,
13703
13832
  # upload_id: "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--",
13704
13833
  # })
13705
13834
  #
13706
13835
  # resp.to_h outputs the following:
13707
13836
  # {
13708
13837
  # copy_part_result: {
13709
- # etag: "\"b0c6f0e7e054ab8fa2536a2677f8734d\"",
13710
- # last_modified: Time.parse("2016-12-29T21:24:43.000Z"),
13838
+ # etag: "\"65d16d19e65a7508a51f043180edcc36\"",
13839
+ # last_modified: Time.parse("2016-12-29T21:44:28.000Z"),
13711
13840
  # },
13712
13841
  # }
13713
13842
  #
13714
- # @example Example: To upload a part by copying byte range from an existing object as data source
13843
+ # @example Example: To upload a part by copying data from an existing object as data source
13715
13844
  #
13716
- # # The following example uploads a part of a multipart upload by copying a specified byte range from an existing object as
13717
- # # data source.
13845
+ # # The following example uploads a part of a multipart upload by copying data from an existing object as data source.
13718
13846
  #
13719
13847
  # resp = client.upload_part_copy({
13720
13848
  # bucket: "examplebucket",
13721
13849
  # copy_source: "/bucketname/sourceobjectkey",
13722
- # copy_source_range: "bytes=1-100000",
13723
13850
  # key: "examplelargeobject",
13724
- # part_number: 2,
13851
+ # part_number: 1,
13725
13852
  # upload_id: "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--",
13726
13853
  # })
13727
13854
  #
13728
13855
  # resp.to_h outputs the following:
13729
13856
  # {
13730
13857
  # copy_part_result: {
13731
- # etag: "\"65d16d19e65a7508a51f043180edcc36\"",
13732
- # last_modified: Time.parse("2016-12-29T21:44:28.000Z"),
13858
+ # etag: "\"b0c6f0e7e054ab8fa2536a2677f8734d\"",
13859
+ # last_modified: Time.parse("2016-12-29T21:24:43.000Z"),
13733
13860
  # },
13734
13861
  # }
13735
13862
  #
@@ -14061,7 +14188,7 @@ module Aws::S3
14061
14188
  # sse_customer_algorithm: "SSECustomerAlgorithm",
14062
14189
  # ssekms_key_id: "SSEKMSKeyId",
14063
14190
  # sse_customer_key_md5: "SSECustomerKeyMD5",
14064
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
14191
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
14065
14192
  # tag_count: 1,
14066
14193
  # version_id: "ObjectVersionId",
14067
14194
  # bucket_key_enabled: false,
@@ -14089,7 +14216,7 @@ module Aws::S3
14089
14216
  params: params,
14090
14217
  config: config)
14091
14218
  context[:gem_name] = 'aws-sdk-s3'
14092
- context[:gem_version] = '1.107.0'
14219
+ context[:gem_version] = '1.111.0'
14093
14220
  Seahorse::Client::Request.new(handlers, context)
14094
14221
  end
14095
14222