aws-sdk-s3 1.107.0 → 1.109.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -155,7 +155,9 @@ module Aws::S3
155
155
  # * EC2/ECS IMDS instance profile - When used by default, the timeouts
156
156
  # are very aggressive. Construct and pass an instance of
157
157
  # `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
158
- # enable retries and extended timeouts.
158
+ # enable retries and extended timeouts. Instance profile credential
159
+ # fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED']
160
+ # to true.
159
161
  #
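A minimal Ruby sketch of the credential guidance above, assuming an EC2/ECS environment; the region value is a placeholder and the two approaches are alternatives, not steps to combine.

```ruby
# Sketch only: either disable IMDS credential lookups for the default
# provider chain, or build the instance profile provider explicitly so it
# retries and waits longer than the aggressive defaults.
require 'aws-sdk-s3'

# Alternative 1: skip EC2 IMDS credential fetching in the default chain.
# ENV['AWS_EC2_METADATA_DISABLED'] = 'true'

# Alternative 2: pass an explicitly configured provider to the client.
credentials = Aws::InstanceProfileCredentials.new(
  retries: 3,             # retry transient IMDS failures
  http_open_timeout: 2.5  # seconds
)
s3 = Aws::S3::Client.new(region: 'us-east-1', credentials: credentials)
```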
160
162
  # @option options [required, String] :region
161
163
  # The AWS region to connect to. The configured `:region` is
@@ -896,11 +898,28 @@ module Aws::S3
896
898
  # Control List (ACL) Overview][10] and [Managing ACLs Using the REST
897
899
  # API][11].
898
900
  #
901
+ # If the bucket that you're copying objects to uses the bucket owner
902
+ # enforced setting for S3 Object Ownership, ACLs are disabled and no
903
+ # longer affect permissions. Buckets that use this setting only accept
904
+ # PUT requests that don't specify an ACL or PUT requests that specify
905
+ # bucket owner full control ACLs, such as the
906
+ # `bucket-owner-full-control` canned ACL or an equivalent form of this
907
+ # ACL expressed in the XML format.
908
+ #
909
+ # For more information, see [Controlling ownership of objects and
910
+ # disabling ACLs][12] in the *Amazon S3 User Guide*.
911
+ #
912
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for Object
913
+ # Ownership, all objects written to the bucket by any account will be
914
+ # owned by the bucket owner.
915
+ #
916
+ # </note>
917
+ #
899
918
  # **Storage Class Options**
900
919
  #
901
920
  # You can use the `CopyObject` action to change the storage class of an
902
921
  # object that is already stored in Amazon S3 using the `StorageClass`
903
- # parameter. For more information, see [Storage Classes][12] in the
922
+ # parameter. For more information, see [Storage Classes][13] in the
904
923
  # *Amazon S3 User Guide*.
905
924
  #
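Taken together, the Object Ownership note and the `StorageClass` paragraph above translate into a `copy_object` call along these lines; this is a sketch only, and the bucket and key names are placeholders.

```ruby
# Sketch only: copy into a destination bucket that uses the bucket owner
# enforced setting, changing the storage class of the copy at the same time.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')
s3.copy_object(
  bucket: 'destination-bucket',                     # placeholder; ACLs disabled on this bucket
  key: 'reports/copy-of-report.csv',
  copy_source: 'source-bucket/reports/report.csv',  # placeholder source
  acl: 'bucket-owner-full-control',                 # the only ACL such a bucket accepts; omitting :acl also works
  storage_class: 'STANDARD_IA'                      # change the storage class of the copy
)
```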
906
925
  # **Versioning**
@@ -921,15 +940,15 @@ module Aws::S3
921
940
  #
922
941
  # If the source object's storage class is GLACIER, you must restore a
923
942
  # copy of this object before you can use it as a source object for the
924
- # copy operation. For more information, see [RestoreObject][13].
943
+ # copy operation. For more information, see [RestoreObject][14].
925
944
  #
926
945
  # The following operations are related to `CopyObject`\:
927
946
  #
928
- # * [PutObject][14]
947
+ # * [PutObject][15]
929
948
  #
930
- # * [GetObject][15]
949
+ # * [GetObject][16]
931
950
  #
932
- # For more information, see [Copying Objects][16].
951
+ # For more information, see [Copying Objects][17].
933
952
  #
934
953
  #
935
954
  #
@@ -944,11 +963,12 @@ module Aws::S3
944
963
  # [9]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
945
964
  # [10]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
946
965
  # [11]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html
947
- # [12]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
948
- # [13]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
949
- # [14]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
950
- # [15]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
951
- # [16]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html
966
+ # [12]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
967
+ # [13]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
968
+ # [14]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
969
+ # [15]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
970
+ # [16]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
971
+ # [17]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html
952
972
  #
953
973
  # @option params [String] :acl
954
974
  # The canned ACL to apply to the object.
@@ -1270,7 +1290,7 @@ module Aws::S3
1270
1290
  # metadata_directive: "COPY", # accepts COPY, REPLACE
1271
1291
  # tagging_directive: "COPY", # accepts COPY, REPLACE
1272
1292
  # server_side_encryption: "AES256", # accepts AES256, aws:kms
1273
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
1293
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
1274
1294
  # website_redirect_location: "WebsiteRedirectLocation",
1275
1295
  # sse_customer_algorithm: "SSECustomerAlgorithm",
1276
1296
  # sse_customer_key: "SSECustomerKey",
@@ -1343,22 +1363,33 @@ module Aws::S3
1343
1363
  #
1344
1364
  # </note>
1345
1365
  #
1366
+ # **Access control lists (ACLs)**
1367
+ #
1346
1368
  # When creating a bucket using this operation, you can optionally
1347
- # specify the accounts or groups that should be granted specific
1348
- # permissions on the bucket. There are two ways to grant the appropriate
1349
- # permissions using the request headers.
1369
+ # configure the bucket ACL to specify the accounts or groups that should
1370
+ # be granted specific permissions on the bucket.
1371
+ #
1372
+ # If your CreateBucket request includes the `BucketOwnerEnforced` value
1373
+ # for the `x-amz-object-ownership` header, your request can either not
1374
+ # specify an ACL or specify bucket owner full control ACLs, such as the
1375
+ # `bucket-owner-full-control` canned ACL or an equivalent ACL expressed
1376
+ # in the XML format. For more information, see [Controlling object
1377
+ # ownership][5] in the *Amazon S3 User Guide*.
1378
+ #
1379
+ # There are two ways to grant the appropriate permissions using the
1380
+ # request headers.
1350
1381
  #
1351
1382
  # * Specify a canned ACL using the `x-amz-acl` request header. Amazon S3
1352
1383
  # supports a set of predefined ACLs, known as *canned ACLs*. Each
1353
1384
  # canned ACL has a predefined set of grantees and permissions. For
1354
- # more information, see [Canned ACL][5].
1385
+ # more information, see [Canned ACL][6].
1355
1386
  #
1356
1387
  # * Specify access permissions explicitly using the `x-amz-grant-read`,
1357
1388
  # `x-amz-grant-write`, `x-amz-grant-read-acp`,
1358
1389
  # `x-amz-grant-write-acp`, and `x-amz-grant-full-control` headers.
1359
1390
  # These headers map to the set of permissions Amazon S3 supports in an
1360
1391
  # ACL. For more information, see [Access control list (ACL)
1361
- # overview][6].
1392
+ # overview][7].
1362
1393
  #
1363
1394
  # You specify each grantee as a type=value pair, where the type is one
1364
1395
  # of the following:
@@ -1391,7 +1422,7 @@ module Aws::S3
1391
1422
  # * South America (São Paulo)
1392
1423
  #
1393
1424
  # For a list of all the Amazon S3 supported Regions and endpoints,
1394
- # see [Regions and Endpoints][7] in the Amazon Web Services General
1425
+ # see [Regions and Endpoints][8] in the Amazon Web Services General
1395
1426
  # Reference.
1396
1427
  #
1397
1428
  # </note>
@@ -1409,22 +1440,29 @@ module Aws::S3
1409
1440
  #
1410
1441
  # **Permissions**
1411
1442
  #
1412
- # If your `CreateBucket` request specifies ACL permissions and the ACL
1413
- # is public-read, public-read-write, authenticated-read, or if you
1414
- # specify access permissions explicitly through any other ACL, both
1415
- # `s3:CreateBucket` and `s3:PutBucketAcl` permissions are needed. If the
1416
- # ACL the `CreateBucket` request is private, only `s3:CreateBucket`
1417
- # permission is needed.
1443
+ # In addition to `s3:CreateBucket`, the following permissions are
1444
+ # required when your CreateBucket request includes specific headers:
1445
+ #
1446
+ # * **ACLs** - If your `CreateBucket` request specifies ACL permissions
1447
+ # and the ACL is public-read, public-read-write, authenticated-read,
1448
+ # or if you specify access permissions explicitly through any other
1449
+ # ACL, both `s3:CreateBucket` and `s3:PutBucketAcl` permissions are
1450
+ # needed. If the ACL for the `CreateBucket` request is private or the request
1451
+ # doesn't specify any ACLs, only `s3:CreateBucket` permission is needed.
1418
1452
  #
1419
- # If `ObjectLockEnabledForBucket` is set to true in your `CreateBucket`
1420
- # request, `s3:PutBucketObjectLockConfiguration` and
1421
- # `s3:PutBucketVersioning` permissions are required.
1453
+ # * **Object Lock** - If `ObjectLockEnabledForBucket` is set to true in
1454
+ # your `CreateBucket` request, `s3:PutBucketObjectLockConfiguration`
1455
+ # and `s3:PutBucketVersioning` permissions are required.
1456
+ #
1457
+ # * **S3 Object Ownership** - If your CreateBucket request includes
1458
+ # the `x-amz-object-ownership` header, `s3:PutBucketOwnershipControls`
1459
+ # permission is required.
1422
1460
  #
1423
1461
  # The following operations are related to `CreateBucket`\:
1424
1462
  #
1425
- # * [PutObject][8]
1463
+ # * [PutObject][9]
1426
1464
  #
1427
- # * [DeleteBucket][9]
1465
+ # * [DeleteBucket][10]
1428
1466
  #
1429
1467
  #
1430
1468
  #
@@ -1432,11 +1470,12 @@ module Aws::S3
1432
1470
  # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html
1433
1471
  # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
1434
1472
  # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
1435
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
1436
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
1437
- # [7]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
1438
- # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
1439
- # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
1473
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
1474
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
1475
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html
1476
+ # [8]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
1477
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
1478
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
1440
1479
  #
1441
1480
  # @option params [String] :acl
1442
1481
  # The canned ACL to apply to the bucket.
@@ -1470,6 +1509,24 @@ module Aws::S3
1470
1509
  # Specifies whether you want S3 Object Lock to be enabled for the new
1471
1510
  # bucket.
1472
1511
  #
1512
+ # @option params [String] :object_ownership
1513
+ # The container element for object ownership for a bucket's ownership
1514
+ # controls.
1515
+ #
1516
+ # BucketOwnerPreferred - Objects uploaded to the bucket change ownership
1517
+ # to the bucket owner if the objects are uploaded with the
1518
+ # `bucket-owner-full-control` canned ACL.
1519
+ #
1520
+ # ObjectWriter - The uploading account will own the object if the object
1521
+ # is uploaded with the `bucket-owner-full-control` canned ACL.
1522
+ #
1523
+ # BucketOwnerEnforced - Access control lists (ACLs) are disabled and no
1524
+ # longer affect permissions. The bucket owner automatically owns and has
1525
+ # full control over every object in the bucket. The bucket only accepts
1526
+ # PUT requests that don't specify an ACL or bucket owner full control
1527
+ # ACLs, such as the `bucket-owner-full-control` canned ACL or an
1528
+ # equivalent form of this ACL expressed in the XML format.
1529
+ #
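A minimal sketch of the new `:object_ownership` option described above; the bucket name is a placeholder and the caller is assumed to hold `s3:CreateBucket` and `s3:PutBucketOwnershipControls`.

```ruby
# Sketch only: create a bucket with ACLs disabled from the start.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')
s3.create_bucket(
  bucket: 'example-owner-enforced-bucket',  # placeholder name
  object_ownership: 'BucketOwnerEnforced'   # request must omit non-owner ACLs
)
```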
1473
1530
  # @return [Types::CreateBucketOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
1474
1531
  #
1475
1532
  # * {Types::CreateBucketOutput#location #location} => String
@@ -1518,6 +1575,7 @@ module Aws::S3
1518
1575
  # grant_write: "GrantWrite",
1519
1576
  # grant_write_acp: "GrantWriteACP",
1520
1577
  # object_lock_enabled_for_bucket: false,
1578
+ # object_ownership: "BucketOwnerPreferred", # accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced
1521
1579
  # })
1522
1580
  #
1523
1581
  # @example Response structure
@@ -1996,7 +2054,7 @@ module Aws::S3
1996
2054
  # "MetadataKey" => "MetadataValue",
1997
2055
  # },
1998
2056
  # server_side_encryption: "AES256", # accepts AES256, aws:kms
1999
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
2057
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
2000
2058
  # website_redirect_location: "WebsiteRedirectLocation",
2001
2059
  # sse_customer_algorithm: "SSECustomerAlgorithm",
2002
2060
  # sse_customer_key: "SSECustomerKey",
@@ -2265,18 +2323,17 @@ module Aws::S3
2265
2323
  # storage costs by automatically moving data to the most cost-effective
2266
2324
  # storage access tier, without performance impact or operational
2267
2325
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
2268
- # two low latency and high throughput access tiers. For data that can be
2269
- # accessed asynchronously, you can choose to activate automatic
2270
- # archiving capabilities within the S3 Intelligent-Tiering storage
2271
- # class.
2326
+ # three low latency and high throughput access tiers. To get the lowest
2327
+ # storage cost on data that can be accessed in minutes to hours, you can
2328
+ # choose to activate additional archiving capabilities.
2272
2329
  #
2273
2330
  # The S3 Intelligent-Tiering storage class is the ideal storage class
2274
2331
  # for data with unknown, changing, or unpredictable access patterns,
2275
2332
  # independent of object size or retention period. If the size of an
2276
- # object is less than 128 KB, it is not eligible for auto-tiering.
2277
- # Smaller objects can be stored, but they are always charged at the
2278
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
2279
- # class.
2333
+ # object is less than 128 KB, it is not monitored and not eligible for
2334
+ # auto-tiering. Smaller objects can be stored, but they are always
2335
+ # charged at the Frequent Access tier rates in the S3
2336
+ # Intelligent-Tiering storage class.
2280
2337
  #
2281
2338
  # For more information, see [Storage class for automatically optimizing
2282
2339
  # frequently and infrequently accessed objects][1].
@@ -2910,15 +2967,6 @@ module Aws::S3
2910
2967
  # * {Types::DeleteObjectOutput#request_charged #request_charged} => String
2911
2968
  #
2912
2969
  #
2913
- # @example Example: To delete an object (from a non-versioned bucket)
2914
- #
2915
- # # The following example deletes an object from a non-versioned bucket.
2916
- #
2917
- # resp = client.delete_object({
2918
- # bucket: "ExampleBucket",
2919
- # key: "HappyFace.jpg",
2920
- # })
2921
- #
2922
2970
  # @example Example: To delete an object
2923
2971
  #
2924
2972
  # # The following example deletes an object from an S3 bucket.
@@ -2932,6 +2980,15 @@ module Aws::S3
2932
2980
  # {
2933
2981
  # }
2934
2982
  #
2983
+ # @example Example: To delete an object (from a non-versioned bucket)
2984
+ #
2985
+ # # The following example deletes an object from a non-versioned bucket.
2986
+ #
2987
+ # resp = client.delete_object({
2988
+ # bucket: "ExampleBucket",
2989
+ # key: "HappyFace.jpg",
2990
+ # })
2991
+ #
2935
2992
  # @example Request syntax with placeholder values
2936
2993
  #
2937
2994
  # resp = client.delete_object({
@@ -3024,35 +3081,35 @@ module Aws::S3
3024
3081
  # * {Types::DeleteObjectTaggingOutput#version_id #version_id} => String
3025
3082
  #
3026
3083
  #
3027
- # @example Example: To remove tag set from an object
3084
+ # @example Example: To remove tag set from an object version
3028
3085
  #
3029
- # # The following example removes tag set associated with the specified object. If the bucket is versioning enabled, the
3030
- # # operation removes tag set from the latest object version.
3086
+ # # The following example removes tag set associated with the specified object version. The request specifies both the
3087
+ # # object key and object version.
3031
3088
  #
3032
3089
  # resp = client.delete_object_tagging({
3033
3090
  # bucket: "examplebucket",
3034
3091
  # key: "HappyFace.jpg",
3092
+ # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
3035
3093
  # })
3036
3094
  #
3037
3095
  # resp.to_h outputs the following:
3038
3096
  # {
3039
- # version_id: "null",
3097
+ # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
3040
3098
  # }
3041
3099
  #
3042
- # @example Example: To remove tag set from an object version
3100
+ # @example Example: To remove tag set from an object
3043
3101
  #
3044
- # # The following example removes tag set associated with the specified object version. The request specifies both the
3045
- # # object key and object version.
3102
+ # # The following example removes tag set associated with the specified object. If the bucket is versioning enabled, the
3103
+ # # operation removes tag set from the latest object version.
3046
3104
  #
3047
3105
  # resp = client.delete_object_tagging({
3048
3106
  # bucket: "examplebucket",
3049
3107
  # key: "HappyFace.jpg",
3050
- # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
3051
3108
  # })
3052
3109
  #
3053
3110
  # resp.to_h outputs the following:
3054
3111
  # {
3055
- # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
3112
+ # version_id: "null",
3056
3113
  # }
3057
3114
  #
3058
3115
  # @example Request syntax with placeholder values
@@ -3437,15 +3494,24 @@ module Aws::S3
3437
3494
  # can return the ACL of the bucket without using an authorization
3438
3495
  # header.
3439
3496
  #
3497
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for S3 Object
3498
+ # Ownership, requests to read ACLs are still supported and return the
3499
+ # `bucket-owner-full-control` ACL with the owner being the account that
3500
+ # created the bucket. For more information, see [Controlling object
3501
+ # ownership and disabling ACLs][1] in the *Amazon S3 User Guide*.
3502
+ #
3503
+ # </note>
3504
+ #
3440
3505
  # **Related Resources**
3441
3506
  #
3442
- # * [ListObjects][1]
3507
+ # * [ListObjects][2]
3443
3508
  #
3444
3509
  # ^
3445
3510
  #
3446
3511
  #
3447
3512
  #
3448
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
3513
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
3514
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
3449
3515
  #
3450
3516
  # @option params [required, String] :bucket
3451
3517
  # Specifies the S3 bucket whose ACL is being requested.
@@ -3731,18 +3797,17 @@ module Aws::S3
3731
3797
  # storage costs by automatically moving data to the most cost-effective
3732
3798
  # storage access tier, without performance impact or operational
3733
3799
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
3734
- # two low latency and high throughput access tiers. For data that can be
3735
- # accessed asynchronously, you can choose to activate automatic
3736
- # archiving capabilities within the S3 Intelligent-Tiering storage
3737
- # class.
3800
+ # three low latency and high throughput access tiers. To get the lowest
3801
+ # storage cost on data that can be accessed in minutes to hours, you can
3802
+ # choose to activate additional archiving capabilities.
3738
3803
  #
3739
3804
  # The S3 Intelligent-Tiering storage class is the ideal storage class
3740
3805
  # for data with unknown, changing, or unpredictable access patterns,
3741
3806
  # independent of object size or retention period. If the size of an
3742
- # object is less than 128 KB, it is not eligible for auto-tiering.
3743
- # Smaller objects can be stored, but they are always charged at the
3744
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
3745
- # class.
3807
+ # object is less than 128 KB, it is not monitored and not eligible for
3808
+ # auto-tiering. Smaller objects can be stored, but they are always
3809
+ # charged at the Frequent Access tier rates in the S3
3810
+ # Intelligent-Tiering storage class.
3746
3811
  #
3747
3812
  # For more information, see [Storage class for automatically optimizing
3748
3813
  # frequently and infrequently accessed objects][1].
@@ -3981,9 +4046,9 @@ module Aws::S3
3981
4046
  # resp.rules[0].status #=> String, one of "Enabled", "Disabled"
3982
4047
  # resp.rules[0].transition.date #=> Time
3983
4048
  # resp.rules[0].transition.days #=> Integer
3984
- # resp.rules[0].transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
4049
+ # resp.rules[0].transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
3985
4050
  # resp.rules[0].noncurrent_version_transition.noncurrent_days #=> Integer
3986
- # resp.rules[0].noncurrent_version_transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
4051
+ # resp.rules[0].noncurrent_version_transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
3987
4052
  # resp.rules[0].noncurrent_version_transition.newer_noncurrent_versions #=> Integer
3988
4053
  # resp.rules[0].noncurrent_version_expiration.noncurrent_days #=> Integer
3989
4054
  # resp.rules[0].noncurrent_version_expiration.newer_noncurrent_versions #=> Integer
@@ -4116,10 +4181,10 @@ module Aws::S3
4116
4181
  # resp.rules[0].transitions #=> Array
4117
4182
  # resp.rules[0].transitions[0].date #=> Time
4118
4183
  # resp.rules[0].transitions[0].days #=> Integer
4119
- # resp.rules[0].transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
4184
+ # resp.rules[0].transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
4120
4185
  # resp.rules[0].noncurrent_version_transitions #=> Array
4121
4186
  # resp.rules[0].noncurrent_version_transitions[0].noncurrent_days #=> Integer
4122
- # resp.rules[0].noncurrent_version_transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
4187
+ # resp.rules[0].noncurrent_version_transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
4123
4188
  # resp.rules[0].noncurrent_version_transitions[0].newer_noncurrent_versions #=> Integer
4124
4189
  # resp.rules[0].noncurrent_version_expiration.noncurrent_days #=> Integer
4125
4190
  # resp.rules[0].noncurrent_version_expiration.newer_noncurrent_versions #=> Integer
@@ -4425,18 +4490,18 @@ module Aws::S3
4425
4490
  #
4426
4491
  # resp.topic_configuration.id #=> String
4427
4492
  # resp.topic_configuration.events #=> Array
4428
- # resp.topic_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4429
- # resp.topic_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4493
+ # resp.topic_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4494
+ # resp.topic_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4430
4495
  # resp.topic_configuration.topic #=> String
4431
4496
  # resp.queue_configuration.id #=> String
4432
- # resp.queue_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4497
+ # resp.queue_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4433
4498
  # resp.queue_configuration.events #=> Array
4434
- # resp.queue_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4499
+ # resp.queue_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4435
4500
  # resp.queue_configuration.queue #=> String
4436
4501
  # resp.cloud_function_configuration.id #=> String
4437
- # resp.cloud_function_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4502
+ # resp.cloud_function_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4438
4503
  # resp.cloud_function_configuration.events #=> Array
4439
- # resp.cloud_function_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4504
+ # resp.cloud_function_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4440
4505
  # resp.cloud_function_configuration.cloud_function #=> String
4441
4506
  # resp.cloud_function_configuration.invocation_role #=> String
4442
4507
  #
@@ -4490,6 +4555,7 @@ module Aws::S3
4490
4555
  # * {Types::NotificationConfiguration#topic_configurations #topic_configurations} => Array&lt;Types::TopicConfiguration&gt;
4491
4556
  # * {Types::NotificationConfiguration#queue_configurations #queue_configurations} => Array&lt;Types::QueueConfiguration&gt;
4492
4557
  # * {Types::NotificationConfiguration#lambda_function_configurations #lambda_function_configurations} => Array&lt;Types::LambdaFunctionConfiguration&gt;
4558
+ # * {Types::NotificationConfiguration#event_bridge_configuration #event_bridge_configuration} => Types::EventBridgeConfiguration
4493
4559
  #
4494
4560
  # @example Request syntax with placeholder values
4495
4561
  #
@@ -4504,7 +4570,7 @@ module Aws::S3
4504
4570
  # resp.topic_configurations[0].id #=> String
4505
4571
  # resp.topic_configurations[0].topic_arn #=> String
4506
4572
  # resp.topic_configurations[0].events #=> Array
4507
- # resp.topic_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4573
+ # resp.topic_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4508
4574
  # resp.topic_configurations[0].filter.key.filter_rules #=> Array
4509
4575
  # resp.topic_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix"
4510
4576
  # resp.topic_configurations[0].filter.key.filter_rules[0].value #=> String
@@ -4512,7 +4578,7 @@ module Aws::S3
4512
4578
  # resp.queue_configurations[0].id #=> String
4513
4579
  # resp.queue_configurations[0].queue_arn #=> String
4514
4580
  # resp.queue_configurations[0].events #=> Array
4515
- # resp.queue_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4581
+ # resp.queue_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4516
4582
  # resp.queue_configurations[0].filter.key.filter_rules #=> Array
4517
4583
  # resp.queue_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix"
4518
4584
  # resp.queue_configurations[0].filter.key.filter_rules[0].value #=> String
@@ -4520,7 +4586,7 @@ module Aws::S3
4520
4586
  # resp.lambda_function_configurations[0].id #=> String
4521
4587
  # resp.lambda_function_configurations[0].lambda_function_arn #=> String
4522
4588
  # resp.lambda_function_configurations[0].events #=> Array
4523
- # resp.lambda_function_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
4589
+ # resp.lambda_function_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
4524
4590
  # resp.lambda_function_configurations[0].filter.key.filter_rules #=> Array
4525
4591
  # resp.lambda_function_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix"
4526
4592
  # resp.lambda_function_configurations[0].filter.key.filter_rules[0].value #=> String
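The expanded event enums and the `event_bridge_configuration` member shown in the hunks above can also be written back with `put_bucket_notification_configuration`; a sketch only, assuming the request shape mirrors the output member, with placeholder bucket and queue ARN values.

```ruby
# Sketch only: enable EventBridge delivery and subscribe an SQS queue to two
# of the newly listed event types.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')
s3.put_bucket_notification_configuration(
  bucket: 'examplebucket',
  notification_configuration: {
    event_bridge_configuration: {},  # empty element turns on EventBridge delivery
    queue_configurations: [
      {
        queue_arn: 'arn:aws:sqs:us-east-1:123456789012:example-queue',  # placeholder ARN
        events: ['s3:ObjectTagging:*', 's3:LifecycleExpiration:Delete']
      }
    ]
  }
)
```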
@@ -4537,7 +4603,7 @@ module Aws::S3
4537
4603
  # Retrieves `OwnershipControls` for an Amazon S3 bucket. To use this
4538
4604
  # operation, you must have the `s3:GetBucketOwnershipControls`
4539
4605
  # permission. For more information about Amazon S3 permissions, see
4540
- # [Specifying Permissions in a Policy][1].
4606
+ # [Specifying permissions in a policy][1].
4541
4607
  #
4542
4608
  # For information about Amazon S3 Object Ownership, see [Using Object
4543
4609
  # Ownership][2].
@@ -4550,8 +4616,8 @@ module Aws::S3
4550
4616
  #
4551
4617
  #
4552
4618
  #
4553
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
4554
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
4619
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html
4620
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
4555
4621
  #
4556
4622
  # @option params [required, String] :bucket
4557
4623
  # The name of the Amazon S3 bucket whose `OwnershipControls` you want to
@@ -4576,7 +4642,7 @@ module Aws::S3
4576
4642
  # @example Response structure
4577
4643
  #
4578
4644
  # resp.ownership_controls.rules #=> Array
4579
- # resp.ownership_controls.rules[0].object_ownership #=> String, one of "BucketOwnerPreferred", "ObjectWriter"
4645
+ # resp.ownership_controls.rules[0].object_ownership #=> String, one of "BucketOwnerPreferred", "ObjectWriter", "BucketOwnerEnforced"
4580
4646
  #
4581
4647
  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls AWS API Documentation
4582
4648
  #
@@ -4825,7 +4891,7 @@ module Aws::S3
4825
4891
  # resp.replication_configuration.rules[0].existing_object_replication.status #=> String, one of "Enabled", "Disabled"
4826
4892
  # resp.replication_configuration.rules[0].destination.bucket #=> String
4827
4893
  # resp.replication_configuration.rules[0].destination.account #=> String
4828
- # resp.replication_configuration.rules[0].destination.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
4894
+ # resp.replication_configuration.rules[0].destination.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
4829
4895
  # resp.replication_configuration.rules[0].destination.access_control_translation.owner #=> String, one of "Destination"
4830
4896
  # resp.replication_configuration.rules[0].destination.encryption_configuration.replica_kms_key_id #=> String
4831
4897
  # resp.replication_configuration.rules[0].destination.replication_time.status #=> String, one of "Enabled", "Disabled"
@@ -5596,7 +5662,7 @@ module Aws::S3
5596
5662
  # resp.sse_customer_key_md5 #=> String
5597
5663
  # resp.ssekms_key_id #=> String
5598
5664
  # resp.bucket_key_enabled #=> Boolean
5599
- # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
5665
+ # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
5600
5666
  # resp.request_charged #=> String, one of "requester"
5601
5667
  # resp.replication_status #=> String, one of "COMPLETE", "PENDING", "FAILED", "REPLICA"
5602
5668
  # resp.parts_count #=> Integer
@@ -5625,19 +5691,28 @@ module Aws::S3
5625
5691
  # an object. To return ACL information about a different version, use
5626
5692
  # the versionId subresource.
5627
5693
  #
5694
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for S3 Object
5695
+ # Ownership, requests to read ACLs are still supported and return the
5696
+ # `bucket-owner-full-control` ACL with the owner being the account that
5697
+ # created the bucket. For more information, see [Controlling object
5698
+ # ownership and disabling ACLs][1] in the *Amazon S3 User Guide*.
5699
+ #
5700
+ # </note>
5701
+ #
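Per the note above, ACL reads keep working on owner-enforced buckets; a small sketch of what the response then reports, with placeholder bucket and key names.

```ruby
# Sketch only: the ACL read succeeds and shows full control for the owner.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')
resp = s3.get_object_acl(bucket: 'owner-enforced-bucket', key: 'HappyFace.jpg')
resp.grants.each do |grant|
  puts "#{grant.grantee.type}: #{grant.permission}"  # expect FULL_CONTROL for the bucket owner
end
```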
5628
5702
  # The following operations are related to `GetObjectAcl`\:
5629
5703
  #
5630
- # * [GetObject][1]
5704
+ # * [GetObject][2]
5631
5705
  #
5632
- # * [DeleteObject][2]
5706
+ # * [DeleteObject][3]
5633
5707
  #
5634
- # * [PutObject][3]
5708
+ # * [PutObject][4]
5635
5709
  #
5636
5710
  #
5637
5711
  #
5638
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
5639
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
5640
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
5712
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
5713
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
5714
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
5715
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
5641
5716
  #
5642
5717
  # @option params [required, String] :bucket
5643
5718
  # The bucket name that contains the object for which to get the ACL
@@ -6058,49 +6133,49 @@ module Aws::S3
6058
6133
  # * {Types::GetObjectTaggingOutput#tag_set #tag_set} => Array&lt;Types::Tag&gt;
6059
6134
  #
6060
6135
  #
6061
- # @example Example: To retrieve tag set of an object
6136
+ # @example Example: To retrieve tag set of a specific object version
6062
6137
  #
6063
- # # The following example retrieves tag set of an object.
6138
+ # # The following example retrieves tag set of an object. The request specifies object version.
6064
6139
  #
6065
6140
  # resp = client.get_object_tagging({
6066
6141
  # bucket: "examplebucket",
6067
- # key: "HappyFace.jpg",
6142
+ # key: "exampleobject",
6143
+ # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
6068
6144
  # })
6069
6145
  #
6070
6146
  # resp.to_h outputs the following:
6071
6147
  # {
6072
6148
  # tag_set: [
6073
6149
  # {
6074
- # key: "Key4",
6075
- # value: "Value4",
6076
- # },
6077
- # {
6078
- # key: "Key3",
6079
- # value: "Value3",
6150
+ # key: "Key1",
6151
+ # value: "Value1",
6080
6152
  # },
6081
6153
  # ],
6082
- # version_id: "null",
6154
+ # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
6083
6155
  # }
6084
6156
  #
6085
- # @example Example: To retrieve tag set of a specific object version
6157
+ # @example Example: To retrieve tag set of an object
6086
6158
  #
6087
- # # The following example retrieves tag set of an object. The request specifies object version.
6159
+ # # The following example retrieves tag set of an object.
6088
6160
  #
6089
6161
  # resp = client.get_object_tagging({
6090
6162
  # bucket: "examplebucket",
6091
- # key: "exampleobject",
6092
- # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
6163
+ # key: "HappyFace.jpg",
6093
6164
  # })
6094
6165
  #
6095
6166
  # resp.to_h outputs the following:
6096
6167
  # {
6097
6168
  # tag_set: [
6098
6169
  # {
6099
- # key: "Key1",
6100
- # value: "Value1",
6170
+ # key: "Key4",
6171
+ # value: "Value4",
6172
+ # },
6173
+ # {
6174
+ # key: "Key3",
6175
+ # value: "Value3",
6101
6176
  # },
6102
6177
  # ],
6103
- # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
6178
+ # version_id: "null",
6104
6179
  # }
6105
6180
  #
6106
6181
  # @example Request syntax with placeholder values
@@ -6677,7 +6752,7 @@ module Aws::S3
6677
6752
  # resp.sse_customer_key_md5 #=> String
6678
6753
  # resp.ssekms_key_id #=> String
6679
6754
  # resp.bucket_key_enabled #=> Boolean
6680
- # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
6755
+ # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
6681
6756
  # resp.request_charged #=> String, one of "requester"
6682
6757
  # resp.replication_status #=> String, one of "COMPLETE", "PENDING", "FAILED", "REPLICA"
6683
6758
  # resp.parts_count #=> Integer
@@ -6804,18 +6879,17 @@ module Aws::S3
6804
6879
  # storage costs by automatically moving data to the most cost-effective
6805
6880
  # storage access tier, without performance impact or operational
6806
6881
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
6807
- # two low latency and high throughput access tiers. For data that can be
6808
- # accessed asynchronously, you can choose to activate automatic
6809
- # archiving capabilities within the S3 Intelligent-Tiering storage
6810
- # class.
6882
+ # three low latency and high throughput access tiers. To get the lowest
6883
+ # storage cost on data that can be accessed in minutes to hours, you can
6884
+ # choose to activate additional archiving capabilities.
6811
6885
  #
6812
6886
  # The S3 Intelligent-Tiering storage class is the ideal storage class
6813
6887
  # for data with unknown, changing, or unpredictable access patterns,
6814
6888
  # independent of object size or retention period. If the size of an
6815
- # object is less than 128 KB, it is not eligible for auto-tiering.
6816
- # Smaller objects can be stored, but they are always charged at the
6817
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
6818
- # class.
6889
+ # object is less than 128 KB, it is not monitored and not eligible for
6890
+ # auto-tiering. Smaller objects can be stored, but they are always
6891
+ # charged at the Frequent Access tier rates in the S3
6892
+ # Intelligent-Tiering storage class.
6819
6893
  #
6820
6894
  # For more information, see [Storage class for automatically optimizing
6821
6895
  # frequently and infrequently accessed objects][1].
@@ -7408,7 +7482,7 @@ module Aws::S3
7408
7482
  # resp.uploads[0].upload_id #=> String
7409
7483
  # resp.uploads[0].key #=> String
7410
7484
  # resp.uploads[0].initiated #=> Time
7411
- # resp.uploads[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
7485
+ # resp.uploads[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
7412
7486
  # resp.uploads[0].owner.display_name #=> String
7413
7487
  # resp.uploads[0].owner.id #=> String
7414
7488
  # resp.uploads[0].initiator.id #=> String
@@ -7788,7 +7862,7 @@ module Aws::S3
7788
7862
  # resp.contents[0].last_modified #=> Time
7789
7863
  # resp.contents[0].etag #=> String
7790
7864
  # resp.contents[0].size #=> Integer
7791
- # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS"
7865
+ # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
7792
7866
  # resp.contents[0].owner.display_name #=> String
7793
7867
  # resp.contents[0].owner.id #=> String
7794
7868
  # resp.name #=> String
@@ -7993,7 +8067,7 @@ module Aws::S3
7993
8067
  # resp.contents[0].last_modified #=> Time
7994
8068
  # resp.contents[0].etag #=> String
7995
8069
  # resp.contents[0].size #=> Integer
7996
- # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS"
8070
+ # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
7997
8071
  # resp.contents[0].owner.display_name #=> String
7998
8072
  # resp.contents[0].owner.id #=> String
7999
8073
  # resp.name #=> String
@@ -8202,7 +8276,7 @@ module Aws::S3
8202
8276
  # resp.initiator.display_name #=> String
8203
8277
  # resp.owner.display_name #=> String
8204
8278
  # resp.owner.id #=> String
8205
- # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
8279
+ # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
8206
8280
  # resp.request_charged #=> String, one of "requester"
8207
8281
  #
8208
8282
  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts AWS API Documentation
@@ -8313,6 +8387,14 @@ module Aws::S3
8313
8387
  # you have an existing application that updates a bucket ACL using the
8314
8388
  # request body, then you can continue to use that approach.
8315
8389
  #
8390
+ # If your bucket uses the bucket owner enforced setting for S3 Object
8391
+ # Ownership, ACLs are disabled and no longer affect permissions. You
8392
+ # must use policies to grant access to your bucket and the objects in
8393
+ # it. Requests to set ACLs or update ACLs fail and return the
8394
+ # `AccessControlListNotSupported` error code. Requests to read ACLs are
8395
+ # still supported. For more information, see [Controlling object
8396
+ # ownership][2] in the *Amazon S3 User Guide*.
8397
+ #
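A hedged sketch of the failure mode described above when an ACL write hits an owner-enforced bucket; the bucket name is a placeholder, and the rescue matches the generic service error rather than a specific generated class.

```ruby
# Sketch only: ACL writes are rejected on owner-enforced buckets; grant
# access with a bucket policy instead.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')
begin
  s3.put_bucket_acl(bucket: 'owner-enforced-bucket', acl: 'public-read')
rescue Aws::S3::Errors::ServiceError => e
  # The documented error code for this case is AccessControlListNotSupported.
  warn "ACL write rejected: #{e.code} - #{e.message}"
end
```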
8316
8398
  # **Access Permissions**
8317
8399
  #
8318
8400
  # You can set access permissions using one of the following methods:
@@ -8322,7 +8404,7 @@ module Aws::S3
8322
8404
  # canned ACL has a predefined set of grantees and permissions. Specify
8323
8405
  # the canned ACL name as the value of `x-amz-acl`. If you use this
8324
8406
  # header, you cannot use other access control-specific headers in your
8325
- # request. For more information, see [Canned ACL][2].
8407
+ # request. For more information, see [Canned ACL][3].
8326
8408
  #
8327
8409
  # * Specify access permissions explicitly with the `x-amz-grant-read`,
8328
8410
  # `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and
@@ -8332,7 +8414,7 @@ module Aws::S3
8332
8414
  # permission. If you use these ACL-specific headers, you cannot use
8333
8415
  # the `x-amz-acl` header to set a canned ACL. These parameters map to
8334
8416
  # the set of permissions that Amazon S3 supports in an ACL. For more
8335
- # information, see [Access Control List (ACL) Overview][3].
8417
+ # information, see [Access Control List (ACL) Overview][4].
8336
8418
  #
8337
8419
  # You specify each grantee as a type=value pair, where the type is one
8338
8420
  # of the following:
@@ -8365,7 +8447,7 @@ module Aws::S3
8365
8447
  # * South America (São Paulo)
8366
8448
  #
8367
8449
  # For a list of all the Amazon S3 supported Regions and endpoints,
8368
- # see [Regions and Endpoints][4] in the Amazon Web Services General
8450
+ # see [Regions and Endpoints][5] in the Amazon Web Services General
8369
8451
  # Reference.
8370
8452
  #
8371
8453
  # </note>
@@ -8428,28 +8510,29 @@ module Aws::S3
8428
8510
  # * South America (São Paulo)
8429
8511
  #
8430
8512
  # For a list of all the Amazon S3 supported Regions and endpoints, see
8431
- # [Regions and Endpoints][4] in the Amazon Web Services General
8513
+ # [Regions and Endpoints][5] in the Amazon Web Services General
8432
8514
  # Reference.
8433
8515
  #
8434
8516
  # </note>
8435
8517
  #
8436
8518
  # **Related Resources**
8437
8519
  #
8438
- # * [CreateBucket][5]
8520
+ # * [CreateBucket][6]
8439
8521
  #
8440
- # * [DeleteBucket][6]
8522
+ # * [DeleteBucket][7]
8441
8523
  #
8442
- # * [GetObjectAcl][7]
8524
+ # * [GetObjectAcl][8]
8443
8525
  #
8444
8526
  #
8445
8527
  #
8446
8528
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
8447
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
8448
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
8449
- # [4]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
8450
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
8451
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
8452
- # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
8529
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
8530
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
8531
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
8532
+ # [5]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
8533
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
8534
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
8535
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
8453
8536
  #
8454
8537
  # @option params [String] :acl
8455
8538
  # The canned ACL to apply to the bucket.
@@ -8949,18 +9032,17 @@ module Aws::S3
8949
9032
  # storage costs by automatically moving data to the most cost-effective
8950
9033
  # storage access tier, without performance impact or operational
8951
9034
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
8952
- # two low latency and high throughput access tiers. For data that can be
8953
- # accessed asynchronously, you can choose to activate automatic
8954
- # archiving capabilities within the S3 Intelligent-Tiering storage
8955
- # class.
9035
+ # three low latency and high throughput access tiers. To get the lowest
9036
+ # storage cost on data that can be accessed in minutes to hours, you can
9037
+ # choose to activate additional archiving capabilities.
8956
9038
  #
8957
9039
  # The S3 Intelligent-Tiering storage class is the ideal storage class
8958
9040
  # for data with unknown, changing, or unpredictable access patterns,
8959
9041
  # independent of object size or retention period. If the size of an
8960
- # object is less than 128 KB, it is not eligible for auto-tiering.
8961
- # Smaller objects can be stored, but they are always charged at the
8962
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
8963
- # class.
9042
+ # object is less than 128 KB, it is not monitored and not eligible for
9043
+ # auto-tiering. Smaller objects can be stored, but they are always
9044
+ # charged at the Frequent Access tier rates in the S3
9045
+ # Intelligent-Tiering storage class.
8964
9046
  #
8965
9047
  # For more information, see [Storage class for automatically optimizing
8966
9048
  # frequently and infrequently accessed objects][1].
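To show how the optional archiving capabilities mentioned above can be activated, here is a hedged sketch in which the bucket name and configuration id are placeholders; it moves objects that have not been accessed for 90 days into the Archive Access tier:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new
    client.put_bucket_intelligent_tiering_configuration(
      bucket: "examplebucket",
      id: "ExampleTieringConfig",
      intelligent_tiering_configuration: {
        id: "ExampleTieringConfig",
        status: "Enabled",
        tierings: [
          # Objects not accessed for 90 days move to the Archive Access tier.
          { days: 90, access_tier: "ARCHIVE_ACCESS" }
        ]
      }
    )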
@@ -9297,11 +9379,11 @@ module Aws::S3
9297
9379
  # transition: {
9298
9380
  # date: Time.now,
9299
9381
  # days: 1,
9300
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9382
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9301
9383
  # },
9302
9384
  # noncurrent_version_transition: {
9303
9385
  # noncurrent_days: 1,
9304
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9386
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9305
9387
  # newer_noncurrent_versions: 1,
9306
9388
  # },
9307
9389
  # noncurrent_version_expiration: {
@@ -9487,13 +9569,13 @@ module Aws::S3
9487
9569
  # {
9488
9570
  # date: Time.now,
9489
9571
  # days: 1,
9490
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9572
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9491
9573
  # },
9492
9574
  # ],
9493
9575
  # noncurrent_version_transitions: [
9494
9576
  # {
9495
9577
  # noncurrent_days: 1,
9496
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9578
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9497
9579
  # newer_noncurrent_versions: 1,
9498
9580
  # },
9499
9581
  # ],
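The request-syntax changes above add `GLACIER_IR` as an accepted transition storage class. A minimal lifecycle sketch using it, with a placeholder bucket name, rule id, and prefix, might look like:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new
    client.put_bucket_lifecycle_configuration(
      bucket: "examplebucket",
      lifecycle_configuration: {
        rules: [
          {
            id: "MoveLogsToGlacierIR",
            status: "Enabled",
            filter: { prefix: "logs/" },
            # Transition current versions to Glacier Instant Retrieval after 30 days.
            transitions: [{ days: 30, storage_class: "GLACIER_IR" }]
          }
        ]
      }
    )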
@@ -9529,6 +9611,12 @@ module Aws::S3
9529
9611
  # The `Permissions` request element specifies the kind of access the
9530
9612
  # grantee has to the logs.
9531
9613
  #
9614
+ # If the target bucket for log delivery uses the bucket owner enforced
9615
+ # setting for S3 Object Ownership, you can't use the `Grantee` request
9616
+ # element to grant access to others. Permissions can only be granted
9617
+ # using policies. For more information, see [Permissions for server
9618
+ # access log delivery][1] in the *Amazon S3 User Guide*.
9619
+ #
9532
9620
  # **Grantee Values**
9533
9621
  #
9534
9622
  # You can specify the person (grantee) to whom you're assigning access
@@ -9563,29 +9651,30 @@ module Aws::S3
9563
9651
  # />`
9564
9652
  #
9565
9653
  # For more information about server access logging, see [Server Access
9566
- # Logging][1].
9654
+ # Logging][2] in the *Amazon S3 User Guide*.
9567
9655
  #
9568
- # For more information about creating a bucket, see [CreateBucket][2].
9656
+ # For more information about creating a bucket, see [CreateBucket][3].
9569
9657
  # For more information about returning the logging status of a bucket,
9570
- # see [GetBucketLogging][3].
9658
+ # see [GetBucketLogging][4].
9571
9659
  #
9572
9660
  # The following operations are related to `PutBucketLogging`\:
9573
9661
  #
9574
- # * [PutObject][4]
9662
+ # * [PutObject][5]
9575
9663
  #
9576
- # * [DeleteBucket][5]
9664
+ # * [DeleteBucket][6]
9577
9665
  #
9578
- # * [CreateBucket][2]
9666
+ # * [CreateBucket][3]
9579
9667
  #
9580
- # * [GetBucketLogging][3]
9668
+ # * [GetBucketLogging][4]
9581
9669
  #
9582
9670
  #
9583
9671
  #
9584
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html
9585
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
9586
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html
9587
- # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
9588
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
9672
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
9673
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html
9674
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
9675
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html
9676
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
9677
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
9589
9678
  #
9590
9679
  # @option params [required, String] :bucket
9591
9680
  # The name of the bucket for which to set the logging parameters.
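For reference, a minimal `put_bucket_logging` sketch, with placeholder bucket names and prefix, that enables delivery to a target bucket without any `Grantee` elements; this is the shape required when the target bucket uses the bucket owner enforced setting, since permissions must then be granted through policies rather than grants:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new
    client.put_bucket_logging(
      bucket: "examplebucket",
      bucket_logging_status: {
        logging_enabled: {
          # The target bucket must grant the logging service access via a
          # bucket policy when ACLs are disabled.
          target_bucket: "examplebucket-logs",
          target_prefix: "access-logs/"
        }
      }
    )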
@@ -9800,20 +9889,20 @@ module Aws::S3
9800
9889
  # notification_configuration: { # required
9801
9890
  # topic_configuration: {
9802
9891
  # id: "NotificationId",
9803
- # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9804
- # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9892
+ # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9893
+ # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9805
9894
  # topic: "TopicArn",
9806
9895
  # },
9807
9896
  # queue_configuration: {
9808
9897
  # id: "NotificationId",
9809
- # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9810
- # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9898
+ # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9899
+ # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9811
9900
  # queue: "QueueArn",
9812
9901
  # },
9813
9902
  # cloud_function_configuration: {
9814
9903
  # id: "NotificationId",
9815
- # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9816
- # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9904
+ # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9905
+ # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9817
9906
  # cloud_function: "CloudFunction",
9818
9907
  # invocation_role: "CloudFunctionInvocationRole",
9819
9908
  # },
@@ -9910,6 +9999,10 @@ module Aws::S3
9910
9999
  # a different account, the request will fail with an HTTP `403 (Access
9911
10000
  # Denied)` error.
9912
10001
  #
10002
+ # @option params [Boolean] :skip_destination_validation
10003
+ # Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations.
10004
+ # True or false value.
10005
+ #
9913
10006
  # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
9914
10007
  #
9915
10008
  #
@@ -9940,7 +10033,7 @@ module Aws::S3
9940
10033
  # {
9941
10034
  # id: "NotificationId",
9942
10035
  # topic_arn: "TopicArn", # required
9943
- # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
10036
+ # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9944
10037
  # filter: {
9945
10038
  # key: {
9946
10039
  # filter_rules: [
@@ -9957,7 +10050,7 @@ module Aws::S3
9957
10050
  # {
9958
10051
  # id: "NotificationId",
9959
10052
  # queue_arn: "QueueArn", # required
9960
- # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
10053
+ # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9961
10054
  # filter: {
9962
10055
  # key: {
9963
10056
  # filter_rules: [
@@ -9974,7 +10067,7 @@ module Aws::S3
9974
10067
  # {
9975
10068
  # id: "NotificationId",
9976
10069
  # lambda_function_arn: "LambdaFunctionArn", # required
9977
- # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
10070
+ # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9978
10071
  # filter: {
9979
10072
  # key: {
9980
10073
  # filter_rules: [
@@ -9987,8 +10080,11 @@ module Aws::S3
9987
10080
  # },
9988
10081
  # },
9989
10082
  # ],
10083
+ # event_bridge_configuration: {
10084
+ # },
9990
10085
  # },
9991
10086
  # expected_bucket_owner: "AccountId",
10087
+ # skip_destination_validation: false,
9992
10088
  # })
9993
10089
  #
9994
10090
  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration AWS API Documentation
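Tying together the new `event_bridge_configuration` element and the new `skip_destination_validation` parameter shown in the request syntax above, a hedged sketch with a placeholder bucket name could be:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new
    client.put_bucket_notification_configuration(
      bucket: "examplebucket",
      notification_configuration: {
        # An empty element is all that is needed to route bucket events to Amazon EventBridge.
        event_bridge_configuration: {}
      },
      # Skip validation of SQS, SNS, and Lambda destinations for this request.
      skip_destination_validation: true
    )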
@@ -10003,10 +10099,10 @@ module Aws::S3
10003
10099
  # Creates or modifies `OwnershipControls` for an Amazon S3 bucket. To
10004
10100
  # use this operation, you must have the `s3:PutBucketOwnershipControls`
10005
10101
  # permission. For more information about Amazon S3 permissions, see
10006
- # [Specifying Permissions in a Policy][1].
10102
+ # [Specifying permissions in a policy][1].
10007
10103
  #
10008
- # For information about Amazon S3 Object Ownership, see [Using Object
10009
- # Ownership][2].
10104
+ # For information about Amazon S3 Object Ownership, see [Using object
10105
+ # ownership][2].
10010
10106
  #
10011
10107
  # The following operations are related to `PutBucketOwnershipControls`\:
10012
10108
  #
@@ -10016,8 +10112,8 @@ module Aws::S3
10016
10112
  #
10017
10113
  #
10018
10114
  #
10019
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
10020
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
10115
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html
10116
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html
10021
10117
  #
10022
10118
  # @option params [required, String] :bucket
10023
10119
  # The name of the Amazon S3 bucket whose `OwnershipControls` you want to
@@ -10036,8 +10132,8 @@ module Aws::S3
10036
10132
  # Denied)` error.
10037
10133
  #
10038
10134
  # @option params [required, Types::OwnershipControls] :ownership_controls
10039
- # The `OwnershipControls` (BucketOwnerPreferred or ObjectWriter) that
10040
- # you want to apply to this Amazon S3 bucket.
10135
+ # The `OwnershipControls` (BucketOwnerEnforced, BucketOwnerPreferred, or
10136
+ # ObjectWriter) that you want to apply to this Amazon S3 bucket.
10041
10137
  #
10042
10138
  # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
10043
10139
  #
@@ -10050,7 +10146,7 @@ module Aws::S3
10050
10146
  # ownership_controls: { # required
10051
10147
  # rules: [ # required
10052
10148
  # {
10053
- # object_ownership: "BucketOwnerPreferred", # required, accepts BucketOwnerPreferred, ObjectWriter
10149
+ # object_ownership: "BucketOwnerPreferred", # required, accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced
10054
10150
  # },
10055
10151
  # ],
10056
10152
  # },
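To apply the newly accepted `BucketOwnerEnforced` value shown above, a minimal sketch with a placeholder bucket name:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new
    client.put_bucket_ownership_controls(
      bucket: "examplebucket",
      ownership_controls: {
        rules: [
          # Disables ACLs; the bucket owner owns every object in the bucket.
          { object_ownership: "BucketOwnerEnforced" }
        ]
      }
    )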
@@ -10323,7 +10419,7 @@ module Aws::S3
10323
10419
  # destination: { # required
10324
10420
  # bucket: "BucketName", # required
10325
10421
  # account: "AccountId",
10326
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
10422
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
10327
10423
  # access_control_translation: {
10328
10424
  # owner: "Destination", # required, accepts Destination
10329
10425
  # },
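The replication request syntax above now also accepts `GLACIER_IR` as a destination storage class. A hedged sketch follows; the role ARN, bucket names, and rule settings are illustrative placeholders only:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new
    client.put_bucket_replication(
      bucket: "examplebucket",
      replication_configuration: {
        role: "arn:aws:iam::123456789012:role/example-replication-role",
        rules: [
          {
            id: "ReplicateToGlacierIR",
            status: "Enabled",
            priority: 1,
            filter: {},                                        # apply to the whole bucket
            delete_marker_replication: { status: "Disabled" },
            destination: {
              bucket: "arn:aws:s3:::examplebucket-replica",
              storage_class: "GLACIER_IR"                      # store replicas in Glacier Instant Retrieval
            }
          }
        ]
      }
    )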
@@ -10912,13 +11008,33 @@ module Aws::S3
10912
11008
  # information, see [Access Control List (ACL) Overview][4] and [Managing
10913
11009
  # ACLs Using the REST API][5].
10914
11010
  #
11011
+ # If the bucket that you're uploading objects to uses the bucket owner
11012
+ # enforced setting for S3 Object Ownership, ACLs are disabled and no
11013
+ # longer affect permissions. Buckets that use this setting only accept
11014
+ # PUT requests that don't specify an ACL or PUT requests that specify
11015
+ # bucket owner full control ACLs, such as the
11016
+ # `bucket-owner-full-control` canned ACL or an equivalent form of this
11017
+ # ACL expressed in the XML format. PUT requests that contain other ACLs
11018
+ # (for example, custom grants to certain Amazon Web Services accounts)
11019
+ # fail and return a `400` error with the error code
11020
+ # `AccessControlListNotSupported`.
11021
+ #
11022
+ # For more information, see [ Controlling ownership of objects and
11023
+ # disabling ACLs][6] in the *Amazon S3 User Guide*.
11024
+ #
11025
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for Object
11026
+ # Ownership, all objects written to the bucket by any account will be
11027
+ # owned by the bucket owner.
11028
+ #
11029
+ # </note>
11030
+ #
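Since buckets with the bucket owner enforced setting only accept requests that omit an ACL or specify bucket owner full control, a hedged `put_object` sketch compatible with such a bucket, with placeholder bucket, key, and body values, is:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new
    client.put_object(
      bucket: "examplebucket",
      key: "exampleobject",
      body: "filetoupload",
      # The only canned ACL accepted when Object Ownership is bucket owner
      # enforced; omitting :acl entirely also works.
      acl: "bucket-owner-full-control"
    )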
10915
11031
  # **Storage Class Options**
10916
11032
  #
10917
11033
  # By default, Amazon S3 uses the STANDARD Storage Class to store newly
10918
11034
  # created objects. The STANDARD storage class provides high durability
10919
11035
  # and high availability. Depending on performance needs, you can specify
10920
11036
  # a different Storage Class. Amazon S3 on Outposts only uses the
10921
- # OUTPOSTS Storage Class. For more information, see [Storage Classes][6]
11037
+ # OUTPOSTS Storage Class. For more information, see [Storage Classes][7]
10922
11038
  # in the *Amazon S3 User Guide*.
10923
11039
  #
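The `storage_class` value for this operation now also accepts `GLACIER_IR` (see the request syntax further below). A minimal sketch, with placeholder names, that selects it at upload time:

    require "aws-sdk-s3"

    client = Aws::S3::Client.new
    client.put_object(
      bucket: "examplebucket",
      key: "archive/report.csv",
      body: "filetoupload",
      # Store the object in Glacier Instant Retrieval instead of the default STANDARD class.
      storage_class: "GLACIER_IR"
    )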
10924
11040
  # **Versioning**
@@ -10930,14 +11046,14 @@ module Aws::S3
10930
11046
  # object simultaneously, it stores all of the objects.
10931
11047
  #
10932
11048
  # For more information about versioning, see [Adding Objects to
10933
- # Versioning Enabled Buckets][7]. For information about returning the
10934
- # versioning state of a bucket, see [GetBucketVersioning][8].
11049
+ # Versioning Enabled Buckets][8]. For information about returning the
11050
+ # versioning state of a bucket, see [GetBucketVersioning][9].
10935
11051
  #
10936
11052
  # **Related Resources**
10937
11053
  #
10938
- # * [CopyObject][9]
11054
+ # * [CopyObject][10]
10939
11055
  #
10940
- # * [DeleteObject][10]
11056
+ # * [DeleteObject][11]
10941
11057
  #
10942
11058
  #
10943
11059
  #
@@ -10946,11 +11062,12 @@ module Aws::S3
10946
11062
  # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
10947
11063
  # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
10948
11064
  # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html
10949
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
10950
- # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html
10951
- # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
10952
- # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
10953
- # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
11065
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
11066
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
11067
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html
11068
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
11069
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
11070
+ # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
10954
11071
  #
10955
11072
  # @option params [String] :acl
10956
11073
  # The canned ACL to apply to the object. For more information, see
@@ -11220,134 +11337,134 @@ module Aws::S3
11220
11337
  # * {Types::PutObjectOutput#request_charged #request_charged} => String
11221
11338
  #
11222
11339
  #
11223
- # @example Example: To upload an object and specify server-side encryption and object tags
11340
+ # @example Example: To upload an object (specify optional headers)
11224
11341
  #
11225
- # # The following example uploads and object. The request specifies the optional server-side encryption option. The request
11226
- # # also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.
11342
+ # # The following example uploads an object. The request specifies optional request headers that direct S3 to use a specific
11343
+ # # storage class and server-side encryption.
11227
11344
  #
11228
11345
  # resp = client.put_object({
11229
- # body: "filetoupload",
11346
+ # body: "HappyFace.jpg",
11230
11347
  # bucket: "examplebucket",
11231
- # key: "exampleobject",
11348
+ # key: "HappyFace.jpg",
11232
11349
  # server_side_encryption: "AES256",
11233
- # tagging: "key1=value1&key2=value2",
11350
+ # storage_class: "STANDARD_IA",
11234
11351
  # })
11235
11352
  #
11236
11353
  # resp.to_h outputs the following:
11237
11354
  # {
11238
11355
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11239
11356
  # server_side_encryption: "AES256",
11240
- # version_id: "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt",
11357
+ # version_id: "CG612hodqujkf8FaaNfp8U..FIhLROcp",
11241
11358
  # }
11242
11359
  #
11243
- # @example Example: To upload an object and specify optional tags
11360
+ # @example Example: To upload an object and specify canned ACL.
11244
11361
  #
11245
- # # The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore
11246
- # # S3 returns version ID of the newly created object.
11362
+ # # The following example uploads an object. The request specifies an optional canned ACL (access control list) to grant READ
11363
+ # # access to authenticated users. If the bucket is versioning-enabled, S3 returns a version ID in the response.
11247
11364
  #
11248
11365
  # resp = client.put_object({
11249
- # body: "c:\\HappyFace.jpg",
11366
+ # acl: "authenticated-read",
11367
+ # body: "filetoupload",
11250
11368
  # bucket: "examplebucket",
11251
- # key: "HappyFace.jpg",
11252
- # tagging: "key1=value1&key2=value2",
11369
+ # key: "exampleobject",
11253
11370
  # })
11254
11371
  #
11255
11372
  # resp.to_h outputs the following:
11256
11373
  # {
11257
11374
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11258
- # version_id: "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a",
11375
+ # version_id: "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr",
11259
11376
  # }
11260
11377
  #
11261
- # @example Example: To upload object and specify user-defined metadata
11378
+ # @example Example: To upload an object
11262
11379
  #
11263
- # # The following example creates an object. The request also specifies optional metadata. If the bucket is versioning
11264
- # # enabled, S3 returns version ID in response.
11380
+ # # The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file
11381
+ # # syntax. S3 returns VersionId of the newly created object.
11265
11382
  #
11266
11383
  # resp = client.put_object({
11267
- # body: "filetoupload",
11384
+ # body: "HappyFace.jpg",
11268
11385
  # bucket: "examplebucket",
11269
- # key: "exampleobject",
11270
- # metadata: {
11271
- # "metadata1" => "value1",
11272
- # "metadata2" => "value2",
11273
- # },
11386
+ # key: "HappyFace.jpg",
11274
11387
  # })
11275
11388
  #
11276
11389
  # resp.to_h outputs the following:
11277
11390
  # {
11278
11391
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11279
- # version_id: "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0",
11392
+ # version_id: "tpf3zF08nBplQK1XLOefGskR7mGDwcDk",
11280
11393
  # }
11281
11394
  #
11282
- # @example Example: To create an object.
11395
+ # @example Example: To upload an object and specify optional tags
11283
11396
  #
11284
- # # The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.
11397
+ # # The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore
11398
+ # # S3 returns version ID of the newly created object.
11285
11399
  #
11286
11400
  # resp = client.put_object({
11287
- # body: "filetoupload",
11401
+ # body: "c:\\HappyFace.jpg",
11288
11402
  # bucket: "examplebucket",
11289
- # key: "objectkey",
11403
+ # key: "HappyFace.jpg",
11404
+ # tagging: "key1=value1&key2=value2",
11290
11405
  # })
11291
11406
  #
11292
11407
  # resp.to_h outputs the following:
11293
11408
  # {
11294
11409
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11295
- # version_id: "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ",
11410
+ # version_id: "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a",
11296
11411
  # }
11297
11412
  #
11298
- # @example Example: To upload an object and specify canned ACL.
11413
+ # @example Example: To upload object and specify user-defined metadata
11299
11414
  #
11300
- # # The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ
11301
- # # access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.
11415
+ # # The following example creates an object. The request also specifies optional metadata. If the bucket is versioning
11416
+ # # enabled, S3 returns version ID in response.
11302
11417
  #
11303
11418
  # resp = client.put_object({
11304
- # acl: "authenticated-read",
11305
11419
  # body: "filetoupload",
11306
11420
  # bucket: "examplebucket",
11307
11421
  # key: "exampleobject",
11422
+ # metadata: {
11423
+ # "metadata1" => "value1",
11424
+ # "metadata2" => "value2",
11425
+ # },
11308
11426
  # })
11309
11427
  #
11310
11428
  # resp.to_h outputs the following:
11311
11429
  # {
11312
11430
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11313
- # version_id: "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr",
11431
+ # version_id: "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0",
11314
11432
  # }
11315
11433
  #
11316
- # @example Example: To upload an object (specify optional headers)
11434
+ # @example Example: To upload an object and specify server-side encryption and object tags
11317
11435
  #
11318
- # # The following example uploads an object. The request specifies optional request headers to directs S3 to use specific
11319
- # # storage class and use server-side encryption.
11436
+ # # The following example uploads an object. The request specifies the optional server-side encryption option. The request
11437
+ # # also specifies optional object tags. If the bucket is versioning-enabled, S3 returns a version ID in the response.
11320
11438
  #
11321
11439
  # resp = client.put_object({
11322
- # body: "HappyFace.jpg",
11440
+ # body: "filetoupload",
11323
11441
  # bucket: "examplebucket",
11324
- # key: "HappyFace.jpg",
11442
+ # key: "exampleobject",
11325
11443
  # server_side_encryption: "AES256",
11326
- # storage_class: "STANDARD_IA",
11444
+ # tagging: "key1=value1&key2=value2",
11327
11445
  # })
11328
11446
  #
11329
11447
  # resp.to_h outputs the following:
11330
11448
  # {
11331
11449
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11332
11450
  # server_side_encryption: "AES256",
11333
- # version_id: "CG612hodqujkf8FaaNfp8U..FIhLROcp",
11451
+ # version_id: "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt",
11334
11452
  # }
11335
11453
  #
11336
- # @example Example: To upload an object
11454
+ # @example Example: To create an object.
11337
11455
  #
11338
- # # The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file
11339
- # # syntax. S3 returns VersionId of the newly created object.
11456
+ # # The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.
11340
11457
  #
11341
11458
  # resp = client.put_object({
11342
- # body: "HappyFace.jpg",
11459
+ # body: "filetoupload",
11343
11460
  # bucket: "examplebucket",
11344
- # key: "HappyFace.jpg",
11461
+ # key: "objectkey",
11345
11462
  # })
11346
11463
  #
11347
11464
  # resp.to_h outputs the following:
11348
11465
  # {
11349
11466
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11350
- # version_id: "tpf3zF08nBplQK1XLOefGskR7mGDwcDk",
11467
+ # version_id: "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ",
11351
11468
  # }
11352
11469
  #
11353
11470
  # @example Streaming a file from disk
@@ -11379,7 +11496,7 @@ module Aws::S3
11379
11496
  # "MetadataKey" => "MetadataValue",
11380
11497
  # },
11381
11498
  # server_side_encryption: "AES256", # accepts AES256, aws:kms
11382
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
11499
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
11383
11500
  # website_redirect_location: "WebsiteRedirectLocation",
11384
11501
  # sse_customer_algorithm: "SSECustomerAlgorithm",
11385
11502
  # sse_customer_key: "SSECustomerKey",
@@ -11432,6 +11549,14 @@ module Aws::S3
11432
11549
  # information, see [Access Control List (ACL) Overview][2] in the
11433
11550
  # *Amazon S3 User Guide*.
11434
11551
  #
11552
+ # If your bucket uses the bucket owner enforced setting for S3 Object
11553
+ # Ownership, ACLs are disabled and no longer affect permissions. You
11554
+ # must use policies to grant access to your bucket and the objects in
11555
+ # it. Requests to set ACLs or update ACLs fail and return the
11556
+ # `AccessControlListNotSupported` error code. Requests to read ACLs are
11557
+ # still supported. For more information, see [Controlling object
11558
+ # ownership][3] in the *Amazon S3 User Guide*.
11559
+ #
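Because ACL writes fail with `AccessControlListNotSupported` on such buckets, access has to be granted through a policy instead. A hedged sketch of granting read access with `put_bucket_policy`; the account ID, bucket name, and statement are illustrative only:

    require "aws-sdk-s3"
    require "json"

    client = Aws::S3::Client.new
    # Example policy granting another account read access to objects in the bucket.
    policy = {
      "Version"   => "2012-10-17",
      "Statement" => [{
        "Sid"       => "AllowReadFromOtherAccount",
        "Effect"    => "Allow",
        "Principal" => { "AWS" => "arn:aws:iam::111122223333:root" },
        "Action"    => "s3:GetObject",
        "Resource"  => "arn:aws:s3:::examplebucket/*"
      }]
    }
    client.put_bucket_policy(bucket: "examplebucket", policy: policy.to_json)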
11435
11560
  # **Access Permissions**
11436
11561
  #
11437
11562
  # You can set access permissions using one of the following methods:
@@ -11441,7 +11566,7 @@ module Aws::S3
11441
11566
  # ACL has a predefined set of grantees and permissions. Specify the
11442
11567
  # canned ACL name as the value of `x-amz-acl`. If you use this header,
11443
11568
  # you cannot use other access control-specific headers in your
11444
- # request. For more information, see [Canned ACL][3].
11569
+ # request. For more information, see [Canned ACL][4].
11445
11570
  #
11446
11571
  # * Specify access permissions explicitly with the `x-amz-grant-read`,
11447
11572
  # `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and
@@ -11484,7 +11609,7 @@ module Aws::S3
11484
11609
  # * South America (São Paulo)
11485
11610
  #
11486
11611
  # For a list of all the Amazon S3 supported Regions and endpoints,
11487
- # see [Regions and Endpoints][4] in the Amazon Web Services General
11612
+ # see [Regions and Endpoints][5] in the Amazon Web Services General
11488
11613
  # Reference.
11489
11614
  #
11490
11615
  # </note>
@@ -11545,7 +11670,7 @@ module Aws::S3
11545
11670
  # * South America (São Paulo)
11546
11671
  #
11547
11672
  # For a list of all the Amazon S3 supported Regions and endpoints, see
11548
- # [Regions and Endpoints][4] in the Amazon Web Services General
11673
+ # [Regions and Endpoints][5] in the Amazon Web Services General
11549
11674
  # Reference.
11550
11675
  #
11551
11676
  # </note>
@@ -11558,18 +11683,19 @@ module Aws::S3
11558
11683
  #
11559
11684
  # **Related Resources**
11560
11685
  #
11561
- # * [CopyObject][5]
11686
+ # * [CopyObject][6]
11562
11687
  #
11563
- # * [GetObject][6]
11688
+ # * [GetObject][7]
11564
11689
  #
11565
11690
  #
11566
11691
  #
11567
11692
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions
11568
11693
  # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
11569
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
11570
- # [4]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
11571
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
11572
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
11694
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
11695
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
11696
+ # [5]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
11697
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
11698
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
11573
11699
  #
11574
11700
  # @option params [String] :acl
11575
11701
  # The canned ACL to apply to the object. For more information, see
@@ -12703,7 +12829,7 @@ module Aws::S3
12703
12829
  # value: "MetadataValue",
12704
12830
  # },
12705
12831
  # ],
12706
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
12832
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
12707
12833
  # },
12708
12834
  # },
12709
12835
  # },
@@ -14061,7 +14187,7 @@ module Aws::S3
14061
14187
  # sse_customer_algorithm: "SSECustomerAlgorithm",
14062
14188
  # ssekms_key_id: "SSEKMSKeyId",
14063
14189
  # sse_customer_key_md5: "SSECustomerKeyMD5",
14064
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
14190
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
14065
14191
  # tag_count: 1,
14066
14192
  # version_id: "ObjectVersionId",
14067
14193
  # bucket_key_enabled: false,
@@ -14089,7 +14215,7 @@ module Aws::S3
14089
14215
  params: params,
14090
14216
  config: config)
14091
14217
  context[:gem_name] = 'aws-sdk-s3'
14092
- context[:gem_version] = '1.107.0'
14218
+ context[:gem_version] = '1.109.0'
14093
14219
  Seahorse::Client::Request.new(handlers, context)
14094
14220
  end
14095
14221