aws-sdk-s3 1.106.0 → 1.111.0

@@ -27,6 +27,7 @@ require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
  require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
  require 'aws-sdk-core/plugins/transfer_encoding.rb'
  require 'aws-sdk-core/plugins/http_checksum.rb'
+ require 'aws-sdk-core/plugins/defaults_mode.rb'
  require 'aws-sdk-core/plugins/protocols/rest_xml.rb'
  require 'aws-sdk-s3/plugins/accelerate.rb'
  require 'aws-sdk-s3/plugins/arn.rb'
@@ -91,6 +92,7 @@ module Aws::S3
  add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
  add_plugin(Aws::Plugins::TransferEncoding)
  add_plugin(Aws::Plugins::HttpChecksum)
+ add_plugin(Aws::Plugins::DefaultsMode)
  add_plugin(Aws::Plugins::Protocols::RestXml)
  add_plugin(Aws::S3::Plugins::Accelerate)
  add_plugin(Aws::S3::Plugins::ARN)
@@ -155,7 +157,9 @@ module Aws::S3
  # * EC2/ECS IMDS instance profile - When used by default, the timeouts
  # are very aggressive. Construct and pass an instance of
  # `Aws::InstanceProfileCredentails` or `Aws::ECSCredentials` to
- # enable retries and extended timeouts.
+ # enable retries and extended timeouts. Instance profile credential
+ # fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED']
+ # to true.
  #
  # @option options [required, String] :region
  # The AWS region to connect to. The configured `:region` is
@@ -216,6 +220,10 @@ module Aws::S3
  # Used only in `standard` and adaptive retry modes. Specifies whether to apply
  # a clock skew correction and retry requests with skewed client clocks.
  #
+ # @option options [String] :defaults_mode ("legacy")
+ # See {Aws::DefaultsModeConfiguration} for a list of the
+ # accepted modes and the configuration defaults that are included.
+ #
  # @option options [Boolean] :disable_host_prefix_injection (false)
  # Set to true to disable SDK automatically adding host prefix
  # to default service endpoint when available.
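The new `:defaults_mode` option added above is a constructor argument. A minimal sketch of opting in, assuming `"standard"` as one of the accepted modes documented in `Aws::DefaultsModeConfiguration` (`"legacy"` remains the default):

    require 'aws-sdk-s3'

    # Sketch: opt in to the "standard" defaults mode instead of "legacy".
    # Region and credentials resolution are left to the usual provider chain.
    s3 = Aws::S3::Client.new(
      region: 'us-east-1',
      defaults_mode: 'standard'
    )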
@@ -379,7 +387,7 @@ module Aws::S3
  # seconds to wait when opening a HTTP session before raising a
  # `Timeout::Error`.
  #
- # @option options [Integer] :http_read_timeout (60) The default
+ # @option options [Float] :http_read_timeout (60) The default
  # number of seconds to wait for response data. This value can
  # safely be set per-request on the session.
  #
@@ -395,6 +403,9 @@ module Aws::S3
  # disables this behaviour. This value can safely be set per
  # request on the session.
  #
+ # @option options [Float] :ssl_timeout (nil) Sets the SSL timeout
+ # in seconds.
+ #
  # @option options [Boolean] :http_wire_trace (false) When `true`,
  # HTTP debug output will be sent to the `:logger`.
  #
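With `:http_read_timeout` now documented as a Float and the new `:ssl_timeout` option, timeout tuning might look like the following sketch (the numeric values are illustrative only):

    require 'aws-sdk-s3'

    s3 = Aws::S3::Client.new(
      region: 'us-east-1',
      http_open_timeout: 15,    # seconds to open the HTTP session
      http_read_timeout: 60.0,  # documented as Float in this release
      ssl_timeout: 10.0         # new option; defaults to nil
    )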
@@ -571,6 +582,11 @@ module Aws::S3
  # prepared to retry the failed requests. For more information, see
  # [Amazon S3 Error Best Practices][2].
  #
+ # You cannot use `Content-Type: application/x-www-form-urlencoded` with
+ # Complete Multipart Upload requests. Also, if you do not provide a
+ # `Content-Type` header, `CompleteMultipartUpload` returns a 200 OK
+ # response.
+ #
  # For more information about multipart uploads, see [Uploading Objects
  # Using Multipart Upload][3].
  #
@@ -891,11 +907,28 @@ module Aws::S3
  # Control List (ACL) Overview][10] and [Managing ACLs Using the REST
  # API][11].
  #
+ # If the bucket that you're copying objects to uses the bucket owner
+ # enforced setting for S3 Object Ownership, ACLs are disabled and no
+ # longer affect permissions. Buckets that use this setting only accept
+ # PUT requests that don't specify an ACL or PUT requests that specify
+ # bucket owner full control ACLs, such as the
+ # `bucket-owner-full-control` canned ACL or an equivalent form of this
+ # ACL expressed in the XML format.
+ #
+ # For more information, see [ Controlling ownership of objects and
+ # disabling ACLs][12] in the *Amazon S3 User Guide*.
+ #
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for Object
+ # Ownership, all objects written to the bucket by any account will be
+ # owned by the bucket owner.
+ #
+ # </note>
+ #
  # **Storage Class Options**
  #
  # You can use the `CopyObject` action to change the storage class of an
  # object that is already stored in Amazon S3 using the `StorageClass`
- # parameter. For more information, see [Storage Classes][12] in the
+ # parameter. For more information, see [Storage Classes][13] in the
  # *Amazon S3 User Guide*.
  #
  # **Versioning**
@@ -916,15 +949,15 @@ module Aws::S3
  #
  # If the source object's storage class is GLACIER, you must restore a
  # copy of this object before you can use it as a source object for the
- # copy operation. For more information, see [RestoreObject][13].
+ # copy operation. For more information, see [RestoreObject][14].
  #
  # The following operations are related to `CopyObject`\:
  #
- # * [PutObject][14]
+ # * [PutObject][15]
  #
- # * [GetObject][15]
+ # * [GetObject][16]
  #
- # For more information, see [Copying Objects][16].
+ # For more information, see [Copying Objects][17].
  #
  #
  #
@@ -939,11 +972,12 @@ module Aws::S3
  # [9]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
  # [10]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
  # [11]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html
- # [12]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
- # [13]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
- # [14]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
- # [15]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
- # [16]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html
+ # [12]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ # [13]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ # [14]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
+ # [15]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+ # [16]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ # [17]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html
  #
  # @option params [String] :acl
  # The canned ACL to apply to the object.
@@ -1265,7 +1299,7 @@ module Aws::S3
  # metadata_directive: "COPY", # accepts COPY, REPLACE
  # tagging_directive: "COPY", # accepts COPY, REPLACE
  # server_side_encryption: "AES256", # accepts AES256, aws:kms
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
  # website_redirect_location: "WebsiteRedirectLocation",
  # sse_customer_algorithm: "SSECustomerAlgorithm",
  # sse_customer_key: "SSECustomerKey",
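The `GLACIER_IR` value newly accepted by `storage_class` above can be passed straight to `copy_object`; a sketch in which the bucket and key names are placeholders:

    # Copy an existing object into the Glacier Instant Retrieval storage class.
    # 'destinationbucket', 'sourcebucket' and the key are placeholder names.
    s3.copy_object(
      bucket: 'destinationbucket',
      copy_source: 'sourcebucket/HappyFace.jpg',
      key: 'HappyFace.jpg',
      storage_class: 'GLACIER_IR'
    )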
@@ -1338,22 +1372,33 @@ module Aws::S3
  #
  # </note>
  #
+ # **Access control lists (ACLs)**
+ #
  # When creating a bucket using this operation, you can optionally
- # specify the accounts or groups that should be granted specific
- # permissions on the bucket. There are two ways to grant the appropriate
- # permissions using the request headers.
+ # configure the bucket ACL to specify the accounts or groups that should
+ # be granted specific permissions on the bucket.
+ #
+ # If your CreateBucket request sets bucket owner enforced for S3 Object
+ # Ownership and specifies a bucket ACL that provides access to an
+ # external Amazon Web Services account, your request fails with a `400`
+ # error and returns the `InvalidBucketAclWithObjectOwnership` error
+ # code. For more information, see [Controlling object ownership][5] in
+ # the *Amazon S3 User Guide*.
+ #
+ # There are two ways to grant the appropriate permissions using the
+ # request headers.
  #
  # * Specify a canned ACL using the `x-amz-acl` request header. Amazon S3
  # supports a set of predefined ACLs, known as *canned ACLs*. Each
  # canned ACL has a predefined set of grantees and permissions. For
- # more information, see [Canned ACL][5].
+ # more information, see [Canned ACL][6].
  #
  # * Specify access permissions explicitly using the `x-amz-grant-read`,
  # `x-amz-grant-write`, `x-amz-grant-read-acp`,
  # `x-amz-grant-write-acp`, and `x-amz-grant-full-control` headers.
  # These headers map to the set of permissions Amazon S3 supports in an
  # ACL. For more information, see [Access control list (ACL)
- # overview][6].
+ # overview][7].
  #
  # You specify each grantee as a type=value pair, where the type is one
  # of the following:
@@ -1386,7 +1431,7 @@ module Aws::S3
  # * South America (São Paulo)
  #
  # For a list of all the Amazon S3 supported Regions and endpoints,
- # see [Regions and Endpoints][7] in the Amazon Web Services General
+ # see [Regions and Endpoints][8] in the Amazon Web Services General
  # Reference.
  #
  # </note>
@@ -1404,22 +1449,29 @@ module Aws::S3
  #
  # **Permissions**
  #
- # If your `CreateBucket` request specifies ACL permissions and the ACL
- # is public-read, public-read-write, authenticated-read, or if you
- # specify access permissions explicitly through any other ACL, both
- # `s3:CreateBucket` and `s3:PutBucketAcl` permissions are needed. If the
- # ACL the `CreateBucket` request is private, only `s3:CreateBucket`
- # permission is needed.
+ # In addition to `s3:CreateBucket`, the following permissions are
+ # required when your CreateBucket includes specific headers:
  #
- # If `ObjectLockEnabledForBucket` is set to true in your `CreateBucket`
- # request, `s3:PutBucketObjectLockConfiguration` and
- # `s3:PutBucketVersioning` permissions are required.
+ # * **ACLs** - If your `CreateBucket` request specifies ACL permissions
+ # and the ACL is public-read, public-read-write, authenticated-read,
+ # or if you specify access permissions explicitly through any other
+ # ACL, both `s3:CreateBucket` and `s3:PutBucketAcl` permissions are
+ # needed. If the ACL the `CreateBucket` request is private or doesn't
+ # specify any ACLs, only `s3:CreateBucket` permission is needed.
+ #
+ # * **Object Lock** - If `ObjectLockEnabledForBucket` is set to true in
+ # your `CreateBucket` request, `s3:PutBucketObjectLockConfiguration`
+ # and `s3:PutBucketVersioning` permissions are required.
+ #
+ # * **S3 Object Ownership** - If your CreateBucket request includes
+ # the `x-amz-object-ownership` header, `s3:PutBucketOwnershipControls`
+ # permission is required.
  #
  # The following operations are related to `CreateBucket`\:
  #
- # * [PutObject][8]
+ # * [PutObject][9]
  #
- # * [DeleteBucket][9]
+ # * [DeleteBucket][10]
  #
  #
  #
@@ -1427,11 +1479,12 @@ module Aws::S3
  # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html
  # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
  # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
- # [7]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
- # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
- # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html
+ # [8]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
  #
  # @option params [String] :acl
  # The canned ACL to apply to the bucket.
@@ -1465,38 +1518,56 @@ module Aws::S3
  # Specifies whether you want S3 Object Lock to be enabled for the new
  # bucket.
  #
+ # @option params [String] :object_ownership
+ # The container element for object ownership for a bucket's ownership
+ # controls.
+ #
+ # BucketOwnerPreferred - Objects uploaded to the bucket change ownership
+ # to the bucket owner if the objects are uploaded with the
+ # `bucket-owner-full-control` canned ACL.
+ #
+ # ObjectWriter - The uploading account will own the object if the object
+ # is uploaded with the `bucket-owner-full-control` canned ACL.
+ #
+ # BucketOwnerEnforced - Access control lists (ACLs) are disabled and no
+ # longer affect permissions. The bucket owner automatically owns and has
+ # full control over every object in the bucket. The bucket only accepts
+ # PUT requests that don't specify an ACL or bucket owner full control
+ # ACLs, such as the `bucket-owner-full-control` canned ACL or an
+ # equivalent form of this ACL expressed in the XML format.
+ #
  # @return [Types::CreateBucketOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
  # * {Types::CreateBucketOutput#location #location} => String
  #
  #
- # @example Example: To create a bucket
+ # @example Example: To create a bucket in a specific region
  #
- # # The following example creates a bucket.
+ # # The following example creates a bucket. The request specifies an AWS region where to create the bucket.
  #
  # resp = client.create_bucket({
  # bucket: "examplebucket",
+ # create_bucket_configuration: {
+ # location_constraint: "eu-west-1",
+ # },
  # })
  #
  # resp.to_h outputs the following:
  # {
- # location: "/examplebucket",
+ # location: "http://examplebucket.<Region>.s3.amazonaws.com/",
  # }
  #
- # @example Example: To create a bucket in a specific region
+ # @example Example: To create a bucket
  #
- # # The following example creates a bucket. The request specifies an AWS region where to create the bucket.
+ # # The following example creates a bucket.
  #
  # resp = client.create_bucket({
  # bucket: "examplebucket",
- # create_bucket_configuration: {
- # location_constraint: "eu-west-1",
- # },
  # })
  #
  # resp.to_h outputs the following:
  # {
- # location: "http://examplebucket.<Region>.s3.amazonaws.com/",
+ # location: "/examplebucket",
  # }
  #
  # @example Request syntax with placeholder values
@@ -1513,6 +1584,7 @@ module Aws::S3
  # grant_write: "GrantWrite",
  # grant_write_acp: "GrantWriteACP",
  # object_lock_enabled_for_bucket: false,
+ # object_ownership: "BucketOwnerPreferred", # accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced
  # })
  #
  # @example Response structure
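Combining the new `:object_ownership` parameter with the permissions note above, a bucket with ACLs disabled might be created as in this sketch (the bucket name is a placeholder, and the caller also needs `s3:PutBucketOwnershipControls`):

    # Create a bucket whose ownership setting disables ACLs outright.
    resp = s3.create_bucket(
      bucket: 'examplebucket',
      object_ownership: 'BucketOwnerEnforced'
    )
    resp.location #=> "/examplebucket"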
@@ -1991,7 +2063,7 @@ module Aws::S3
  # "MetadataKey" => "MetadataValue",
  # },
  # server_side_encryption: "AES256", # accepts AES256, aws:kms
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
  # website_redirect_location: "WebsiteRedirectLocation",
  # sse_customer_algorithm: "SSECustomerAlgorithm",
  # sse_customer_key: "SSECustomerKey",
@@ -2260,18 +2332,17 @@ module Aws::S3
  # storage costs by automatically moving data to the most cost-effective
  # storage access tier, without performance impact or operational
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
- # two low latency and high throughput access tiers. For data that can be
- # accessed asynchronously, you can choose to activate automatic
- # archiving capabilities within the S3 Intelligent-Tiering storage
- # class.
+ # three low latency and high throughput access tiers. To get the lowest
+ # storage cost on data that can be accessed in minutes to hours, you can
+ # choose to activate additional archiving capabilities.
  #
  # The S3 Intelligent-Tiering storage class is the ideal storage class
  # for data with unknown, changing, or unpredictable access patterns,
  # independent of object size or retention period. If the size of an
- # object is less than 128 KB, it is not eligible for auto-tiering.
- # Smaller objects can be stored, but they are always charged at the
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
- # class.
+ # object is less than 128 KB, it is not monitored and not eligible for
+ # auto-tiering. Smaller objects can be stored, but they are always
+ # charged at the Frequent Access tier rates in the S3
+ # Intelligent-Tiering storage class.
  #
  # For more information, see [Storage class for automatically optimizing
  # frequently and infrequently accessed objects][1].
@@ -3432,15 +3503,24 @@ module Aws::S3
  # can return the ACL of the bucket without using an authorization
  # header.
  #
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for S3 Object
+ # Ownership, requests to read ACLs are still supported and return the
+ # `bucket-owner-full-control` ACL with the owner being the account that
+ # created the bucket. For more information, see [ Controlling object
+ # ownership and disabling ACLs][1] in the *Amazon S3 User Guide*.
+ #
+ # </note>
+ #
  # **Related Resources**
  #
- # * [ListObjects][1]
+ # * [ListObjects][2]
  #
  # ^
  #
  #
  #
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
  #
  # @option params [required, String] :bucket
  # Specifies the S3 bucket whose ACL is being requested.
@@ -3726,18 +3806,17 @@ module Aws::S3
  # storage costs by automatically moving data to the most cost-effective
  # storage access tier, without performance impact or operational
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
- # two low latency and high throughput access tiers. For data that can be
- # accessed asynchronously, you can choose to activate automatic
- # archiving capabilities within the S3 Intelligent-Tiering storage
- # class.
+ # three low latency and high throughput access tiers. To get the lowest
+ # storage cost on data that can be accessed in minutes to hours, you can
+ # choose to activate additional archiving capabilities.
  #
  # The S3 Intelligent-Tiering storage class is the ideal storage class
  # for data with unknown, changing, or unpredictable access patterns,
  # independent of object size or retention period. If the size of an
- # object is less than 128 KB, it is not eligible for auto-tiering.
- # Smaller objects can be stored, but they are always charged at the
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
- # class.
+ # object is less than 128 KB, it is not monitored and not eligible for
+ # auto-tiering. Smaller objects can be stored, but they are always
+ # charged at the Frequent Access tier rates in the S3
+ # Intelligent-Tiering storage class.
  #
  # For more information, see [Storage class for automatically optimizing
  # frequently and infrequently accessed objects][1].
@@ -3976,10 +4055,12 @@ module Aws::S3
  # resp.rules[0].status #=> String, one of "Enabled", "Disabled"
  # resp.rules[0].transition.date #=> Time
  # resp.rules[0].transition.days #=> Integer
- # resp.rules[0].transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
+ # resp.rules[0].transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
  # resp.rules[0].noncurrent_version_transition.noncurrent_days #=> Integer
- # resp.rules[0].noncurrent_version_transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
+ # resp.rules[0].noncurrent_version_transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
+ # resp.rules[0].noncurrent_version_transition.newer_noncurrent_versions #=> Integer
  # resp.rules[0].noncurrent_version_expiration.noncurrent_days #=> Integer
+ # resp.rules[0].noncurrent_version_expiration.newer_noncurrent_versions #=> Integer
  # resp.rules[0].abort_incomplete_multipart_upload.days_after_initiation #=> Integer
  #
  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle AWS API Documentation
@@ -4097,19 +4178,25 @@ module Aws::S3
  # resp.rules[0].filter.prefix #=> String
  # resp.rules[0].filter.tag.key #=> String
  # resp.rules[0].filter.tag.value #=> String
+ # resp.rules[0].filter.object_size_greater_than #=> Integer
+ # resp.rules[0].filter.object_size_less_than #=> Integer
  # resp.rules[0].filter.and.prefix #=> String
  # resp.rules[0].filter.and.tags #=> Array
  # resp.rules[0].filter.and.tags[0].key #=> String
  # resp.rules[0].filter.and.tags[0].value #=> String
+ # resp.rules[0].filter.and.object_size_greater_than #=> Integer
+ # resp.rules[0].filter.and.object_size_less_than #=> Integer
  # resp.rules[0].status #=> String, one of "Enabled", "Disabled"
  # resp.rules[0].transitions #=> Array
  # resp.rules[0].transitions[0].date #=> Time
  # resp.rules[0].transitions[0].days #=> Integer
- # resp.rules[0].transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
+ # resp.rules[0].transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
  # resp.rules[0].noncurrent_version_transitions #=> Array
  # resp.rules[0].noncurrent_version_transitions[0].noncurrent_days #=> Integer
- # resp.rules[0].noncurrent_version_transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
+ # resp.rules[0].noncurrent_version_transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR"
+ # resp.rules[0].noncurrent_version_transitions[0].newer_noncurrent_versions #=> Integer
  # resp.rules[0].noncurrent_version_expiration.noncurrent_days #=> Integer
+ # resp.rules[0].noncurrent_version_expiration.newer_noncurrent_versions #=> Integer
  # resp.rules[0].abort_incomplete_multipart_upload.days_after_initiation #=> Integer
  #
  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration AWS API Documentation
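The new size-based filter fields and `newer_noncurrent_versions` counters surfaced above can be read straight off the response; a sketch assuming the bucket already has a lifecycle configuration:

    resp = s3.get_bucket_lifecycle_configuration(bucket: 'examplebucket')
    resp.rules.each do |rule|
      # New in this release: size-based filters and noncurrent-version limits.
      puts rule.filter&.object_size_greater_than
      puts rule.filter&.object_size_less_than
      puts rule.noncurrent_version_expiration&.newer_noncurrent_versions
    end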
@@ -4412,18 +4499,18 @@ module Aws::S3
  #
  # resp.topic_configuration.id #=> String
  # resp.topic_configuration.events #=> Array
- # resp.topic_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
- # resp.topic_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
+ # resp.topic_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
+ # resp.topic_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
  # resp.topic_configuration.topic #=> String
  # resp.queue_configuration.id #=> String
- # resp.queue_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
+ # resp.queue_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
  # resp.queue_configuration.events #=> Array
- # resp.queue_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
+ # resp.queue_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
  # resp.queue_configuration.queue #=> String
  # resp.cloud_function_configuration.id #=> String
- # resp.cloud_function_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
+ # resp.cloud_function_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
  # resp.cloud_function_configuration.events #=> Array
- # resp.cloud_function_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
+ # resp.cloud_function_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
  # resp.cloud_function_configuration.cloud_function #=> String
  # resp.cloud_function_configuration.invocation_role #=> String
  #
@@ -4477,6 +4564,7 @@ module Aws::S3
  # * {Types::NotificationConfiguration#topic_configurations #topic_configurations} => Array&lt;Types::TopicConfiguration&gt;
  # * {Types::NotificationConfiguration#queue_configurations #queue_configurations} => Array&lt;Types::QueueConfiguration&gt;
  # * {Types::NotificationConfiguration#lambda_function_configurations #lambda_function_configurations} => Array&lt;Types::LambdaFunctionConfiguration&gt;
+ # * {Types::NotificationConfiguration#event_bridge_configuration #event_bridge_configuration} => Types::EventBridgeConfiguration
  #
  # @example Request syntax with placeholder values
  #
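The `event_bridge_configuration` member added to the return values above can be inspected like any other response attribute; a sketch (the struct carries no fields and is only meaningful when EventBridge delivery is enabled on the bucket):

    resp = s3.get_bucket_notification_configuration(bucket: 'examplebucket')
    # Present on the response; empty unless EventBridge notifications are on.
    p resp.event_bridge_configuration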
@@ -4491,7 +4579,7 @@ module Aws::S3
  # resp.topic_configurations[0].id #=> String
  # resp.topic_configurations[0].topic_arn #=> String
  # resp.topic_configurations[0].events #=> Array
- # resp.topic_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
+ # resp.topic_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
  # resp.topic_configurations[0].filter.key.filter_rules #=> Array
  # resp.topic_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix"
  # resp.topic_configurations[0].filter.key.filter_rules[0].value #=> String
@@ -4499,7 +4587,7 @@ module Aws::S3
  # resp.queue_configurations[0].id #=> String
  # resp.queue_configurations[0].queue_arn #=> String
  # resp.queue_configurations[0].events #=> Array
- # resp.queue_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
+ # resp.queue_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
  # resp.queue_configurations[0].filter.key.filter_rules #=> Array
  # resp.queue_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix"
  # resp.queue_configurations[0].filter.key.filter_rules[0].value #=> String
@@ -4507,7 +4595,7 @@ module Aws::S3
  # resp.lambda_function_configurations[0].id #=> String
  # resp.lambda_function_configurations[0].lambda_function_arn #=> String
  # resp.lambda_function_configurations[0].events #=> Array
- # resp.lambda_function_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold"
+ # resp.lambda_function_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete"
  # resp.lambda_function_configurations[0].filter.key.filter_rules #=> Array
  # resp.lambda_function_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix"
  # resp.lambda_function_configurations[0].filter.key.filter_rules[0].value #=> String
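The expanded event list (for example `s3:ObjectTagging:*` and `s3:LifecycleExpiration:*`) can also be used when writing a notification configuration; a sketch with a placeholder SQS queue ARN:

    s3.put_bucket_notification_configuration(
      bucket: 'examplebucket',
      notification_configuration: {
        queue_configurations: [
          {
            queue_arn: 'arn:aws:sqs:us-east-1:123456789012:example-queue',
            events: ['s3:ObjectTagging:*', 's3:LifecycleExpiration:*']
          }
        ]
      }
    )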
@@ -4524,7 +4612,7 @@ module Aws::S3
  # Retrieves `OwnershipControls` for an Amazon S3 bucket. To use this
  # operation, you must have the `s3:GetBucketOwnershipControls`
  # permission. For more information about Amazon S3 permissions, see
- # [Specifying Permissions in a Policy][1].
+ # [Specifying permissions in a policy][1].
  #
  # For information about Amazon S3 Object Ownership, see [Using Object
  # Ownership][2].
@@ -4537,8 +4625,8 @@ module Aws::S3
  #
  #
  #
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
  #
  # @option params [required, String] :bucket
  # The name of the Amazon S3 bucket whose `OwnershipControls` you want to
@@ -4563,7 +4651,7 @@ module Aws::S3
  # @example Response structure
  #
  # resp.ownership_controls.rules #=> Array
- # resp.ownership_controls.rules[0].object_ownership #=> String, one of "BucketOwnerPreferred", "ObjectWriter"
+ # resp.ownership_controls.rules[0].object_ownership #=> String, one of "BucketOwnerPreferred", "ObjectWriter", "BucketOwnerEnforced"
  #
  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls AWS API Documentation
  #
@@ -4812,7 +4900,7 @@ module Aws::S3
  # resp.replication_configuration.rules[0].existing_object_replication.status #=> String, one of "Enabled", "Disabled"
  # resp.replication_configuration.rules[0].destination.bucket #=> String
  # resp.replication_configuration.rules[0].destination.account #=> String
- # resp.replication_configuration.rules[0].destination.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
+ # resp.replication_configuration.rules[0].destination.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
  # resp.replication_configuration.rules[0].destination.access_control_translation.owner #=> String, one of "Destination"
  # resp.replication_configuration.rules[0].destination.encryption_configuration.replica_kms_key_id #=> String
  # resp.replication_configuration.rules[0].destination.replication_time.status #=> String, one of "Enabled", "Disabled"
@@ -5213,8 +5301,10 @@ module Aws::S3
  # By default, the GET action returns the current version of an object.
  # To return a different version, use the `versionId` subresource.
  #
- # <note markdown="1"> * You need the `s3:GetObjectVersion` permission to access a specific
- # version of an object.
+ # <note markdown="1"> * If you supply a `versionId`, you need the `s3:GetObjectVersion`
+ # permission to access a specific version of an object. If you request
+ # a specific version, you do not need to have the `s3:GetObject`
+ # permission.
  #
  # * If the current version of the object is a delete marker, Amazon S3
  # behaves as if the object was deleted and includes
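The clarified permission note above applies when a specific version is requested; a sketch (bucket, key, and version ID are placeholders, and the caller needs `s3:GetObjectVersion`):

    resp = s3.get_object(
      bucket: 'examplebucket',
      key: 'HappyFace.jpg',
      version_id: 'ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI'
    )
    resp.version_id #=> the requested version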
@@ -5453,49 +5543,49 @@ module Aws::S3
  # * {Types::GetObjectOutput#object_lock_legal_hold_status #object_lock_legal_hold_status} => String
  #
  #
- # @example Example: To retrieve an object
+ # @example Example: To retrieve a byte range of an object
  #
- # # The following example retrieves an object for an S3 bucket.
+ # # The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a
+ # # specific byte range.
  #
  # resp = client.get_object({
  # bucket: "examplebucket",
- # key: "HappyFace.jpg",
+ # key: "SampleFile.txt",
+ # range: "bytes=0-9",
  # })
  #
  # resp.to_h outputs the following:
  # {
  # accept_ranges: "bytes",
- # content_length: 3191,
- # content_type: "image/jpeg",
- # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
- # last_modified: Time.parse("Thu, 15 Dec 2016 01:19:41 GMT"),
+ # content_length: 10,
+ # content_range: "bytes 0-9/43",
+ # content_type: "text/plain",
+ # etag: "\"0d94420ffd0bc68cd3d152506b97a9cc\"",
+ # last_modified: Time.parse("Thu, 09 Oct 2014 22:57:28 GMT"),
  # metadata: {
  # },
- # tag_count: 2,
  # version_id: "null",
  # }
  #
- # @example Example: To retrieve a byte range of an object
+ # @example Example: To retrieve an object
  #
- # # The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a
- # # specific byte range.
+ # # The following example retrieves an object for an S3 bucket.
  #
  # resp = client.get_object({
  # bucket: "examplebucket",
- # key: "SampleFile.txt",
- # range: "bytes=0-9",
+ # key: "HappyFace.jpg",
  # })
  #
  # resp.to_h outputs the following:
  # {
  # accept_ranges: "bytes",
- # content_length: 10,
- # content_range: "bytes 0-9/43",
- # content_type: "text/plain",
- # etag: "\"0d94420ffd0bc68cd3d152506b97a9cc\"",
- # last_modified: Time.parse("Thu, 09 Oct 2014 22:57:28 GMT"),
+ # content_length: 3191,
+ # content_type: "image/jpeg",
+ # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
+ # last_modified: Time.parse("Thu, 15 Dec 2016 01:19:41 GMT"),
  # metadata: {
  # },
+ # tag_count: 2,
  # version_id: "null",
  # }
  #
@@ -5581,7 +5671,7 @@ module Aws::S3
  # resp.sse_customer_key_md5 #=> String
  # resp.ssekms_key_id #=> String
  # resp.bucket_key_enabled #=> Boolean
- # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
+ # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
  # resp.request_charged #=> String, one of "requester"
  # resp.replication_status #=> String, one of "COMPLETE", "PENDING", "FAILED", "REPLICA"
  # resp.parts_count #=> Integer
@@ -5610,19 +5700,28 @@ module Aws::S3
  # an object. To return ACL information about a different version, use
  # the versionId subresource.
  #
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for S3 Object
+ # Ownership, requests to read ACLs are still supported and return the
+ # `bucket-owner-full-control` ACL with the owner being the account that
+ # created the bucket. For more information, see [ Controlling object
+ # ownership and disabling ACLs][1] in the *Amazon S3 User Guide*.
+ #
+ # </note>
+ #
  # The following operations are related to `GetObjectAcl`\:
  #
- # * [GetObject][1]
+ # * [GetObject][2]
  #
- # * [DeleteObject][2]
+ # * [DeleteObject][3]
  #
- # * [PutObject][3]
+ # * [PutObject][4]
  #
  #
  #
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
  #
  # @option params [required, String] :bucket
  # The bucket name that contains the object for which to get the ACL
@@ -6043,49 +6142,49 @@ module Aws::S3
  # * {Types::GetObjectTaggingOutput#tag_set #tag_set} => Array&lt;Types::Tag&gt;
  #
  #
- # @example Example: To retrieve tag set of a specific object version
+ # @example Example: To retrieve tag set of an object
  #
- # # The following example retrieves tag set of an object. The request specifies object version.
+ # # The following example retrieves tag set of an object.
  #
  # resp = client.get_object_tagging({
  # bucket: "examplebucket",
- # key: "exampleobject",
- # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
+ # key: "HappyFace.jpg",
  # })
  #
  # resp.to_h outputs the following:
  # {
  # tag_set: [
  # {
- # key: "Key1",
- # value: "Value1",
+ # key: "Key4",
+ # value: "Value4",
+ # },
+ # {
+ # key: "Key3",
+ # value: "Value3",
  # },
  # ],
- # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
+ # version_id: "null",
  # }
  #
- # @example Example: To retrieve tag set of an object
+ # @example Example: To retrieve tag set of a specific object version
  #
- # # The following example retrieves tag set of an object.
+ # # The following example retrieves tag set of an object. The request specifies object version.
  #
  # resp = client.get_object_tagging({
  # bucket: "examplebucket",
- # key: "HappyFace.jpg",
+ # key: "exampleobject",
+ # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
  # })
  #
  # resp.to_h outputs the following:
  # {
  # tag_set: [
  # {
- # key: "Key4",
- # value: "Value4",
- # },
- # {
- # key: "Key3",
- # value: "Value3",
+ # key: "Key1",
+ # value: "Value1",
  # },
  # ],
- # version_id: "null",
+ # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI",
  # }
  #
  # @example Request syntax with placeholder values
@@ -6506,18 +6605,8 @@ module Aws::S3
  # The object key.
  #
  # @option params [String] :range
- # Downloads the specified range bytes of an object. For more information
- # about the HTTP Range header, see
- # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35][1].
- #
- # <note markdown="1"> Amazon S3 doesn't support retrieving multiple ranges of data per
- # `GET` request.
- #
- # </note>
- #
- #
- #
- # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+ # Because `HeadObject` returns only the metadata for an object, this
+ # parameter has no effect.
  #
  # @option params [String] :version_id
  # VersionId used to reference a specific version of the object.
@@ -6662,7 +6751,7 @@ module Aws::S3
  # resp.sse_customer_key_md5 #=> String
  # resp.ssekms_key_id #=> String
  # resp.bucket_key_enabled #=> Boolean
- # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
+ # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
  # resp.request_charged #=> String, one of "requester"
  # resp.replication_status #=> String, one of "COMPLETE", "PENDING", "FAILED", "REPLICA"
  # resp.parts_count #=> Integer
@@ -6789,18 +6878,17 @@ module Aws::S3
  # storage costs by automatically moving data to the most cost-effective
  # storage access tier, without performance impact or operational
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
- # two low latency and high throughput access tiers. For data that can be
- # accessed asynchronously, you can choose to activate automatic
- # archiving capabilities within the S3 Intelligent-Tiering storage
- # class.
+ # three low latency and high throughput access tiers. To get the lowest
+ # storage cost on data that can be accessed in minutes to hours, you can
+ # choose to activate additional archiving capabilities.
  #
  # The S3 Intelligent-Tiering storage class is the ideal storage class
  # for data with unknown, changing, or unpredictable access patterns,
  # independent of object size or retention period. If the size of an
- # object is less than 128 KB, it is not eligible for auto-tiering.
- # Smaller objects can be stored, but they are always charged at the
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
- # class.
+ # object is less than 128 KB, it is not monitored and not eligible for
+ # auto-tiering. Smaller objects can be stored, but they are always
+ # charged at the Frequent Access tier rates in the S3
+ # Intelligent-Tiering storage class.
  #
  # For more information, see [Storage class for automatically optimizing
  # frequently and infrequently accessed objects][1].
@@ -7270,97 +7358,97 @@ module Aws::S3
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
  #
  #
- # @example Example: To list in-progress multipart uploads on a bucket
+ # @example Example: List next set of multipart uploads when previous result is truncated
  #
- # # The following example lists in-progress multipart uploads on a specific bucket.
+ # # The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next
+ # # setup of multipart uploads.
  #
  # resp = client.list_multipart_uploads({
  # bucket: "examplebucket",
+ # key_marker: "nextkeyfrompreviousresponse",
+ # max_uploads: 2,
+ # upload_id_marker: "valuefrompreviousresponse",
  # })
  #
  # resp.to_h outputs the following:
  # {
+ # bucket: "acl1",
+ # is_truncated: true,
+ # key_marker: "",
+ # max_uploads: 2,
+ # next_key_marker: "someobjectkey",
+ # next_upload_id_marker: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
+ # upload_id_marker: "",
  # uploads: [
  # {
  # initiated: Time.parse("2014-05-01T05:40:58.000Z"),
  # initiator: {
- # display_name: "display-name",
+ # display_name: "ownder-display-name",
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
  # },
  # key: "JavaFile",
  # owner: {
- # display_name: "display-name",
- # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
+ # display_name: "mohanataws",
+ # id: "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
  # },
  # storage_class: "STANDARD",
- # upload_id: "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--",
+ # upload_id: "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--",
  # },
  # {
  # initiated: Time.parse("2014-05-01T05:41:27.000Z"),
  # initiator: {
- # display_name: "display-name",
+ # display_name: "ownder-display-name",
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
  # },
  # key: "JavaFile",
  # owner: {
- # display_name: "display-name",
+ # display_name: "ownder-display-name",
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
  # },
  # storage_class: "STANDARD",
- # upload_id: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
+ # upload_id: "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
  # },
  # ],
  # }
  #
- # @example Example: List next set of multipart uploads when previous result is truncated
+ # @example Example: To list in-progress multipart uploads on a bucket
  #
- # # The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next
- # # setup of multipart uploads.
+ # # The following example lists in-progress multipart uploads on a specific bucket.
  #
  # resp = client.list_multipart_uploads({
  # bucket: "examplebucket",
- # key_marker: "nextkeyfrompreviousresponse",
- # max_uploads: 2,
- # upload_id_marker: "valuefrompreviousresponse",
  # })
  #
  # resp.to_h outputs the following:
  # {
- # bucket: "acl1",
- # is_truncated: true,
- # key_marker: "",
- # max_uploads: 2,
- # next_key_marker: "someobjectkey",
- # next_upload_id_marker: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
- # upload_id_marker: "",
  # uploads: [
  # {
  # initiated: Time.parse("2014-05-01T05:40:58.000Z"),
  # initiator: {
7340
- # display_name: "ownder-display-name",
7428
+ # display_name: "display-name",
7341
7429
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7342
7430
  # },
7343
7431
  # key: "JavaFile",
7344
7432
  # owner: {
7345
- # display_name: "mohanataws",
7346
- # id: "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7433
+ # display_name: "display-name",
7434
+ # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7347
7435
  # },
7348
7436
  # storage_class: "STANDARD",
7349
- # upload_id: "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--",
7437
+ # upload_id: "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--",
7350
7438
  # },
7351
7439
  # {
7352
7440
  # initiated: Time.parse("2014-05-01T05:41:27.000Z"),
7353
7441
  # initiator: {
7354
- # display_name: "ownder-display-name",
7442
+ # display_name: "display-name",
7355
7443
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7356
7444
  # },
7357
7445
  # key: "JavaFile",
7358
7446
  # owner: {
7359
- # display_name: "ownder-display-name",
7447
+ # display_name: "display-name",
7360
7448
  # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc",
7361
7449
  # },
7362
7450
  # storage_class: "STANDARD",
7363
- # upload_id: "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
7451
+ # upload_id: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--",
7364
7452
  # },
7365
7453
  # ],
7366
7454
  # }
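The two examples above are halves of the same loop: the `next_key_marker` and `next_upload_id_marker` returned in a truncated page feed the next request. A small sketch that walks every in-progress upload, using a placeholder bucket and a deliberately small page size:

  require 'aws-sdk-s3'

  client = Aws::S3::Client.new(region: 'us-east-1')

  key_marker = nil
  upload_id_marker = nil

  loop do
    params = { bucket: 'examplebucket', max_uploads: 2,
               key_marker: key_marker,
               upload_id_marker: upload_id_marker }.compact
    resp = client.list_multipart_uploads(params)

    resp.uploads.each do |upload|
      puts "#{upload.key} #{upload.upload_id} started #{upload.initiated}"
    end

    break unless resp.is_truncated

    # Carry the markers from the truncated page into the next request.
    key_marker = resp.next_key_marker
    upload_id_marker = resp.next_upload_id_marker
  end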
@@ -7393,7 +7481,7 @@ module Aws::S3
7393
7481
  # resp.uploads[0].upload_id #=> String
7394
7482
  # resp.uploads[0].key #=> String
7395
7483
  # resp.uploads[0].initiated #=> Time
7396
- # resp.uploads[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
7484
+ # resp.uploads[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
7397
7485
  # resp.uploads[0].owner.display_name #=> String
7398
7486
  # resp.uploads[0].owner.id #=> String
7399
7487
  # resp.uploads[0].initiator.id #=> String
@@ -7773,7 +7861,7 @@ module Aws::S3
7773
7861
  # resp.contents[0].last_modified #=> Time
7774
7862
  # resp.contents[0].etag #=> String
7775
7863
  # resp.contents[0].size #=> Integer
7776
- # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS"
7864
+ # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
7777
7865
  # resp.contents[0].owner.display_name #=> String
7778
7866
  # resp.contents[0].owner.id #=> String
7779
7867
  # resp.name #=> String
@@ -7978,7 +8066,7 @@ module Aws::S3
7978
8066
  # resp.contents[0].last_modified #=> Time
7979
8067
  # resp.contents[0].etag #=> String
7980
8068
  # resp.contents[0].size #=> Integer
7981
- # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS"
8069
+ # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
7982
8070
  # resp.contents[0].owner.display_name #=> String
7983
8071
  # resp.contents[0].owner.id #=> String
7984
8072
  # resp.name #=> String
@@ -8187,7 +8275,7 @@ module Aws::S3
8187
8275
  # resp.initiator.display_name #=> String
8188
8276
  # resp.owner.display_name #=> String
8189
8277
  # resp.owner.id #=> String
8190
- # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS"
8278
+ # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR"
8191
8279
  # resp.request_charged #=> String, one of "requester"
8192
8280
  #
8193
8281
  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts AWS API Documentation
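Because the `ListParts` response shown above is pageable, the SDK's built-in paginator can iterate all parts without tracking part-number markers by hand. A sketch with placeholder bucket, key, and upload ID:

  require 'aws-sdk-s3'

  client = Aws::S3::Client.new(region: 'us-east-1')

  resp = client.list_parts(
    bucket: 'examplebucket',        # placeholder bucket
    key: 'bigfile.bin',             # placeholder key
    upload_id: 'exampleUploadId'    # placeholder upload ID
  )

  # Pageable responses expose each_page; each part carries number, ETag, size.
  resp.each_page do |page|
    page.parts.each do |part|
      puts "part #{part.part_number}: #{part.size} bytes, etag #{part.etag}"
    end
  end

  puts resp.storage_class   # may now also report "GLACIER_IR"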
@@ -8298,6 +8386,14 @@ module Aws::S3
8298
8386
  # you have an existing application that updates a bucket ACL using the
8299
8387
  # request body, then you can continue to use that approach.
8300
8388
  #
8389
+ # If your bucket uses the bucket owner enforced setting for S3 Object
8390
+ # Ownership, ACLs are disabled and no longer affect permissions. You
8391
+ # must use policies to grant access to your bucket and the objects in
8392
+ # it. Requests to set ACLs or update ACLs fail and return the
8393
+ # `AccessControlListNotSupported` error code. Requests to read ACLs are
8394
+ # still supported. For more information, see [Controlling object
8395
+ # ownership][2] in the *Amazon S3 User Guide*.
8396
+ #
8301
8397
  # **Access Permissions**
8302
8398
  #
8303
8399
  # You can set access permissions using one of the following methods:
@@ -8307,7 +8403,7 @@ module Aws::S3
8307
8403
  # canned ACL has a predefined set of grantees and permissions. Specify
8308
8404
  # the canned ACL name as the value of `x-amz-acl`. If you use this
8309
8405
  # header, you cannot use other access control-specific headers in your
8310
- # request. For more information, see [Canned ACL][2].
8406
+ # request. For more information, see [Canned ACL][3].
8311
8407
  #
8312
8408
  # * Specify access permissions explicitly with the `x-amz-grant-read`,
8313
8409
  # `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and
@@ -8317,7 +8413,7 @@ module Aws::S3
8317
8413
  # permission. If you use these ACL-specific headers, you cannot use
8318
8414
  # the `x-amz-acl` header to set a canned ACL. These parameters map to
8319
8415
  # the set of permissions that Amazon S3 supports in an ACL. For more
8320
- # information, see [Access Control List (ACL) Overview][3].
8416
+ # information, see [Access Control List (ACL) Overview][4].
8321
8417
  #
8322
8418
  # You specify each grantee as a type=value pair, where the type is one
8323
8419
  # of the following:
@@ -8350,7 +8446,7 @@ module Aws::S3
8350
8446
  # * South America (São Paulo)
8351
8447
  #
8352
8448
  # For a list of all the Amazon S3 supported Regions and endpoints,
8353
- # see [Regions and Endpoints][4] in the Amazon Web Services General
8449
+ # see [Regions and Endpoints][5] in the Amazon Web Services General
8354
8450
  # Reference.
8355
8451
  #
8356
8452
  # </note>
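To make the `type=value` grantee forms described above concrete, here is a hedged sketch that grants read access both by canonical user ID and by predefined group URI. The bucket and the IDs are placeholders, and this only succeeds on buckets that still have ACLs enabled:

  require 'aws-sdk-s3'

  client = Aws::S3::Client.new(region: 'us-east-1')

  client.put_bucket_acl(
    bucket: 'examplebucket',   # placeholder bucket
    # Explicit grant headers take comma-separated type=value pairs.
    grant_read: 'id="852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", ' \
                'uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"',
    grant_full_control: 'id="bucket-owner-canonical-id-goes-here"'   # placeholder ID
  )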
@@ -8413,28 +8509,29 @@ module Aws::S3
8413
8509
  # * South America (São Paulo)
8414
8510
  #
8415
8511
  # For a list of all the Amazon S3 supported Regions and endpoints, see
8416
- # [Regions and Endpoints][4] in the Amazon Web Services General
8512
+ # [Regions and Endpoints][5] in the Amazon Web Services General
8417
8513
  # Reference.
8418
8514
  #
8419
8515
  # </note>
8420
8516
  #
8421
8517
  # **Related Resources**
8422
8518
  #
8423
- # * [CreateBucket][5]
8519
+ # * [CreateBucket][6]
8424
8520
  #
8425
- # * [DeleteBucket][6]
8521
+ # * [DeleteBucket][7]
8426
8522
  #
8427
- # * [GetObjectAcl][7]
8523
+ # * [GetObjectAcl][8]
8428
8524
  #
8429
8525
  #
8430
8526
  #
8431
8527
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
8432
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
8433
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
8434
- # [4]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
8435
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
8436
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
8437
- # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
8528
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
8529
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
8530
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
8531
+ # [5]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
8532
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
8533
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
8534
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
8438
8535
  #
8439
8536
  # @option params [String] :acl
8440
8537
  # The canned ACL to apply to the bucket.
@@ -8934,18 +9031,17 @@ module Aws::S3
8934
9031
  # storage costs by automatically moving data to the most cost-effective
8935
9032
  # storage access tier, without performance impact or operational
8936
9033
  # overhead. S3 Intelligent-Tiering delivers automatic cost savings in
8937
- # two low latency and high throughput access tiers. For data that can be
8938
- # accessed asynchronously, you can choose to activate automatic
8939
- # archiving capabilities within the S3 Intelligent-Tiering storage
8940
- # class.
9034
+ # three low latency and high throughput access tiers. To get the lowest
9035
+ # storage cost on data that can be accessed in minutes to hours, you can
9036
+ # choose to activate additional archiving capabilities.
8941
9037
  #
8942
9038
  # The S3 Intelligent-Tiering storage class is the ideal storage class
8943
9039
  # for data with unknown, changing, or unpredictable access patterns,
8944
9040
  # independent of object size or retention period. If the size of an
8945
- # object is less than 128 KB, it is not eligible for auto-tiering.
8946
- # Smaller objects can be stored, but they are always charged at the
8947
- # Frequent Access tier rates in the S3 Intelligent-Tiering storage
8948
- # class.
9041
+ # object is less than 128 KB, it is not monitored and not eligible for
9042
+ # auto-tiering. Smaller objects can be stored, but they are always
9043
+ # charged at the Frequent Access tier rates in the S3
9044
+ # Intelligent-Tiering storage class.
8949
9045
  #
8950
9046
  # For more information, see [Storage class for automatically optimizing
8951
9047
  # frequently and infrequently accessed objects][1].
@@ -9282,14 +9378,16 @@ module Aws::S3
9282
9378
  # transition: {
9283
9379
  # date: Time.now,
9284
9380
  # days: 1,
9285
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9381
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9286
9382
  # },
9287
9383
  # noncurrent_version_transition: {
9288
9384
  # noncurrent_days: 1,
9289
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9385
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9386
+ # newer_noncurrent_versions: 1,
9290
9387
  # },
9291
9388
  # noncurrent_version_expiration: {
9292
9389
  # noncurrent_days: 1,
9390
+ # newer_noncurrent_versions: 1,
9293
9391
  # },
9294
9392
  # abort_incomplete_multipart_upload: {
9295
9393
  # days_after_initiation: 1,
@@ -9451,6 +9549,8 @@ module Aws::S3
9451
9549
  # key: "ObjectKey", # required
9452
9550
  # value: "Value", # required
9453
9551
  # },
9552
+ # object_size_greater_than: 1,
9553
+ # object_size_less_than: 1,
9454
9554
  # and: {
9455
9555
  # prefix: "Prefix",
9456
9556
  # tags: [
@@ -9459,6 +9559,8 @@ module Aws::S3
9459
9559
  # value: "Value", # required
9460
9560
  # },
9461
9561
  # ],
9562
+ # object_size_greater_than: 1,
9563
+ # object_size_less_than: 1,
9462
9564
  # },
9463
9565
  # },
9464
9566
  # status: "Enabled", # required, accepts Enabled, Disabled
@@ -9466,17 +9568,19 @@ module Aws::S3
9466
9568
  # {
9467
9569
  # date: Time.now,
9468
9570
  # days: 1,
9469
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9571
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9470
9572
  # },
9471
9573
  # ],
9472
9574
  # noncurrent_version_transitions: [
9473
9575
  # {
9474
9576
  # noncurrent_days: 1,
9475
- # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
9577
+ # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
9578
+ # newer_noncurrent_versions: 1,
9476
9579
  # },
9477
9580
  # ],
9478
9581
  # noncurrent_version_expiration: {
9479
9582
  # noncurrent_days: 1,
9583
+ # newer_noncurrent_versions: 1,
9480
9584
  # },
9481
9585
  # abort_incomplete_multipart_upload: {
9482
9586
  # days_after_initiation: 1,
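The request-syntax fragments above introduce `GLACIER_IR` transitions, object-size filters, and `newer_noncurrent_versions`. A hedged, self-contained sketch that combines them in one rule; the bucket name, prefix, and thresholds are placeholders:

  require 'aws-sdk-s3'

  client = Aws::S3::Client.new(region: 'us-east-1')

  client.put_bucket_lifecycle_configuration(
    bucket: 'examplebucket',   # placeholder bucket
    lifecycle_configuration: {
      rules: [
        {
          id: 'tier-and-trim',
          status: 'Enabled',
          # Only objects between 128 KB and 1 GB under logs/ match this rule.
          filter: {
            and: {
              prefix: 'logs/',
              object_size_greater_than: 131_072,
              object_size_less_than: 1_073_741_824
            }
          },
          # Move current versions to S3 Glacier Instant Retrieval after 30 days.
          transitions: [{ days: 30, storage_class: 'GLACIER_IR' }],
          # Retain at most 3 newer noncurrent versions before expiring the rest.
          noncurrent_version_expiration: {
            noncurrent_days: 30,
            newer_noncurrent_versions: 3
          }
        }
      ]
    }
  )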
@@ -9506,6 +9610,12 @@ module Aws::S3
9506
9610
  # The `Permissions` request element specifies the kind of access the
9507
9611
  # grantee has to the logs.
9508
9612
  #
9613
+ # If the target bucket for log delivery uses the bucket owner enforced
9614
+ # setting for S3 Object Ownership, you can't use the `Grantee` request
9615
+ # element to grant access to others. Permissions can only be granted
9616
+ # using policies. For more information, see [Permissions for server
9617
+ # access log delivery][1] in the *Amazon S3 User Guide*.
9618
+ #
9509
9619
  # **Grantee Values**
9510
9620
  #
9511
9621
  # You can specify the person (grantee) to whom you're assigning access
@@ -9540,29 +9650,30 @@ module Aws::S3
9540
9650
  # />`
9541
9651
  #
9542
9652
  # For more information about server access logging, see [Server Access
9543
- # Logging][1].
9653
+ # Logging][2] in the *Amazon S3 User Guide*.
9544
9654
  #
9545
- # For more information about creating a bucket, see [CreateBucket][2].
9655
+ # For more information about creating a bucket, see [CreateBucket][3].
9546
9656
  # For more information about returning the logging status of a bucket,
9547
- # see [GetBucketLogging][3].
9657
+ # see [GetBucketLogging][4].
9548
9658
  #
9549
9659
  # The following operations are related to `PutBucketLogging`\:
9550
9660
  #
9551
- # * [PutObject][4]
9661
+ # * [PutObject][5]
9552
9662
  #
9553
- # * [DeleteBucket][5]
9663
+ # * [DeleteBucket][6]
9554
9664
  #
9555
- # * [CreateBucket][2]
9665
+ # * [CreateBucket][3]
9556
9666
  #
9557
- # * [GetBucketLogging][3]
9667
+ # * [GetBucketLogging][4]
9558
9668
  #
9559
9669
  #
9560
9670
  #
9561
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html
9562
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
9563
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html
9564
- # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
9565
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
9671
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
9672
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html
9673
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
9674
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html
9675
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
9676
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
9566
9677
  #
9567
9678
  # @option params [required, String] :bucket
9568
9679
  # The name of the bucket for which to set the logging parameters.
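With the bucket owner enforced note above in mind, the simplest path is to omit `Grantee` elements entirely and let a bucket policy authorize log delivery. A sketch that only enables delivery; both bucket names and the prefix are placeholders:

  require 'aws-sdk-s3'

  client = Aws::S3::Client.new(region: 'us-east-1')

  # No target_grants here: with ACLs disabled, access to the log bucket
  # is granted by a bucket policy rather than Grantee elements.
  client.put_bucket_logging(
    bucket: 'examplebucket',                 # source bucket (placeholder)
    bucket_logging_status: {
      logging_enabled: {
        target_bucket: 'examplelogbucket',   # log destination (placeholder)
        target_prefix: 'access-logs/examplebucket/'
      }
    }
  )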
@@ -9777,20 +9888,20 @@ module Aws::S3
9777
9888
  # notification_configuration: { # required
9778
9889
  # topic_configuration: {
9779
9890
  # id: "NotificationId",
9780
- # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9781
- # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9891
+ # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9892
+ # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9782
9893
  # topic: "TopicArn",
9783
9894
  # },
9784
9895
  # queue_configuration: {
9785
9896
  # id: "NotificationId",
9786
- # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9787
- # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9897
+ # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9898
+ # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9788
9899
  # queue: "QueueArn",
9789
9900
  # },
9790
9901
  # cloud_function_configuration: {
9791
9902
  # id: "NotificationId",
9792
- # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9793
- # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
9903
+ # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9904
+ # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9794
9905
  # cloud_function: "CloudFunction",
9795
9906
  # invocation_role: "CloudFunctionInvocationRole",
9796
9907
  # },
@@ -9887,6 +9998,10 @@ module Aws::S3
9887
9998
  # a different account, the request will fail with an HTTP `403 (Access
9888
9999
  # Denied)` error.
9889
10000
  #
10001
+ # @option params [Boolean] :skip_destination_validation
10002
+ # Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations.
10003
+ # True or false value.
10004
+ #
9890
10005
  # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
9891
10006
  #
9892
10007
  #
@@ -9917,7 +10032,7 @@ module Aws::S3
9917
10032
  # {
9918
10033
  # id: "NotificationId",
9919
10034
  # topic_arn: "TopicArn", # required
9920
- # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
10035
+ # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9921
10036
  # filter: {
9922
10037
  # key: {
9923
10038
  # filter_rules: [
@@ -9934,7 +10049,7 @@ module Aws::S3
9934
10049
  # {
9935
10050
  # id: "NotificationId",
9936
10051
  # queue_arn: "QueueArn", # required
9937
- # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
10052
+ # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9938
10053
  # filter: {
9939
10054
  # key: {
9940
10055
  # filter_rules: [
@@ -9951,7 +10066,7 @@ module Aws::S3
9951
10066
  # {
9952
10067
  # id: "NotificationId",
9953
10068
  # lambda_function_arn: "LambdaFunctionArn", # required
9954
- # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold
10069
+ # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete
9955
10070
  # filter: {
9956
10071
  # key: {
9957
10072
  # filter_rules: [
@@ -9964,8 +10079,11 @@ module Aws::S3
9964
10079
  # },
9965
10080
  # },
9966
10081
  # ],
10082
+ # event_bridge_configuration: {
10083
+ # },
9967
10084
  # },
9968
10085
  # expected_bucket_owner: "AccountId",
10086
+ # skip_destination_validation: false,
9969
10087
  # })
9970
10088
  #
9971
10089
  # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration AWS API Documentation
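The empty `event_bridge_configuration` element and the new `:skip_destination_validation` flag shown in the request syntax above combine as follows. A minimal sketch, with a placeholder bucket, that simply routes all bucket notifications to Amazon EventBridge:

  require 'aws-sdk-s3'

  client = Aws::S3::Client.new(region: 'us-east-1')

  client.put_bucket_notification_configuration(
    bucket: 'examplebucket',   # placeholder bucket
    notification_configuration: {
      # An empty element is all it takes to turn on EventBridge delivery;
      # event selection then happens in EventBridge rules, not here.
      event_bridge_configuration: {}
    },
    # Skip SQS/SNS/Lambda destination validation (nothing to validate here).
    skip_destination_validation: true
  )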
@@ -9980,10 +10098,10 @@ module Aws::S3
9980
10098
  # Creates or modifies `OwnershipControls` for an Amazon S3 bucket. To
9981
10099
  # use this operation, you must have the `s3:PutBucketOwnershipControls`
9982
10100
  # permission. For more information about Amazon S3 permissions, see
9983
- # [Specifying Permissions in a Policy][1].
10101
+ # [Specifying permissions in a policy][1].
9984
10102
  #
9985
- # For information about Amazon S3 Object Ownership, see [Using Object
9986
- # Ownership][2].
10103
+ # For information about Amazon S3 Object Ownership, see [Using object
10104
+ # ownership][2].
9987
10105
  #
9988
10106
  # The following operations are related to `PutBucketOwnershipControls`\:
9989
10107
  #
@@ -9993,8 +10111,8 @@ module Aws::S3
9993
10111
  #
9994
10112
  #
9995
10113
  #
9996
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
9997
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
10114
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html
10115
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html
9998
10116
  #
9999
10117
  # @option params [required, String] :bucket
10000
10118
  # The name of the Amazon S3 bucket whose `OwnershipControls` you want to
@@ -10013,8 +10131,8 @@ module Aws::S3
10013
10131
  # Denied)` error.
10014
10132
  #
10015
10133
  # @option params [required, Types::OwnershipControls] :ownership_controls
10016
- # The `OwnershipControls` (BucketOwnerPreferred or ObjectWriter) that
10017
- # you want to apply to this Amazon S3 bucket.
10134
+ # The `OwnershipControls` (BucketOwnerEnforced, BucketOwnerPreferred, or
10135
+ # ObjectWriter) that you want to apply to this Amazon S3 bucket.
10018
10136
  #
10019
10137
  # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
10020
10138
  #
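Putting the updated `:ownership_controls` option text into practice, this sketch applies the new bucket owner enforced setting, which disables ACLs for the bucket; the bucket name is a placeholder:

  require 'aws-sdk-s3'

  client = Aws::S3::Client.new(region: 'us-east-1')

  client.put_bucket_ownership_controls(
    bucket: 'examplebucket',   # placeholder bucket
    ownership_controls: {
      rules: [
        # BucketOwnerEnforced disables ACLs; permissions come from policies.
        { object_ownership: 'BucketOwnerEnforced' }
      ]
    }
  )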
@@ -10027,7 +10145,7 @@ module Aws::S3
10027
10145
  # ownership_controls: { # required
10028
10146
  # rules: [ # required
10029
10147
  # {
10030
- # object_ownership: "BucketOwnerPreferred", # required, accepts BucketOwnerPreferred, ObjectWriter
10148
+ # object_ownership: "BucketOwnerPreferred", # required, accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced
10031
10149
  # },
10032
10150
  # ],
10033
10151
  # },
@@ -10300,7 +10418,7 @@ module Aws::S3
10300
10418
  # destination: { # required
10301
10419
  # bucket: "BucketName", # required
10302
10420
  # account: "AccountId",
10303
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
10421
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
10304
10422
  # access_control_translation: {
10305
10423
  # owner: "Destination", # required, accepts Destination
10306
10424
  # },
@@ -10889,13 +11007,33 @@ module Aws::S3
10889
11007
  # information, see [Access Control List (ACL) Overview][4] and [Managing
10890
11008
  # ACLs Using the REST API][5].
10891
11009
  #
11010
+ # If the bucket that you're uploading objects to uses the bucket owner
11011
+ # enforced setting for S3 Object Ownership, ACLs are disabled and no
11012
+ # longer affect permissions. Buckets that use this setting only accept
11013
+ # PUT requests that don't specify an ACL or PUT requests that specify
11014
+ # bucket owner full control ACLs, such as the
11015
+ # `bucket-owner-full-control` canned ACL or an equivalent form of this
11016
+ # ACL expressed in the XML format. PUT requests that contain other ACLs
11017
+ # (for example, custom grants to certain Amazon Web Services accounts)
11018
+ # fail and return a `400` error with the error code
11019
+ # `AccessControlListNotSupported`.
11020
+ #
11021
+ # For more information, see [ Controlling ownership of objects and
11022
+ # disabling ACLs][6] in the *Amazon S3 User Guide*.
11023
+ #
11024
+ # <note markdown="1"> If your bucket uses the bucket owner enforced setting for Object
11025
+ # Ownership, all objects written to the bucket by any account will be
11026
+ # owned by the bucket owner.
11027
+ #
11028
+ # </note>
11029
+ #
10892
11030
  # **Storage Class Options**
10893
11031
  #
10894
11032
  # By default, Amazon S3 uses the STANDARD Storage Class to store newly
10895
11033
  # created objects. The STANDARD storage class provides high durability
10896
11034
  # and high availability. Depending on performance needs, you can specify
10897
11035
  # a different Storage Class. Amazon S3 on Outposts only uses the
10898
- # OUTPOSTS Storage Class. For more information, see [Storage Classes][6]
11036
+ # OUTPOSTS Storage Class. For more information, see [Storage Classes][7]
10899
11037
  # in the *Amazon S3 User Guide*.
10900
11038
  #
10901
11039
  # **Versioning**
@@ -10907,14 +11045,14 @@ module Aws::S3
10907
11045
  # object simultaneously, it stores all of the objects.
10908
11046
  #
10909
11047
  # For more information about versioning, see [Adding Objects to
10910
- # Versioning Enabled Buckets][7]. For information about returning the
10911
- # versioning state of a bucket, see [GetBucketVersioning][8].
11048
+ # Versioning Enabled Buckets][8]. For information about returning the
11049
+ # versioning state of a bucket, see [GetBucketVersioning][9].
10912
11050
  #
10913
11051
  # **Related Resources**
10914
11052
  #
10915
- # * [CopyObject][9]
11053
+ # * [CopyObject][10]
10916
11054
  #
10917
- # * [DeleteObject][10]
11055
+ # * [DeleteObject][11]
10918
11056
  #
10919
11057
  #
10920
11058
  #
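Given the new guidance above, uploads to a bucket with the bucket owner enforced setting should either omit the ACL or send the one canned ACL that is still accepted. A hedged sketch with placeholder names:

  require 'aws-sdk-s3'

  client = Aws::S3::Client.new(region: 'us-east-1')

  # Either send no ACL at all, or the bucket-owner-full-control canned ACL;
  # any other ACL returns 400 AccessControlListNotSupported on such buckets.
  client.put_object(
    bucket: 'examplebucket',   # placeholder bucket
    key: 'exampleobject',      # placeholder key
    body: 'filetoupload',
    acl: 'bucket-owner-full-control'
  )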
@@ -10923,11 +11061,12 @@ module Aws::S3
10923
11061
  # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
10924
11062
  # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
10925
11063
  # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html
10926
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
10927
- # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html
10928
- # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
10929
- # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
10930
- # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
11064
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
11065
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
11066
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html
11067
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
11068
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
11069
+ # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
10931
11070
  #
10932
11071
  # @option params [String] :acl
10933
11072
  # The canned ACL to apply to the object. For more information, see
@@ -11197,134 +11336,134 @@ module Aws::S3
11197
11336
  # * {Types::PutObjectOutput#request_charged #request_charged} => String
11198
11337
  #
11199
11338
  #
11200
- # @example Example: To upload an object and specify optional tags
11339
+ # @example Example: To upload an object (specify optional headers)
11201
11340
  #
11202
- # # The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore
11203
- # # S3 returns version ID of the newly created object.
11341
+ # # The following example uploads an object. The request specifies optional request headers to direct S3 to use a specific
11342
+ # # storage class and server-side encryption.
11204
11343
  #
11205
11344
  # resp = client.put_object({
11206
- # body: "c:\\HappyFace.jpg",
11345
+ # body: "HappyFace.jpg",
11207
11346
  # bucket: "examplebucket",
11208
11347
  # key: "HappyFace.jpg",
11209
- # tagging: "key1=value1&key2=value2",
11348
+ # server_side_encryption: "AES256",
11349
+ # storage_class: "STANDARD_IA",
11210
11350
  # })
11211
11351
  #
11212
11352
  # resp.to_h outputs the following:
11213
11353
  # {
11214
11354
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11215
- # version_id: "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a",
11355
+ # server_side_encryption: "AES256",
11356
+ # version_id: "CG612hodqujkf8FaaNfp8U..FIhLROcp",
11216
11357
  # }
11217
11358
  #
11218
- # @example Example: To upload an object and specify canned ACL.
11359
+ # @example Example: To upload an object and specify server-side encryption and object tags
11219
11360
  #
11220
- # # The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ
11221
- # # access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.
11361
+ # # The following example uploads an object. The request specifies the optional server-side encryption option. The request
11362
+ # # also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.
11222
11363
  #
11223
11364
  # resp = client.put_object({
11224
- # acl: "authenticated-read",
11225
11365
  # body: "filetoupload",
11226
11366
  # bucket: "examplebucket",
11227
11367
  # key: "exampleobject",
11368
+ # server_side_encryption: "AES256",
11369
+ # tagging: "key1=value1&key2=value2",
11228
11370
  # })
11229
11371
  #
11230
11372
  # resp.to_h outputs the following:
11231
11373
  # {
11232
11374
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11233
- # version_id: "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr",
11375
+ # server_side_encryption: "AES256",
11376
+ # version_id: "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt",
11234
11377
  # }
11235
11378
  #
11236
- # @example Example: To upload an object and specify server-side encryption and object tags
11379
+ # @example Example: To create an object.
11237
11380
  #
11238
- # # The following example uploads and object. The request specifies the optional server-side encryption option. The request
11239
- # # also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.
11381
+ # # The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.
11240
11382
  #
11241
11383
  # resp = client.put_object({
11242
11384
  # body: "filetoupload",
11243
11385
  # bucket: "examplebucket",
11244
- # key: "exampleobject",
11245
- # server_side_encryption: "AES256",
11246
- # tagging: "key1=value1&key2=value2",
11386
+ # key: "objectkey",
11247
11387
  # })
11248
11388
  #
11249
11389
  # resp.to_h outputs the following:
11250
11390
  # {
11251
11391
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11252
- # server_side_encryption: "AES256",
11253
- # version_id: "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt",
11392
+ # version_id: "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ",
11254
11393
  # }
11255
11394
  #
11256
- # @example Example: To create an object.
11395
+ # @example Example: To upload an object and specify user-defined metadata
11257
11396
  #
11258
- # # The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.
11397
+ # # The following example creates an object. The request also specifies optional metadata. If the bucket is versioning
11398
+ # # enabled, S3 returns version ID in response.
11259
11399
  #
11260
11400
  # resp = client.put_object({
11261
11401
  # body: "filetoupload",
11262
11402
  # bucket: "examplebucket",
11263
- # key: "objectkey",
11403
+ # key: "exampleobject",
11404
+ # metadata: {
11405
+ # "metadata1" => "value1",
11406
+ # "metadata2" => "value2",
11407
+ # },
11264
11408
  # })
11265
11409
  #
11266
11410
  # resp.to_h outputs the following:
11267
11411
  # {
11268
11412
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11269
- # version_id: "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ",
11413
+ # version_id: "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0",
11270
11414
  # }
11271
11415
  #
11272
- # @example Example: To upload an object
11416
+ # @example Example: To upload an object and specify canned ACL.
11273
11417
  #
11274
- # # The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file
11275
- # # syntax. S3 returns VersionId of the newly created object.
11418
+ # # The following example uploads an object. The request specifies an optional canned ACL (access control list) to grant READ
11419
+ # # access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.
11276
11420
  #
11277
11421
  # resp = client.put_object({
11278
- # body: "HappyFace.jpg",
11422
+ # acl: "authenticated-read",
11423
+ # body: "filetoupload",
11279
11424
  # bucket: "examplebucket",
11280
- # key: "HappyFace.jpg",
11425
+ # key: "exampleobject",
11281
11426
  # })
11282
11427
  #
11283
11428
  # resp.to_h outputs the following:
11284
11429
  # {
11285
11430
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11286
- # version_id: "tpf3zF08nBplQK1XLOefGskR7mGDwcDk",
11431
+ # version_id: "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr",
11287
11432
  # }
11288
11433
  #
11289
- # @example Example: To upload an object (specify optional headers)
11434
+ # @example Example: To upload an object
11290
11435
  #
11291
- # # The following example uploads an object. The request specifies optional request headers to directs S3 to use specific
11292
- # # storage class and use server-side encryption.
11436
+ # # The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file
11437
+ # # syntax. S3 returns VersionId of the newly created object.
11293
11438
  #
11294
11439
  # resp = client.put_object({
11295
11440
  # body: "HappyFace.jpg",
11296
11441
  # bucket: "examplebucket",
11297
11442
  # key: "HappyFace.jpg",
11298
- # server_side_encryption: "AES256",
11299
- # storage_class: "STANDARD_IA",
11300
11443
  # })
11301
11444
  #
11302
11445
  # resp.to_h outputs the following:
11303
11446
  # {
11304
11447
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11305
- # server_side_encryption: "AES256",
11306
- # version_id: "CG612hodqujkf8FaaNfp8U..FIhLROcp",
11448
+ # version_id: "tpf3zF08nBplQK1XLOefGskR7mGDwcDk",
11307
11449
  # }
11308
11450
  #
11309
- # @example Example: To upload object and specify user-defined metadata
11451
+ # @example Example: To upload an object and specify optional tags
11310
11452
  #
11311
- # # The following example creates an object. The request also specifies optional metadata. If the bucket is versioning
11312
- # # enabled, S3 returns version ID in response.
11453
+ # # The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore
11454
+ # # S3 returns version ID of the newly created object.
11313
11455
  #
11314
11456
  # resp = client.put_object({
11315
- # body: "filetoupload",
11457
+ # body: "c:\\HappyFace.jpg",
11316
11458
  # bucket: "examplebucket",
11317
- # key: "exampleobject",
11318
- # metadata: {
11319
- # "metadata1" => "value1",
11320
- # "metadata2" => "value2",
11321
- # },
11459
+ # key: "HappyFace.jpg",
11460
+ # tagging: "key1=value1&key2=value2",
11322
11461
  # })
11323
11462
  #
11324
11463
  # resp.to_h outputs the following:
11325
11464
  # {
11326
11465
  # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
11327
- # version_id: "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0",
11466
+ # version_id: "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a",
11328
11467
  # }
11329
11468
  #
11330
11469
  # @example Streaming a file from disk
@@ -11356,7 +11495,7 @@ module Aws::S3
11356
11495
  # "MetadataKey" => "MetadataValue",
11357
11496
  # },
11358
11497
  # server_side_encryption: "AES256", # accepts AES256, aws:kms
11359
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
11498
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
11360
11499
  # website_redirect_location: "WebsiteRedirectLocation",
11361
11500
  # sse_customer_algorithm: "SSECustomerAlgorithm",
11362
11501
  # sse_customer_key: "SSECustomerKey",
@@ -11409,6 +11548,14 @@ module Aws::S3
11409
11548
  # information, see [Access Control List (ACL) Overview][2] in the
11410
11549
  # *Amazon S3 User Guide*.
11411
11550
  #
11551
+ # If your bucket uses the bucket owner enforced setting for S3 Object
11552
+ # Ownership, ACLs are disabled and no longer affect permissions. You
11553
+ # must use policies to grant access to your bucket and the objects in
11554
+ # it. Requests to set ACLs or update ACLs fail and return the
11555
+ # `AccessControlListNotSupported` error code. Requests to read ACLs are
11556
+ # still supported. For more information, see [Controlling object
11557
+ # ownership][3] in the *Amazon S3 User Guide*.
11558
+ #
11412
11559
  # **Access Permissions**
11413
11560
  #
11414
11561
  # You can set access permissions using one of the following methods:
@@ -11418,7 +11565,7 @@ module Aws::S3
11418
11565
  # ACL has a predefined set of grantees and permissions. Specify the
11419
11566
  # canned ACL name as the value of `x-amz-ac`l. If you use this header,
11420
11567
  # you cannot use other access control-specific headers in your
11421
- # request. For more information, see [Canned ACL][3].
11568
+ # request. For more information, see [Canned ACL][4].
11422
11569
  #
11423
11570
  # * Specify access permissions explicitly with the `x-amz-grant-read`,
11424
11571
  # `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and
@@ -11461,7 +11608,7 @@ module Aws::S3
11461
11608
  # * South America (São Paulo)
11462
11609
  #
11463
11610
  # For a list of all the Amazon S3 supported Regions and endpoints,
11464
- # see [Regions and Endpoints][4] in the Amazon Web Services General
11611
+ # see [Regions and Endpoints][5] in the Amazon Web Services General
11465
11612
  # Reference.
11466
11613
  #
11467
11614
  # </note>
@@ -11522,7 +11669,7 @@ module Aws::S3
11522
11669
  # * South America (São Paulo)
11523
11670
  #
11524
11671
  # For a list of all the Amazon S3 supported Regions and endpoints, see
11525
- # [Regions and Endpoints][4] in the Amazon Web Services General
11672
+ # [Regions and Endpoints][5] in the Amazon Web Services General
11526
11673
  # Reference.
11527
11674
  #
11528
11675
  # </note>
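For buckets where ACLs are still enabled, the same grantee forms described above apply to object ACLs. A sketch that grants read access on a single object by canonical user ID and by group URI; every identifier here is a placeholder:

  require 'aws-sdk-s3'

  client = Aws::S3::Client.new(region: 'us-east-1')

  client.put_object_acl(
    bucket: 'examplebucket',   # placeholder bucket
    key: 'HappyFace.jpg',      # placeholder key
    grant_read: 'id="examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc"',
    grant_read_acp: 'uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"'
  )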
@@ -11535,18 +11682,19 @@ module Aws::S3
11535
11682
  #
11536
11683
  # **Related Resources**
11537
11684
  #
11538
- # * [CopyObject][5]
11685
+ # * [CopyObject][6]
11539
11686
  #
11540
- # * [GetObject][6]
11687
+ # * [GetObject][7]
11541
11688
  #
11542
11689
  #
11543
11690
  #
11544
11691
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions
11545
11692
  # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
11546
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
11547
- # [4]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
11548
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
11549
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
11693
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
11694
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
11695
+ # [5]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
11696
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
11697
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
11550
11698
  #
11551
11699
  # @option params [String] :acl
11552
11700
  # The canned ACL to apply to the object. For more information, see
@@ -12680,7 +12828,7 @@ module Aws::S3
12680
12828
  # value: "MetadataValue",
12681
12829
  # },
12682
12830
  # ],
12683
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
12831
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
12684
12832
  # },
12685
12833
  # },
12686
12834
  # },
@@ -12713,10 +12861,11 @@ module Aws::S3
12713
12861
  # This action is not supported by Amazon S3 on Outposts.
12714
12862
  #
12715
12863
  # For more information about Amazon S3 Select, see [Selecting Content
12716
- # from Objects][1] in the *Amazon S3 User Guide*.
12864
+ # from Objects][1] and [SELECT Command][2] in the *Amazon S3 User
12865
+ # Guide*.
12717
12866
  #
12718
12867
  # For more information about using SQL with Amazon S3 Select, see [ SQL
12719
- # Reference for Amazon S3 Select and S3 Glacier Select][2] in the
12868
+ # Reference for Amazon S3 Select and S3 Glacier Select][3] in the
12720
12869
  # *Amazon S3 User Guide*.
12721
12870
  #
12722
12871
  #
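As a companion to the Select references above, here is a hedged sketch of a query over a CSV object using the SDK's streaming block form; the bucket, key, and SQL expression are placeholder assumptions:

  require 'aws-sdk-s3'

  client = Aws::S3::Client.new(region: 'us-east-1')

  client.select_object_content(
    bucket: 'examplebucket',   # placeholder bucket
    key: 'data.csv',           # placeholder CSV object
    expression_type: 'SQL',
    expression: "SELECT s.name FROM S3Object s WHERE s.age > '30'",
    input_serialization: { csv: { file_header_info: 'USE' } },
    output_serialization: { csv: {} }
  ) do |stream|
    # Results arrive as a stream of events rather than a single body.
    stream.on_records_event do |event|
      print event.payload.read
    end
    stream.on_stats_event do |event|
      warn "scanned #{event.details.bytes_scanned} bytes"
    end
  end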
@@ -12725,7 +12874,7 @@ module Aws::S3
12725
12874
  #
12726
12875
  # You must have `s3:GetObject` permission for this operation. Amazon S3
12727
12876
  # Select does not support anonymous access. For more information about
12728
- # permissions, see [Specifying Permissions in a Policy][3] in the
12877
+ # permissions, see [Specifying Permissions in a Policy][4] in the
12729
12878
  # *Amazon S3 User Guide*.
12730
12879
  #
12731
12880
  #
@@ -12752,70 +12901,71 @@ module Aws::S3
  #
  # For objects that are encrypted with customer-provided encryption
  # keys (SSE-C), you must use HTTPS, and you must use the headers that
- # are documented in the [GetObject][4]. For more information about
+ # are documented in the [GetObject][5]. For more information about
  # SSE-C, see [Server-Side Encryption (Using Customer-Provided
- # Encryption Keys)][5] in the *Amazon S3 User Guide*.
+ # Encryption Keys)][6] in the *Amazon S3 User Guide*.
  #
  # For objects that are encrypted with Amazon S3 managed encryption
  # keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS),
  # server-side encryption is handled transparently, so you don't need
  # to specify anything. For more information about server-side
  # encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using
- # Server-Side Encryption][6] in the *Amazon S3 User Guide*.
+ # Server-Side Encryption][7] in the *Amazon S3 User Guide*.
  #
  # **Working with the Response Body**
  #
  # Given the response size is unknown, Amazon S3 Select streams the
  # response as a series of messages and includes a `Transfer-Encoding`
  # header with `chunked` as its value in the response. For more
- # information, see [Appendix: SelectObjectContent Response][7].
+ # information, see [Appendix: SelectObjectContent Response][8].
  #
  #
  #
  # **GetObject Support**
  #
  # The `SelectObjectContent` action does not support the following
- # `GetObject` functionality. For more information, see [GetObject][4].
+ # `GetObject` functionality. For more information, see [GetObject][5].
  #
  # * `Range`\: Although you can specify a scan range for an Amazon S3
- # Select request (see [SelectObjectContentRequest - ScanRange][8] in
+ # Select request (see [SelectObjectContentRequest - ScanRange][9] in
  # the request parameters), you cannot specify the range of bytes of an
  # object to return.
  #
  # * GLACIER, DEEP\_ARCHIVE and REDUCED\_REDUNDANCY storage classes: You
  # cannot specify the GLACIER, DEEP\_ARCHIVE, or `REDUCED_REDUNDANCY`
  # storage classes. For more information about storage classes, see
- # [Storage Classes][9] in the *Amazon S3 User Guide*.
+ # [Storage Classes][10] in the *Amazon S3 User Guide*.
  #
  #
  #
  # **Special Errors**
  #
  # For a list of special errors for this operation, see [List of SELECT
- # Object Content Error Codes][10]
+ # Object Content Error Codes][11]
  #
  # **Related Resources**
  #
- # * [GetObject][4]
+ # * [GetObject][5]
  #
- # * [GetBucketLifecycleConfiguration][11]
+ # * [GetBucketLifecycleConfiguration][12]
  #
- # * [PutBucketLifecycleConfiguration][12]
+ # * [PutBucketLifecycleConfiguration][13]
  #
  #
  #
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html
- # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
- # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
- # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
- # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
- # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html
- # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange
- # [9]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro
- # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList
- # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
- # [12]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html
+ # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html
+ # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+ # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+ # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
+ # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html
+ # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange
+ # [10]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro
+ # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList
+ # [12]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
+ # [13]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
  #
  # @option params [required, String] :bucket
  # The S3 bucket.
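
Because select_object_content streams its result as an event stream, a typical call registers handlers on the stream rather than reading a single body. A hedged sketch of that pattern with this client; the bucket, key, and SQL expression are placeholders:

require 'aws-sdk-s3'

client = Aws::S3::Client.new(region: 'us-east-1')

client.select_object_content(
  bucket: 'examplebucket',                 # placeholder bucket
  key: 'data/report.csv',                  # placeholder key
  expression_type: 'SQL',
  expression: 'SELECT * FROM S3Object s LIMIT 10',
  input_serialization: { csv: { file_header_info: 'USE' }, compression_type: 'NONE' },
  output_serialization: { csv: {} }
) do |stream|
  stream.on_records_event do |event|
    # Each records event carries a chunk of the query result.
    print event.payload.read
  end
  stream.on_stats_event do |event|
    warn "bytes scanned: #{event.details.bytes_scanned}, returned: #{event.details.bytes_returned}"
  end
end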
@@ -13668,45 +13818,45 @@ module Aws::S3
  # * {Types::UploadPartCopyOutput#request_charged #request_charged} => String
  #
  #
- # @example Example: To upload a part by copying data from an existing object as data source
+ # @example Example: To upload a part by copying byte range from an existing object as data source
  #
- # # The following example uploads a part of a multipart upload by copying data from an existing object as data source.
+ # # The following example uploads a part of a multipart upload by copying a specified byte range from an existing object as
+ # # data source.
  #
  # resp = client.upload_part_copy({
  # bucket: "examplebucket",
  # copy_source: "/bucketname/sourceobjectkey",
+ # copy_source_range: "bytes=1-100000",
  # key: "examplelargeobject",
- # part_number: 1,
+ # part_number: 2,
  # upload_id: "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--",
  # })
  #
  # resp.to_h outputs the following:
  # {
  # copy_part_result: {
- # etag: "\"b0c6f0e7e054ab8fa2536a2677f8734d\"",
- # last_modified: Time.parse("2016-12-29T21:24:43.000Z"),
+ # etag: "\"65d16d19e65a7508a51f043180edcc36\"",
+ # last_modified: Time.parse("2016-12-29T21:44:28.000Z"),
  # },
  # }
  #
- # @example Example: To upload a part by copying byte range from an existing object as data source
+ # @example Example: To upload a part by copying data from an existing object as data source
  #
- # # The following example uploads a part of a multipart upload by copying a specified byte range from an existing object as
- # # data source.
+ # # The following example uploads a part of a multipart upload by copying data from an existing object as data source.
  #
  # resp = client.upload_part_copy({
  # bucket: "examplebucket",
  # copy_source: "/bucketname/sourceobjectkey",
- # copy_source_range: "bytes=1-100000",
  # key: "examplelargeobject",
- # part_number: 2,
+ # part_number: 1,
  # upload_id: "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--",
  # })
  #
  # resp.to_h outputs the following:
  # {
  # copy_part_result: {
- # etag: "\"65d16d19e65a7508a51f043180edcc36\"",
- # last_modified: Time.parse("2016-12-29T21:44:28.000Z"),
+ # etag: "\"b0c6f0e7e054ab8fa2536a2677f8734d\"",
+ # last_modified: Time.parse("2016-12-29T21:24:43.000Z"),
  # },
  # }
  #
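
The two swapped examples above differ only in whether copy_source_range is supplied. For context, a hedged sketch of the multipart flow those calls sit inside; all buckets, keys, and the byte range are placeholders:

require 'aws-sdk-s3'

client = Aws::S3::Client.new(region: 'us-east-1')
bucket = 'examplebucket'         # placeholder destination bucket
key    = 'examplelargeobject'    # placeholder destination key

# 1. Start the multipart upload and keep the upload ID.
upload_id = client.create_multipart_upload(bucket: bucket, key: key).upload_id

# 2. Copy one part from an existing object; omit copy_source_range to copy it whole.
part = client.upload_part_copy(
  bucket: bucket,
  key: key,
  upload_id: upload_id,
  part_number: 1,
  copy_source: '/bucketname/sourceobjectkey',
  copy_source_range: 'bytes=0-5242879'    # first 5 MiB of the source
)

# 3. Complete the upload with the ETag returned for each part.
client.complete_multipart_upload(
  bucket: bucket,
  key: key,
  upload_id: upload_id,
  multipart_upload: {
    parts: [{ etag: part.copy_part_result.etag, part_number: 1 }]
  }
)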
@@ -14038,7 +14188,7 @@ module Aws::S3
  # sse_customer_algorithm: "SSECustomerAlgorithm",
  # ssekms_key_id: "SSEKMSKeyId",
  # sse_customer_key_md5: "SSECustomerKeyMD5",
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR
  # tag_count: 1,
  # version_id: "ObjectVersionId",
  # bucket_key_enabled: false,
@@ -14066,7 +14216,7 @@ module Aws::S3
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-s3'
- context[:gem_version] = '1.106.0'
+ context[:gem_version] = '1.111.0'
  Seahorse::Client::Request.new(handlers, context)
  end
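
To pick up the changes in this release, an application would typically pin the gem at or above the new version in its Gemfile; the pessimistic constraint below is one reasonable choice, not the only one:

# Gemfile
gem 'aws-sdk-s3', '~> 1.111'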