aws-sdk-s3 1.167.0 → 1.194.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +158 -0
  3. data/VERSION +1 -1
  4. data/lib/aws-sdk-s3/bucket.rb +145 -39
  5. data/lib/aws-sdk-s3/bucket_acl.rb +6 -5
  6. data/lib/aws-sdk-s3/bucket_cors.rb +6 -5
  7. data/lib/aws-sdk-s3/bucket_lifecycle.rb +7 -2
  8. data/lib/aws-sdk-s3/bucket_lifecycle_configuration.rb +22 -2
  9. data/lib/aws-sdk-s3/bucket_logging.rb +2 -2
  10. data/lib/aws-sdk-s3/bucket_policy.rb +6 -5
  11. data/lib/aws-sdk-s3/bucket_request_payment.rb +3 -3
  12. data/lib/aws-sdk-s3/bucket_tagging.rb +3 -3
  13. data/lib/aws-sdk-s3/bucket_versioning.rb +9 -9
  14. data/lib/aws-sdk-s3/bucket_website.rb +3 -3
  15. data/lib/aws-sdk-s3/client.rb +3670 -1773
  16. data/lib/aws-sdk-s3/client_api.rb +558 -160
  17. data/lib/aws-sdk-s3/endpoint_parameters.rb +13 -18
  18. data/lib/aws-sdk-s3/endpoint_provider.rb +400 -276
  19. data/lib/aws-sdk-s3/endpoints.rb +529 -1403
  20. data/lib/aws-sdk-s3/errors.rb +55 -0
  21. data/lib/aws-sdk-s3/file_downloader.rb +14 -31
  22. data/lib/aws-sdk-s3/legacy_signer.rb +2 -1
  23. data/lib/aws-sdk-s3/multipart_file_uploader.rb +31 -13
  24. data/lib/aws-sdk-s3/multipart_upload.rb +83 -6
  25. data/lib/aws-sdk-s3/multipart_upload_part.rb +50 -34
  26. data/lib/aws-sdk-s3/object.rb +326 -129
  27. data/lib/aws-sdk-s3/object_acl.rb +11 -5
  28. data/lib/aws-sdk-s3/object_multipart_copier.rb +2 -1
  29. data/lib/aws-sdk-s3/object_summary.rb +240 -96
  30. data/lib/aws-sdk-s3/object_version.rb +60 -13
  31. data/lib/aws-sdk-s3/plugins/checksum_algorithm.rb +31 -0
  32. data/lib/aws-sdk-s3/plugins/endpoints.rb +1 -204
  33. data/lib/aws-sdk-s3/plugins/express_session_auth.rb +11 -20
  34. data/lib/aws-sdk-s3/plugins/http_200_errors.rb +3 -3
  35. data/lib/aws-sdk-s3/plugins/md5s.rb +10 -71
  36. data/lib/aws-sdk-s3/plugins/streaming_retry.rb +5 -7
  37. data/lib/aws-sdk-s3/plugins/url_encoded_keys.rb +2 -1
  38. data/lib/aws-sdk-s3/presigner.rb +5 -5
  39. data/lib/aws-sdk-s3/resource.rb +41 -10
  40. data/lib/aws-sdk-s3/types.rb +3423 -1093
  41. data/lib/aws-sdk-s3.rb +1 -1
  42. data/sig/bucket.rbs +27 -9
  43. data/sig/bucket_acl.rbs +1 -1
  44. data/sig/bucket_cors.rbs +1 -1
  45. data/sig/bucket_lifecycle.rbs +1 -1
  46. data/sig/bucket_lifecycle_configuration.rbs +1 -1
  47. data/sig/bucket_logging.rbs +1 -1
  48. data/sig/bucket_policy.rbs +1 -1
  49. data/sig/bucket_request_payment.rbs +1 -1
  50. data/sig/bucket_tagging.rbs +1 -1
  51. data/sig/bucket_versioning.rbs +3 -3
  52. data/sig/bucket_website.rbs +1 -1
  53. data/sig/client.rbs +249 -68
  54. data/sig/errors.rbs +10 -0
  55. data/sig/multipart_upload.rbs +12 -3
  56. data/sig/multipart_upload_part.rbs +5 -1
  57. data/sig/object.rbs +35 -16
  58. data/sig/object_acl.rbs +1 -1
  59. data/sig/object_summary.rbs +26 -16
  60. data/sig/object_version.rbs +9 -3
  61. data/sig/resource.rbs +15 -4
  62. data/sig/types.rbs +339 -65
  63. metadata +7 -10
  64. data/lib/aws-sdk-s3/plugins/skip_whole_multipart_get_checksums.rb +0 -31
@@ -84,6 +84,18 @@ module Aws::S3
  data[:checksum_algorithm]
  end
 
+ # The checksum type that is used to calculate the object’s checksum
+ # value. For more information, see [Checking object integrity][1] in the
+ # *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ # @return [String]
+ def checksum_type
+ data[:checksum_type]
+ end
+
  # Size in bytes of the object
  # @return [Integer]
  def size
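The hunk above adds a `checksum_type` reader next to the existing `checksum_algorithm` one. A minimal sketch (not part of the gem) of reading both fields through the SDK; the bucket name is a placeholder, and the sketch enumerates a listing, which is where object summary data comes from:

    require "aws-sdk-s3"

    s3 = Aws::S3::Resource.new
    s3.bucket("amzn-s3-demo-bucket").objects.limit(5).each do |summary|
      # checksum_algorithm is an array (e.g. ["CRC64NVME"]); checksum_type is
      # "FULL_OBJECT" or "COMPOSITE" when the object was uploaded with a checksum.
      puts "#{summary.key}: #{summary.checksum_algorithm.inspect} #{summary.checksum_type}"
    end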
@@ -92,8 +104,10 @@ module Aws::S3
 
  # The class of storage used to store the object.
  #
- # <note markdown="1"> **Directory buckets** - Only the S3 Express One Zone storage class is
- # supported by directory buckets to store objects.
+ # <note markdown="1"> **Directory buckets** - Directory buckets only support
+ # `EXPRESS_ONEZONE` (the S3 Express One Zone storage class) in
+ # Availability Zones and `ONEZONE_IA` (the S3 One Zone-Infrequent Access
+ # storage class) in Dedicated Local Zones.
  #
  # </note>
  # @return [String]
@@ -118,9 +132,10 @@ module Aws::S3
  # archived objects, see [ Working with archived objects][1] in the
  # *Amazon S3 User Guide*.
  #
- # <note markdown="1"> This functionality is not supported for directory buckets. Only the S3
- # Express One Zone storage class is supported by directory buckets to
- # store objects.
+ # <note markdown="1"> This functionality is not supported for directory buckets. Directory
+ # buckets only support `EXPRESS_ONEZONE` (the S3 Express One Zone
+ # storage class) in Availability Zones and `ONEZONE_IA` (the S3 One
+ # Zone-Infrequent Access storage class) in Dedicated Local Zones.
  #
  # </note>
  #
@@ -324,7 +339,7 @@ module Aws::S3
  # object_summary.copy_from({
  # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control
  # cache_control: "CacheControl",
- # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256, CRC64NVME
  # content_disposition: "ContentDisposition",
  # content_encoding: "ContentEncoding",
  # content_language: "ContentLanguage",
@@ -344,8 +359,8 @@ module Aws::S3
  # },
  # metadata_directive: "COPY", # accepts COPY, REPLACE
  # tagging_directive: "COPY", # accepts COPY, REPLACE
- # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+ # server_side_encryption: "AES256", # accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
  # website_redirect_location: "WebsiteRedirectLocation",
  # sse_customer_algorithm: "SSECustomerAlgorithm",
  # sse_customer_key: "SSECustomerKey",
@@ -723,17 +738,25 @@ module Aws::S3
  # * To encrypt new object copies to a directory bucket with SSE-KMS, we
  # recommend you specify SSE-KMS as the directory bucket's default
  # encryption configuration with a KMS key (specifically, a [customer
- # managed key][4]). [Amazon Web Services managed key][5] (`aws/s3`)
- # isn't supported. Your SSE-KMS configuration can only support 1
- # [customer managed key][4] per directory bucket for the lifetime of
- # the bucket. After you specify a customer managed key for SSE-KMS,
- # you can't override the customer managed key for the bucket's
- # SSE-KMS configuration. Then, when you perform a `CopyObject`
- # operation and want to specify server-side encryption settings for
- # new object copies with SSE-KMS in the encryption-related request
- # headers, you must ensure the encryption key is the same customer
- # managed key that you specified for the directory bucket's default
- # encryption configuration.
+ # managed key][4]). The [Amazon Web Services managed key][5]
+ # (`aws/s3`) isn't supported. Your SSE-KMS configuration can only
+ # support 1 [customer managed key][4] per directory bucket for the
+ # lifetime of the bucket. After you specify a customer managed key for
+ # SSE-KMS, you can't override the customer managed key for the
+ # bucket's SSE-KMS configuration. Then, when you perform a
+ # `CopyObject` operation and want to specify server-side encryption
+ # settings for new object copies with SSE-KMS in the
+ # encryption-related request headers, you must ensure the encryption
+ # key is the same customer managed key that you specified for the
+ # directory bucket's default encryption configuration.
+ #
+ # * <b>S3 access points for Amazon FSx </b> - When accessing data stored
+ # in Amazon FSx file systems using S3 access points, the only valid
+ # server side encryption option is `aws:fsx`. All Amazon FSx file
+ # systems have encryption configured by default and are encrypted at
+ # rest. Data is automatically encrypted before being written to the
+ # file system, and automatically decrypted as it is read. These
+ # processes are handled transparently by Amazon FSx.
  #
  #
  #
@@ -749,10 +772,12 @@ module Aws::S3
  # availability. Depending on performance needs, you can specify a
  # different Storage Class.
  #
- # <note markdown="1"> * <b>Directory buckets </b> - For directory buckets, only the S3
- # Express One Zone storage class is supported to store newly created
- # objects. Unsupported storage class values won't write a destination
- # object and will respond with the HTTP status code `400 Bad Request`.
+ # <note markdown="1"> * <b>Directory buckets </b> - Directory buckets only support
+ # `EXPRESS_ONEZONE` (the S3 Express One Zone storage class) in
+ # Availability Zones and `ONEZONE_IA` (the S3 One Zone-Infrequent
+ # Access storage class) in Dedicated Local Zones. Unsupported storage
+ # class values won't write a destination object and will respond with
+ # the HTTP status code `400 Bad Request`.
  #
  # * <b>Amazon S3 on Outposts </b> - S3 on Outposts only uses the
  # `OUTPOSTS` Storage Class.
@@ -840,15 +865,17 @@ module Aws::S3
  # Signature Version in Request Authentication][1] in the *Amazon S3 User
  # Guide*.
  #
- # **Directory buckets** - If you specify `x-amz-server-side-encryption`
- # with `aws:kms`, you must specify the `
- # x-amz-server-side-encryption-aws-kms-key-id` header with the ID (Key
- # ID or Key ARN) of the KMS symmetric encryption customer managed key to
- # use. Otherwise, you get an HTTP `400 Bad Request` error. Only use the
- # key ID or key ARN. The key alias format of the KMS key isn't
- # supported. Your SSE-KMS configuration can only support 1 [customer
- # managed key][2] per directory bucket for the lifetime of the bucket.
- # [Amazon Web Services managed key][3] (`aws/s3`) isn't supported.
+ # **Directory buckets** - To encrypt data using SSE-KMS, it's
+ # recommended to specify the `x-amz-server-side-encryption` header to
+ # `aws:kms`. Then, the `x-amz-server-side-encryption-aws-kms-key-id`
+ # header implicitly uses the bucket's default KMS customer managed key
+ # ID. If you want to explicitly set the `
+ # x-amz-server-side-encryption-aws-kms-key-id` header, it must match the
+ # bucket's default customer managed key (using key ID or ARN, not
+ # alias). Your SSE-KMS configuration can only support 1 [customer
+ # managed key][2] per directory bucket's lifetime. The [Amazon Web
+ # Services managed key][3] (`aws/s3`) isn't supported. Incorrect key
+ # specification results in an HTTP `400 Bad Request` error.
  #
  #
  #
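The rewritten guidance above recommends sending `aws:kms` and letting a directory bucket's default customer managed key apply. A hedged sketch of a copy that follows that recommendation; the bucket, key, and source names are placeholders, and the destination bucket is assumed to already have SSE-KMS default encryption configured:

    require "aws-sdk-s3"

    target = Aws::S3::ObjectSummary.new("amzn-s3-demo-bucket--usw2-az1--x-s3", "copies/report.pdf")
    target.copy_from(
      copy_source: "amzn-s3-demo-bucket/reports/report.pdf",
      server_side_encryption: "aws:kms" # key ID omitted; the bucket's default key is used
    )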
@@ -1055,6 +1082,9 @@ module Aws::S3
  # request_payer: "requester", # accepts requester
  # bypass_governance_retention: false,
  # expected_bucket_owner: "AccountId",
+ # if_match: "IfMatch",
+ # if_match_last_modified_time: Time.now,
+ # if_match_size: 1,
  # })
  # @param [Hash] options ({})
  # @option options [String] :mfa
@@ -1101,6 +1131,46 @@ module Aws::S3
  # The account ID of the expected bucket owner. If the account ID that
  # you provide does not match the actual owner of the bucket, the request
  # fails with the HTTP status code `403 Forbidden` (access denied).
+ # @option options [String] :if_match
+ # The `If-Match` header field makes the request method conditional on
+ # ETags. If the ETag value does not match, the operation returns a `412
+ # Precondition Failed` error. If the ETag matches or if the object
+ # doesn't exist, the operation will return a `204 Success (No Content)
+ # response`.
+ #
+ # For more information about conditional requests, see [RFC 7232][1].
+ #
+ # <note markdown="1"> This functionality is only supported for directory buckets.
+ #
+ # </note>
+ #
+ #
+ #
+ # [1]: https://tools.ietf.org/html/rfc7232
+ # @option options [Time,DateTime,Date,Integer,String] :if_match_last_modified_time
+ # If present, the object is deleted only if its modification times
+ # matches the provided `Timestamp`. If the `Timestamp` values do not
+ # match, the operation returns a `412 Precondition Failed` error. If the
+ # `Timestamp` matches or if the object doesn’t exist, the operation
+ # returns a `204 Success (No Content)` response.
+ #
+ # <note markdown="1"> This functionality is only supported for directory buckets.
+ #
+ # </note>
+ # @option options [Integer] :if_match_size
+ # If present, the object is deleted only if its size matches the
+ # provided size in bytes. If the `Size` value does not match, the
+ # operation returns a `412 Precondition Failed` error. If the `Size`
+ # matches or if the object doesn’t exist, the operation returns a `204
+ # Success (No Content)` response.
+ #
+ # <note markdown="1"> This functionality is only supported for directory buckets.
+ #
+ # </note>
+ #
+ # You can use the `If-Match`, `x-amz-if-match-last-modified-time` and
+ # `x-amz-if-match-size` conditional headers in conjunction with
+ # each-other or individually.
  # @return [Types::DeleteObjectOutput]
  def delete(options = {})
  options = options.merge(
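The options documented above make `#delete` conditional for directory buckets. A minimal sketch, with placeholder bucket, key, ETag, and size values, of deleting only when the object still matches what was previously read; `PreconditionFailed` is the error code documented for the 412 case:

    require "aws-sdk-s3"

    summary = Aws::S3::ObjectSummary.new("amzn-s3-demo-bucket--usw2-az1--x-s3", "logs/app.log")
    begin
      summary.delete(
        if_match: '"3858f62230ac3c915f300c664312c63f"', # ETag read earlier
        if_match_size: 1024                             # expected size in bytes
      )
    rescue Aws::S3::Errors::PreconditionFailed
      # 412: the object changed since its ETag/size were read; re-read and decide again.
    end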
@@ -1357,15 +1427,6 @@ module Aws::S3
  # fails with the HTTP status code `403 Forbidden` (access denied).
  # @option options [String] :checksum_mode
  # To retrieve the checksum, this mode must be enabled.
- #
- # **General purpose buckets** - In addition, if you enable checksum mode
- # and the object is uploaded with a [checksum][1] and encrypted with an
- # Key Management Service (KMS) key, you must have permission to use the
- # `kms:Decrypt` action to retrieve the checksum.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html
  # @return [Types::GetObjectOutput]
  def get(options = {}, &block)
  options = options.merge(
@@ -1395,8 +1456,8 @@ module Aws::S3
  # metadata: {
  # "MetadataKey" => "MetadataValue",
  # },
- # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+ # server_side_encryption: "AES256", # accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
  # website_redirect_location: "WebsiteRedirectLocation",
  # sse_customer_algorithm: "SSECustomerAlgorithm",
  # sse_customer_key: "SSECustomerKey",
@@ -1410,7 +1471,8 @@ module Aws::S3
  # object_lock_retain_until_date: Time.now,
  # object_lock_legal_hold_status: "ON", # accepts ON, OFF
  # expected_bucket_owner: "AccountId",
- # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256, CRC64NVME
+ # checksum_type: "COMPOSITE", # accepts COMPOSITE, FULL_OBJECT
  # })
  # @param [Hash] options ({})
  # @option options [String] :acl
@@ -1712,7 +1774,7 @@ module Aws::S3
  # A map of metadata to store with the object in S3.
  # @option options [String] :server_side_encryption
  # The server-side encryption algorithm used when you store this object
- # in Amazon S3 (for example, `AES256`, `aws:kms`).
+ # in Amazon S3 or Amazon FSx.
  #
  # * <b>Directory buckets </b> - For directory buckets, there are only
  # two supported options for server-side encryption: server-side
@@ -1754,6 +1816,14 @@ module Aws::S3
  #
  # </note>
  #
+ # * <b>S3 access points for Amazon FSx </b> - When accessing data stored
+ # in Amazon FSx file systems using S3 access points, the only valid
+ # server side encryption option is `aws:fsx`. All Amazon FSx file
+ # systems have encryption configured by default and are encrypted at
+ # rest. Data is automatically encrypted before being written to the
+ # file system, and automatically decrypted as it is read. These
+ # processes are handled transparently by Amazon FSx.
+ #
  #
  #
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
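Per the new bullet above, `aws:fsx` is the only valid value when writing through an S3 access point attached to an Amazon FSx file system. A hedged sketch; the access point alias used in place of a bucket name and the file paths are placeholders, not values from this diff:

    require "aws-sdk-s3"

    obj = Aws::S3::Object.new("my-fsx-access-point-alias", "exports/data.csv")
    obj.put(body: File.binread("data.csv"), server_side_encryption: "aws:fsx")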
@@ -1767,8 +1837,9 @@ module Aws::S3
  # a different Storage Class. For more information, see [Storage
  # Classes][1] in the *Amazon S3 User Guide*.
  #
- # <note markdown="1"> * For directory buckets, only the S3 Express One Zone storage class is
- # supported to store newly created objects.
+ # <note markdown="1"> * Directory buckets only support `EXPRESS_ONEZONE` (the S3 Express One
+ # Zone storage class) in Availability Zones and `ONEZONE_IA` (the S3
+ # One Zone-Infrequent Access storage class) in Dedicated Local Zones.
  #
  # * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
  #
@@ -1825,15 +1896,17 @@ module Aws::S3
  # `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the
  # Amazon Web Services managed key (`aws/s3`) to protect the data.
  #
- # **Directory buckets** - If you specify `x-amz-server-side-encryption`
- # with `aws:kms`, you must specify the `
- # x-amz-server-side-encryption-aws-kms-key-id` header with the ID (Key
- # ID or Key ARN) of the KMS symmetric encryption customer managed key to
- # use. Otherwise, you get an HTTP `400 Bad Request` error. Only use the
- # key ID or key ARN. The key alias format of the KMS key isn't
- # supported. Your SSE-KMS configuration can only support 1 [customer
- # managed key][1] per directory bucket for the lifetime of the bucket.
- # [Amazon Web Services managed key][2] (`aws/s3`) isn't supported.
+ # **Directory buckets** - To encrypt data using SSE-KMS, it's
+ # recommended to specify the `x-amz-server-side-encryption` header to
+ # `aws:kms`. Then, the `x-amz-server-side-encryption-aws-kms-key-id`
+ # header implicitly uses the bucket's default KMS customer managed key
+ # ID. If you want to explicitly set the `
+ # x-amz-server-side-encryption-aws-kms-key-id` header, it must match the
+ # bucket's default customer managed key (using key ID or ARN, not
+ # alias). Your SSE-KMS configuration can only support 1 [customer
+ # managed key][1] per directory bucket's lifetime. The [Amazon Web
+ # Services managed key][2] (`aws/s3`) isn't supported. Incorrect key
+ # specification results in an HTTP `400 Bad Request` error.
  #
  #
  #
@@ -1841,7 +1914,7 @@ module Aws::S3
  # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
  # @option options [String] :ssekms_encryption_context
  # Specifies the Amazon Web Services KMS Encryption Context to use for
- # object encryption. The value of this header is a Base64-encoded string
+ # object encryption. The value of this header is a Base64 encoded string
  # of a UTF-8 encoded JSON, which contains the encryption context as
  # key-value pairs.
  #
@@ -1930,6 +2003,14 @@ module Aws::S3
  #
  #
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ # @option options [String] :checksum_type
+ # Indicates the checksum type that you want Amazon S3 to use to
+ # calculate the object’s checksum value. For more information, see
+ # [Checking object integrity in the Amazon S3 User Guide][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
  # @return [MultipartUpload]
  def initiate_multipart_upload(options = {})
  options = options.merge(
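With the `:checksum_type` option documented above, a multipart upload can request a full-object checksum. A hedged sketch, assuming placeholder names and that the chosen algorithm/type pair (`CRC64NVME` is described elsewhere in this diff as always full-object) is accepted by the service:

    require "aws-sdk-s3"

    obj = Aws::S3::Object.new("amzn-s3-demo-bucket", "backups/archive.bin")
    upload = obj.initiate_multipart_upload(
      checksum_algorithm: "CRC64NVME",
      checksum_type: "FULL_OBJECT"
    )
    puts upload.id # use the returned MultipartUpload to upload parts and complete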
@@ -1959,22 +2040,25 @@ module Aws::S3
  # content_length: 1,
  # content_md5: "ContentMD5",
  # content_type: "ContentType",
- # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256, CRC64NVME
  # checksum_crc32: "ChecksumCRC32",
  # checksum_crc32c: "ChecksumCRC32C",
+ # checksum_crc64nvme: "ChecksumCRC64NVME",
  # checksum_sha1: "ChecksumSHA1",
  # checksum_sha256: "ChecksumSHA256",
  # expires: Time.now,
+ # if_match: "IfMatch",
  # if_none_match: "IfNoneMatch",
  # grant_full_control: "GrantFullControl",
  # grant_read: "GrantRead",
  # grant_read_acp: "GrantReadACP",
  # grant_write_acp: "GrantWriteACP",
+ # write_offset_bytes: 1,
  # metadata: {
  # "MetadataKey" => "MetadataValue",
  # },
- # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+ # server_side_encryption: "AES256", # accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
  # website_redirect_location: "WebsiteRedirectLocation",
  # sse_customer_algorithm: "SSECustomerAlgorithm",
  # sse_customer_key: "SSECustomerKey",
@@ -2065,7 +2149,7 @@ module Aws::S3
  #
  # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length
  # @option options [String] :content_md5
- # The base64-encoded 128-bit MD5 digest of the message (without the
+ # The Base64 encoded 128-bit `MD5` digest of the message (without the
  # headers) according to RFC 1864. This header can be used as a message
  # integrity check to verify that the data is the same data that was
  # originally sent. Although it is optional, we recommend using the
@@ -2073,10 +2157,11 @@ module Aws::S3
  # information about REST request authentication, see [REST
  # Authentication][1].
  #
- # <note markdown="1"> The `Content-MD5` header is required for any request to upload an
- # object with a retention period configured using Amazon S3 Object Lock.
- # For more information about Amazon S3 Object Lock, see [Amazon S3
- # Object Lock Overview][2] in the *Amazon S3 User Guide*.
+ # <note markdown="1"> The `Content-MD5` or `x-amz-sdk-checksum-algorithm` header is required
+ # for any request to upload an object with a retention period configured
+ # using Amazon S3 Object Lock. For more information, see [Uploading
+ # objects to an Object Lock enabled bucket ][2] in the *Amazon S3 User
+ # Guide*.
  #
  # </note>
  #
@@ -2087,7 +2172,7 @@ module Aws::S3
  #
  #
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
- # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object
  # @option options [String] :content_type
  # A standard MIME type describing the format of the contents. For more
  # information, see
@@ -2111,6 +2196,8 @@ module Aws::S3
  #
  # * `CRC32C`
  #
+ # * `CRC64NVME`
+ #
  # * `SHA1`
  #
  # * `SHA256`
@@ -2120,22 +2207,28 @@ module Aws::S3
  #
  # If the individual checksum value you provide through
  # `x-amz-checksum-algorithm ` doesn't match the checksum algorithm you
- # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any
- # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm
- # that matches the provided value in `x-amz-checksum-algorithm `.
+ # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 fails the
+ # request with a `BadDigest` error.
  #
- # <note markdown="1"> For directory buckets, when you use Amazon Web Services SDKs, `CRC32`
- # is the default checksum algorithm that's used for performance.
+ # <note markdown="1"> The `Content-MD5` or `x-amz-sdk-checksum-algorithm` header is required
+ # for any request to upload an object with a retention period configured
+ # using Amazon S3 Object Lock. For more information, see [Uploading
+ # objects to an Object Lock enabled bucket ][2] in the *Amazon S3 User
+ # Guide*.
  #
  # </note>
  #
+ # For directory buckets, when you use Amazon Web Services SDKs, `CRC32`
+ # is the default checksum algorithm that's used for performance.
+ #
  #
  #
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object
  # @option options [String] :checksum_crc32
  # This header can be used as a data integrity check to verify that the
  # data received is the same data that was originally sent. This header
- # specifies the base64-encoded, 32-bit CRC-32 checksum of the object.
+ # specifies the Base64 encoded, 32-bit `CRC32` checksum of the object.
  # For more information, see [Checking object integrity][1] in the
  # *Amazon S3 User Guide*.
  #
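Under the tightened behavior above, a client-supplied checksum that disagrees with what S3 computes now fails the request with a `BadDigest` error instead of being silently reconciled. A minimal sketch of supplying a precomputed, Base64 encoded CRC32 value with `#put`; the file and bucket names are placeholders:

    require "aws-sdk-s3"
    require "base64"
    require "zlib"

    body = File.binread("report.csv")
    crc32 = Base64.strict_encode64([Zlib.crc32(body)].pack("N")) # big-endian 32-bit CRC

    obj = Aws::S3::ObjectSummary.new("amzn-s3-demo-bucket", "report.csv")
    obj.put(body: body, checksum_crc32: crc32)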
@@ -2145,17 +2238,28 @@ module Aws::S3
  # @option options [String] :checksum_crc32c
  # This header can be used as a data integrity check to verify that the
  # data received is the same data that was originally sent. This header
- # specifies the base64-encoded, 32-bit CRC-32C checksum of the object.
+ # specifies the Base64 encoded, 32-bit `CRC32C` checksum of the object.
  # For more information, see [Checking object integrity][1] in the
  # *Amazon S3 User Guide*.
  #
  #
  #
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ # @option options [String] :checksum_crc64nvme
+ # This header can be used as a data integrity check to verify that the
+ # data received is the same data that was originally sent. This header
+ # specifies the Base64 encoded, 64-bit `CRC64NVME` checksum of the
+ # object. The `CRC64NVME` checksum is always a full object checksum. For
+ # more information, see [Checking object integrity in the Amazon S3 User
+ # Guide][1].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
  # @option options [String] :checksum_sha1
  # This header can be used as a data integrity check to verify that the
  # data received is the same data that was originally sent. This header
- # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For
+ # specifies the Base64 encoded, 160-bit `SHA1` digest of the object. For
  # more information, see [Checking object integrity][1] in the *Amazon S3
  # User Guide*.
  #
@@ -2165,7 +2269,7 @@ module Aws::S3
  # @option options [String] :checksum_sha256
  # This header can be used as a data integrity check to verify that the
  # data received is the same data that was originally sent. This header
- # specifies the base64-encoded, 256-bit SHA-256 digest of the object.
+ # specifies the Base64 encoded, 256-bit `SHA256` digest of the object.
  # For more information, see [Checking object integrity][1] in the
  # *Amazon S3 User Guide*.
  #
@@ -2180,6 +2284,25 @@ module Aws::S3
  #
  #
  # [1]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3
+ # @option options [String] :if_match
+ # Uploads the object only if the ETag (entity tag) value provided during
+ # the WRITE operation matches the ETag of the object in S3. If the ETag
+ # values do not match, the operation returns a `412 Precondition Failed`
+ # error.
+ #
+ # If a conflicting operation occurs during the upload S3 returns a `409
+ # ConditionalRequestConflict` response. On a 409 failure you should
+ # fetch the object's ETag and retry the upload.
+ #
+ # Expects the ETag value as a string.
+ #
+ # For more information about conditional requests, see [RFC 7232][1], or
+ # [Conditional requests][2] in the *Amazon S3 User Guide*.
+ #
+ #
+ #
+ # [1]: https://tools.ietf.org/html/rfc7232
+ # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html
  # @option options [String] :if_none_match
  # Uploads the object only if the object key name does not already exist
  # in the bucket specified. Otherwise, Amazon S3 returns a `412
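The `:if_match` option documented above turns `#put` into a compare-and-swap on the ETag. A hedged sketch of an optimistic overwrite with placeholder names; the retry handling hinted at in the 409 note is reduced to a comment:

    require "aws-sdk-s3"

    obj = Aws::S3::Object.new("amzn-s3-demo-bucket", "config.json")
    current_etag = obj.etag # loads the object metadata via HeadObject
    begin
      obj.put(body: '{"version":2}', if_match: current_etag)
    rescue Aws::S3::Errors::PreconditionFailed
      # 412: the object was replaced after the ETag was read; re-read it and retry.
      # A 409 ConditionalRequestConflict during a race calls for the same handling.
    end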
@@ -2231,12 +2354,21 @@ module Aws::S3
  # * This functionality is not supported for Amazon S3 on Outposts.
  #
  # </note>
+ # @option options [Integer] :write_offset_bytes
+ # Specifies the offset for appending data to existing objects in bytes.
+ # The offset must be equal to the size of the existing object being
+ # appended to. If no object exists, setting this header to 0 will create
+ # a new object.
+ #
+ # <note markdown="1"> This functionality is only supported for objects in the Amazon S3
+ # Express One Zone storage class in directory buckets.
+ #
+ # </note>
  # @option options [Hash<String,String>] :metadata
  # A map of metadata to store with the object in S3.
  # @option options [String] :server_side_encryption
  # The server-side encryption algorithm that was used when you store this
- # object in Amazon S3 (for example, `AES256`, `aws:kms`,
- # `aws:kms:dsse`).
+ # object in Amazon S3 or Amazon FSx.
  #
  # * <b>General purpose buckets </b> - You have four mutually exclusive
  # options to protect data using server-side encryption in Amazon S3,
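The `:write_offset_bytes` option above enables appends to objects in directory buckets (S3 Express One Zone). A minimal sketch, with placeholder names, that appends to an existing object or creates it at offset 0:

    require "aws-sdk-s3"

    obj = Aws::S3::Object.new("amzn-s3-demo-bucket--usw2-az1--x-s3", "stream/events.log")
    offset = begin
      obj.content_length # size of the existing object, via HeadObject
    rescue Aws::S3::Errors::NotFound
      0 # no existing object; offset 0 creates it
    end
    obj.put(body: "new records\n", write_offset_bytes: offset)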
@@ -2290,6 +2422,14 @@ module Aws::S3
  #
  # </note>
  #
+ # * <b>S3 access points for Amazon FSx </b> - When accessing data stored
+ # in Amazon FSx file systems using S3 access points, the only valid
+ # server side encryption option is `aws:fsx`. All Amazon FSx file
+ # systems have encryption configured by default and are encrypted at
+ # rest. Data is automatically encrypted before being written to the
+ # file system, and automatically decrypted as it is read. These
+ # processes are handled transparently by Amazon FSx.
+ #
  #
  #
  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html
@@ -2304,8 +2444,9 @@ module Aws::S3
  # a different Storage Class. For more information, see [Storage
  # Classes][1] in the *Amazon S3 User Guide*.
  #
- # <note markdown="1"> * For directory buckets, only the S3 Express One Zone storage class is
- # supported to store newly created objects.
+ # <note markdown="1"> * Directory buckets only support `EXPRESS_ONEZONE` (the S3 Express One
+ # Zone storage class) in Availability Zones and `ONEZONE_IA` (the S3
+ # One Zone-Infrequent Access storage class) in Dedicated Local Zones.
  #
  # * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
  #
@@ -2383,15 +2524,17 @@ module Aws::S3
  # `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the
  # Amazon Web Services managed key (`aws/s3`) to protect the data.
  #
- # **Directory buckets** - If you specify `x-amz-server-side-encryption`
- # with `aws:kms`, you must specify the `
- # x-amz-server-side-encryption-aws-kms-key-id` header with the ID (Key
- # ID or Key ARN) of the KMS symmetric encryption customer managed key to
- # use. Otherwise, you get an HTTP `400 Bad Request` error. Only use the
- # key ID or key ARN. The key alias format of the KMS key isn't
- # supported. Your SSE-KMS configuration can only support 1 [customer
- # managed key][1] per directory bucket for the lifetime of the bucket.
- # [Amazon Web Services managed key][2] (`aws/s3`) isn't supported.
+ # **Directory buckets** - To encrypt data using SSE-KMS, it's
+ # recommended to specify the `x-amz-server-side-encryption` header to
+ # `aws:kms`. Then, the `x-amz-server-side-encryption-aws-kms-key-id`
+ # header implicitly uses the bucket's default KMS customer managed key
+ # ID. If you want to explicitly set the `
+ # x-amz-server-side-encryption-aws-kms-key-id` header, it must match the
+ # bucket's default customer managed key (using key ID or ARN, not
+ # alias). Your SSE-KMS configuration can only support 1 [customer
+ # managed key][1] per directory bucket's lifetime. The [Amazon Web
+ # Services managed key][2] (`aws/s3`) isn't supported. Incorrect key
+ # specification results in an HTTP `400 Bad Request` error.
  #
  #
  #
@@ -2400,7 +2543,7 @@ module Aws::S3
  # @option options [String] :ssekms_encryption_context
  # Specifies the Amazon Web Services KMS Encryption Context as an
  # additional encryption context to use for object encryption. The value
- # of this header is a Base64-encoded string of a UTF-8 encoded JSON,
+ # of this header is a Base64 encoded string of a UTF-8 encoded JSON,
  # which contains the encryption context as key-value pairs. This value
  # is stored as object metadata and automatically gets passed on to
  # Amazon Web Services KMS for future `GetObject` operations on this
@@ -2559,7 +2702,7 @@ module Aws::S3
  # bucket_name: "BucketName", # required
  # prefix: "LocationPrefix", # required
  # encryption: {
- # encryption_type: "AES256", # required, accepts AES256, aws:kms, aws:kms:dsse
+ # encryption_type: "AES256", # required, accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
  # kms_key_id: "SSEKMSKeyId",
  # kms_context: "KMSContext",
  # },
@@ -2590,12 +2733,12 @@ module Aws::S3
  # value: "MetadataValue",
  # },
  # ],
- # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+ # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
  # },
  # },
  # },
  # request_payer: "requester", # accepts requester
- # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256, CRC64NVME
  # expected_bucket_owner: "AccountId",
  # })
  # @param [Hash] options ({})
@@ -2772,7 +2915,7 @@ module Aws::S3
  # request_payer: "requester", # accepts requester
  # bypass_governance_retention: false,
  # expected_bucket_owner: "AccountId",
- # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
+ # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256, CRC64NVME
  # })
  # @param options ({})
  # @option options [String] :mfa
@@ -2840,6 +2983,8 @@ module Aws::S3
  #
  # * `CRC32C`
  #
+ # * `CRC64NVME`
+ #
  # * `SHA1`
  #
  # * `SHA256`
@@ -2849,9 +2994,8 @@ module Aws::S3
  #
  # If the individual checksum value you provide through
  # `x-amz-checksum-algorithm ` doesn't match the checksum algorithm you
- # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any
- # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm
- # that matches the provided value in `x-amz-checksum-algorithm `.
+ # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 fails the
+ # request with a `BadDigest` error.
  #
  # If you provide an individual checksum, Amazon S3 ignores any provided
  # `ChecksumAlgorithm` parameter.