aws-sdk-s3 1.188.0 → 1.199.1

@@ -324,7 +324,12 @@ module Aws::S3
   end

   # The server-side encryption algorithm used when you store this object
-  # in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`).
+  # in Amazon S3 or Amazon FSx.
+  #
+  # <note markdown="1"> When accessing data stored in Amazon FSx file systems using S3 access
+  # points, the only valid server side encryption option is `aws:fsx`.
+  #
+  # </note>
   # @return [String]
   def server_side_encryption
     data[:server_side_encryption]
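
A minimal sketch of reading the attribute documented above; the bucket and key names are placeholders, not from the diff:

    obj = Aws::S3::Object.new('my-bucket', 'my-key')
    # "AES256", "aws:kms", "aws:kms:dsse", or, for objects read through an
    # FSx-backed S3 access point, "aws:fsx"
    puts obj.server_side_encryption
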
@@ -470,6 +475,24 @@ module Aws::S3
     data[:parts_count]
   end

+  # The number of tags, if any, on the object, when you have the relevant
+  # permission to read object tags.
+  #
+  # You can use [GetObjectTagging][1] to retrieve the tag set associated
+  # with an object.
+  #
+  # <note markdown="1"> This functionality is not supported for directory buckets.
+  #
+  # </note>
+  #
+  #
+  #
+  # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
+  # @return [Integer]
+  def tag_count
+    data[:tag_count]
+  end
+
   # The Object Lock mode, if any, that's in effect for this object. This
   # header is only returned if the requester has the
   # `s3:GetObjectRetention` permission. For more information about S3
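
A short sketch of the new `tag_count` accessor paired with `GetObjectTagging`, per the doc comment above; names are placeholders and the caller is assumed to have the relevant tagging permission:

    obj = Aws::S3::Object.new('my-bucket', 'my-key')
    if obj.tag_count.to_i > 0
      tags = obj.client.get_object_tagging(bucket: obj.bucket_name, key: obj.key).tag_set
      tags.each { |tag| puts "#{tag.key}=#{tag.value}" }
    end
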
@@ -739,8 +762,8 @@ module Aws::S3
   #   },
   #   metadata_directive: "COPY", # accepts COPY, REPLACE
   #   tagging_directive: "COPY", # accepts COPY, REPLACE
-  #   server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse
-  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+  #   server_side_encryption: "AES256", # accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
+  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
   #   website_redirect_location: "WebsiteRedirectLocation",
   #   sse_customer_algorithm: "SSECustomerAlgorithm",
   #   sse_customer_key: "SSECustomerKey",
@@ -1130,6 +1153,14 @@ module Aws::S3
   #   key is the same customer managed key that you specified for the
   #   directory bucket's default encryption configuration.
   #
+  # * <b>S3 access points for Amazon FSx </b> - When accessing data stored
+  #   in Amazon FSx file systems using S3 access points, the only valid
+  #   server side encryption option is `aws:fsx`. All Amazon FSx file
+  #   systems have encryption configured by default and are encrypted at
+  #   rest. Data is automatically encrypted before being written to the
+  #   file system, and automatically decrypted as it is read. These
+  #   processes are handled transparently by Amazon FSx.
+  #
   #
   #
   # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
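
A hedged sketch of what the new option value looks like in practice: a PutObject call through an S3 access point that fronts an Amazon FSx file system, where `aws:fsx` is the only valid choice. The access point alias below is a placeholder:

    s3 = Aws::S3::Client.new
    s3.put_object(
      bucket: 'my-fsx-access-point-alias', # placeholder
      key: 'data.txt',
      body: 'hello',
      server_side_encryption: 'aws:fsx'
    )

The companion `FSX_OPENZFS` value added to the accepted storage classes in the surrounding hunks follows the same pattern.
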
@@ -1828,8 +1859,8 @@ module Aws::S3
   #   metadata: {
   #     "MetadataKey" => "MetadataValue",
   #   },
-  #   server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse
-  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+  #   server_side_encryption: "AES256", # accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
+  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
   #   website_redirect_location: "WebsiteRedirectLocation",
   #   sse_customer_algorithm: "SSECustomerAlgorithm",
   #   sse_customer_key: "SSECustomerKey",
@@ -2146,7 +2177,7 @@ module Aws::S3
   #   A map of metadata to store with the object in S3.
   # @option options [String] :server_side_encryption
   #   The server-side encryption algorithm used when you store this object
-  #   in Amazon S3 (for example, `AES256`, `aws:kms`).
+  #   in Amazon S3 or Amazon FSx.
   #
   #   * <b>Directory buckets </b> - For directory buckets, there are only
   #     two supported options for server-side encryption: server-side
@@ -2188,6 +2219,14 @@ module Aws::S3
   #
   #     </note>
   #
+  #   * <b>S3 access points for Amazon FSx </b> - When accessing data stored
+  #     in Amazon FSx file systems using S3 access points, the only valid
+  #     server side encryption option is `aws:fsx`. All Amazon FSx file
+  #     systems have encryption configured by default and are encrypted at
+  #     rest. Data is automatically encrypted before being written to the
+  #     file system, and automatically decrypted as it is read. These
+  #     processes are handled transparently by Amazon FSx.
+  #
   #
   #
   #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
@@ -2421,8 +2460,8 @@ module Aws::S3
   #   metadata: {
   #     "MetadataKey" => "MetadataValue",
   #   },
-  #   server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse
-  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+  #   server_side_encryption: "AES256", # accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
+  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
   #   website_redirect_location: "WebsiteRedirectLocation",
   #   sse_customer_algorithm: "SSECustomerAlgorithm",
   #   sse_customer_key: "SSECustomerKey",
@@ -2732,8 +2771,7 @@ module Aws::S3
   #   A map of metadata to store with the object in S3.
   # @option options [String] :server_side_encryption
   #   The server-side encryption algorithm that was used when you store this
-  #   object in Amazon S3 (for example, `AES256`, `aws:kms`,
-  #   `aws:kms:dsse`).
+  #   object in Amazon S3 or Amazon FSx.
   #
   #   * <b>General purpose buckets </b> - You have four mutually exclusive
   #     options to protect data using server-side encryption in Amazon S3,
@@ -2787,6 +2825,14 @@ module Aws::S3
   #
   #     </note>
   #
+  #   * <b>S3 access points for Amazon FSx </b> - When accessing data stored
+  #     in Amazon FSx file systems using S3 access points, the only valid
+  #     server side encryption option is `aws:fsx`. All Amazon FSx file
+  #     systems have encryption configured by default and are encrypted at
+  #     rest. Data is automatically encrypted before being written to the
+  #     file system, and automatically decrypted as it is read. These
+  #     processes are handled transparently by Amazon FSx.
+  #
   #
   #
   #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html
@@ -3059,7 +3105,7 @@ module Aws::S3
   #         bucket_name: "BucketName", # required
   #         prefix: "LocationPrefix", # required
   #         encryption: {
-  #           encryption_type: "AES256", # required, accepts AES256, aws:kms, aws:kms:dsse
+  #           encryption_type: "AES256", # required, accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
   #           kms_key_id: "SSEKMSKeyId",
   #           kms_context: "KMSContext",
   #         },
@@ -3090,7 +3136,7 @@ module Aws::S3
   #             value: "MetadataValue",
   #           },
   #         ],
-  #         storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+  #         storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
   #       },
   #     },
   #   },
@@ -1,7 +1,8 @@
  # frozen_string_literal: true

  require 'thread'
- require 'cgi'
+ require "cgi/escape"
+ require "cgi/util" if RUBY_VERSION < "3.5"

  module Aws
    module S3
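
The narrowed requires above track Ruby 3.5's slimming of the `cgi` default gem: `cgi/escape` provides just the escaping helpers the SDK needs, while `cgi/util` backfills them on older Rubies. A quick illustration of what remains available:

    require "cgi/escape"
    CGI.escape("a key/with spaces")     # => "a+key%2Fwith+spaces"
    CGI.unescape("a+key%2Fwith+spaces") # => "a key/with spaces"
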
@@ -359,8 +359,8 @@ module Aws::S3
   #   },
   #   metadata_directive: "COPY", # accepts COPY, REPLACE
   #   tagging_directive: "COPY", # accepts COPY, REPLACE
-  #   server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse
-  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+  #   server_side_encryption: "AES256", # accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
+  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
   #   website_redirect_location: "WebsiteRedirectLocation",
   #   sse_customer_algorithm: "SSECustomerAlgorithm",
   #   sse_customer_key: "SSECustomerKey",
@@ -750,6 +750,14 @@ module Aws::S3
   #   key is the same customer managed key that you specified for the
   #   directory bucket's default encryption configuration.
   #
+  # * <b>S3 access points for Amazon FSx </b> - When accessing data stored
+  #   in Amazon FSx file systems using S3 access points, the only valid
+  #   server side encryption option is `aws:fsx`. All Amazon FSx file
+  #   systems have encryption configured by default and are encrypted at
+  #   rest. Data is automatically encrypted before being written to the
+  #   file system, and automatically decrypted as it is read. These
+  #   processes are handled transparently by Amazon FSx.
+  #
   #
   #
   # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
@@ -1448,8 +1456,8 @@ module Aws::S3
   #   metadata: {
   #     "MetadataKey" => "MetadataValue",
   #   },
-  #   server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse
-  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+  #   server_side_encryption: "AES256", # accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
+  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
   #   website_redirect_location: "WebsiteRedirectLocation",
   #   sse_customer_algorithm: "SSECustomerAlgorithm",
   #   sse_customer_key: "SSECustomerKey",
@@ -1766,7 +1774,7 @@ module Aws::S3
   #   A map of metadata to store with the object in S3.
   # @option options [String] :server_side_encryption
   #   The server-side encryption algorithm used when you store this object
-  #   in Amazon S3 (for example, `AES256`, `aws:kms`).
+  #   in Amazon S3 or Amazon FSx.
   #
   #   * <b>Directory buckets </b> - For directory buckets, there are only
   #     two supported options for server-side encryption: server-side
@@ -1808,6 +1816,14 @@ module Aws::S3
   #
   #     </note>
   #
+  #   * <b>S3 access points for Amazon FSx </b> - When accessing data stored
+  #     in Amazon FSx file systems using S3 access points, the only valid
+  #     server side encryption option is `aws:fsx`. All Amazon FSx file
+  #     systems have encryption configured by default and are encrypted at
+  #     rest. Data is automatically encrypted before being written to the
+  #     file system, and automatically decrypted as it is read. These
+  #     processes are handled transparently by Amazon FSx.
+  #
   #
   #
   #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
@@ -2041,8 +2057,8 @@ module Aws::S3
   #   metadata: {
   #     "MetadataKey" => "MetadataValue",
   #   },
-  #   server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse
-  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+  #   server_side_encryption: "AES256", # accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
+  #   storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
   #   website_redirect_location: "WebsiteRedirectLocation",
   #   sse_customer_algorithm: "SSECustomerAlgorithm",
   #   sse_customer_key: "SSECustomerKey",
@@ -2352,8 +2368,7 @@ module Aws::S3
   #   A map of metadata to store with the object in S3.
   # @option options [String] :server_side_encryption
   #   The server-side encryption algorithm that was used when you store this
-  #   object in Amazon S3 (for example, `AES256`, `aws:kms`,
-  #   `aws:kms:dsse`).
+  #   object in Amazon S3 or Amazon FSx.
   #
   #   * <b>General purpose buckets </b> - You have four mutually exclusive
   #     options to protect data using server-side encryption in Amazon S3,
@@ -2407,6 +2422,14 @@ module Aws::S3
   #
   #     </note>
   #
+  #   * <b>S3 access points for Amazon FSx </b> - When accessing data stored
+  #     in Amazon FSx file systems using S3 access points, the only valid
+  #     server side encryption option is `aws:fsx`. All Amazon FSx file
+  #     systems have encryption configured by default and are encrypted at
+  #     rest. Data is automatically encrypted before being written to the
+  #     file system, and automatically decrypted as it is read. These
+  #     processes are handled transparently by Amazon FSx.
+  #
   #
   #
   #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html
@@ -2679,7 +2702,7 @@ module Aws::S3
   #         bucket_name: "BucketName", # required
   #         prefix: "LocationPrefix", # required
   #         encryption: {
-  #           encryption_type: "AES256", # required, accepts AES256, aws:kms, aws:kms:dsse
+  #           encryption_type: "AES256", # required, accepts AES256, aws:fsx, aws:kms, aws:kms:dsse
   #           kms_key_id: "SSEKMSKeyId",
   #           kms_context: "KMSContext",
   #         },
@@ -2710,7 +2733,7 @@ module Aws::S3
   #             value: "MetadataValue",
   #           },
   #         ],
-  #         storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
+  #         storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE, FSX_OPENZFS
   #       },
   #     },
   #   },
@@ -1,7 +1,8 @@
  # frozen_string_literal: true

  require 'uri'
- require 'cgi'
+ require "cgi/escape"
+ require "cgi/util" if RUBY_VERSION < "3.5"

  module Aws
    module S3
@@ -50,6 +50,12 @@ module Aws::S3
   #       data_redundancy: "SingleAvailabilityZone", # accepts SingleAvailabilityZone, SingleLocalZone
   #       type: "Directory", # accepts Directory
   #     },
+  #     tags: [
+  #       {
+  #         key: "ObjectKey", # required
+  #         value: "Value", # required
+  #       },
+  #     ],
   #   },
   #   grant_full_control: "GrantFullControl",
   #   grant_read: "GrantRead",
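
A sketch of the new `tags` field in context, assuming a directory bucket create call; every name below is a placeholder:

    s3 = Aws::S3::Client.new
    s3.create_bucket(
      bucket: 'my-bucket--usw2-az1--x-s3', # placeholder directory bucket name
      create_bucket_configuration: {
        location: { type: 'AvailabilityZone', name: 'usw2-az1' },
        bucket: { data_redundancy: 'SingleAvailabilityZone', type: 'Directory' },
        tags: [{ key: 'project', value: 'demo' }]
      }
    )
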
@@ -0,0 +1,252 @@
+ # frozen_string_literal: true
+
+ module Aws
+   module S3
+     # A high-level S3 transfer utility that provides enhanced upload and download
+     # capabilities with automatic multipart handling, progress tracking, and
+     # handling of large files. The following features are supported:
+     #
+     # * upload a file with multipart upload
+     # * upload a stream with multipart upload
+     # * download an S3 object with multipart download
+     # * track transfer progress by using a progress listener
+     #
+     class TransferManager
+       # @param [Hash] options
+       # @option options [S3::Client] :client (S3::Client.new)
+       #   The S3 client to use for {TransferManager} operations. If not provided, a new default client
+       #   will be created automatically.
+       def initialize(options = {})
+         @client = options.delete(:client) || Client.new
+       end
+
+       # @return [S3::Client]
+       attr_reader :client
+
+       # Downloads a file in S3 to a path on disk.
+       #
+       #     # small files (< 5MB) are downloaded in a single API call
+       #     tm = TransferManager.new
+       #     tm.download_file('/path/to/file', bucket: 'bucket', key: 'key')
+       #
+       # Files larger than 5MB are downloaded using the multipart method:
+       #
+       #     # large files are split into parts and the parts are downloaded in parallel
+       #     tm.download_file('/path/to/large_file', bucket: 'bucket', key: 'key')
+       #
+       # You can provide a callback to monitor progress of the download:
+       #
+       #     # bytes and part_sizes are each an array with 1 entry per part
+       #     # part_sizes may not be known until the first bytes are retrieved
+       #     progress = proc do |bytes, part_sizes, file_size|
+       #       bytes.map.with_index do |b, i|
+       #         puts "Part #{i + 1}: #{b} / #{part_sizes[i]}"
+       #       end
+       #       puts "Total: #{100.0 * bytes.sum / file_size}%"
+       #     end
+       #     tm.download_file('/path/to/file', bucket: 'bucket', key: 'key', progress_callback: progress)
+       #
+       # @param [String, Pathname, File, Tempfile] destination
+       #   Where to download the file to. This can either be a String or Pathname to the file, an open File object,
+       #   or an open Tempfile object. If you pass an open File or Tempfile object, then you are responsible for
+       #   closing it after the download completes. Download behavior varies by destination type:
+       #
+       #   * **String/Pathname paths**: Downloads to a temporary file first, then atomically moves to the final
+       #     destination. This prevents corruption of any existing file if the download fails.
+       #   * **File/Tempfile objects**: Downloads directly to the file object without using temporary files.
+       #     You are responsible for managing the file object's state and closing it after the download completes.
+       #     If the download fails, the file object may contain partial data.
+       #
+       # @param [String] bucket
+       #   The name of the S3 bucket to download the object from.
+       #
+       # @param [String] key
+       #   The object key name in the S3 bucket.
+       #
+       # @param [Hash] options
+       #   Additional options for {Client#get_object} and {Client#head_object} may be provided.
+       #
+       # @option options [String] :mode ("auto") `"auto"`, `"single_request"` or `"get_range"`
+       #
+       #   * `"auto"` mode is enabled by default and performs a multipart download
+       #   * `"single_request"` mode forces the download to be made in a single GET request
+       #   * `"get_range"` mode requires the `:chunk_size` parameter to be configured, customizing the size of each range
+       #
+       # @option options [Integer] :chunk_size The size of each range; required in `"get_range"` mode.
+       #
+       # @option options [Integer] :thread_count (10) The number of threads used in the multipart download.
+       #
+       # @option options [String] :version_id The object version id used to retrieve the object.
+       #
+       # @see https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html ObjectVersioning
+       #
+       # @option options [String] :checksum_mode ("ENABLED")
+       #   When `"ENABLED"` and the object has a stored checksum, it will be used to validate the download and will
+       #   raise an `Aws::Errors::ChecksumError` if checksum validation fails. You may provide an `on_checksum_validated`
+       #   callback if you need to verify that validation occurred and which algorithm was used.
+       #   To disable checksum validation, set `checksum_mode` to `"DISABLED"`.
+       #
+       # @option options [Callable] :on_checksum_validated
+       #   Called each time a request's checksum is validated with the checksum algorithm and the
+       #   response. For multipart downloads, this will be called for each part that is downloaded and validated.
+       #
+       # @option options [Proc] :progress_callback
+       #   A Proc that will be called when each chunk of the download is received. It will be invoked with
+       #   `bytes_read`, `part_sizes`, `file_size`. When the object is downloaded as parts (rather than by ranges),
+       #   the `part_sizes` will not be known ahead of time and will be `nil` in the callback until the first bytes
+       #   in the part are received.
+       #
+       # @raise [MultipartDownloadError] Raised when an object validation fails outside of service errors.
+       #
+       # @return [Boolean] Returns `true` when the file is downloaded without any errors.
+       #
+       # @see Client#get_object
+       # @see Client#head_object
+       def download_file(destination, bucket:, key:, **options)
+         downloader = FileDownloader.new(client: @client)
+         downloader.download(destination, options.merge(bucket: bucket, key: key))
+         true
+       end
+
+       # Uploads a file from disk to S3.
+       #
+       #     # small files are uploaded with the PutObject API
+       #     tm = TransferManager.new
+       #     tm.upload_file('/path/to/small_file', bucket: 'bucket', key: 'key')
+       #
+       # Files larger than or equal to `:multipart_threshold` are uploaded using the multipart upload APIs.
+       #
+       #     # large files are automatically split into parts and the parts are uploaded in parallel
+       #     tm.upload_file('/path/to/large_file', bucket: 'bucket', key: 'key')
+       #
+       # The response of the S3 upload API is yielded if a block is given.
+       #
+       #     # API response will have etag value of the file
+       #     tm.upload_file('/path/to/file', bucket: 'bucket', key: 'key') do |response|
+       #       etag = response.etag
+       #     end
+       #
+       # You can provide a callback to monitor progress of the upload:
+       #
+       #     # bytes and totals are each an array with 1 entry per part
+       #     progress = proc do |bytes, totals|
+       #       bytes.map.with_index do |b, i|
+       #         puts "Part #{i + 1}: #{b} / #{totals[i]} " + "Total: #{100.0 * bytes.sum / totals.sum}%"
+       #       end
+       #     end
+       #     tm.upload_file('/path/to/file', bucket: 'bucket', key: 'key', progress_callback: progress)
+       #
+       # @param [String, Pathname, File, Tempfile] source
+       #   A file on the local file system that will be uploaded. This can either be a `String` or `Pathname` to the
+       #   file, an open `File` object, or an open `Tempfile` object. If you pass an open `File` or `Tempfile` object,
+       #   then you are responsible for closing it after the upload completes. When using an open Tempfile, rewind it
+       #   before uploading or else the object will be empty.
+       #
+       # @param [String] bucket
+       #   The name of the S3 bucket to upload to.
+       #
+       # @param [String] key
+       #   The object key name for the uploaded file.
+       #
+       # @param [Hash] options
+       #   Additional options for {Client#put_object} when the file size is below the multipart threshold.
+       #   For files larger than the multipart threshold, options for {Client#create_multipart_upload},
+       #   {Client#complete_multipart_upload}, and {Client#upload_part} can be provided.
+       #
+       # @option options [Integer] :multipart_threshold (104857600)
+       #   Files larger than or equal to `:multipart_threshold` are uploaded using the S3 multipart upload APIs.
+       #   The default threshold is `100MB`.
+       #
+       # @option options [Integer] :thread_count (10)
+       #   The number of parallel multipart uploads. This option is not used if the file is smaller than
+       #   `:multipart_threshold`.
+       #
+       # @option options [Proc] :progress_callback (nil)
+       #   A Proc that will be called when each chunk of the upload is sent.
+       #   It will be invoked with `[bytes_read]` and `[total_sizes]`.
+       #
+       # @raise [MultipartUploadError] If a file is being uploaded in parts and the upload cannot be completed,
+       #   then the upload is aborted and this error is raised. The raised error has a `#errors` method that
+       #   returns the failures that caused the upload to be aborted.
+       #
+       # @return [Boolean] Returns `true` when the file is uploaded without any errors.
+       #
+       # @see Client#put_object
+       # @see Client#create_multipart_upload
+       # @see Client#complete_multipart_upload
+       # @see Client#upload_part
+       def upload_file(source, bucket:, key:, **options)
+         uploading_options = options.dup
+         uploader = FileUploader.new(
+           multipart_threshold: uploading_options.delete(:multipart_threshold),
+           client: @client
+         )
+         response = uploader.upload(source, uploading_options.merge(bucket: bucket, key: key))
+         yield response if block_given?
+         true
+       end
+
+       # Uploads a stream in a streaming fashion to S3.
+       #
+       # Passed chunks are automatically split into multipart upload parts and the parts are uploaded in parallel.
+       # This allows for streaming uploads that never touch the disk.
+       #
+       # **Note**: There are known issues in JRuby until jruby-9.1.15.0, so avoid using this with older JRuby versions.
+       #
+       # @example Streaming chunks of data
+       #   tm = TransferManager.new
+       #   tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
+       #     10.times { write_stream << 'foo' }
+       #   end
+       # @example Streaming from another process
+       #   tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
+       #     IO.copy_stream(IO.popen('ls'), write_stream)
+       #   end
+       # @example Streaming from STDIN
+       #   tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
+       #     IO.copy_stream(STDIN, write_stream)
+       #   end
+       #
+       # @param [String] bucket
+       #   The name of the S3 bucket to upload to.
+       #
+       # @param [String] key
+       #   The object key name for the uploaded object.
+       #
+       # @param [Hash] options
+       #   Additional options for {Client#create_multipart_upload}, {Client#complete_multipart_upload}, and
+       #   {Client#upload_part} can be provided.
+       #
+       # @option options [Integer] :thread_count (10)
+       #   The number of parallel multipart uploads.
+       #
+       # @option options [Boolean] :tempfile (false)
+       #   Normally read data is stored in memory when building the parts in order to complete the underlying
+       #   multipart upload. By passing `:tempfile => true`, the data read will be temporarily stored on disk,
+       #   significantly reducing the memory footprint.
+       #
+       # @option options [Integer] :part_size (5242880)
+       #   The size of each part except the last. The default `:part_size` is `5 * 1024 * 1024`.
+       #
+       # @raise [MultipartUploadError] If an object is being uploaded in parts and the upload cannot be completed,
+       #   then the upload is aborted and this error is raised. The raised error has a `#errors` method that returns
+       #   the failures that caused the upload to be aborted.
+       #
+       # @return [Boolean] Returns `true` when the object is uploaded without any errors.
+       #
+       # @see Client#create_multipart_upload
+       # @see Client#complete_multipart_upload
+       # @see Client#upload_part
+       def upload_stream(bucket:, key:, **options, &block)
+         uploading_options = options.dup
+         uploader = MultipartStreamUploader.new(
+           client: @client,
+           thread_count: uploading_options.delete(:thread_count),
+           tempfile: uploading_options.delete(:tempfile),
+           part_size: uploading_options.delete(:part_size)
+         )
+         uploader.upload(uploading_options.merge(bucket: bucket, key: key), &block)
+         true
+       end
+     end
+   end
+ end
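
Putting the new class together, a usage sketch assembled from the doc examples above; the bucket, key, and paths are placeholders:

    require 'aws-sdk-s3'

    tm = Aws::S3::TransferManager.new # builds a default Aws::S3::Client
    tm.upload_file('/path/to/file', bucket: 'bucket', key: 'key') do |response|
      puts "uploaded with ETag #{response.etag}"
    end
    tm.download_file('/path/to/copy', bucket: 'bucket', key: 'key')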