aws-sdk-s3 1.103.0 → 1.202.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +664 -0
- data/VERSION +1 -1
- data/lib/aws-sdk-s3/access_grants_credentials.rb +57 -0
- data/lib/aws-sdk-s3/access_grants_credentials_provider.rb +250 -0
- data/lib/aws-sdk-s3/bucket.rb +858 -116
- data/lib/aws-sdk-s3/bucket_acl.rb +32 -9
- data/lib/aws-sdk-s3/bucket_cors.rb +38 -13
- data/lib/aws-sdk-s3/bucket_lifecycle.rb +43 -12
- data/lib/aws-sdk-s3/bucket_lifecycle_configuration.rb +100 -13
- data/lib/aws-sdk-s3/bucket_logging.rb +35 -6
- data/lib/aws-sdk-s3/bucket_notification.rb +27 -9
- data/lib/aws-sdk-s3/bucket_policy.rb +79 -10
- data/lib/aws-sdk-s3/bucket_region_cache.rb +9 -5
- data/lib/aws-sdk-s3/bucket_request_payment.rb +29 -7
- data/lib/aws-sdk-s3/bucket_tagging.rb +35 -11
- data/lib/aws-sdk-s3/bucket_versioning.rb +108 -17
- data/lib/aws-sdk-s3/bucket_website.rb +35 -11
- data/lib/aws-sdk-s3/client.rb +11799 -3636
- data/lib/aws-sdk-s3/client_api.rb +1201 -276
- data/lib/aws-sdk-s3/customizations/bucket.rb +23 -47
- data/lib/aws-sdk-s3/customizations/errors.rb +40 -0
- data/lib/aws-sdk-s3/customizations/object.rb +216 -70
- data/lib/aws-sdk-s3/customizations/object_summary.rb +5 -0
- data/lib/aws-sdk-s3/customizations/object_version.rb +13 -0
- data/lib/aws-sdk-s3/customizations/types/permanent_redirect.rb +26 -0
- data/lib/aws-sdk-s3/customizations.rb +27 -29
- data/lib/aws-sdk-s3/default_executor.rb +103 -0
- data/lib/aws-sdk-s3/encryption/client.rb +6 -2
- data/lib/aws-sdk-s3/encryption/kms_cipher_provider.rb +13 -9
- data/lib/aws-sdk-s3/encryptionV2/client.rb +6 -2
- data/lib/aws-sdk-s3/encryptionV2/decrypt_handler.rb +1 -0
- data/lib/aws-sdk-s3/encryptionV2/kms_cipher_provider.rb +10 -6
- data/lib/aws-sdk-s3/endpoint_parameters.rb +181 -0
- data/lib/aws-sdk-s3/endpoint_provider.rb +716 -0
- data/lib/aws-sdk-s3/endpoints.rb +1518 -0
- data/lib/aws-sdk-s3/errors.rb +58 -0
- data/lib/aws-sdk-s3/express_credentials.rb +55 -0
- data/lib/aws-sdk-s3/express_credentials_provider.rb +59 -0
- data/lib/aws-sdk-s3/file_downloader.rb +241 -87
- data/lib/aws-sdk-s3/file_uploader.rb +16 -13
- data/lib/aws-sdk-s3/legacy_signer.rb +2 -1
- data/lib/aws-sdk-s3/multipart_download_error.rb +8 -0
- data/lib/aws-sdk-s3/multipart_file_uploader.rb +108 -86
- data/lib/aws-sdk-s3/multipart_stream_uploader.rb +110 -92
- data/lib/aws-sdk-s3/multipart_upload.rb +294 -19
- data/lib/aws-sdk-s3/multipart_upload_error.rb +3 -4
- data/lib/aws-sdk-s3/multipart_upload_part.rb +297 -31
- data/lib/aws-sdk-s3/object.rb +2224 -269
- data/lib/aws-sdk-s3/object_acl.rb +59 -17
- data/lib/aws-sdk-s3/object_copier.rb +7 -5
- data/lib/aws-sdk-s3/object_multipart_copier.rb +48 -23
- data/lib/aws-sdk-s3/object_summary.rb +1915 -220
- data/lib/aws-sdk-s3/object_version.rb +450 -58
- data/lib/aws-sdk-s3/plugins/accelerate.rb +3 -44
- data/lib/aws-sdk-s3/plugins/access_grants.rb +178 -0
- data/lib/aws-sdk-s3/plugins/arn.rb +0 -197
- data/lib/aws-sdk-s3/plugins/bucket_dns.rb +3 -39
- data/lib/aws-sdk-s3/plugins/bucket_name_restrictions.rb +1 -6
- data/lib/aws-sdk-s3/plugins/checksum_algorithm.rb +31 -0
- data/lib/aws-sdk-s3/plugins/dualstack.rb +1 -55
- data/lib/aws-sdk-s3/plugins/endpoints.rb +86 -0
- data/lib/aws-sdk-s3/plugins/expect_100_continue.rb +2 -1
- data/lib/aws-sdk-s3/plugins/express_session_auth.rb +88 -0
- data/lib/aws-sdk-s3/plugins/http_200_errors.rb +55 -18
- data/lib/aws-sdk-s3/plugins/iad_regional_endpoint.rb +6 -29
- data/lib/aws-sdk-s3/plugins/location_constraint.rb +3 -1
- data/lib/aws-sdk-s3/plugins/md5s.rb +10 -68
- data/lib/aws-sdk-s3/plugins/s3_signer.rb +42 -111
- data/lib/aws-sdk-s3/plugins/streaming_retry.rb +28 -9
- data/lib/aws-sdk-s3/plugins/url_encoded_keys.rb +2 -1
- data/lib/aws-sdk-s3/presigned_post.rb +99 -78
- data/lib/aws-sdk-s3/presigner.rb +32 -41
- data/lib/aws-sdk-s3/resource.rb +139 -12
- data/lib/aws-sdk-s3/transfer_manager.rb +304 -0
- data/lib/aws-sdk-s3/types.rb +10204 -5378
- data/lib/aws-sdk-s3.rb +35 -27
- data/sig/bucket.rbs +231 -0
- data/sig/bucket_acl.rbs +78 -0
- data/sig/bucket_cors.rbs +69 -0
- data/sig/bucket_lifecycle.rbs +88 -0
- data/sig/bucket_lifecycle_configuration.rbs +115 -0
- data/sig/bucket_logging.rbs +76 -0
- data/sig/bucket_notification.rbs +114 -0
- data/sig/bucket_policy.rbs +59 -0
- data/sig/bucket_request_payment.rbs +54 -0
- data/sig/bucket_tagging.rbs +65 -0
- data/sig/bucket_versioning.rbs +77 -0
- data/sig/bucket_website.rbs +93 -0
- data/sig/client.rbs +2586 -0
- data/sig/customizations/bucket.rbs +19 -0
- data/sig/customizations/object.rbs +38 -0
- data/sig/customizations/object_summary.rbs +35 -0
- data/sig/errors.rbs +44 -0
- data/sig/multipart_upload.rbs +120 -0
- data/sig/multipart_upload_part.rbs +109 -0
- data/sig/object.rbs +464 -0
- data/sig/object_acl.rbs +86 -0
- data/sig/object_summary.rbs +347 -0
- data/sig/object_version.rbs +143 -0
- data/sig/resource.rbs +141 -0
- data/sig/types.rbs +2868 -0
- data/sig/waiters.rbs +95 -0
- metadata +51 -16
- data/lib/aws-sdk-s3/arn/access_point_arn.rb +0 -69
- data/lib/aws-sdk-s3/arn/multi_region_access_point_arn.rb +0 -69
- data/lib/aws-sdk-s3/arn/object_lambda_arn.rb +0 -69
- data/lib/aws-sdk-s3/arn/outpost_access_point_arn.rb +0 -73
- data/lib/aws-sdk-s3/plugins/object_lambda_endpoint.rb +0 -25
data/lib/aws-sdk-s3/resource.rb
CHANGED
|
@@ -41,7 +41,21 @@ module Aws::S3
|
|
|
41
41
|
# acl: "private", # accepts private, public-read, public-read-write, authenticated-read
|
|
42
42
|
# bucket: "BucketName", # required
|
|
43
43
|
# create_bucket_configuration: {
|
|
44
|
-
# location_constraint: "af-south-1", # accepts af-south-1, ap-east-1, ap-northeast-1, ap-northeast-2, ap-northeast-3, ap-south-1, ap-southeast-1, ap-southeast-2, ca-central-1, cn-north-1, cn-northwest-1, EU, eu-central-1, eu-north-1, eu-south-1, eu-west-1, eu-west-2, eu-west-3, me-south-1, sa-east-1, us-east-2, us-gov-east-1, us-gov-west-1, us-west-1, us-west-2
|
|
44
|
+
# location_constraint: "af-south-1", # accepts af-south-1, ap-east-1, ap-northeast-1, ap-northeast-2, ap-northeast-3, ap-south-1, ap-south-2, ap-southeast-1, ap-southeast-2, ap-southeast-3, ap-southeast-4, ap-southeast-5, ca-central-1, cn-north-1, cn-northwest-1, EU, eu-central-1, eu-central-2, eu-north-1, eu-south-1, eu-south-2, eu-west-1, eu-west-2, eu-west-3, il-central-1, me-central-1, me-south-1, sa-east-1, us-east-2, us-gov-east-1, us-gov-west-1, us-west-1, us-west-2
|
|
45
|
+
# location: {
|
|
46
|
+
# type: "AvailabilityZone", # accepts AvailabilityZone, LocalZone
|
|
47
|
+
# name: "LocationNameAsString",
|
|
48
|
+
# },
|
|
49
|
+
# bucket: {
|
|
50
|
+
# data_redundancy: "SingleAvailabilityZone", # accepts SingleAvailabilityZone, SingleLocalZone
|
|
51
|
+
# type: "Directory", # accepts Directory
|
|
52
|
+
# },
|
|
53
|
+
# tags: [
|
|
54
|
+
# {
|
|
55
|
+
# key: "ObjectKey", # required
|
|
56
|
+
# value: "Value", # required
|
|
57
|
+
# },
|
|
58
|
+
# ],
|
|
45
59
|
# },
|
|
46
60
|
# grant_full_control: "GrantFullControl",
|
|
47
61
|
# grant_read: "GrantRead",
|
|
@@ -49,34 +63,119 @@ module Aws::S3
|
|
|
49
63
|
# grant_write: "GrantWrite",
|
|
50
64
|
# grant_write_acp: "GrantWriteACP",
|
|
51
65
|
# object_lock_enabled_for_bucket: false,
|
|
66
|
+
# object_ownership: "BucketOwnerPreferred", # accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced
|
|
52
67
|
# })
|
|
53
68
|
# @param [Hash] options ({})
|
|
54
69
|
# @option options [String] :acl
|
|
55
70
|
# The canned ACL to apply to the bucket.
|
|
71
|
+
#
|
|
72
|
+
# <note markdown="1"> This functionality is not supported for directory buckets.
|
|
73
|
+
#
|
|
74
|
+
# </note>
|
|
56
75
|
# @option options [required, String] :bucket
|
|
57
76
|
# The name of the bucket to create.
|
|
77
|
+
#
|
|
78
|
+
# **General purpose buckets** - For information about bucket naming
|
|
79
|
+
# restrictions, see [Bucket naming rules][1] in the *Amazon S3 User
|
|
80
|
+
# Guide*.
|
|
81
|
+
#
|
|
82
|
+
# <b>Directory buckets </b> - When you use this operation with a
|
|
83
|
+
# directory bucket, you must use path-style requests in the format
|
|
84
|
+
# `https://s3express-control.region-code.amazonaws.com/bucket-name `.
|
|
85
|
+
# Virtual-hosted-style requests aren't supported. Directory bucket
|
|
86
|
+
# names must be unique in the chosen Zone (Availability Zone or Local
|
|
87
|
+
# Zone). Bucket names must also follow the format `
|
|
88
|
+
# bucket-base-name--zone-id--x-s3` (for example, `
|
|
89
|
+
# DOC-EXAMPLE-BUCKET--usw2-az1--x-s3`). For information about bucket
|
|
90
|
+
# naming restrictions, see [Directory bucket naming rules][2] in the
|
|
91
|
+
# *Amazon S3 User Guide*
|
|
92
|
+
#
|
|
93
|
+
#
|
|
94
|
+
#
|
|
95
|
+
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
|
|
96
|
+
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
|
|
58
97
|
# @option options [Types::CreateBucketConfiguration] :create_bucket_configuration
|
|
59
98
|
# The configuration information for the bucket.
|
|
60
99
|
# @option options [String] :grant_full_control
|
|
61
100
|
# Allows grantee the read, write, read ACP, and write ACP permissions on
|
|
62
101
|
# the bucket.
|
|
102
|
+
#
|
|
103
|
+
# <note markdown="1"> This functionality is not supported for directory buckets.
|
|
104
|
+
#
|
|
105
|
+
# </note>
|
|
63
106
|
# @option options [String] :grant_read
|
|
64
107
|
# Allows grantee to list the objects in the bucket.
|
|
108
|
+
#
|
|
109
|
+
# <note markdown="1"> This functionality is not supported for directory buckets.
|
|
110
|
+
#
|
|
111
|
+
# </note>
|
|
65
112
|
# @option options [String] :grant_read_acp
|
|
66
113
|
# Allows grantee to read the bucket ACL.
|
|
114
|
+
#
|
|
115
|
+
# <note markdown="1"> This functionality is not supported for directory buckets.
|
|
116
|
+
#
|
|
117
|
+
# </note>
|
|
67
118
|
# @option options [String] :grant_write
|
|
68
119
|
# Allows grantee to create new objects in the bucket.
|
|
69
120
|
#
|
|
70
121
|
# For the bucket and object owners of existing objects, also allows
|
|
71
122
|
# deletions and overwrites of those objects.
|
|
123
|
+
#
|
|
124
|
+
# <note markdown="1"> This functionality is not supported for directory buckets.
|
|
125
|
+
#
|
|
126
|
+
# </note>
|
|
72
127
|
# @option options [String] :grant_write_acp
|
|
73
128
|
# Allows grantee to write the ACL for the applicable bucket.
|
|
129
|
+
#
|
|
130
|
+
# <note markdown="1"> This functionality is not supported for directory buckets.
|
|
131
|
+
#
|
|
132
|
+
# </note>
|
|
74
133
|
# @option options [Boolean] :object_lock_enabled_for_bucket
|
|
75
134
|
# Specifies whether you want S3 Object Lock to be enabled for the new
|
|
76
135
|
# bucket.
|
|
136
|
+
#
|
|
137
|
+
# <note markdown="1"> This functionality is not supported for directory buckets.
|
|
138
|
+
#
|
|
139
|
+
# </note>
|
|
140
|
+
# @option options [String] :object_ownership
|
|
141
|
+
# The container element for object ownership for a bucket's ownership
|
|
142
|
+
# controls.
|
|
143
|
+
#
|
|
144
|
+
# `BucketOwnerPreferred` - Objects uploaded to the bucket change
|
|
145
|
+
# ownership to the bucket owner if the objects are uploaded with the
|
|
146
|
+
# `bucket-owner-full-control` canned ACL.
|
|
147
|
+
#
|
|
148
|
+
# `ObjectWriter` - The uploading account will own the object if the
|
|
149
|
+
# object is uploaded with the `bucket-owner-full-control` canned ACL.
|
|
150
|
+
#
|
|
151
|
+
# `BucketOwnerEnforced` - Access control lists (ACLs) are disabled and
|
|
152
|
+
# no longer affect permissions. The bucket owner automatically owns and
|
|
153
|
+
# has full control over every object in the bucket. The bucket only
|
|
154
|
+
# accepts PUT requests that don't specify an ACL or specify bucket
|
|
155
|
+
# owner full control ACLs (such as the predefined
|
|
156
|
+
# `bucket-owner-full-control` canned ACL or a custom ACL in XML format
|
|
157
|
+
# that grants the same permissions).
|
|
158
|
+
#
|
|
159
|
+
# By default, `ObjectOwnership` is set to `BucketOwnerEnforced` and ACLs
|
|
160
|
+
# are disabled. We recommend keeping ACLs disabled, except in uncommon
|
|
161
|
+
# use cases where you must control access for each object individually.
|
|
162
|
+
# For more information about S3 Object Ownership, see [Controlling
|
|
163
|
+
# ownership of objects and disabling ACLs for your bucket][1] in the
|
|
164
|
+
# *Amazon S3 User Guide*.
|
|
165
|
+
#
|
|
166
|
+
# <note markdown="1"> This functionality is not supported for directory buckets. Directory
|
|
167
|
+
# buckets use the bucket owner enforced setting for S3 Object Ownership.
|
|
168
|
+
#
|
|
169
|
+
# </note>
|
|
170
|
+
#
|
|
171
|
+
#
|
|
172
|
+
#
|
|
173
|
+
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
|
|
77
174
|
# @return [Bucket]
|
|
78
175
|
def create_bucket(options = {})
|
|
79
|
-
|
|
176
|
+
Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
|
|
177
|
+
@client.create_bucket(options)
|
|
178
|
+
end
|
|
80
179
|
Bucket.new(
|
|
81
180
|
name: options[:bucket],
|
|
82
181
|
client: @client
|
|
@@ -96,21 +195,49 @@ module Aws::S3
|
|
|
96
195
|
|
|
97
196
|
# @example Request syntax with placeholder values
|
|
98
197
|
#
|
|
99
|
-
# s3.buckets(
|
|
198
|
+
# buckets = s3.buckets({
|
|
199
|
+
# prefix: "Prefix",
|
|
200
|
+
# bucket_region: "BucketRegion",
|
|
201
|
+
# })
|
|
100
202
|
# @param [Hash] options ({})
|
|
203
|
+
# @option options [String] :prefix
|
|
204
|
+
# Limits the response to bucket names that begin with the specified
|
|
205
|
+
# bucket name prefix.
|
|
206
|
+
# @option options [String] :bucket_region
|
|
207
|
+
# Limits the response to buckets that are located in the specified
|
|
208
|
+
# Amazon Web Services Region. The Amazon Web Services Region must be
|
|
209
|
+
# expressed according to the Amazon Web Services Region code, such as
|
|
210
|
+
# `us-west-2` for the US West (Oregon) Region. For a list of the valid
|
|
211
|
+
# values for all of the Amazon Web Services Regions, see [Regions and
|
|
212
|
+
# Endpoints][1].
|
|
213
|
+
#
|
|
214
|
+
# <note markdown="1"> Requests made to a Regional endpoint that is different from the
|
|
215
|
+
# `bucket-region` parameter are not supported. For example, if you want
|
|
216
|
+
# to limit the response to your buckets in Region `us-west-2`, the
|
|
217
|
+
# request must be made to an endpoint in Region `us-west-2`.
|
|
218
|
+
#
|
|
219
|
+
# </note>
|
|
220
|
+
#
|
|
221
|
+
#
|
|
222
|
+
#
|
|
223
|
+
# [1]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
|
|
101
224
|
# @return [Bucket::Collection]
|
|
102
225
|
def buckets(options = {})
|
|
103
226
|
batches = Enumerator.new do |y|
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
227
|
+
resp = Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
|
|
228
|
+
@client.list_buckets(options)
|
|
229
|
+
end
|
|
230
|
+
resp.each_page do |page|
|
|
231
|
+
batch = []
|
|
232
|
+
page.data.buckets.each do |b|
|
|
233
|
+
batch << Bucket.new(
|
|
234
|
+
name: b.name,
|
|
235
|
+
data: b,
|
|
236
|
+
client: @client
|
|
237
|
+
)
|
|
238
|
+
end
|
|
239
|
+
y.yield(batch)
|
|
112
240
|
end
|
|
113
|
-
y.yield(batch)
|
|
114
241
|
end
|
|
115
242
|
Bucket::Collection.new(batches)
|
|
116
243
|
end
|
|
@@ -0,0 +1,304 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Aws
|
|
4
|
+
module S3
|
|
5
|
+
# A high-level S3 transfer utility that provides enhanced upload and download capabilities with automatic
|
|
6
|
+
# multipart handling, progress tracking, and handling of large files. The following features are supported:
|
|
7
|
+
#
|
|
8
|
+
# * upload a file with multipart upload
|
|
9
|
+
# * upload a stream with multipart upload
|
|
10
|
+
# * download an S3 object with multipart download
|
|
11
|
+
# * track transfer progress by using progress listener
|
|
12
|
+
#
|
|
13
|
+
# ## Executor Management
|
|
14
|
+
# TransferManager uses executors to handle concurrent operations during multipart transfers. You can control
|
|
15
|
+
# concurrency behavior by providing a custom executor or relying on the default executor management.
|
|
16
|
+
#
|
|
17
|
+
# ### Default Behavior
|
|
18
|
+
# When no `:executor` is provided, TransferManager creates a new DefaultExecutor for each individual
|
|
19
|
+
# operation (`download_file`, `upload_file`, etc.) and automatically shuts it down when that operation completes.
|
|
20
|
+
# Each operation gets its own isolated thread pool with the specified `:thread_count` (default 10 threads).
|
|
21
|
+
#
|
|
22
|
+
# ### Custom Executor
|
|
23
|
+
# You can provide your own executor (e.g., `Concurrent::ThreadPoolExecutor`) for fine-grained control over thread
|
|
24
|
+
# pools and resource management. When using a custom executor, you are responsible for shutting it down
|
|
25
|
+
# when finished. The executor may be reused across multiple TransferManager operations.
|
|
26
|
+
#
|
|
27
|
+
# Custom executors must implement the same interface as DefaultExecutor.
|
|
28
|
+
#
|
|
29
|
+
# **Required methods:**
|
|
30
|
+
#
|
|
31
|
+
# * `post(*args, &block)` - Execute a task with given arguments and block
|
|
32
|
+
# * `kill` - Immediately terminate all running tasks
|
|
33
|
+
#
|
|
34
|
+
# **Optional methods:**
|
|
35
|
+
#
|
|
36
|
+
# * `shutdown(timeout = nil)` - Gracefully shutdown the executor with optional timeout
|
|
37
|
+
#
|
|
38
|
+
# @example Using default executor (automatic creation and shutdown)
|
|
39
|
+
# tm = TransferManager.new # No executor provided
|
|
40
|
+
# # DefaultExecutor created, used, and shutdown automatically
|
|
41
|
+
# tm.download_file('/path/to/file', bucket: 'bucket', key: 'key')
|
|
42
|
+
#
|
|
43
|
+
# @example Using custom executor (manual shutdown required)
|
|
44
|
+
# require 'concurrent-ruby'
|
|
45
|
+
#
|
|
46
|
+
# executor = Concurrent::ThreadPoolExecutor.new(max_threads: 5)
|
|
47
|
+
# tm = TransferManager.new(executor: executor)
|
|
48
|
+
# tm.download_file('/path/to/file1', bucket: 'bucket', key: 'key1')
|
|
49
|
+
# executor.shutdown # You must shutdown custom executors
|
|
50
|
+
#
|
|
51
|
+
class TransferManager
|
|
52
|
+
|
|
53
|
+
# @param [Hash] options
|
|
54
|
+
# @option options [S3::Client] :client (S3::Client.new)
|
|
55
|
+
# The S3 client to use for {TransferManager} operations. If not provided, a new default client
|
|
56
|
+
# will be created automatically.
|
|
57
|
+
# @option options [Object] :executor
|
|
58
|
+
# The executor to use for multipart operations. Must implement the same interface as {DefaultExecutor}.
|
|
59
|
+
# If not provided, a new {DefaultExecutor} will be created automatically for each operation and
|
|
60
|
+
# shutdown after completion. When provided a custom executor, it will be reused across operations, and
|
|
61
|
+
# you are responsible for shutting it down when finished.
|
|
62
|
+
def initialize(options = {})
|
|
63
|
+
@client = options[:client] || Client.new
|
|
64
|
+
@executor = options[:executor]
|
|
65
|
+
end
|
|
66
|
+
|
|
67
|
+
# @return [S3::Client]
|
|
68
|
+
attr_reader :client
|
|
69
|
+
|
|
70
|
+
# @return [Object]
|
|
71
|
+
attr_reader :executor
|
|
72
|
+
|
|
73
|
+
# Downloads a file in S3 to a path on disk.
|
|
74
|
+
#
|
|
75
|
+
# # small files (< 5MB) are downloaded in a single API call
|
|
76
|
+
# tm = TransferManager.new
|
|
77
|
+
# tm.download_file('/path/to/file', bucket: 'bucket', key: 'key')
|
|
78
|
+
#
|
|
79
|
+
# Files larger than 5MB are downloaded using multipart method:
|
|
80
|
+
#
|
|
81
|
+
# # large files are split into parts and the parts are downloaded in parallel
|
|
82
|
+
# tm.download_file('/path/to/large_file', bucket: 'bucket', key: 'key')
|
|
83
|
+
#
|
|
84
|
+
# You can provide a callback to monitor progress of the download:
|
|
85
|
+
#
|
|
86
|
+
# # bytes and part_sizes are each an array with 1 entry per part
|
|
87
|
+
# # part_sizes may not be known until the first bytes are retrieved
|
|
88
|
+
# progress = proc do |bytes, part_sizes, file_size|
|
|
89
|
+
# bytes.map.with_index do |b, i|
|
|
90
|
+
# puts "Part #{i + 1}: #{b} / #{part_sizes[i]} " + "Total: #{100.0 * bytes.sum / file_size}%"
|
|
91
|
+
# end
|
|
92
|
+
# end
|
|
93
|
+
# tm.download_file('/path/to/file', bucket: 'bucket', key: 'key', progress_callback: progress)
|
|
94
|
+
#
|
|
95
|
+
# @param [String, Pathname, File, Tempfile] destination
|
|
96
|
+
# Where to download the file to. This can either be a String or Pathname to the file, an open File object,
|
|
97
|
+
# or an open Tempfile object. If you pass an open File or Tempfile object, then you are responsible for
|
|
98
|
+
# closing it after the download completes. Download behavior varies by destination type:
|
|
99
|
+
#
|
|
100
|
+
# * **String/Pathname paths**: Downloads to a temporary file first, then atomically moves to the final
|
|
101
|
+
# destination. This prevents corruption of any existing file if the download fails.
|
|
102
|
+
# * **File/Tempfile objects**: Downloads directly to the file object without using temporary files.
|
|
103
|
+
# You are responsible for managing the file object's state and closing it after the download completes.
|
|
104
|
+
# If the download fails, the file object may contain partial data.
|
|
105
|
+
#
|
|
106
|
+
# @param [String] bucket
|
|
107
|
+
# The name of the S3 bucket to upload to.
|
|
108
|
+
#
|
|
109
|
+
# @param [String] key
|
|
110
|
+
# The object key name in S3 bucket.
|
|
111
|
+
#
|
|
112
|
+
# @param [Hash] options
|
|
113
|
+
# Additional options for {Client#get_object} and #{Client#head_object} may be provided.
|
|
114
|
+
#
|
|
115
|
+
# @option options [String] :mode ("auto") `"auto"`, `"single_request"` or `"get_range"`
|
|
116
|
+
#
|
|
117
|
+
# * `"auto"` mode is enabled by default, which performs `multipart_download`
|
|
118
|
+
# * `"single_request"` mode forces only 1 GET request to be made in the download
|
|
119
|
+
# * `"get_range"` mode requires the `:chunk_size` parameter to be configured to customize each range size
|
|
120
|
+
#
|
|
121
|
+
# @option options [Integer] :chunk_size required in `"get_range"` mode.
|
|
122
|
+
#
|
|
123
|
+
# @option options [Integer] :thread_count (10) Customize threads used in the multipart download.
|
|
124
|
+
# Only used when no custom executor is provided (creates {DefaultExecutor} with given thread count).
|
|
125
|
+
#
|
|
126
|
+
# @option options [String] :checksum_mode ("ENABLED")
|
|
127
|
+
# When `"ENABLED"` and the object has a stored checksum, it will be used to validate the download and will
|
|
128
|
+
# raise an `Aws::Errors::ChecksumError` if checksum validation fails. You may provide a `on_checksum_validated`
|
|
129
|
+
# callback if you need to verify that validation occurred and which algorithm was used.
|
|
130
|
+
# To disable checksum validation, set `checksum_mode` to `"DISABLED"`.
|
|
131
|
+
#
|
|
132
|
+
# @option options [Callable] :on_checksum_validated
|
|
133
|
+
# Called each time a request's checksum is validated with the checksum algorithm and the
|
|
134
|
+
# response. For multipart downloads, this will be called for each part that is downloaded and validated.
|
|
135
|
+
#
|
|
136
|
+
# @option options [Proc] :progress_callback
|
|
137
|
+
# A Proc that will be called when each chunk of the download is received. It will be invoked with
|
|
138
|
+
# `bytes_read`, `part_sizes`, `file_size`. When the object is downloaded as parts (rather than by ranges),
|
|
139
|
+
# the `part_sizes` will not be known ahead of time and will be `nil` in the callback until the first bytes
|
|
140
|
+
# in the part are received.
|
|
141
|
+
#
|
|
142
|
+
# @raise [MultipartDownloadError] Raised when an object validation fails outside of service errors.
|
|
143
|
+
#
|
|
144
|
+
# @return [Boolean] Returns `true` when the file is downloaded without any errors.
|
|
145
|
+
#
|
|
146
|
+
# @see Client#get_object
|
|
147
|
+
# @see Client#head_object
|
|
148
|
+
def download_file(destination, bucket:, key:, **options)
|
|
149
|
+
download_opts = options.merge(bucket: bucket, key: key)
|
|
150
|
+
executor = @executor || DefaultExecutor.new(max_threads: download_opts.delete(:thread_count))
|
|
151
|
+
downloader = FileDownloader.new(client: @client, executor: executor)
|
|
152
|
+
downloader.download(destination, download_opts)
|
|
153
|
+
executor.shutdown unless @executor
|
|
154
|
+
true
|
|
155
|
+
end
|
|
156
|
+
|
|
157
|
+
# Uploads a file from disk to S3.
|
|
158
|
+
#
|
|
159
|
+
# # small files are uploaded with the PutObject API
|
|
160
|
+
# tm = TransferManager.new
|
|
161
|
+
# tm.upload_file('/path/to/small_file', bucket: 'bucket', key: 'key')
|
|
162
|
+
#
|
|
163
|
+
# Files larger than or equal to `:multipart_threshold` are uploaded using multipart upload APIs.
|
|
164
|
+
#
|
|
165
|
+
# # large files are automatically split into parts and the parts are uploaded in parallel
|
|
166
|
+
# tm.upload_file('/path/to/large_file', bucket: 'bucket', key: 'key')
|
|
167
|
+
#
|
|
168
|
+
# The response of the S3 upload API is yielded if a block given.
|
|
169
|
+
#
|
|
170
|
+
# # API response will have etag value of the file
|
|
171
|
+
# tm.upload_file('/path/to/file', bucket: 'bucket', key: 'key') do |response|
|
|
172
|
+
# etag = response.etag
|
|
173
|
+
# end
|
|
174
|
+
#
|
|
175
|
+
# You can provide a callback to monitor progress of the upload:
|
|
176
|
+
#
|
|
177
|
+
# # bytes and totals are each an array with 1 entry per part
|
|
178
|
+
# progress = proc do |bytes, totals|
|
|
179
|
+
# bytes.map.with_index do |b, i|
|
|
180
|
+
# puts "Part #{i + 1}: #{b} / #{totals[i]} " + "Total: #{100.0 * bytes.sum / totals.sum}%"
|
|
181
|
+
# end
|
|
182
|
+
# end
|
|
183
|
+
# tm.upload_file('/path/to/file', bucket: 'bucket', key: 'key', progress_callback: progress)
|
|
184
|
+
#
|
|
185
|
+
# @param [String, Pathname, File, Tempfile] source
|
|
186
|
+
# A file on the local file system that will be uploaded. This can either be a `String` or `Pathname` to the
|
|
187
|
+
# file, an open `File` object, or an open `Tempfile` object. If you pass an open `File` or `Tempfile` object,
|
|
188
|
+
# then you are responsible for closing it after the upload completes. When using an open Tempfile, rewind it
|
|
189
|
+
# before uploading or else the object will be empty.
|
|
190
|
+
#
|
|
191
|
+
# @param [String] bucket
|
|
192
|
+
# The name of the S3 bucket to upload to.
|
|
193
|
+
#
|
|
194
|
+
# @param [String] key
|
|
195
|
+
# The object key name for the uploaded file.
|
|
196
|
+
#
|
|
197
|
+
# @param [Hash] options
|
|
198
|
+
# Additional options for {Client#put_object} when file sizes below the multipart threshold.
|
|
199
|
+
# For files larger than the multipart threshold, options for {Client#create_multipart_upload},
|
|
200
|
+
# {Client#complete_multipart_upload}, and {Client#upload_part} can be provided.
|
|
201
|
+
#
|
|
202
|
+
# @option options [Integer] :multipart_threshold (104857600)
|
|
203
|
+
# Files larger than or equal to `:multipart_threshold` are uploaded using the S3 multipart upload APIs.
|
|
204
|
+
# Default threshold is `100MB`.
|
|
205
|
+
#
|
|
206
|
+
# @option options [Integer] :thread_count (10) Customize threads used in the multipart upload.
|
|
207
|
+
# Only used when no custom executor is provided (creates {DefaultExecutor} with the given thread count).
|
|
208
|
+
#
|
|
209
|
+
# @option options [Proc] :progress_callback (nil)
|
|
210
|
+
# A Proc that will be called when each chunk of the upload is sent.
|
|
211
|
+
# It will be invoked with `[bytes_read]` and `[total_sizes]`.
|
|
212
|
+
#
|
|
213
|
+
# @raise [MultipartUploadError] If a file is being uploaded in parts, and the upload can not be completed,
|
|
214
|
+
# then the upload is aborted and this error is raised. The raised error has a `#errors` method that
|
|
215
|
+
# returns the failures that caused the upload to be aborted.
|
|
216
|
+
#
|
|
217
|
+
# @return [Boolean] Returns `true` when the file is uploaded without any errors.
|
|
218
|
+
#
|
|
219
|
+
# @see Client#put_object
|
|
220
|
+
# @see Client#create_multipart_upload
|
|
221
|
+
# @see Client#complete_multipart_upload
|
|
222
|
+
# @see Client#upload_part
|
|
223
|
+
def upload_file(source, bucket:, key:, **options)
|
|
224
|
+
upload_opts = options.merge(bucket: bucket, key: key)
|
|
225
|
+
executor = @executor || DefaultExecutor.new(max_threads: upload_opts.delete(:thread_count))
|
|
226
|
+
uploader = FileUploader.new(
|
|
227
|
+
multipart_threshold: upload_opts.delete(:multipart_threshold),
|
|
228
|
+
client: @client,
|
|
229
|
+
executor: executor
|
|
230
|
+
)
|
|
231
|
+
response = uploader.upload(source, upload_opts)
|
|
232
|
+
yield response if block_given?
|
|
233
|
+
executor.shutdown unless @executor
|
|
234
|
+
true
|
|
235
|
+
end
|
|
236
|
+
|
|
237
|
+
# Uploads a stream in a streaming fashion to S3.
|
|
238
|
+
#
|
|
239
|
+
# Passed chunks automatically split into multipart upload parts and the parts are uploaded in parallel.
|
|
240
|
+
# This allows for streaming uploads that never touch the disk.
|
|
241
|
+
#
|
|
242
|
+
# **Note**: There are known issues in JRuby until jruby-9.1.15.0, so avoid using this with older JRuby versions.
|
|
243
|
+
#
|
|
244
|
+
# @example Streaming chunks of data
|
|
245
|
+
# tm = TransferManager.new
|
|
246
|
+
# tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
|
|
247
|
+
# 10.times { write_stream << 'foo' }
|
|
248
|
+
# end
|
|
249
|
+
# @example Streaming chunks of data
|
|
250
|
+
# tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
|
|
251
|
+
# IO.copy_stream(IO.popen('ls'), write_stream)
|
|
252
|
+
# end
|
|
253
|
+
# @example Streaming chunks of data
|
|
254
|
+
# tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
|
|
255
|
+
# IO.copy_stream(STDIN, write_stream)
|
|
256
|
+
# end
|
|
257
|
+
#
|
|
258
|
+
# @param [String] bucket
|
|
259
|
+
# The name of the S3 bucket to upload to.
|
|
260
|
+
#
|
|
261
|
+
# @param [String] key
|
|
262
|
+
# The object key name for the uploaded file.
|
|
263
|
+
#
|
|
264
|
+
# @param [Hash] options
|
|
265
|
+
# Additional options for {Client#create_multipart_upload}, {Client#complete_multipart_upload}, and
|
|
266
|
+
# {Client#upload_part} can be provided.
|
|
267
|
+
#
|
|
268
|
+
# @option options [Integer] :thread_count (10)
|
|
269
|
+
# The number of parallel multipart uploads. Only used when no custom executor is provided (creates
|
|
270
|
+
# {DefaultExecutor} with the given thread count). An additional thread is used internally for task coordination.
|
|
271
|
+
#
|
|
272
|
+
# @option options [Boolean] :tempfile (false)
|
|
273
|
+
# Normally read data is stored in memory when building the parts in order to complete the underlying
|
|
274
|
+
# multipart upload. By passing `:tempfile => true`, the data read will be temporarily stored on disk reducing
|
|
275
|
+
# the memory footprint vastly.
|
|
276
|
+
#
|
|
277
|
+
# @option options [Integer] :part_size (5242880)
|
|
278
|
+
# Define how big each part size but the last should be. Default `:part_size` is `5 * 1024 * 1024`.
|
|
279
|
+
#
|
|
280
|
+
# @raise [MultipartUploadError] If an object is being uploaded in parts, and the upload can not be completed,
|
|
281
|
+
# then the upload is aborted and this error is raised. The raised error has a `#errors` method that returns
|
|
282
|
+
# the failures that caused the upload to be aborted.
|
|
283
|
+
#
|
|
284
|
+
# @return [Boolean] Returns `true` when the object is uploaded without any errors.
|
|
285
|
+
#
|
|
286
|
+
# @see Client#create_multipart_upload
|
|
287
|
+
# @see Client#complete_multipart_upload
|
|
288
|
+
# @see Client#upload_part
|
|
289
|
+
def upload_stream(bucket:, key:, **options, &block)
|
|
290
|
+
upload_opts = options.merge(bucket: bucket, key: key)
|
|
291
|
+
executor = @executor || DefaultExecutor.new(max_threads: upload_opts.delete(:thread_count))
|
|
292
|
+
uploader = MultipartStreamUploader.new(
|
|
293
|
+
client: @client,
|
|
294
|
+
executor: executor,
|
|
295
|
+
tempfile: upload_opts.delete(:tempfile),
|
|
296
|
+
part_size: upload_opts.delete(:part_size)
|
|
297
|
+
)
|
|
298
|
+
uploader.upload(upload_opts, &block)
|
|
299
|
+
executor.shutdown unless @executor
|
|
300
|
+
true
|
|
301
|
+
end
|
|
302
|
+
end
|
|
303
|
+
end
|
|
304
|
+
end
|