aws-sdk-s3 1.188.0 → 1.199.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +80 -0
- data/VERSION +1 -1
- data/lib/aws-sdk-s3/bucket.rb +43 -4
- data/lib/aws-sdk-s3/client.rb +1119 -106
- data/lib/aws-sdk-s3/client_api.rb +228 -0
- data/lib/aws-sdk-s3/customizations/object.rb +63 -76
- data/lib/aws-sdk-s3/customizations.rb +2 -1
- data/lib/aws-sdk-s3/endpoints.rb +84 -0
- data/lib/aws-sdk-s3/errors.rb +11 -0
- data/lib/aws-sdk-s3/file_downloader.rb +57 -74
- data/lib/aws-sdk-s3/file_uploader.rb +3 -5
- data/lib/aws-sdk-s3/legacy_signer.rb +2 -1
- data/lib/aws-sdk-s3/multipart_download_error.rb +8 -0
- data/lib/aws-sdk-s3/multipart_file_uploader.rb +34 -65
- data/lib/aws-sdk-s3/multipart_stream_uploader.rb +80 -88
- data/lib/aws-sdk-s3/multipart_upload_error.rb +3 -4
- data/lib/aws-sdk-s3/object.rb +58 -12
- data/lib/aws-sdk-s3/object_multipart_copier.rb +2 -1
- data/lib/aws-sdk-s3/object_summary.rb +34 -11
- data/lib/aws-sdk-s3/plugins/url_encoded_keys.rb +2 -1
- data/lib/aws-sdk-s3/resource.rb +6 -0
- data/lib/aws-sdk-s3/transfer_manager.rb +252 -0
- data/lib/aws-sdk-s3/types.rb +1108 -50
- data/lib/aws-sdk-s3.rb +1 -1
- data/sig/bucket.rbs +12 -3
- data/sig/client.rbs +141 -30
- data/sig/errors.rbs +2 -0
- data/sig/multipart_upload.rbs +1 -1
- data/sig/object.rbs +13 -10
- data/sig/object_summary.rbs +9 -9
- data/sig/resource.rbs +8 -1
- data/sig/types.rbs +182 -29
- metadata +6 -4
data/lib/aws-sdk-s3/customizations/object.rb
CHANGED
@@ -398,14 +398,14 @@ module Aws
        end
        true
      end
+     deprecated(:upload_stream, use: 'Aws::S3::TransferManager#upload_stream', version: 'next major version')

      # Uploads a file from disk to the current object in S3.
      #
      # # small files are uploaded in a single API call
      # obj.upload_file('/path/to/file')
      #
-     # Files larger than or equal to `:multipart_threshold` are uploaded
-     # using the Amazon S3 multipart upload APIs.
+     # Files larger than or equal to `:multipart_threshold` are uploaded using the Amazon S3 multipart upload APIs.
      #
      # # large files are automatically split into parts
      # # and the parts are uploaded in parallel
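The hunk above deprecates `Object#upload_stream` in favor of the new `Aws::S3::TransferManager` (added in this release as `data/lib/aws-sdk-s3/transfer_manager.rb`); the hunks below do the same for `#upload_file` and `#download_file`. A minimal migration sketch, assuming the `TransferManager` constructor and keyword arguments implied by the deprecation message (they are not shown in this diff):

```ruby
require 'aws-sdk-s3'

# Sketch only: the constructor and keyword arguments below are assumed from the
# deprecation notice; verify against the TransferManager docs for your version.
tm = Aws::S3::TransferManager.new(client: Aws::S3::Client.new)

# Replaces the deprecated Aws::S3::Object#upload_stream
tm.upload_stream(bucket: 'amzn-s3-demo-bucket', key: 'streamed-key') do |stream|
  stream << 'streamed data'
end
```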
@@ -421,47 +421,37 @@ module Aws
      # You can provide a callback to monitor progress of the upload:
      #
      # # bytes and totals are each an array with 1 entry per part
-     # progress =
-     # puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{totals[i]}"}.join(' ') + "Total: #{100.0 * bytes.sum / totals.sum }%"
+     # progress = proc do |bytes, totals|
+     #   puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{totals[i]}"}.join(' ') + "Total: #{100.0 * bytes.sum / totals.sum }%"
      # end
      # obj.upload_file('/path/to/file', progress_callback: progress)
      #
-     # @param [String, Pathname, File, Tempfile] source A file on the local
-     #
-     #
-     #
-     # you are responsible for closing it after the upload completes. When
-     # using an open Tempfile, rewind it before uploading or else the object
+     # @param [String, Pathname, File, Tempfile] source A file on the local file system that will be uploaded as
+     #   this object. This can either be a String or Pathname to the file, an open File object, or an open
+     #   Tempfile object. If you pass an open File or Tempfile object, then you are responsible for closing it
+     #   after the upload completes. When using an open Tempfile, rewind it before uploading or else the object
      # will be empty.
      #
      # @param [Hash] options
-     # Additional options for {Client#put_object}
-     #
-     #
-     # {Client#complete_multipart_upload},
-     # and {Client#upload_part} can be provided.
+     #   Additional options for {Client#put_object} when file sizes below the multipart threshold.
+     #   For files larger than the multipart threshold, options for {Client#create_multipart_upload},
+     #   {Client#complete_multipart_upload}, and {Client#upload_part} can be provided.
      #
-     # @option options [Integer] :multipart_threshold (104857600) Files larger
-     #
-     # multipart APIs.
-     # Default threshold is 100MB.
+     # @option options [Integer] :multipart_threshold (104857600) Files larger han or equal to
+     #   `:multipart_threshold` are uploaded using the S3 multipart APIs. Default threshold is 100MB.
      #
-     # @option options [Integer] :thread_count (10) The number of parallel
-     #
-     # `:multipart_threshold`.
+     # @option options [Integer] :thread_count (10) The number of parallel multipart uploads.
+     #   This option is not used if the file is smaller than `:multipart_threshold`.
      #
      # @option options [Proc] :progress_callback
      # A Proc that will be called when each chunk of the upload is sent.
      # It will be invoked with [bytes_read], [total_sizes]
      #
-     # @raise [MultipartUploadError] If an object is being uploaded in
-     #
-     #
-     # method that returns the failures that caused the upload to be
-     # aborted.
+     # @raise [MultipartUploadError] If an object is being uploaded in parts, and the upload can not be completed,
+     #   then the upload is aborted and this error is raised. The raised error has a `#errors` method that
+     #   returns the failures that caused the upload to be aborted.
      #
-     # @return [Boolean] Returns `true` when the object is uploaded
-     # without any errors.
+     # @return [Boolean] Returns `true` when the object is uploaded without any errors.
      #
      # @see Client#put_object
      # @see Client#create_multipart_upload
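The reworked documentation above keeps the same options; a short, runnable illustration of them (bucket, key and paths are placeholders):

```ruby
require 'aws-sdk-s3'

obj = Aws::S3::Resource.new.bucket('amzn-s3-demo-bucket').object('backups/archive.bin')

# bytes and totals are each an array with one entry per part
progress = proc do |bytes, totals|
  puts bytes.map.with_index { |b, i| "Part #{i + 1}: #{b}/#{totals[i]}" }.join(' ')
end

obj.upload_file('/path/to/archive.bin',
                multipart_threshold: 100 * 1024 * 1024, # files >= threshold use the multipart APIs
                thread_count: 10,                       # parallel part uploads
                progress_callback: progress)
```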
@@ -469,26 +459,21 @@ module Aws
      # @see Client#upload_part
      def upload_file(source, options = {})
        uploading_options = options.dup
-       uploader = FileUploader.new(
-         multipart_threshold: uploading_options.delete(:multipart_threshold),
-         client: client
-       )
+       uploader = FileUploader.new(multipart_threshold: uploading_options.delete(:multipart_threshold), client: client)
        response = Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
-         uploader.upload(
-           source,
-           uploading_options.merge(bucket: bucket_name, key: key)
-         )
+         uploader.upload(source, uploading_options.merge(bucket: bucket_name, key: key))
        end
        yield response if block_given?
        true
      end
+     deprecated(:upload_file, use: 'Aws::S3::TransferManager#upload_file', version: 'next major version')

      # Downloads a file in S3 to a path on disk.
      #
      # # small files (< 5MB) are downloaded in a single API call
      # obj.download_file('/path/to/file')
      #
-     # Files larger than 5MB are downloaded using multipart method
+     # Files larger than 5MB are downloaded using multipart method:
      #
      # # large files are split into parts
      # # and the parts are downloaded in parallel
@@ -498,67 +483,69 @@ module Aws
      #
      # # bytes and part_sizes are each an array with 1 entry per part
      # # part_sizes may not be known until the first bytes are retrieved
-     # progress =
-     # puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{part_sizes[i]}"}.join(' ') + "Total: #{100.0 * bytes.sum / file_size}%"
+     # progress = proc do |bytes, part_sizes, file_size|
+     #   puts bytes.map.with_index { |b, i| "Part #{i + 1}: #{b} / #{part_sizes[i]}" }.join(' ') + "Total: #{100.0 * bytes.sum / file_size}%"
      # end
      # obj.download_file('/path/to/file', progress_callback: progress)
      #
-     # @param [String
+     # @param [String, Pathname, File, Tempfile] destination
+     #   Where to download the file to. This can either be a String or Pathname to the file, an open File object,
+     #   or an open Tempfile object. If you pass an open File or Tempfile object, then you are responsible for
+     #   closing it after the download completes. Download behavior varies by destination type:
+     #
+     #   * **String/Pathname paths**: Downloads to a temporary file first, then atomically moves to the final
+     #     destination. This prevents corruption of any existing file if the download fails.
+     #   * **File/Tempfile objects**: Downloads directly to the file object without using temporary files.
+     #     You are responsible for managing the file object's state and closing it after the download completes.
+     #     If the download fails, the file object may contain partial data.
      #
      # @param [Hash] options
-     # Additional options for {Client#get_object} and #{Client#head_object}
-     # may be provided.
+     #   Additional options for {Client#get_object} and #{Client#head_object} may be provided.
      #
-     # @option options [String] mode `auto`, `single_request
-     # `single_request` mode forces only 1 GET request is made in download,
-     # `get_range` mode allows `chunk_size` parameter to configured in
-     # customizing each range size in multipart_download,
-     # By default, `auto` mode is enabled, which performs multipart_download
+     # @option options [String] :mode ("auto") `"auto"`, `"single_request"` or `"get_range"`
      #
-     #
+     #   * `auto` mode is enabled by default, which performs `multipart_download`
+     #   * `"single_request`" mode forces only 1 GET request is made in download
+     #   * `"get_range"` mode requires `:chunk_size` parameter to configured in customizing each range size
      #
-     # @option options [Integer]
-     # the multipart download.
+     # @option options [Integer] :chunk_size required in `"get_range"` mode.
      #
-     # @option options [
-     # retrieve the object. For more about object versioning, see:
-     # https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html
+     # @option options [Integer] :thread_count (10) Customize threads used in the multipart download.
      #
-     # @option options [String]
-     # the object has a stored checksum, it will be used to validate the
-     # download and will raise an `Aws::Errors::ChecksumError` if
-     # checksum validation fails. You may provide a `on_checksum_validated`
-     # callback if you need to verify that validation occurred and which
-     # algorithm was used. To disable checksum validation, set
-     # `checksum_mode` to "DISABLED".
+     # @option options [String] :version_id The object version id used to retrieve the object.
      #
-     #
-     #
-     #
-     #
+     #   @see https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html ObjectVersioning
+     #
+     # @option options [String] :checksum_mode ("ENABLED")
+     #   When `"ENABLED"` and the object has a stored checksum, it will be used to validate the download and will
+     #   raise an `Aws::Errors::ChecksumError` if checksum validation fails. You may provide a `on_checksum_validated`
+     #   callback if you need to verify that validation occurred and which algorithm was used.
+     #   To disable checksum validation, set `checksum_mode` to `"DISABLED"`.
+     #
+     # @option options [Callable] :on_checksum_validated
+     #   Called each time a request's checksum is validated with the checksum algorithm and the
+     #   response. For multipart downloads, this will be called for each part that is downloaded and validated.
      #
      # @option options [Proc] :progress_callback
-     # A Proc that will be called when each chunk of the download is received.
-     #
-     #
-     #
-     # callback until the first bytes in the part are received.
+     #   A Proc that will be called when each chunk of the download is received. It will be invoked with
+     #   `bytes_read`, `part_sizes`, `file_size`. When the object is downloaded as parts (rather than by ranges),
+     #   the `part_sizes` will not be known ahead of time and will be `nil` in the callback until the first bytes
+     #   in the part are received.
      #
-     # @
-     #
+     # @raise [MultipartDownloadError] Raised when an object validation fails outside of service errors.
+     #
+     # @return [Boolean] Returns `true` when the file is downloaded without any errors.
      #
      # @see Client#get_object
      # @see Client#head_object
      def download_file(destination, options = {})
        downloader = FileDownloader.new(client: client)
        Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
-         downloader.download(
-           destination,
-           options.merge(bucket: bucket_name, key: key)
-         )
+         downloader.download(destination, options.merge(bucket: bucket_name, key: key))
        end
        true
      end
+     deprecated(:download_file, use: 'Aws::S3::TransferManager#download_file', version: 'next major version')

      class Collection < Aws::Resources::Collection
        alias_method :delete, :batch_delete!
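A usage sketch of the documented download options, including the checksum callback described above (bucket, key and paths are placeholders):

```ruby
require 'aws-sdk-s3'

obj = Aws::S3::Resource.new.bucket('amzn-s3-demo-bucket').object('backups/archive.bin')

validated = proc do |algorithm, _response|
  puts "checksum validated with #{algorithm}"
end

obj.download_file('/tmp/archive.bin',
                  mode: 'auto',          # default: multipart download for large objects
                  thread_count: 10,
                  checksum_mode: 'ENABLED',
                  on_checksum_validated: validated)
```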
data/lib/aws-sdk-s3/customizations.rb
CHANGED
@@ -10,6 +10,7 @@ module Aws
    autoload :FileUploader, 'aws-sdk-s3/file_uploader'
    autoload :FileDownloader, 'aws-sdk-s3/file_downloader'
    autoload :LegacySigner, 'aws-sdk-s3/legacy_signer'
+   autoload :MultipartDownloadError, 'aws-sdk-s3/multipart_download_error'
    autoload :MultipartFileUploader, 'aws-sdk-s3/multipart_file_uploader'
    autoload :MultipartStreamUploader, 'aws-sdk-s3/multipart_stream_uploader'
    autoload :MultipartUploadError, 'aws-sdk-s3/multipart_upload_error'
@@ -17,13 +18,13 @@ module Aws
    autoload :ObjectMultipartCopier, 'aws-sdk-s3/object_multipart_copier'
    autoload :PresignedPost, 'aws-sdk-s3/presigned_post'
    autoload :Presigner, 'aws-sdk-s3/presigner'
+   autoload :TransferManager, 'aws-sdk-s3/transfer_manager'

    # s3 express session auth
    autoload :ExpressCredentials, 'aws-sdk-s3/express_credentials'
    autoload :ExpressCredentialsProvider, 'aws-sdk-s3/express_credentials_provider'

    # s3 access grants auth
-
    autoload :AccessGrantsCredentials, 'aws-sdk-s3/access_grants_credentials'
    autoload :AccessGrantsCredentialsProvider, 'aws-sdk-s3/access_grants_credentials_provider'
  end
data/lib/aws-sdk-s3/endpoints.rb
CHANGED
@@ -63,6 +63,18 @@ module Aws::S3
      end
    end

+   class CreateBucketMetadataConfiguration
+     def self.build(context)
+       Aws::S3::EndpointParameters.create(
+         context.config,
+         bucket: context.params[:bucket],
+         use_dual_stack: context[:use_dualstack_endpoint],
+         accelerate: context[:use_accelerate_endpoint],
+         use_s3_express_control_endpoint: true,
+       )
+     end
+   end
+
    class CreateBucketMetadataTableConfiguration
      def self.build(context)
        Aws::S3::EndpointParameters.create(
@@ -183,6 +195,18 @@ module Aws::S3
      end
    end

+   class DeleteBucketMetadataConfiguration
+     def self.build(context)
+       Aws::S3::EndpointParameters.create(
+         context.config,
+         bucket: context.params[:bucket],
+         use_dual_stack: context[:use_dualstack_endpoint],
+         accelerate: context[:use_accelerate_endpoint],
+         use_s3_express_control_endpoint: true,
+       )
+     end
+   end
+
    class DeleteBucketMetadataTableConfiguration
      def self.build(context)
        Aws::S3::EndpointParameters.create(
@@ -445,6 +469,18 @@ module Aws::S3
      end
    end

+   class GetBucketMetadataConfiguration
+     def self.build(context)
+       Aws::S3::EndpointParameters.create(
+         context.config,
+         bucket: context.params[:bucket],
+         use_dual_stack: context[:use_dualstack_endpoint],
+         accelerate: context[:use_accelerate_endpoint],
+         use_s3_express_control_endpoint: true,
+       )
+     end
+   end
+
    class GetBucketMetadataTableConfiguration
      def self.build(context)
        Aws::S3::EndpointParameters.create(
@@ -1162,6 +1198,18 @@ module Aws::S3
      end
    end

+   class RenameObject
+     def self.build(context)
+       Aws::S3::EndpointParameters.create(
+         context.config,
+         bucket: context.params[:bucket],
+         use_dual_stack: context[:use_dualstack_endpoint],
+         accelerate: context[:use_accelerate_endpoint],
+         key: context.params[:key],
+       )
+     end
+   end
+
    class RestoreObject
      def self.build(context)
        Aws::S3::EndpointParameters.create(
@@ -1184,6 +1232,30 @@ module Aws::S3
      end
    end

+   class UpdateBucketMetadataInventoryTableConfiguration
+     def self.build(context)
+       Aws::S3::EndpointParameters.create(
+         context.config,
+         bucket: context.params[:bucket],
+         use_dual_stack: context[:use_dualstack_endpoint],
+         accelerate: context[:use_accelerate_endpoint],
+         use_s3_express_control_endpoint: true,
+       )
+     end
+   end
+
+   class UpdateBucketMetadataJournalTableConfiguration
+     def self.build(context)
+       Aws::S3::EndpointParameters.create(
+         context.config,
+         bucket: context.params[:bucket],
+         use_dual_stack: context[:use_dualstack_endpoint],
+         accelerate: context[:use_accelerate_endpoint],
+         use_s3_express_control_endpoint: true,
+       )
+     end
+   end
+
    class UploadPart
      def self.build(context)
        Aws::S3::EndpointParameters.create(
@@ -1230,6 +1302,8 @@ module Aws::S3
        CopyObject.build(context)
      when :create_bucket
        CreateBucket.build(context)
+     when :create_bucket_metadata_configuration
+       CreateBucketMetadataConfiguration.build(context)
      when :create_bucket_metadata_table_configuration
        CreateBucketMetadataTableConfiguration.build(context)
      when :create_multipart_upload
@@ -1250,6 +1324,8 @@ module Aws::S3
        DeleteBucketInventoryConfiguration.build(context)
      when :delete_bucket_lifecycle
        DeleteBucketLifecycle.build(context)
+     when :delete_bucket_metadata_configuration
+       DeleteBucketMetadataConfiguration.build(context)
      when :delete_bucket_metadata_table_configuration
        DeleteBucketMetadataTableConfiguration.build(context)
      when :delete_bucket_metrics_configuration
@@ -1294,6 +1370,8 @@ module Aws::S3
        GetBucketLocation.build(context)
      when :get_bucket_logging
        GetBucketLogging.build(context)
+     when :get_bucket_metadata_configuration
+       GetBucketMetadataConfiguration.build(context)
      when :get_bucket_metadata_table_configuration
        GetBucketMetadataTableConfiguration.build(context)
      when :get_bucket_metrics_configuration
@@ -1416,10 +1494,16 @@ module Aws::S3
        PutObjectTagging.build(context)
      when :put_public_access_block
        PutPublicAccessBlock.build(context)
+     when :rename_object
+       RenameObject.build(context)
      when :restore_object
        RestoreObject.build(context)
      when :select_object_content
        SelectObjectContent.build(context)
+     when :update_bucket_metadata_inventory_table_configuration
+       UpdateBucketMetadataInventoryTableConfiguration.build(context)
+     when :update_bucket_metadata_journal_table_configuration
+       UpdateBucketMetadataJournalTableConfiguration.build(context)
      when :upload_part
        UploadPart.build(context)
      when :upload_part_copy
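The endpoint changes above wire up new client operations, notably `:rename_object` and the bucket metadata configuration APIs. A hedged sketch of the new rename call; only endpoint resolution is shown in this diff, so the parameter names below (in particular `:rename_source`) are assumptions used to illustrate the call shape:

```ruby
client = Aws::S3::Client.new

# Assumed parameters; consult the Client#rename_object API docs for the
# released signature before relying on this.
client.rename_object(
  bucket: 'amzn-s3-demo-bucket--usw2-az1--x-s3', # assumed: a directory bucket
  key: 'renamed/key',
  rename_source: 'original/key'                  # assumed parameter name
)
```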
data/lib/aws-sdk-s3/errors.rb
CHANGED
@@ -30,6 +30,7 @@ module Aws::S3
  # * {BucketAlreadyExists}
  # * {BucketAlreadyOwnedByYou}
  # * {EncryptionTypeMismatch}
+ # * {IdempotencyParameterMismatch}
  # * {InvalidObjectState}
  # * {InvalidRequest}
  # * {InvalidWriteOffset}
@@ -76,6 +77,16 @@ module Aws::S3
      end
    end

+   class IdempotencyParameterMismatch < ServiceError
+
+     # @param [Seahorse::Client::RequestContext] context
+     # @param [String] message
+     # @param [Aws::S3::Types::IdempotencyParameterMismatch] data
+     def initialize(context, message, data = Aws::EmptyStructure.new)
+       super(context, message, data)
+     end
+   end
+
    class InvalidObjectState < ServiceError

      # @param [Seahorse::Client::RequestContext] context
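The new `IdempotencyParameterMismatch` class is a modeled service error and can be rescued like any other; a minimal sketch (the operation that triggers it is not part of this diff):

```ruby
client = Aws::S3::Client.new

begin
  # an S3 request that carries an idempotency token (operation elided)
rescue Aws::S3::Errors::IdempotencyParameterMismatch => e
  warn "idempotency token reused with different parameters: #{e.message}"
end
```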
data/lib/aws-sdk-s3/file_downloader.rb
CHANGED
@@ -1,9 +1,8 @@
 # frozen_string_literal: true

 require 'pathname'
-require '
+require 'securerandom'
 require 'set'
-require 'tmpdir'

 module Aws
   module S3
@@ -12,7 +11,6 @@ module Aws

      MIN_CHUNK_SIZE = 5 * 1024 * 1024
      MAX_PARTS = 10_000
-     THREAD_COUNT = 10

      def initialize(options = {})
        @client = options[:client] || Client.new
@@ -22,18 +20,18 @@ module Aws
      attr_reader :client

      def download(destination, options = {})
-
-
-
-
-
-
-
-
-       @
-       @on_checksum_validated = options
-       @progress_callback = options
-
+       valid_types = [String, Pathname, File, Tempfile]
+       unless valid_types.include?(destination.class)
+         raise ArgumentError, "Invalid destination, expected #{valid_types.join(', ')} but got: #{destination.class}"
+       end
+
+       @destination = destination
+       @mode = options.delete(:mode) || 'auto'
+       @thread_count = options.delete(:thread_count) || 10
+       @chunk_size = options.delete(:chunk_size)
+       @on_checksum_validated = options.delete(:on_checksum_validated)
+       @progress_callback = options.delete(:progress_callback)
+       @params = options
        validate!

        Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
@@ -41,32 +39,31 @@ module Aws
          when 'auto' then multipart_download
          when 'single_request' then single_request
          when 'get_range'
-
-
-
-
-           msg = 'In :get_range mode, :chunk_size must be provided'
-           raise ArgumentError, msg
-           end
+           raise ArgumentError, 'In get_range mode, :chunk_size must be provided' unless @chunk_size
+
+           resp = @client.head_object(@params)
+           multithreaded_get_by_ranges(resp.content_length, resp.etag)
          else
-
-           'mode should be :single_request, :get_range or :auto'
-           raise ArgumentError, msg
+           raise ArgumentError, "Invalid mode #{@mode} provided, :mode should be single_request, get_range or auto"
          end
        end
+       File.rename(@temp_path, @destination) if @temp_path
+     ensure
+       File.delete(@temp_path) if @temp_path && File.exist?(@temp_path)
      end

      private

      def validate!
-
-
-
+       return unless @on_checksum_validated && !@on_checksum_validated.respond_to?(:call)
+
+       raise ArgumentError, ':on_checksum_validated must be callable'
      end

      def multipart_download
        resp = @client.head_object(@params.merge(part_number: 1))
        count = resp.parts_count
+
        if count.nil? || count <= 1
          if resp.content_length <= MIN_CHUNK_SIZE
            single_request
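Per the rewritten `#download` above, `get_range` mode now fails fast without `:chunk_size`, and String/Pathname destinations are staged in a temp file that is renamed into place only on success. A usage sketch:

```ruby
obj = Aws::S3::Resource.new.bucket('amzn-s3-demo-bucket').object('large-object')

# :chunk_size is required in get_range mode; an ArgumentError is raised otherwise.
obj.download_file('/tmp/large-object', mode: 'get_range', chunk_size: 8 * 1024 * 1024)

# For String/Pathname destinations the parts are written to
# '/tmp/large-object.s3tmp.<random>' and renamed into place only on success;
# the temp file is deleted in the ensure block if the download fails.
```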
@@ -74,8 +71,8 @@ module Aws
            multithreaded_get_by_ranges(resp.content_length, resp.etag)
          end
        else
-         #
-         resp = @client.head_object(@params)
+         # covers cases when given object is not uploaded via UploadPart API
+         resp = @client.head_object(@params) # partNumber is an option
          if resp.content_length <= MIN_CHUNK_SIZE
            single_request
          else
@@ -86,7 +83,7 @@ module Aws

      def compute_mode(file_size, count, etag)
        chunk_size = compute_chunk(file_size)
-       part_size = (file_size.to_f / count
+       part_size = (file_size.to_f / count).ceil
        if chunk_size < part_size
          multithreaded_get_by_ranges(file_size, etag)
        else
@@ -94,32 +91,10 @@ module Aws
        end
      end

-     def construct_chunks(file_size)
-       offset = 0
-       default_chunk_size = compute_chunk(file_size)
-       chunks = []
-       while offset < file_size
-         progress = offset + default_chunk_size
-         progress = file_size if progress > file_size
-         chunks << "bytes=#{offset}-#{progress - 1}"
-         offset = progress
-       end
-       chunks
-     end
-
      def compute_chunk(file_size)
-       if @chunk_size && @chunk_size > file_size
-         raise ArgumentError, ":chunk_size shouldn't exceed total file size."
-       else
-         @chunk_size || [
-           (file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE
-         ].max.to_i
-       end
-     end
+       raise ArgumentError, ":chunk_size shouldn't exceed total file size." if @chunk_size && @chunk_size > file_size

-
-       chunks = (1..chunks) if mode.eql? 'part_number'
-       chunks.each_slice(@thread_count).to_a
+       @chunk_size || [(file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE].max.to_i
      end

      def multithreaded_get_by_ranges(file_size, etag)
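With `construct_chunks` removed, `compute_chunk` is now the single place the default range size is derived. A standalone mirror of that logic, using the constants shown earlier (`MIN_CHUNK_SIZE` of 5 MiB, `MAX_PARTS` of 10,000), with the chunk size passed in explicitly for illustration:

```ruby
MIN_CHUNK_SIZE = 5 * 1024 * 1024
MAX_PARTS = 10_000

# Standalone sketch mirroring FileDownloader#compute_chunk.
def compute_chunk(file_size, chunk_size = nil)
  raise ArgumentError, ":chunk_size shouldn't exceed total file size." if chunk_size && chunk_size > file_size

  chunk_size || [(file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE].max.to_i
end

compute_chunk(200 * 1024 * 1024) # => 5242880   (the 5 MiB floor wins for a 200 MiB object)
compute_chunk(100 * 1024**3)     # => 10737419  (keeps a 100 GiB object within 10,000 parts)
```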
@@ -130,12 +105,8 @@ module Aws
        while offset < file_size
          progress = offset + default_chunk_size
          progress = file_size if progress > file_size
-
-         chunks << Part.new(
-           part_number: part_number,
-           size: (progress-offset),
-           params: @params.merge(range: range, if_match: etag)
-         )
+         params = @params.merge(range: "bytes=#{offset}-#{progress - 1}", if_match: etag)
+         chunks << Part.new(part_number: part_number, size: (progress - offset), params: params)
          part_number += 1
          offset = progress
        end
@@ -152,10 +123,13 @@ module Aws
      def download_in_threads(pending, total_size)
        threads = []
        progress = MultipartProgress.new(pending, total_size, @progress_callback) if @progress_callback
+       unless [File, Tempfile].include?(@destination.class)
+         @temp_path = "#{@destination}.s3tmp.#{SecureRandom.alphanumeric(8)}"
+       end
        @thread_count.times do
          thread = Thread.new do
            begin
-             while part = pending.shift
+             while (part = pending.shift)
                if progress
                  part.params[:on_chunk_received] =
                    proc do |_chunk, bytes, total|
@@ -163,16 +137,17 @@ module Aws
                    end
                end
                resp = @client.get_object(part.params)
-
+               range = extract_range(resp.content_range)
+               validate_range(range, part.params[:range]) if part.params[:range]
+               write(resp.body, range)
                if @on_checksum_validated && resp.checksum_validated
                  @on_checksum_validated.call(resp.checksum_validated, resp)
                end
              end
              nil
-           rescue =>
-             # keep other threads from downloading other parts
-
-             raise error
+           rescue StandardError => e
+             pending.clear! # keep other threads from downloading other parts
+             raise e
            end
          end
          threads << thread
@@ -180,21 +155,28 @@ module Aws
        threads.map(&:value).compact
      end

-     def
-
-
-
+     def extract_range(value)
+       value.match(%r{bytes (?<range>\d+-\d+)/\d+})[:range]
+     end
+
+     def validate_range(actual, expected)
+       return if actual == expected.match(/bytes=(?<range>\d+-\d+)/)[:range]
+
+       raise MultipartDownloadError, "multipart download failed: expected range of #{expected} but got #{actual}"
+     end
+
+     def write(body, range)
+       path = @temp_path || @destination
+       File.write(path, body.read, range.split('-').first.to_i)
      end

      def single_request
-       params = @params.merge(response_target: @
+       params = @params.merge(response_target: @destination)
        params[:on_chunk_received] = single_part_progress if @progress_callback
        resp = @client.get_object(params)
-
        return resp unless @on_checksum_validated

        @on_checksum_validated.call(resp.checksum_validated, resp) if resp.checksum_validated
-
        resp
      end

@@ -204,6 +186,7 @@ module Aws
      end
    end

+   # @api private
    class Part < Struct.new(:part_number, :size, :params)
      include Aws::Structure
    end