aws-sdk-s3 1.188.0 → 1.205.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +119 -0
- data/VERSION +1 -1
- data/lib/aws-sdk-s3/bucket.rb +43 -4
- data/lib/aws-sdk-s3/bucket_versioning.rb +33 -0
- data/lib/aws-sdk-s3/client.rb +1943 -252
- data/lib/aws-sdk-s3/client_api.rb +289 -0
- data/lib/aws-sdk-s3/customizations/object.rb +76 -86
- data/lib/aws-sdk-s3/customizations.rb +3 -1
- data/lib/aws-sdk-s3/default_executor.rb +103 -0
- data/lib/aws-sdk-s3/endpoint_parameters.rb +17 -17
- data/lib/aws-sdk-s3/endpoint_provider.rb +220 -50
- data/lib/aws-sdk-s3/endpoints.rb +110 -0
- data/lib/aws-sdk-s3/errors.rb +11 -0
- data/lib/aws-sdk-s3/file_downloader.rb +197 -134
- data/lib/aws-sdk-s3/file_uploader.rb +9 -13
- data/lib/aws-sdk-s3/legacy_signer.rb +2 -1
- data/lib/aws-sdk-s3/multipart_download_error.rb +8 -0
- data/lib/aws-sdk-s3/multipart_file_uploader.rb +92 -107
- data/lib/aws-sdk-s3/multipart_stream_uploader.rb +96 -107
- data/lib/aws-sdk-s3/multipart_upload_error.rb +3 -4
- data/lib/aws-sdk-s3/object.rb +110 -35
- data/lib/aws-sdk-s3/object_multipart_copier.rb +2 -1
- data/lib/aws-sdk-s3/object_summary.rb +72 -20
- data/lib/aws-sdk-s3/object_version.rb +7 -9
- data/lib/aws-sdk-s3/plugins/endpoints.rb +1 -1
- data/lib/aws-sdk-s3/plugins/url_encoded_keys.rb +2 -1
- data/lib/aws-sdk-s3/resource.rb +6 -0
- data/lib/aws-sdk-s3/transfer_manager.rb +303 -0
- data/lib/aws-sdk-s3/types.rb +1490 -189
- data/lib/aws-sdk-s3.rb +1 -1
- data/sig/bucket.rbs +12 -3
- data/sig/client.rbs +170 -31
- data/sig/errors.rbs +2 -0
- data/sig/multipart_upload.rbs +1 -1
- data/sig/object.rbs +15 -10
- data/sig/object_summary.rbs +11 -9
- data/sig/resource.rbs +8 -1
- data/sig/types.rbs +215 -29
- metadata +7 -4
@@ -358,8 +358,8 @@ module Aws
  # {Client#complete_multipart_upload},
  # and {Client#upload_part} can be provided.
  #
- # @option options [Integer] :thread_count (10) The number of parallel
- #
+ # @option options [Integer] :thread_count (10) The number of parallel multipart uploads.
+ # An additional thread is used internally for task coordination.
  #
  # @option options [Boolean] :tempfile (false) Normally read data is stored
  # in memory when building the parts in order to complete the underlying
@@ -383,29 +383,28 @@ module Aws
  # @see Client#complete_multipart_upload
  # @see Client#upload_part
  def upload_stream(options = {}, &block)
-
+ upload_opts = options.merge(bucket: bucket_name, key: key)
+ executor = DefaultExecutor.new(max_threads: upload_opts.delete(:thread_count))
  uploader = MultipartStreamUploader.new(
  client: client,
-
- tempfile:
- part_size:
+ executor: executor,
+ tempfile: upload_opts.delete(:tempfile),
+ part_size: upload_opts.delete(:part_size)
  )
  Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
- uploader.upload(
- uploading_options.merge(bucket: bucket_name, key: key),
- &block
- )
+ uploader.upload(upload_opts, &block)
  end
+ executor.shutdown
  true
  end
+ deprecated(:upload_stream, use: 'Aws::S3::TransferManager#upload_stream', version: 'next major version')

  # Uploads a file from disk to the current object in S3.
  #
  # # small files are uploaded in a single API call
  # obj.upload_file('/path/to/file')
  #
- # Files larger than or equal to `:multipart_threshold` are uploaded
- # using the Amazon S3 multipart upload APIs.
+ # Files larger than or equal to `:multipart_threshold` are uploaded using the Amazon S3 multipart upload APIs.
  #
  # # large files are automatically split into parts
  # # and the parts are uploaded in parallel
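As the hunk above shows, `Object#upload_stream` now builds a `DefaultExecutor` from `:thread_count`, hands it to `MultipartStreamUploader`, shuts it down after the upload, and is marked deprecated in favor of `Aws::S3::TransferManager#upload_stream`. A minimal usage sketch, assuming a hypothetical bucket and key (the yielded write stream follows the method's existing documented behavior):

    require 'aws-sdk-s3'

    obj = Aws::S3::Resource.new(region: 'us-east-1')
                           .bucket('example-bucket').object('streamed.bin')

    # Streams ~10 MB to S3; parts are uploaded on the executor's worker threads.
    obj.upload_stream(thread_count: 10, part_size: 5 * 1024 * 1024) do |write_stream|
      10.times { write_stream << ('a' * 1024 * 1024) }
    end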
@@ -421,74 +420,65 @@ module Aws
  # You can provide a callback to monitor progress of the upload:
  #
  # # bytes and totals are each an array with 1 entry per part
- # progress =
- # puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{totals[i]}"}.join(' ') + "Total: #{100.0 * bytes.sum / totals.sum }%"
+ # progress = proc do |bytes, totals|
+ # puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{totals[i]}"}.join(' ') + "Total: #{100.0 * bytes.sum / totals.sum }%"
  # end
  # obj.upload_file('/path/to/file', progress_callback: progress)
  #
- # @param [String, Pathname, File, Tempfile] source A file on the local
- #
- #
- #
- # you are responsible for closing it after the upload completes. When
- # using an open Tempfile, rewind it before uploading or else the object
+ # @param [String, Pathname, File, Tempfile] source A file on the local file system that will be uploaded as
+ # this object. This can either be a String or Pathname to the file, an open File object, or an open
+ # Tempfile object. If you pass an open File or Tempfile object, then you are responsible for closing it
+ # after the upload completes. When using an open Tempfile, rewind it before uploading or else the object
  # will be empty.
  #
  # @param [Hash] options
- # Additional options for {Client#put_object}
- #
- #
- # {Client#complete_multipart_upload},
- # and {Client#upload_part} can be provided.
+ # Additional options for {Client#put_object} when file sizes below the multipart threshold.
+ # For files larger than the multipart threshold, options for {Client#create_multipart_upload},
+ # {Client#complete_multipart_upload}, and {Client#upload_part} can be provided.
  #
- # @option options [Integer] :multipart_threshold (104857600) Files larger
- #
- # multipart APIs.
- # Default threshold is 100MB.
+ # @option options [Integer] :multipart_threshold (104857600) Files larger han or equal to
+ # `:multipart_threshold` are uploaded using the S3 multipart APIs. Default threshold is 100MB.
  #
- # @option options [Integer] :thread_count (10) The number of parallel
- #
- # `:multipart_threshold`.
+ # @option options [Integer] :thread_count (10) The number of parallel multipart uploads.
+ # This option is not used if the file is smaller than `:multipart_threshold`.
  #
  # @option options [Proc] :progress_callback
  # A Proc that will be called when each chunk of the upload is sent.
  # It will be invoked with [bytes_read], [total_sizes]
  #
- # @raise [MultipartUploadError] If an object is being uploaded in
- #
- #
- # method that returns the failures that caused the upload to be
- # aborted.
+ # @raise [MultipartUploadError] If an object is being uploaded in parts, and the upload can not be completed,
+ # then the upload is aborted and this error is raised. The raised error has a `#errors` method that
+ # returns the failures that caused the upload to be aborted.
  #
- # @return [Boolean] Returns `true` when the object is uploaded
- # without any errors.
+ # @return [Boolean] Returns `true` when the object is uploaded without any errors.
  #
  # @see Client#put_object
  # @see Client#create_multipart_upload
  # @see Client#complete_multipart_upload
  # @see Client#upload_part
  def upload_file(source, options = {})
-
+ upload_opts = options.merge(bucket: bucket_name, key: key)
+ executor = DefaultExecutor.new(max_threads: upload_opts.delete(:thread_count))
  uploader = FileUploader.new(
-
-
+ client: client,
+ executor: executor,
+ multipart_threshold: upload_opts.delete(:multipart_threshold)
  )
  response = Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
- uploader.upload(
- source,
- uploading_options.merge(bucket: bucket_name, key: key)
- )
+ uploader.upload(source, upload_opts)
  end
  yield response if block_given?
+ executor.shutdown
  true
  end
+ deprecated(:upload_file, use: 'Aws::S3::TransferManager#upload_file', version: 'next major version')

  # Downloads a file in S3 to a path on disk.
  #
  # # small files (< 5MB) are downloaded in a single API call
  # obj.download_file('/path/to/file')
  #
- # Files larger than 5MB are downloaded using multipart method
+ # Files larger than 5MB are downloaded using multipart method:
  #
  # # large files are split into parts
  # # and the parts are downloaded in parallel
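Putting the rewritten `upload_file` documentation above together, here is a sketch of a multipart upload with a progress callback. The bucket name and paths are placeholders; the callback shape follows the doc comment:

    obj = Aws::S3::Resource.new(region: 'us-east-1')
                           .bucket('example-bucket').object('backup.tar')

    progress = proc do |bytes, totals|
      # one entry per part once the multipart path (>= :multipart_threshold) is used
      puts format('%.1f%% uploaded', 100.0 * bytes.sum / totals.sum)
    end

    obj.upload_file('/tmp/backup.tar',
                    multipart_threshold: 100 * 1024 * 1024, # default is 104857600 bytes
                    thread_count: 10,
                    progress_callback: progress)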
@@ -498,67 +488,67 @@ module Aws
  #
  # # bytes and part_sizes are each an array with 1 entry per part
  # # part_sizes may not be known until the first bytes are retrieved
- # progress =
- # puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{part_sizes[i]}"}.join(' ') + "Total: #{100.0 * bytes.sum / file_size}%"
+ # progress = proc do |bytes, part_sizes, file_size|
+ # puts bytes.map.with_index { |b, i| "Part #{i + 1}: #{b} / #{part_sizes[i]}" }.join(' ') + "Total: #{100.0 * bytes.sum / file_size}%"
  # end
  # obj.download_file('/path/to/file', progress_callback: progress)
  #
- # @param [String
+ # @param [String, Pathname, File, Tempfile] destination
+ # Where to download the file to. This can either be a String or Pathname to the file, an open File object,
+ # or an open Tempfile object. If you pass an open File or Tempfile object, then you are responsible for
+ # closing it after the download completes. Download behavior varies by destination type:
+ #
+ # * **String/Pathname paths**: Downloads to a temporary file first, then atomically moves to the final
+ #   destination. This prevents corruption of any existing file if the download fails.
+ # * **File/Tempfile objects**: Downloads directly to the file object without using temporary files.
+ #   You are responsible for managing the file object's state and closing it after the download completes.
+ #   If the download fails, the file object may contain partial data.
  #
  # @param [Hash] options
- # Additional options for {Client#get_object} and #{Client#head_object}
- # may be provided.
+ # Additional options for {Client#get_object} and #{Client#head_object} may be provided.
  #
- # @option options [String] mode `auto`, `single_request
- # `single_request` mode forces only 1 GET request is made in download,
- # `get_range` mode allows `chunk_size` parameter to configured in
- # customizing each range size in multipart_download,
- # By default, `auto` mode is enabled, which performs multipart_download
+ # @option options [String] :mode ("auto") `"auto"`, `"single_request"` or `"get_range"`
  #
- #
+ # * `auto` mode is enabled by default, which performs `multipart_download`
+ # * `"single_request`" mode forces only 1 GET request is made in download
+ # * `"get_range"` mode requires `:chunk_size` parameter to configured in customizing each range size
  #
- # @option options [Integer]
- # the multipart download.
+ # @option options [Integer] :chunk_size required in `"get_range"` mode.
  #
- # @option options [
- # retrieve the object. For more about object versioning, see:
- # https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html
+ # @option options [Integer] :thread_count (10) Customize threads used in the multipart download.
  #
- # @option options [String] checksum_mode (ENABLED)
- #
- #
- #
- # callback if you need to verify that validation occurred and which
- # algorithm was used. To disable checksum validation, set
- # `checksum_mode` to "DISABLED".
+ # @option options [String] :checksum_mode ("ENABLED")
+ # This option is deprecated. Use `:response_checksum_validation` on your S3 client instead.
+ # To disable checksum validation, set `response_checksum_validation: 'when_required'`
+ # when creating your S3 client.
  #
- # @option options [Callable] on_checksum_validated
- # request's checksum is validated with the checksum algorithm and the
- # response. For multipart downloads, this will be called for each
- # part that is downloaded and validated.
+ # @option options [Callable] :on_checksum_validated
+ # Called each time a request's checksum is validated with the checksum algorithm and the
+ # response. For multipart downloads, this will be called for each part that is downloaded and validated.
  #
  # @option options [Proc] :progress_callback
- # A Proc that will be called when each chunk of the download is received.
- #
- #
- #
- #
+ # A Proc that will be called when each chunk of the download is received. It will be invoked with
+ # `bytes_read`, `part_sizes`, `file_size`. When the object is downloaded as parts (rather than by ranges),
+ # the `part_sizes` will not be known ahead of time and will be `nil` in the callback until the first bytes
+ # in the part are received.
+ #
+ # @raise [MultipartDownloadError] Raised when an object validation fails outside of service errors.
  #
- # @return [Boolean] Returns `true` when the file is downloaded without
- # any errors.
+ # @return [Boolean] Returns `true` when the file is downloaded without any errors.
  #
  # @see Client#get_object
  # @see Client#head_object
  def download_file(destination, options = {})
-
+ download_opts = options.merge(bucket: bucket_name, key: key)
+ executor = DefaultExecutor.new(max_threads: download_opts.delete([:thread_count]))
+ downloader = FileDownloader.new(client: client, executor: executor)
  Aws::Plugins::UserAgent.metric('RESOURCE_MODEL') do
- downloader.download(
- destination,
- options.merge(bucket: bucket_name, key: key)
- )
+ downloader.download(destination, download_opts)
  end
+ executor.shutdown
  true
  end
+ deprecated(:download_file, use: 'Aws::S3::TransferManager#download_file', version: 'next major version')

  class Collection < Aws::Resources::Collection
  alias_method :delete, :batch_delete!
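For the `download_file` options documented above, a short sketch of the three modes (object and paths are placeholders; per the new deprecation notice, new code can use `Aws::S3::TransferManager#download_file` instead):

    obj = Aws::S3::Resource.new(region: 'us-east-1')
                           .bucket('example-bucket').object('backup.tar')

    # "auto" (default): single GET for small objects, multipart download otherwise.
    obj.download_file('/tmp/backup.tar', thread_count: 10)

    # "single_request": force exactly one GET request.
    obj.download_file('/tmp/backup.tar', mode: 'single_request')

    # "get_range": ranged GETs sized by :chunk_size (required in this mode).
    obj.download_file('/tmp/backup.tar', mode: 'get_range', chunk_size: 8 * 1024 * 1024)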
@@ -7,9 +7,11 @@ module Aws
  autoload :Encryption, 'aws-sdk-s3/encryption'
  autoload :EncryptionV2, 'aws-sdk-s3/encryption_v2'
  autoload :FilePart, 'aws-sdk-s3/file_part'
+ autoload :DefaultExecutor, 'aws-sdk-s3/default_executor'
  autoload :FileUploader, 'aws-sdk-s3/file_uploader'
  autoload :FileDownloader, 'aws-sdk-s3/file_downloader'
  autoload :LegacySigner, 'aws-sdk-s3/legacy_signer'
+ autoload :MultipartDownloadError, 'aws-sdk-s3/multipart_download_error'
  autoload :MultipartFileUploader, 'aws-sdk-s3/multipart_file_uploader'
  autoload :MultipartStreamUploader, 'aws-sdk-s3/multipart_stream_uploader'
  autoload :MultipartUploadError, 'aws-sdk-s3/multipart_upload_error'
@@ -17,13 +19,13 @@ module Aws
  autoload :ObjectMultipartCopier, 'aws-sdk-s3/object_multipart_copier'
  autoload :PresignedPost, 'aws-sdk-s3/presigned_post'
  autoload :Presigner, 'aws-sdk-s3/presigner'
+ autoload :TransferManager, 'aws-sdk-s3/transfer_manager'

  # s3 express session auth
  autoload :ExpressCredentials, 'aws-sdk-s3/express_credentials'
  autoload :ExpressCredentialsProvider, 'aws-sdk-s3/express_credentials_provider'

  # s3 access grants auth
-
  autoload :AccessGrantsCredentials, 'aws-sdk-s3/access_grants_credentials'
  autoload :AccessGrantsCredentialsProvider, 'aws-sdk-s3/access_grants_credentials_provider'
  end
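The new `TransferManager` autoload wires in the class that the deprecation notices above point to as the replacement for `Object#upload_file`, `#upload_stream`, and `#download_file`. Its actual constructor and options live in data/lib/aws-sdk-s3/transfer_manager.rb (not shown in this excerpt); the sketch below is an assumption based only on the deprecation messages and method names:

    # Assumed interface; verify against transfer_manager.rb in 1.205.0.
    tm = Aws::S3::TransferManager.new(client: Aws::S3::Client.new(region: 'us-east-1'))
    tm.upload_file('/tmp/backup.tar', bucket: 'example-bucket', key: 'backup.tar')
    tm.download_file('/tmp/backup.tar', bucket: 'example-bucket', key: 'backup.tar')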
@@ -0,0 +1,103 @@
+ # frozen_string_literal: true
+
+ module Aws
+   module S3
+     # @api private
+     class DefaultExecutor
+       DEFAULT_MAX_THREADS = 10
+       RUNNING = :running
+       SHUTTING_DOWN = :shutting_down
+       SHUTDOWN = :shutdown
+
+       def initialize(options = {})
+         @max_threads = options[:max_threads] || DEFAULT_MAX_THREADS
+         @state = RUNNING
+         @queue = Queue.new
+         @pool = []
+         @mutex = Mutex.new
+       end
+
+       # Submits a task for execution.
+       # @param [Object] args Variable number of arguments to pass to the block
+       # @param [Proc] block The block to be executed
+       # @return [Boolean] Returns true if the task was submitted successfully
+       def post(*args, &block)
+         @mutex.synchronize do
+           raise 'Executor has been shutdown and is no longer accepting tasks' unless @state == RUNNING
+
+           @queue << [args, block]
+           ensure_worker_available
+         end
+         true
+       end
+
+       # Immediately terminates all worker threads and clears pending tasks.
+       # This is a forceful shutdown that doesn't wait for running tasks to complete.
+       #
+       # @return [Boolean] true when termination is complete
+       def kill
+         @mutex.synchronize do
+           @state = SHUTDOWN
+           @pool.each(&:kill)
+           @pool.clear
+           @queue.clear
+         end
+         true
+       end
+
+       # Gracefully shuts down the executor, optionally with a timeout.
+       # Stops accepting new tasks and waits for running tasks to complete.
+       #
+       # @param timeout [Numeric, nil] Maximum time in seconds to wait for shutdown.
+       #   If nil, waits indefinitely. If timeout expires, remaining threads are killed.
+       # @return [Boolean] true when shutdown is complete
+       def shutdown(timeout = nil)
+         @mutex.synchronize do
+           return true if @state == SHUTDOWN
+
+           @state = SHUTTING_DOWN
+           @pool.size.times { @queue << :shutdown }
+         end
+
+         if timeout
+           deadline = Time.now + timeout
+           @pool.each do |thread|
+             remaining = deadline - Time.now
+             break if remaining <= 0
+
+             thread.join([remaining, 0].max)
+           end
+           @pool.select(&:alive?).each(&:kill)
+         else
+           @pool.each(&:join)
+         end
+
+         @mutex.synchronize do
+           @pool.clear
+           @state = SHUTDOWN
+         end
+         true
+       end
+
+       private
+
+       def ensure_worker_available
+         return unless @state == RUNNING
+
+         @pool.select!(&:alive?)
+         @pool << spawn_worker if @pool.size < @max_threads
+       end
+
+       def spawn_worker
+         Thread.new do
+           while (job = @queue.shift)
+             break if job == :shutdown
+
+             args, block = job
+             block.call(*args)
+           end
+         end
+       end
+     end
+   end
+ end
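`DefaultExecutor` is a small worker-pool used internally by the upload and download helpers; it is tagged `@api private`, so applications should not rely on it directly. A sketch of its lifecycle based on the code above:

    executor = Aws::S3::DefaultExecutor.new(max_threads: 4)

    # Tasks are queued; worker threads are spawned on demand up to :max_threads.
    10.times do |i|
      executor.post(i) { |n| puts "processing part #{n}" }
    end

    # Graceful shutdown: stop accepting tasks, let workers drain the queue, then join.
    executor.shutdown
    # executor.kill would terminate workers immediately and discard pending tasks.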
@@ -13,87 +13,87 @@ module Aws::S3
  # @!attribute bucket
  # The S3 bucket used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 bucket.
  #
- # @return [
+ # @return [string]
  #
  # @!attribute region
  # The AWS region used to dispatch the request.
  #
- # @return [
+ # @return [string]
  #
  # @!attribute use_fips
  # When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.
  #
- # @return [
+ # @return [boolean]
  #
  # @!attribute use_dual_stack
  # When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.
  #
- # @return [
+ # @return [boolean]
  #
  # @!attribute endpoint
  # Override the endpoint used to send this request
  #
- # @return [
+ # @return [string]
  #
  # @!attribute force_path_style
  # When true, force a path-style endpoint to be used where the bucket name is part of the path.
  #
- # @return [
+ # @return [boolean]
  #
  # @!attribute accelerate
  # When true, use S3 Accelerate. NOTE: Not all regions support S3 accelerate.
  #
- # @return [
+ # @return [boolean]
  #
  # @!attribute use_global_endpoint
  # Whether the global endpoint should be used, rather then the regional endpoint for us-east-1.
  #
- # @return [
+ # @return [boolean]
  #
  # @!attribute use_object_lambda_endpoint
  # Internal parameter to use object lambda endpoint for an operation (eg: WriteGetObjectResponse)
  #
- # @return [
+ # @return [boolean]
  #
  # @!attribute key
  # The S3 Key used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 Key.
  #
- # @return [
+ # @return [string]
  #
  # @!attribute prefix
  # The S3 Prefix used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 Prefix.
  #
- # @return [
+ # @return [string]
  #
  # @!attribute copy_source
  # The Copy Source used for Copy Object request. This is an optional parameter that will be set automatically for operations that are scoped to Copy Source.
  #
- # @return [
+ # @return [string]
  #
  # @!attribute disable_access_points
  # Internal parameter to disable Access Point Buckets
  #
- # @return [
+ # @return [boolean]
  #
  # @!attribute disable_multi_region_access_points
  # Whether multi-region access points (MRAP) should be disabled.
  #
- # @return [
+ # @return [boolean]
  #
  # @!attribute use_arn_region
  # When an Access Point ARN is provided and this flag is enabled, the SDK MUST use the ARN's region when constructing the endpoint instead of the client's configured region.
  #
- # @return [
+ # @return [boolean]
  #
  # @!attribute use_s3_express_control_endpoint
  # Internal parameter to indicate whether S3Express operation should use control plane, (ex. CreateBucket)
  #
- # @return [
+ # @return [boolean]
  #
  # @!attribute disable_s3_express_session_auth
  # Parameter to indicate whether S3Express session auth should be disabled
  #
- # @return [
+ # @return [boolean]
  #
  EndpointParameters = Struct.new(
  :bucket,