aws-sdk-s3 1.198.0 → 1.202.0

@@ -11,7 +11,6 @@ module Aws
  class MultipartStreamUploader

  DEFAULT_PART_SIZE = 5 * 1024 * 1024 # 5MB
- DEFAULT_THREAD_COUNT = 10
  CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
  UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)
  COMPLETE_UPLOAD_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
@@ -19,9 +18,9 @@ module Aws
  # @option options [Client] :client
  def initialize(options = {})
  @client = options[:client] || Client.new
+ @executor = options[:executor]
  @tempfile = options[:tempfile]
  @part_size = options[:part_size] || DEFAULT_PART_SIZE
- @thread_count = options[:thread_count] || DEFAULT_THREAD_COUNT
  end

  # @return [Client]
@@ -29,7 +28,6 @@ module Aws

  # @option options [required,String] :bucket
  # @option options [required,String] :key
- # @option options [Integer] :thread_count (DEFAULT_THREAD_COUNT)
  # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
  def upload(options = {}, &block)
  Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
@@ -54,28 +52,30 @@ module Aws
  end

  def upload_parts(upload_id, options, &block)
- completed = Queue.new
- thread_errors = []
- errors = begin
+ completed_parts = Queue.new
+ errors = []
+
+ begin
  IO.pipe do |read_pipe, write_pipe|
- threads = upload_in_threads(
- read_pipe,
- completed,
- upload_part_opts(options).merge(upload_id: upload_id),
- thread_errors
- )
- begin
- block.call(write_pipe)
- ensure
- # Ensure the pipe is closed to avoid https://github.com/jruby/jruby/issues/6111
- write_pipe.close
+ upload_thread = Thread.new do
+ upload_with_executor(
+ read_pipe,
+ completed_parts,
+ errors,
+ upload_part_opts(options).merge(upload_id: upload_id)
+ )
  end
- threads.map(&:value).compact
+
+ block.call(write_pipe)
+ ensure
+ # Ensure the pipe is closed to avoid https://github.com/jruby/jruby/issues/6111
+ write_pipe.close
+ upload_thread.join
  end
  rescue StandardError => e
- thread_errors + [e]
+ errors << e
  end
- return ordered_parts(completed) if errors.empty?
+ return ordered_parts(completed_parts) if errors.empty?

  abort_upload(upload_id, options, errors)
  end
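
Note: the rewrite moves part reading onto a single background thread and keeps the caller's block on the write end of the pipe. The ensure'd write_pipe.close is load-bearing: the reader only sees EOF once the write end closes. A standalone sketch of that handoff (std-lib only; sizes and names are illustrative, not from the gem):

    IO.pipe do |read_pipe, write_pipe|
      reader = Thread.new do
        parts = 0
        # read(len) returns data until EOF; EOF arrives only after write_pipe.close
        parts += 1 while read_pipe.read(5 * 1024 * 1024)
        parts
      end

      begin
        write_pipe.write('a' * (12 * 1024 * 1024)) # the caller's block streams data in
      ensure
        write_pipe.close # without this, the reader never sees EOF and join blocks forever
      end
      puts reader.value # => 3 (two full 5MB parts plus a 2MB remainder)
    end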
@@ -128,37 +128,34 @@ module Aws
  end
  end

- def upload_in_threads(read_pipe, completed, options, thread_errors)
- mutex = Mutex.new
+ def upload_with_executor(read_pipe, completed, errors, options)
+ completion_queue = Queue.new
+ queued_parts = 0
  part_number = 0
- options.fetch(:thread_count, @thread_count).times.map do
- thread = Thread.new do
- loop do
- body, thread_part_number = mutex.synchronize do
- [read_to_part_body(read_pipe), part_number += 1]
- end
- break unless body || thread_part_number == 1
-
- begin
- part = options.merge(body: body, part_number: thread_part_number)
- resp = @client.upload_part(part)
- completed_part = create_completed_part(resp, part)
- completed.push(completed_part)
- ensure
- clear_body(body)
- end
- end
- nil
+ mutex = Mutex.new
+ loop do
+ part_body, current_part_num = mutex.synchronize do
+ [read_to_part_body(read_pipe), part_number += 1]
+ end
+ break unless part_body || current_part_num == 1
+
+ queued_parts += 1
+ @executor.post(part_body, current_part_num, options) do |body, num, opts|
+ part = opts.merge(body: body, part_number: num)
+ resp = @client.upload_part(part)
+ completed_part = create_completed_part(resp, part)
+ completed.push(completed_part)
  rescue StandardError => e
- # keep other threads from uploading other parts
  mutex.synchronize do
- thread_errors.push(e)
+ errors.push(e)
  read_pipe.close_read unless read_pipe.closed?
  end
- e
+ ensure
+ clear_body(body)
+ completion_queue << :done
  end
- thread
  end
+ queued_parts.times { completion_queue.pop }
  end

  def create_completed_part(resp, part)
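
The coordination above is a simple count-and-await: each posted task pushes to completion_queue in its ensure (so failures still signal), and the producer pops once per queued part before returning. A standalone sketch of the same pattern, with Thread.new standing in for @executor.post (names illustrative):

    completion_queue = Queue.new
    queued = 0

    5.times do |i|
      queued += 1
      Thread.new do # stands in for @executor.post(part_body, num, opts) { ... }
        # ... upload part i + 1 here ...
      ensure
        completion_queue << :done # signal even when the task raises
      end
    end

    queued.times { completion_queue.pop } # blocks until every posted task has signaled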
@@ -136,10 +136,10 @@ module Aws::S3
  end

  # The Base64 encoded, 32-bit `CRC32 checksum` of the object. This
- # checksum is only be present if the checksum was uploaded with the
- # object. When you use an API operation on an object that was uploaded
- # using multipart uploads, this value may not be a direct checksum value
- # of the full object. Instead, it's a calculation based on the checksum
+ # checksum is only present if the checksum was uploaded with the object.
+ # When you use an API operation on an object that was uploaded using
+ # multipart uploads, this value may not be a direct checksum value of
+ # the full object. Instead, it's a calculation based on the checksum
  # values of each individual part. For more information about how
  # checksums are calculated with multipart uploads, see [ Checking object
  # integrity][1] in the *Amazon S3 User Guide*.
@@ -181,8 +181,8 @@ module Aws::S3
  data[:checksum_crc64nvme]
  end

- # The Base64 encoded, 160-bit `SHA1` digest of the object. This will
- # only be present if the object was uploaded with the object. When you
+ # The Base64 encoded, 160-bit `SHA1` digest of the object. This checksum
+ # is only present if the checksum was uploaded with the object. When you
  # use the API operation on an object that was uploaded using multipart
  # uploads, this value may not be a direct checksum value of the full
  # object. Instead, it's a calculation based on the checksum values of
@@ -198,14 +198,14 @@ module Aws::S3
  data[:checksum_sha1]
  end

- # The Base64 encoded, 256-bit `SHA256` digest of the object. This will
- # only be present if the object was uploaded with the object. When you
- # use an API operation on an object that was uploaded using multipart
- # uploads, this value may not be a direct checksum value of the full
- # object. Instead, it's a calculation based on the checksum values of
- # each individual part. For more information about how checksums are
- # calculated with multipart uploads, see [ Checking object integrity][1]
- # in the *Amazon S3 User Guide*.
+ # The Base64 encoded, 256-bit `SHA256` digest of the object. This
+ # checksum is only present if the checksum was uploaded with the object.
+ # When you use an API operation on an object that was uploaded using
+ # multipart uploads, this value may not be a direct checksum value of
+ # the full object. Instead, it's a calculation based on the checksum
+ # values of each individual part. For more information about how
+ # checksums are calculated with multipart uploads, see [ Checking object
+ # integrity][1] in the *Amazon S3 User Guide*.
  #
  #
  #
@@ -757,6 +757,8 @@ module Aws::S3
  # grant_read: "GrantRead",
  # grant_read_acp: "GrantReadACP",
  # grant_write_acp: "GrantWriteACP",
+ # if_match: "IfMatch",
+ # if_none_match: "IfNoneMatch",
  # metadata: {
  # "MetadataKey" => "MetadataValue",
  # },
@@ -1013,6 +1015,35 @@ module Aws::S3
  # * This functionality is not supported for Amazon S3 on Outposts.
  #
  # </note>
+ # @option options [String] :if_match
+ # Copies the object if the entity tag (ETag) of the destination object
+ # matches the specified tag. If the ETag values do not match, the
+ # operation returns a `412 Precondition Failed` error. If a concurrent
+ # operation occurs during the upload S3 returns a `409
+ # ConditionalRequestConflict` response. On a 409 failure you should
+ # fetch the object's ETag and retry the upload.
+ #
+ # Expects the ETag value as a string.
+ #
+ # For more information about conditional requests, see [RFC 7232][1].
+ #
+ #
+ #
+ # [1]: https://tools.ietf.org/html/rfc7232
+ # @option options [String] :if_none_match
+ # Copies the object only if the object key name at the destination does
+ # not already exist in the bucket specified. Otherwise, Amazon S3
+ # returns a `412 Precondition Failed` error. If a concurrent operation
+ # occurs during the upload S3 returns a `409 ConditionalRequestConflict`
+ # response. On a 409 failure you should retry the upload.
+ #
+ # Expects the '*' (asterisk) character.
+ #
+ # For more information about conditional requests, see [RFC 7232][1].
+ #
+ #
+ #
+ # [1]: https://tools.ietf.org/html/rfc7232
  # @option options [Hash<String,String>] :metadata
  # A map of metadata to store with the object in S3.
  # @option options [String] :metadata_directive
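
A possible call using the new conditional options on Object#copy_from (bucket and key names are hypothetical):

    obj = Aws::S3::Object.new('dest-bucket', 'dest-key')

    # Copy only if 'dest-key' does not already exist in 'dest-bucket';
    # otherwise S3 returns 412 Precondition Failed (or 409 under a
    # concurrent write, in which case the copy should be retried).
    obj.copy_from('src-bucket/src-key', if_none_match: '*')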
@@ -1535,17 +1566,15 @@ module Aws::S3
  # you provide does not match the actual owner of the bucket, the request
  # fails with the HTTP status code `403 Forbidden` (access denied).
  # @option options [String] :if_match
- # The `If-Match` header field makes the request method conditional on
- # ETags. If the ETag value does not match, the operation returns a `412
- # Precondition Failed` error. If the ETag matches or if the object
- # doesn't exist, the operation will return a `204 Success (No Content)
- # response`.
- #
- # For more information about conditional requests, see [RFC 7232][1].
+ # Deletes the object if the ETag (entity tag) value provided during the
+ # delete operation matches the ETag of the object in S3. If the ETag
+ # values do not match, the operation returns a `412 Precondition Failed`
+ # error.
  #
- # <note markdown="1"> This functionality is only supported for directory buckets.
+ # Expects the ETag value as a string. `If-Match` does accept a string
+ # value of an '*' (asterisk) character to denote a match of any ETag.
  #
- # </note>
+ # For more information about conditional requests, see [RFC 7232][1].
  #
  #
  #
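
A possible call using the reworded conditional delete (names hypothetical; obj.etag issues a HEAD request under the hood):

    obj = Aws::S3::Object.new('my-bucket', 'my-key')

    # Delete only while the object is unchanged; if another writer has
    # replaced it, the ETag no longer matches and S3 returns 412.
    obj.delete(if_match: obj.etag)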
@@ -354,6 +354,8 @@ module Aws::S3
  # grant_read: "GrantRead",
  # grant_read_acp: "GrantReadACP",
  # grant_write_acp: "GrantWriteACP",
+ # if_match: "IfMatch",
+ # if_none_match: "IfNoneMatch",
  # metadata: {
  # "MetadataKey" => "MetadataValue",
  # },
@@ -610,6 +612,35 @@ module Aws::S3
  # * This functionality is not supported for Amazon S3 on Outposts.
  #
  # </note>
+ # @option options [String] :if_match
+ # Copies the object if the entity tag (ETag) of the destination object
+ # matches the specified tag. If the ETag values do not match, the
+ # operation returns a `412 Precondition Failed` error. If a concurrent
+ # operation occurs during the upload S3 returns a `409
+ # ConditionalRequestConflict` response. On a 409 failure you should
+ # fetch the object's ETag and retry the upload.
+ #
+ # Expects the ETag value as a string.
+ #
+ # For more information about conditional requests, see [RFC 7232][1].
+ #
+ #
+ #
+ # [1]: https://tools.ietf.org/html/rfc7232
+ # @option options [String] :if_none_match
+ # Copies the object only if the object key name at the destination does
+ # not already exist in the bucket specified. Otherwise, Amazon S3
+ # returns a `412 Precondition Failed` error. If a concurrent operation
+ # occurs during the upload S3 returns a `409 ConditionalRequestConflict`
+ # response. On a 409 failure you should retry the upload.
+ #
+ # Expects the '*' (asterisk) character.
+ #
+ # For more information about conditional requests, see [RFC 7232][1].
+ #
+ #
+ #
+ # [1]: https://tools.ietf.org/html/rfc7232
  # @option options [Hash<String,String>] :metadata
  # A map of metadata to store with the object in S3.
  # @option options [String] :metadata_directive
@@ -1132,17 +1163,15 @@ module Aws::S3
  # you provide does not match the actual owner of the bucket, the request
  # fails with the HTTP status code `403 Forbidden` (access denied).
  # @option options [String] :if_match
- # The `If-Match` header field makes the request method conditional on
- # ETags. If the ETag value does not match, the operation returns a `412
- # Precondition Failed` error. If the ETag matches or if the object
- # doesn't exist, the operation will return a `204 Success (No Content)
- # response`.
+ # Deletes the object if the ETag (entity tag) value provided during the
+ # delete operation matches the ETag of the object in S3. If the ETag
+ # values do not match, the operation returns a `412 Precondition Failed`
+ # error.
  #
- # For more information about conditional requests, see [RFC 7232][1].
+ # Expects the ETag value as a string. `If-Match` does accept a string
+ # value of an '*' (asterisk) character to denote a match of any ETag.
  #
- # <note markdown="1"> This functionality is only supported for directory buckets.
- #
- # </note>
+ # For more information about conditional requests, see [RFC 7232][1].
  #
  #
  #
@@ -312,17 +312,15 @@ module Aws::S3
  # you provide does not match the actual owner of the bucket, the request
  # fails with the HTTP status code `403 Forbidden` (access denied).
  # @option options [String] :if_match
- # The `If-Match` header field makes the request method conditional on
- # ETags. If the ETag value does not match, the operation returns a `412
- # Precondition Failed` error. If the ETag matches or if the object
- # doesn't exist, the operation will return a `204 Success (No Content)
- # response`.
- #
- # For more information about conditional requests, see [RFC 7232][1].
+ # Deletes the object if the ETag (entity tag) value provided during the
+ # delete operation matches the ETag of the object in S3. If the ETag
+ # values do not match, the operation returns a `412 Precondition Failed`
+ # error.
  #
- # <note markdown="1"> This functionality is only supported for directory buckets.
+ # Expects the ETag value as a string. `If-Match` does accept a string
+ # value of an '*' (asterisk) character to denote a match of any ETag.
  #
- # </note>
+ # For more information about conditional requests, see [RFC 7232][1].
  #
  #
  #
@@ -25,7 +25,7 @@ The endpoint provider used to resolve endpoints. Any object that responds to

  option(
  :disable_s3_express_session_auth,
- doc_type: 'Boolean',
+ doc_type: 'boolean',
  docstring: <<~DOCS) do |cfg|
  Parameter to indicate whether S3Express session auth should be disabled
  DOCS
@@ -2,27 +2,74 @@

  module Aws
  module S3
- # A high-level S3 transfer utility that provides enhanced upload and download
- # capabilities with automatic multipart handling, progress tracking, and
- # handling of large files. The following features are supported:
+ # A high-level S3 transfer utility that provides enhanced upload and download capabilities with automatic
+ # multipart handling, progress tracking, and handling of large files. The following features are supported:
  #
  # * upload a file with multipart upload
  # * upload a stream with multipart upload
  # * download a S3 object with multipart download
  # * track transfer progress by using progress listener
  #
+ # ## Executor Management
+ # TransferManager uses executors to handle concurrent operations during multipart transfers. You can control
+ # concurrency behavior by providing a custom executor or relying on the default executor management.
+ #
+ # ### Default Behavior
+ # When no `:executor` is provided, TransferManager creates a new DefaultExecutor for each individual
+ # operation (`download_file`, `upload_file`, etc.) and automatically shuts it down when that operation completes.
+ # Each operation gets its own isolated thread pool with the specified `:thread_count` (default 10 threads).
+ #
+ # ### Custom Executor
+ # You can provide your own executor (e.g., `Concurrent::ThreadPoolExecutor`) for fine-grained control over thread
+ # pools and resource management. When using a custom executor, you are responsible for shutting it down
+ # when finished. The executor may be reused across multiple TransferManager operations.
+ #
+ # Custom executors must implement the same interface as DefaultExecutor.
+ #
+ # **Required methods:**
+ #
+ # * `post(*args, &block)` - Execute a task with given arguments and block
+ # * `kill` - Immediately terminate all running tasks
+ #
+ # **Optional methods:**
+ #
+ # * `shutdown(timeout = nil)` - Gracefully shutdown the executor with optional timeout
+ #
+ # @example Using default executor (automatic creation and shutdown)
+ # tm = TransferManager.new # No executor provided
+ # # DefaultExecutor created, used, and shutdown automatically
+ # tm.download_file('/path/to/file', bucket: 'bucket', key: 'key')
+ #
+ # @example Using custom executor (manual shutdown required)
+ # require 'concurrent-ruby'
+ #
+ # executor = Concurrent::ThreadPoolExecutor.new(max_threads: 5)
+ # tm = TransferManager.new(executor: executor)
+ # tm.download_file('/path/to/file1', bucket: 'bucket', key: 'key1')
+ # executor.shutdown # You must shutdown custom executors
+ #
  class TransferManager
+
  # @param [Hash] options
  # @option options [S3::Client] :client (S3::Client.new)
  # The S3 client to use for {TransferManager} operations. If not provided, a new default client
  # will be created automatically.
+ # @option options [Object] :executor
+ # The executor to use for multipart operations. Must implement the same interface as {DefaultExecutor}.
+ # If not provided, a new {DefaultExecutor} will be created automatically for each operation and
+ # shutdown after completion. When provided a custom executor, it will be reused across operations, and
+ # you are responsible for shutting it down when finished.
  def initialize(options = {})
- @client = options.delete(:client) || Client.new
+ @client = options[:client] || Client.new
+ @executor = options[:executor]
  end

  # @return [S3::Client]
  attr_reader :client

+ # @return [Object]
+ attr_reader :executor
+
  # Downloads a file in S3 to a path on disk.
  #
  # # small files (< 5MB) are downloaded in a single API call
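
The docs above define the executor contract but do not ship a sample implementation. A minimal std-lib sketch that satisfies it (InlineExecutor is a hypothetical name; a production executor would pool threads instead of spawning one per task):

    class InlineExecutor
      def initialize
        @threads = []
      end

      # Execute a task with the given arguments and block.
      def post(*args, &block)
        @threads << Thread.new(*args, &block)
      end

      # Optional: gracefully wait for running tasks, with an optional per-thread timeout.
      def shutdown(timeout = nil)
        @threads.each { |t| t.join(timeout) }
        true
      end

      # Immediately terminate all running tasks.
      def kill
        @threads.each(&:kill)
        true
      end
    end

    tm = Aws::S3::TransferManager.new(executor: InlineExecutor.new)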
@@ -48,7 +95,13 @@ module Aws
  # @param [String, Pathname, File, Tempfile] destination
  # Where to download the file to. This can either be a String or Pathname to the file, an open File object,
  # or an open Tempfile object. If you pass an open File or Tempfile object, then you are responsible for
- # closing it after the download completes.
+ # closing it after the download completes. Download behavior varies by destination type:
+ #
+ # * **String/Pathname paths**: Downloads to a temporary file first, then atomically moves to the final
+ # destination. This prevents corruption of any existing file if the download fails.
+ # * **File/Tempfile objects**: Downloads directly to the file object without using temporary files.
+ # You are responsible for managing the file object's state and closing it after the download completes.
+ # If the download fails, the file object may contain partial data.
  #
  # @param [String] bucket
  # The name of the S3 bucket to upload to.
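
A short sketch contrasting the two destination behaviors described above (paths and names hypothetical):

    tm = Aws::S3::TransferManager.new

    # String path: written to a temporary file, then atomically renamed into place.
    tm.download_file('/tmp/report.csv', bucket: 'my-bucket', key: 'report.csv')

    # Open File object: written directly; the caller closes it and must expect
    # partial data if the download fails midway.
    File.open('/tmp/report-direct.csv', 'wb') do |file|
      tm.download_file(file, bucket: 'my-bucket', key: 'report.csv')
    end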
@@ -68,10 +121,7 @@ module Aws
  # @option options [Integer] :chunk_size required in `"get_range"` mode.
  #
  # @option options [Integer] :thread_count (10) Customize threads used in the multipart download.
- #
- # @option options [String] :version_id The object version id used to retrieve the object.
- #
- # @see https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html ObjectVersioning
+ # Only used when no custom executor is provided (creates {DefaultExecutor} with given thread count).
  #
  # @option options [String] :checksum_mode ("ENABLED")
  # When `"ENABLED"` and the object has a stored checksum, it will be used to validate the download and will
@@ -96,8 +146,11 @@ module Aws
  # @see Client#get_object
  # @see Client#head_object
  def download_file(destination, bucket:, key:, **options)
- downloader = FileDownloader.new(client: @client)
- downloader.download(destination, options.merge(bucket: bucket, key: key))
+ download_opts = options.merge(bucket: bucket, key: key)
+ executor = @executor || DefaultExecutor.new(max_threads: download_opts.delete(:thread_count))
+ downloader = FileDownloader.new(client: @client, executor: executor)
+ downloader.download(destination, download_opts)
+ executor.shutdown unless @executor
  true
  end

@@ -133,7 +186,7 @@ module Aws
  # A file on the local file system that will be uploaded. This can either be a `String` or `Pathname` to the
  # file, an open `File` object, or an open `Tempfile` object. If you pass an open `File` or `Tempfile` object,
  # then you are responsible for closing it after the upload completes. When using an open Tempfile, rewind it
- # before uploading or else the object will be empty.
+ # before uploading or else the object will be empty.
  #
  # @param [String] bucket
  # The name of the S3 bucket to upload to.
@@ -150,15 +203,14 @@ module Aws
  # Files larger han or equal to `:multipart_threshold` are uploaded using the S3 multipart upload APIs.
  # Default threshold is `100MB`.
  #
- # @option options [Integer] :thread_count (10)
- # The number of parallel multipart uploads. This option is not used if the file is smaller than
- # `:multipart_threshold`.
+ # @option options [Integer] :thread_count (10) Customize threads used in the multipart upload.
+ # Only used when no custom executor is provided (creates {DefaultExecutor} with the given thread count).
  #
  # @option options [Proc] :progress_callback (nil)
  # A Proc that will be called when each chunk of the upload is sent.
  # It will be invoked with `[bytes_read]` and `[total_sizes]`.
  #
- # @raise [MultipartUploadError] If an file is being uploaded in parts, and the upload can not be completed,
+ # @raise [MultipartUploadError] If a file is being uploaded in parts, and the upload can not be completed,
  # then the upload is aborted and this error is raised. The raised error has a `#errors` method that
  # returns the failures that caused the upload to be aborted.
  #
@@ -169,13 +221,16 @@ module Aws
  # @see Client#complete_multipart_upload
  # @see Client#upload_part
  def upload_file(source, bucket:, key:, **options)
- uploading_options = options.dup
+ upload_opts = options.merge(bucket: bucket, key: key)
+ executor = @executor || DefaultExecutor.new(max_threads: upload_opts.delete(:thread_count))
  uploader = FileUploader.new(
- multipart_threshold: uploading_options.delete(:multipart_threshold),
- client: @client
+ multipart_threshold: upload_opts.delete(:multipart_threshold),
+ client: @client,
+ executor: executor
  )
- response = uploader.upload(source, uploading_options.merge(bucket: bucket, key: key))
+ response = uploader.upload(source, upload_opts)
  yield response if block_given?
+ executor.shutdown unless @executor
  true
  end

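
A possible call against the updated upload_file (all values hypothetical):

    tm = Aws::S3::TransferManager.new

    # Uses multipart APIs for files at or above the threshold; :thread_count
    # only sizes the per-operation DefaultExecutor when no executor was given.
    tm.upload_file('/path/to/large-file.bin',
                   bucket: 'my-bucket', key: 'large-file.bin',
                   multipart_threshold: 8 * 1024 * 1024, thread_count: 4)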
@@ -211,7 +266,8 @@ module Aws
  # {Client#upload_part} can be provided.
  #
  # @option options [Integer] :thread_count (10)
- # The number of parallel multipart uploads.
+ # The number of parallel multipart uploads. Only used when no custom executor is provided (creates
+ # {DefaultExecutor} with the given thread count). An additional thread is used internally for task coordination.
  #
  # @option options [Boolean] :tempfile (false)
  # Normally read data is stored in memory when building the parts in order to complete the underlying
@@ -231,14 +287,16 @@ module Aws
  # @see Client#complete_multipart_upload
  # @see Client#upload_part
  def upload_stream(bucket:, key:, **options, &block)
- uploading_options = options.dup
+ upload_opts = options.merge(bucket: bucket, key: key)
+ executor = @executor || DefaultExecutor.new(max_threads: upload_opts.delete(:thread_count))
  uploader = MultipartStreamUploader.new(
  client: @client,
- thread_count: uploading_options.delete(:thread_count),
- tempfile: uploading_options.delete(:tempfile),
- part_size: uploading_options.delete(:part_size)
+ executor: executor,
+ tempfile: upload_opts.delete(:tempfile),
+ part_size: upload_opts.delete(:part_size)
  )
- uploader.upload(uploading_options.merge(bucket: bucket, key: key), &block)
+ uploader.upload(upload_opts, &block)
+ executor.shutdown unless @executor
  true
  end
  end
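
A possible streaming call against the updated upload_stream (names hypothetical); the block receives the write end of the pipe that MultipartStreamUploader drains into parts:

    tm = Aws::S3::TransferManager.new

    tm.upload_stream(bucket: 'my-bucket', key: 'streamed-object') do |write_stream|
      # Everything written here is chunked into parts (DEFAULT_PART_SIZE, 5MB)
      # and uploaded concurrently by the executor.
      10.times { write_stream << ('x' * 1024 * 1024) }
    end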