aws-sdk-s3 1.196.1 → 1.197.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 72291dc4a45c81045393df33c2dc26b215e8b67d50f33e097d7685da2a02ca47
4
- data.tar.gz: eeb0aacd605389c7511cb63684362c12351c68648fa32f7d888e00ea7b0a7d9d
3
+ metadata.gz: 206194d0b6217c7fd01477cc0407b4d55080bdd269f14d33f3ae072fabfaa2ee
4
+ data.tar.gz: e269ddd5c94adc905a49ddf2f8fa80f1446684889d934aefd14d38136f81822b
5
5
  SHA512:
6
- metadata.gz: 20dcb199634a44435dee8acb5d775dc9ed7d50b60c10091425416b692783e5de48ae91df2a4525c0543391584e08338a37c8c9b43dfebe60ffb609a99c535910
7
- data.tar.gz: 2c4fbba3c97ae00405a8a2ea973145194126f027cf070229a3d80cc47900b2d8e3a1106f69f510ef4c765f090e3d56806f9b5a470449caa3ca898527377b9601
6
+ metadata.gz: 2d27932019687706da5456a0c7dfa3e4c9bb81e3173105f6572e7a1afd4af96e553e94fedb19ca3c0052fcc4f3b297fcb14fe3719da037d9a9b333701fc35c24
7
+ data.tar.gz: bc7d64a072edfe9fc2154fec0834e6d27b19f2fbdca79b39571b1057cd73e5041abb113678d5c03414e664bd9422a8a714c37ec84015865de6566dc5b1a5909c
data/CHANGELOG.md CHANGED
@@ -1,6 +1,15 @@
1
1
  Unreleased Changes
2
2
  ------------------
3
3
 
4
+ 1.197.0 (2025-08-19)
5
+ ------------------
6
+
7
+ * Issue - When multipart stream uploader fails to complete multipart upload, it calls abort multipart upload.
8
+
9
+ * Issue - For `Aws::S3::Object` class, the following methods have been deprecated: `download_file`, `upload_file` and `upload_stream`. Use `Aws::S3::TransferManager` instead.
10
+
11
+ * Feature - Add `Aws::S3::TransferManager`, an S3 transfer utility that provides upload/download capabilities with automatic multipart handling, progress tracking, and handling of large files.
12
+
4
13
  1.196.1 (2025-08-05)
5
14
  ------------------
6
15
 
data/VERSION CHANGED
@@ -1 +1 @@
1
- 1.196.1
1
+ 1.197.0
@@ -21735,7 +21735,7 @@ module Aws::S3
21735
21735
  tracer: tracer
21736
21736
  )
21737
21737
  context[:gem_name] = 'aws-sdk-s3'
21738
- context[:gem_version] = '1.196.1'
21738
+ context[:gem_version] = '1.197.0'
21739
21739
  Seahorse::Client::Request.new(handlers, context)
21740
21740
  end
21741
21741
 
@@ -398,6 +398,7 @@ module Aws
398
398
  end
399
399
  true
400
400
  end
401
+ deprecated(:upload_stream, use: 'Aws::S3::TransferManager#upload_stream', version: 'next major version')
401
402
 
402
403
  # Uploads a file from disk to the current object in S3.
403
404
  #
@@ -465,6 +466,7 @@ module Aws
465
466
  yield response if block_given?
466
467
  true
467
468
  end
469
+ deprecated(:upload_file, use: 'Aws::S3::TransferManager#upload_file', version: 'next major version')
468
470
 
469
471
  # Downloads a file in S3 to a path on disk.
470
472
  #
@@ -534,6 +536,7 @@ module Aws
534
536
  end
535
537
  true
536
538
  end
539
+ deprecated(:download_file, use: 'Aws::S3::TransferManager#download_file', version: 'next major version')
537
540
 
538
541
  class Collection < Aws::Resources::Collection
539
542
  alias_method :delete, :batch_delete!
@@ -18,13 +18,13 @@ module Aws
18
18
  autoload :ObjectMultipartCopier, 'aws-sdk-s3/object_multipart_copier'
19
19
  autoload :PresignedPost, 'aws-sdk-s3/presigned_post'
20
20
  autoload :Presigner, 'aws-sdk-s3/presigner'
21
+ autoload :TransferManager, 'aws-sdk-s3/transfer_manager'
21
22
 
22
23
  # s3 express session auth
23
24
  autoload :ExpressCredentials, 'aws-sdk-s3/express_credentials'
24
25
  autoload :ExpressCredentialsProvider, 'aws-sdk-s3/express_credentials_provider'
25
26
 
26
27
  # s3 access grants auth
27
-
28
28
  autoload :AccessGrantsCredentials, 'aws-sdk-s3/access_grants_credentials'
29
29
  autoload :AccessGrantsCredentialsProvider, 'aws-sdk-s3/access_grants_credentials_provider'
30
30
  end
@@ -7,7 +7,7 @@ module Aws
7
7
  # @api private
8
8
  class FileUploader
9
9
 
10
- ONE_HUNDRED_MEGABYTES = 100 * 1024 * 1024
10
+ DEFAULT_MULTIPART_THRESHOLD = 100 * 1024 * 1024
11
11
 
12
12
  # @param [Hash] options
13
13
  # @option options [Client] :client
@@ -15,15 +15,13 @@ module Aws
15
15
  def initialize(options = {})
16
16
  @options = options
17
17
  @client = options[:client] || Client.new
18
- @multipart_threshold = options[:multipart_threshold] ||
19
- ONE_HUNDRED_MEGABYTES
18
+ @multipart_threshold = options[:multipart_threshold] || DEFAULT_MULTIPART_THRESHOLD
20
19
  end
21
20
 
22
21
  # @return [Client]
23
22
  attr_reader :client
24
23
 
25
- # @return [Integer] Files larger than or equal to this in bytes are uploaded
26
- # using a {MultipartFileUploader}.
24
+ # @return [Integer] Files larger than or equal to this in bytes are uploaded using a {MultipartFileUploader}.
27
25
  attr_reader :multipart_threshold
28
26
 
29
27
  # @param [String, Pathname, File, Tempfile] source The file to upload.
@@ -9,17 +9,11 @@ module Aws
9
9
  class MultipartFileUploader
10
10
 
11
11
  MIN_PART_SIZE = 5 * 1024 * 1024 # 5MB
12
-
13
12
  MAX_PARTS = 10_000
14
-
15
- THREAD_COUNT = 10
16
-
13
+ DEFAULT_THREAD_COUNT = 10
17
14
  CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
18
-
19
15
  COMPLETE_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
20
-
21
16
  UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)
22
-
23
17
  CHECKSUM_KEYS = Set.new(
24
18
  Client.api.operation(:upload_part).input.shape.members.map do |n, s|
25
19
  n if s.location == 'header' && s.location_name.start_with?('x-amz-checksum-')
@@ -27,10 +21,10 @@ module Aws
27
21
  )
28
22
 
29
23
  # @option options [Client] :client
30
- # @option options [Integer] :thread_count (THREAD_COUNT)
24
+ # @option options [Integer] :thread_count (DEFAULT_THREAD_COUNT)
31
25
  def initialize(options = {})
32
26
  @client = options[:client] || Client.new
33
- @thread_count = options[:thread_count] || THREAD_COUNT
27
+ @thread_count = options[:thread_count] || DEFAULT_THREAD_COUNT
34
28
  end
35
29
 
36
30
  # @return [Client]
@@ -9,33 +9,19 @@ module Aws
9
9
  module S3
10
10
  # @api private
11
11
  class MultipartStreamUploader
12
- # api private
13
- PART_SIZE = 5 * 1024 * 1024 # 5MB
14
12
 
15
- # api private
16
- THREAD_COUNT = 10
17
-
18
- # api private
19
- TEMPFILE_PREIX = 'aws-sdk-s3-upload_stream'.freeze
20
-
21
- # @api private
22
- CREATE_OPTIONS =
23
- Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
24
-
25
- # @api private
26
- UPLOAD_PART_OPTIONS =
27
- Set.new(Client.api.operation(:upload_part).input.shape.member_names)
28
-
29
- # @api private
30
- COMPLETE_UPLOAD_OPTIONS =
31
- Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
13
+ DEFAULT_PART_SIZE = 5 * 1024 * 1024 # 5MB
14
+ DEFAULT_THREAD_COUNT = 10
15
+ CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
16
+ UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)
17
+ COMPLETE_UPLOAD_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
32
18
 
33
19
  # @option options [Client] :client
34
20
  def initialize(options = {})
35
21
  @client = options[:client] || Client.new
36
22
  @tempfile = options[:tempfile]
37
- @part_size = options[:part_size] || PART_SIZE
38
- @thread_count = options[:thread_count] || THREAD_COUNT
23
+ @part_size = options[:part_size] || DEFAULT_PART_SIZE
24
+ @thread_count = options[:thread_count] || DEFAULT_THREAD_COUNT
39
25
  end
40
26
 
41
27
  # @return [Client]
@@ -43,7 +29,7 @@ module Aws
43
29
 
44
30
  # @option options [required,String] :bucket
45
31
  # @option options [required,String] :key
46
- # @option options [Integer] :thread_count (THREAD_COUNT)
32
+ # @option options [Integer] :thread_count (DEFAULT_THREAD_COUNT)
47
33
  # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
48
34
  def upload(options = {}, &block)
49
35
  Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
@@ -61,11 +47,10 @@ module Aws
61
47
 
62
48
  def complete_upload(upload_id, parts, options)
63
49
  @client.complete_multipart_upload(
64
- **complete_opts(options).merge(
65
- upload_id: upload_id,
66
- multipart_upload: { parts: parts }
67
- )
50
+ **complete_opts(options).merge(upload_id: upload_id, multipart_upload: { parts: parts })
68
51
  )
52
+ rescue StandardError => e
53
+ abort_upload(upload_id, options, [e])
69
54
  end
70
55
 
71
56
  def upload_parts(upload_id, options, &block)
@@ -74,9 +59,11 @@ module Aws
74
59
  errors = begin
75
60
  IO.pipe do |read_pipe, write_pipe|
76
61
  threads = upload_in_threads(
77
- read_pipe, completed,
62
+ read_pipe,
63
+ completed,
78
64
  upload_part_opts(options).merge(upload_id: upload_id),
79
- thread_errors)
65
+ thread_errors
66
+ )
80
67
  begin
81
68
  block.call(write_pipe)
82
69
  ensure
@@ -85,62 +72,53 @@ module Aws
85
72
  end
86
73
  threads.map(&:value).compact
87
74
  end
88
- rescue => e
75
+ rescue StandardError => e
89
76
  thread_errors + [e]
90
77
  end
78
+ return ordered_parts(completed) if errors.empty?
91
79
 
92
- if errors.empty?
93
- Array.new(completed.size) { completed.pop }.sort_by { |part| part[:part_number] }
94
- else
95
- abort_upload(upload_id, options, errors)
96
- end
80
+ abort_upload(upload_id, options, errors)
97
81
  end
98
82
 
99
83
  def abort_upload(upload_id, options, errors)
100
- @client.abort_multipart_upload(
101
- bucket: options[:bucket],
102
- key: options[:key],
103
- upload_id: upload_id
104
- )
84
+ @client.abort_multipart_upload(bucket: options[:bucket], key: options[:key], upload_id: upload_id)
105
85
  msg = "multipart upload failed: #{errors.map(&:message).join('; ')}"
106
86
  raise MultipartUploadError.new(msg, errors)
107
- rescue MultipartUploadError => error
108
- raise error
109
- rescue => error
110
- msg = "failed to abort multipart upload: #{error.message}. "\
87
+ rescue MultipartUploadError => e
88
+ raise e
89
+ rescue StandardError => e
90
+ msg = "failed to abort multipart upload: #{e.message}. "\
111
91
  "Multipart upload failed: #{errors.map(&:message).join('; ')}"
112
- raise MultipartUploadError.new(msg, errors + [error])
92
+ raise MultipartUploadError.new(msg, errors + [e])
113
93
  end
114
94
 
115
95
  def create_opts(options)
116
- CREATE_OPTIONS.inject({}) do |hash, key|
96
+ CREATE_OPTIONS.each_with_object({}) do |key, hash|
117
97
  hash[key] = options[key] if options.key?(key)
118
- hash
119
98
  end
120
99
  end
121
100
 
122
101
  def upload_part_opts(options)
123
- UPLOAD_PART_OPTIONS.inject({}) do |hash, key|
102
+ UPLOAD_PART_OPTIONS.each_with_object({}) do |key, hash|
124
103
  hash[key] = options[key] if options.key?(key)
125
- hash
126
104
  end
127
105
  end
128
106
 
129
107
  def complete_opts(options)
130
- COMPLETE_UPLOAD_OPTIONS.inject({}) do |hash, key|
108
+ COMPLETE_UPLOAD_OPTIONS.each_with_object({}) do |key, hash|
131
109
  hash[key] = options[key] if options.key?(key)
132
- hash
133
110
  end
134
111
  end
135
112
 
136
113
  def read_to_part_body(read_pipe)
137
114
  return if read_pipe.closed?
138
- temp_io = @tempfile ? Tempfile.new(TEMPFILE_PREIX) : StringIO.new(String.new)
115
+
116
+ temp_io = @tempfile ? Tempfile.new('aws-sdk-s3-upload_stream') : StringIO.new(String.new)
139
117
  temp_io.binmode
140
118
  bytes_copied = IO.copy_stream(read_pipe, temp_io, @part_size)
141
119
  temp_io.rewind
142
- if bytes_copied == 0
143
- if Tempfile === temp_io
120
+ if bytes_copied.zero?
121
+ if temp_io.is_a?(Tempfile)
144
122
  temp_io.close
145
123
  temp_io.unlink
146
124
  end
@@ -155,48 +133,62 @@ module Aws
155
133
  part_number = 0
156
134
  options.fetch(:thread_count, @thread_count).times.map do
157
135
  thread = Thread.new do
158
- begin
159
- loop do
160
- body, thread_part_number = mutex.synchronize do
161
- [read_to_part_body(read_pipe), part_number += 1]
162
- end
163
- break unless (body || thread_part_number == 1)
164
- begin
165
- part = options.merge(
166
- body: body,
167
- part_number: thread_part_number,
168
- )
169
- resp = @client.upload_part(part)
170
- completed_part = {etag: resp.etag, part_number: part[:part_number]}
171
-
172
- # get the requested checksum from the response
173
- if part[:checksum_algorithm]
174
- k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
175
- completed_part[k] = resp[k]
176
- end
177
- completed.push(completed_part)
178
- ensure
179
- if Tempfile === body
180
- body.close
181
- body.unlink
182
- elsif StringIO === body
183
- body.string.clear
184
- end
185
- end
136
+ loop do
137
+ body, thread_part_number = mutex.synchronize do
138
+ [read_to_part_body(read_pipe), part_number += 1]
186
139
  end
187
- nil
188
- rescue => error
189
- # keep other threads from uploading other parts
190
- mutex.synchronize do
191
- thread_errors.push(error)
192
- read_pipe.close_read unless read_pipe.closed?
140
+ break unless body || thread_part_number == 1
141
+
142
+ begin
143
+ part = options.merge(body: body, part_number: thread_part_number)
144
+ resp = @client.upload_part(part)
145
+ completed_part = create_completed_part(resp, part)
146
+ completed.push(completed_part)
147
+ ensure
148
+ clear_body(body)
193
149
  end
194
- error
195
150
  end
151
+ nil
152
+ rescue StandardError => e
153
+ # keep other threads from uploading other parts
154
+ mutex.synchronize do
155
+ thread_errors.push(e)
156
+ read_pipe.close_read unless read_pipe.closed?
157
+ end
158
+ e
196
159
  end
197
160
  thread
198
161
  end
199
162
  end
163
+
164
+ def create_completed_part(resp, part)
165
+ completed_part = { etag: resp.etag, part_number: part[:part_number] }
166
+ return completed_part unless part[:checksum_algorithm]
167
+
168
+ # get the requested checksum from the response
169
+ k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
170
+ completed_part[k] = resp[k]
171
+ completed_part
172
+ end
173
+
174
+ def ordered_parts(parts)
175
+ sorted = []
176
+ until parts.empty?
177
+ part = parts.pop
178
+ index = sorted.bsearch_index { |p| p[:part_number] >= part[:part_number] } || sorted.size
179
+ sorted.insert(index, part)
180
+ end
181
+ sorted
182
+ end
183
+
184
+ def clear_body(body)
185
+ if body.is_a?(Tempfile)
186
+ body.close
187
+ body.unlink
188
+ elsif body.is_a?(StringIO)
189
+ body.string.clear
190
+ end
191
+ end
200
192
  end
201
193
  end
202
194
  end
@@ -0,0 +1,244 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Aws
4
+ module S3
5
+ # A high-level S3 transfer utility that provides enhanced upload and download
6
+ # capabilities with automatic multipart handling, progress tracking, and
7
+ # handling of large files. The following features are supported:
8
+ #
9
+ # * upload a file with multipart upload
10
+ # * upload a stream with multipart upload
11
+ # * download a S3 object with multipart download
12
+ # * track transfer progress by using progress listener
13
+ #
14
+ class TransferManager
15
+ # @param [Hash] options
16
+ # @option options [S3::Client] :client (S3::Client.new)
17
+ # The S3 client to use for {TransferManager} operations. If not provided, a new default client
18
+ # will be created automatically.
19
+ def initialize(options = {})
20
+ @client = options.delete(:client) || Client.new
21
+ end
22
+
23
+ # @return [S3::Client]
24
+ attr_reader :client
25
+
26
+ # Downloads a file in S3 to a path on disk.
27
+ #
28
+ # # small files (< 5MB) are downloaded in a single API call
29
+ # tm = TransferManager.new
30
+ # tm.download_file('/path/to/file', bucket: 'bucket', key: 'key')
31
+ #
32
+ # Files larger than 5MB are downloaded using multipart method:
33
+ #
34
+ # # large files are split into parts and the parts are downloaded in parallel
35
+ # tm.download_file('/path/to/large_file', bucket: 'bucket', key: 'key')
36
+ #
37
+ # You can provide a callback to monitor progress of the download:
38
+ #
39
+ # # bytes and part_sizes are each an array with 1 entry per part
40
+ # # part_sizes may not be known until the first bytes are retrieved
41
+ # progress = proc do |bytes, part_sizes, file_size|
42
+ # bytes.map.with_index do |b, i|
43
+ #     puts "Part #{i + 1}: #{b} / #{part_sizes[i]} " + "Total: #{100.0 * bytes.sum / file_size}%"
44
+ # end
45
+ # end
46
+ # tm.download_file('/path/to/file', bucket: 'bucket', key: 'key', progress_callback: progress)
47
+ #
48
+ # @param [String] destination
49
+ # Where to download the file to.
50
+ #
51
+ # @param [String] bucket
52
+ #   The name of the S3 bucket to download from.
53
+ #
54
+ # @param [String] key
55
+ # The object key name in S3 bucket.
56
+ #
57
+ # @param [Hash] options
58
+ #   Additional options for {Client#get_object} and {Client#head_object} may be provided.
59
+ #
60
+ # @option options [String] :mode ("auto") `"auto"`, `"single_request"` or `"get_range"`
61
+ #
62
+ # * `"auto"` mode is enabled by default, which performs `multipart_download`
63
+ #   * `"single_request"` mode forces only one GET request to be made during the download
64
+ #   * `"get_range"` mode requires the `:chunk_size` parameter to be configured for customizing each range size
65
+ #
66
+ # @option options [Integer] :chunk_size required in `"get_range"` mode.
67
+ #
68
+ # @option options [Integer] :thread_count (10) Customize threads used in the multipart download.
69
+ #
70
+ # @option options [String] :version_id The object version id used to retrieve the object.
71
+ #
72
+ # @see https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html ObjectVersioning
73
+ #
74
+ # @option options [String] :checksum_mode ("ENABLED")
75
+ # When `"ENABLED"` and the object has a stored checksum, it will be used to validate the download and will
76
+ # raise an `Aws::Errors::ChecksumError` if checksum validation fails. You may provide a `on_checksum_validated`
77
+ # callback if you need to verify that validation occurred and which algorithm was used.
78
+ # To disable checksum validation, set `checksum_mode` to `"DISABLED"`.
79
+ #
80
+ # @option options [Callable] :on_checksum_validated
81
+ # Called each time a request's checksum is validated with the checksum algorithm and the
82
+ # response. For multipart downloads, this will be called for each part that is downloaded and validated.
83
+ #
84
+ # @option options [Proc] :progress_callback
85
+ # A Proc that will be called when each chunk of the download is received. It will be invoked with
86
+ # `bytes_read`, `part_sizes`, `file_size`. When the object is downloaded as parts (rather than by ranges),
87
+ # the `part_sizes` will not be known ahead of time and will be `nil` in the callback until the first bytes
88
+ # in the part are received.
89
+ #
90
+ # @raise [MultipartDownloadError] Raised when an object validation fails outside of service errors.
91
+ #
92
+ # @return [Boolean] Returns `true` when the file is downloaded without any errors.
93
+ #
94
+ # @see Client#get_object
95
+ # @see Client#head_object
96
+ def download_file(destination, bucket:, key:, **options)
97
+ downloader = FileDownloader.new(client: @client)
98
+ downloader.download(destination, options.merge(bucket: bucket, key: key))
99
+ true
100
+ end
101
+
102
+ # Uploads a file from disk to S3.
103
+ #
104
+ # # a small file are uploaded with PutObject API
105
+ # tm = TransferManager.new
106
+ # tm.upload_file('/path/to/small_file', bucket: 'bucket', key: 'key')
107
+ #
108
+ # Files larger than or equal to `:multipart_threshold` are uploaded using multipart upload APIs.
109
+ #
110
+ # # large files are automatically split into parts and the parts are uploaded in parallel
111
+ # tm.upload_file('/path/to/large_file', bucket: 'bucket', key: 'key')
112
+ #
113
+ # The response of the S3 upload API is yielded if a block given.
114
+ #
115
+ # # API response will have etag value of the file
116
+ # tm.upload_file('/path/to/file', bucket: 'bucket', key: 'key') do |response|
117
+ # etag = response.etag
118
+ # end
119
+ #
120
+ # You can provide a callback to monitor progress of the upload:
121
+ #
122
+ # # bytes and totals are each an array with 1 entry per part
123
+ # progress = proc do |bytes, totals|
124
+ # bytes.map.with_index do |b, i|
125
+ # puts "Part #{i + 1}: #{b} / #{totals[i]} " + "Total: #{100.0 * bytes.sum / totals.sum}%"
126
+ # end
127
+ # end
128
+ # tm.upload_file('/path/to/file', bucket: 'bucket', key: 'key', progress_callback: progress)
129
+ #
130
+ # @param [String, Pathname, File, Tempfile] source
131
+ # A file on the local file system that will be uploaded. This can either be a `String` or `Pathname` to the
132
+ # file, an open `File` object, or an open `Tempfile` object. If you pass an open `File` or `Tempfile` object,
133
+ # then you are responsible for closing it after the upload completes. When using an open Tempfile, rewind it
134
+ # before uploading or else the object will be empty.
135
+ #
136
+ # @param [String] bucket
137
+ # The name of the S3 bucket to upload to.
138
+ #
139
+ # @param [String] key
140
+ # The object key name for the uploaded file.
141
+ #
142
+ # @param [Hash] options
143
+ # Additional options for {Client#put_object} when file sizes below the multipart threshold.
144
+ # For files larger than the multipart threshold, options for {Client#create_multipart_upload},
145
+ # {Client#complete_multipart_upload}, and {Client#upload_part} can be provided.
146
+ #
147
+ # @option options [Integer] :multipart_threshold (104857600)
148
+ # Files larger han or equal to `:multipart_threshold` are uploaded using the S3 multipart upload APIs.
149
+ # Default threshold is `100MB`.
150
+ #
151
+ # @option options [Integer] :thread_count (10)
152
+ # The number of parallel multipart uploads. This option is not used if the file is smaller than
153
+ # `:multipart_threshold`.
154
+ #
155
+ # @option options [Proc] :progress_callback (nil)
156
+ # A Proc that will be called when each chunk of the upload is sent.
157
+ # It will be invoked with `[bytes_read]` and `[total_sizes]`.
158
+ #
159
+ # @raise [MultipartUploadError] If a file is being uploaded in parts, and the upload can not be completed,
160
+ # then the upload is aborted and this error is raised. The raised error has a `#errors` method that
161
+ # returns the failures that caused the upload to be aborted.
162
+ #
163
+ # @return [Boolean] Returns `true` when the file is uploaded without any errors.
164
+ #
165
+ # @see Client#put_object
166
+ # @see Client#create_multipart_upload
167
+ # @see Client#complete_multipart_upload
168
+ # @see Client#upload_part
169
+ def upload_file(source, bucket:, key:, **options)
170
+ uploading_options = options.dup
171
+ uploader = FileUploader.new(
172
+ multipart_threshold: uploading_options.delete(:multipart_threshold),
173
+ client: @client
174
+ )
175
+ response = uploader.upload(source, uploading_options.merge(bucket: bucket, key: key))
176
+ yield response if block_given?
177
+ true
178
+ end
179
+
180
+ # Uploads a stream in a streaming fashion to S3.
181
+ #
182
+ # Passed chunks automatically split into multipart upload parts and the parts are uploaded in parallel.
183
+ # This allows for streaming uploads that never touch the disk.
184
+ #
185
+ # **Note**: There are known issues in JRuby until jruby-9.1.15.0, so avoid using this with older JRuby versions.
186
+ #
187
+ # @example Streaming chunks of data
188
+ # tm = TransferManager.new
189
+ # tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
190
+ # 10.times { write_stream << 'foo' }
191
+ # end
192
+ # @example Streaming chunks of data
193
+ # tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
194
+ # IO.copy_stream(IO.popen('ls'), write_stream)
195
+ # end
196
+ # @example Streaming chunks of data
197
+ # tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
198
+ # IO.copy_stream(STDIN, write_stream)
199
+ # end
200
+ #
201
+ # @param [String] bucket
202
+ # The name of the S3 bucket to upload to.
203
+ #
204
+ # @param [String] key
205
+ # The object key name for the uploaded file.
206
+ #
207
+ # @param [Hash] options
208
+ # Additional options for {Client#create_multipart_upload}, {Client#complete_multipart_upload}, and
209
+ # {Client#upload_part} can be provided.
210
+ #
211
+ # @option options [Integer] :thread_count (10)
212
+ # The number of parallel multipart uploads.
213
+ #
214
+ # @option options [Boolean] :tempfile (false)
215
+ # Normally read data is stored in memory when building the parts in order to complete the underlying
216
+ # multipart upload. By passing `:tempfile => true`, the data read will be temporarily stored on disk reducing
217
+ # the memory footprint vastly.
218
+ #
219
+ # @option options [Integer] :part_size (5242880)
220
+ #   Define how big each part, except the last, should be. Default `:part_size` is `5 * 1024 * 1024`.
221
+ #
222
+ # @raise [MultipartUploadError] If an object is being uploaded in parts, and the upload can not be completed,
223
+ # then the upload is aborted and this error is raised. The raised error has a `#errors` method that returns
224
+ # the failures that caused the upload to be aborted.
225
+ #
226
+ # @return [Boolean] Returns `true` when the object is uploaded without any errors.
227
+ #
228
+ # @see Client#create_multipart_upload
229
+ # @see Client#complete_multipart_upload
230
+ # @see Client#upload_part
231
+ def upload_stream(bucket:, key:, **options, &block)
232
+ uploading_options = options.dup
233
+ uploader = MultipartStreamUploader.new(
234
+ client: @client,
235
+ thread_count: uploading_options.delete(:thread_count),
236
+ tempfile: uploading_options.delete(:tempfile),
237
+ part_size: uploading_options.delete(:part_size)
238
+ )
239
+ uploader.upload(uploading_options.merge(bucket: bucket, key: key), &block)
240
+ true
241
+ end
242
+ end
243
+ end
244
+ end
data/lib/aws-sdk-s3.rb CHANGED
@@ -75,7 +75,7 @@ module Aws::S3
75
75
  autoload :ObjectVersion, 'aws-sdk-s3/object_version'
76
76
  autoload :EventStreams, 'aws-sdk-s3/event_streams'
77
77
 
78
- GEM_VERSION = '1.196.1'
78
+ GEM_VERSION = '1.197.0'
79
79
 
80
80
  end
81
81
 
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: aws-sdk-s3
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.196.1
4
+ version: 1.197.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Amazon Web Services
@@ -170,6 +170,7 @@ files:
170
170
  - lib/aws-sdk-s3/presigned_post.rb
171
171
  - lib/aws-sdk-s3/presigner.rb
172
172
  - lib/aws-sdk-s3/resource.rb
173
+ - lib/aws-sdk-s3/transfer_manager.rb
173
174
  - lib/aws-sdk-s3/types.rb
174
175
  - lib/aws-sdk-s3/waiters.rb
175
176
  - sig/bucket.rbs