aws-sdk-s3 1.196.1 → 1.198.0

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 72291dc4a45c81045393df33c2dc26b215e8b67d50f33e097d7685da2a02ca47
- data.tar.gz: eeb0aacd605389c7511cb63684362c12351c68648fa32f7d888e00ea7b0a7d9d
+ metadata.gz: 8f8abf2a326b6ebef3c9c9ceac08c2378c4e0d9bc3e187a4a3dd89bb81fb8154
+ data.tar.gz: 6186fe037f7fc5541c2a8e1006c9cb0a34630c69fb4a0bdebc40a63b872c9189
  SHA512:
- metadata.gz: 20dcb199634a44435dee8acb5d775dc9ed7d50b60c10091425416b692783e5de48ae91df2a4525c0543391584e08338a37c8c9b43dfebe60ffb609a99c535910
- data.tar.gz: 2c4fbba3c97ae00405a8a2ea973145194126f027cf070229a3d80cc47900b2d8e3a1106f69f510ef4c765f090e3d56806f9b5a470449caa3ca898527377b9601
+ metadata.gz: 5aade0fd0688cee874c30704211d6f166ea8aaca4838adcdd2ad139fd0cb51b24f97e6715394c3aa484ac601628204d47c057b11d448f15c5d1bf1e90760d10f
+ data.tar.gz: 245ada14810d09437af36e44f02898129f259902281a10cf0b870c6654ad1525c1b74d4f34138cdcfdf1920c4602a4880cc78194d18a8c5912db67d386f1ab99
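The checksums above can be cross-checked against a locally fetched gem. A minimal sketch, assuming the gem was retrieved with `gem fetch aws-sdk-s3 -v 1.198.0` and unpacked with `tar -xf` (the inner `metadata.gz` and `data.tar.gz` names come from the standard gem layout):

```ruby
require 'digest'

# a .gem file is a tar archive containing metadata.gz and data.tar.gz;
# these digests should match the SHA256 entries in checksums.yaml above
%w[metadata.gz data.tar.gz].each do |name|
  puts "#{name}: #{Digest::SHA256.file(name).hexdigest}"
end
```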
data/CHANGELOG.md CHANGED
@@ -1,6 +1,22 @@
  Unreleased Changes
  ------------------

+ 1.198.0 (2025-08-26)
+ ------------------
+
+ * Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
+
+ * Issue - Fix multipart `download_file` to support `Pathname`, `File` and `Tempfile` objects as download destinations.
+
+ 1.197.0 (2025-08-19)
+ ------------------
+
+ * Issue - When the multipart stream uploader fails to complete a multipart upload, it now calls abort multipart upload.
+
+ * Issue - For the `Aws::S3::Object` class, the following methods have been deprecated: `download_file`, `upload_file` and `upload_stream`. Use `Aws::S3::TransferManager` instead.
+
+ * Feature - Add `Aws::S3::TransferManager`, an S3 transfer utility that provides upload/download capabilities with automatic multipart handling, progress tracking, and handling of large files.
+
  1.196.1 (2025-08-05)
  ------------------

data/VERSION CHANGED
@@ -1 +1 @@
- 1.196.1
+ 1.198.0
data/lib/aws-sdk-s3/client.rb CHANGED
@@ -21735,7 +21735,7 @@ module Aws::S3
      tracer: tracer
    )
    context[:gem_name] = 'aws-sdk-s3'
-   context[:gem_version] = '1.196.1'
+   context[:gem_version] = '1.198.0'
    Seahorse::Client::Request.new(handlers, context)
  end

data/lib/aws-sdk-s3/customizations/object.rb CHANGED
@@ -398,6 +398,7 @@ module Aws
    end
    true
  end
+ deprecated(:upload_stream, use: 'Aws::S3::TransferManager#upload_stream', version: 'next major version')

  # Uploads a file from disk to the current object in S3.
  #
@@ -465,6 +466,7 @@ module Aws
    yield response if block_given?
    true
  end
+ deprecated(:upload_file, use: 'Aws::S3::TransferManager#upload_file', version: 'next major version')

  # Downloads a file in S3 to a path on disk.
  #
@@ -486,7 +488,10 @@ module Aws
  # end
  # obj.download_file('/path/to/file', progress_callback: progress)
  #
- # @param [String] destination Where to download the file to.
+ # @param [String, Pathname, File, Tempfile] destination
+ #   Where to download the file to. This can either be a String or Pathname to the file, an open File object,
+ #   or an open Tempfile object. If you pass an open File or Tempfile object, then you are responsible for
+ #   closing it after the download completes.
  #
  # @param [Hash] options
  #   Additional options for {Client#get_object} and {Client#head_object} may be provided.
@@ -534,6 +539,7 @@ module Aws
    end
    true
  end
+ deprecated(:download_file, use: 'Aws::S3::TransferManager#download_file', version: 'next major version')

  class Collection < Aws::Resources::Collection
    alias_method :delete, :batch_delete!
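Per the deprecations above, the three `Aws::S3::Object` helpers keep working until the next major version but now point at `TransferManager`. A minimal migration sketch (bucket and key names are placeholders):

```ruby
require 'aws-sdk-s3'

obj = Aws::S3::Object.new('my-bucket', 'my-key')
obj.upload_file('/path/to/file') # still works, but is deprecated as of 1.197.0

# preferred equivalent going forward
tm = Aws::S3::TransferManager.new
tm.upload_file('/path/to/file', bucket: 'my-bucket', key: 'my-key')
```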
data/lib/aws-sdk-s3/customizations.rb CHANGED
@@ -18,13 +18,13 @@ module Aws
  autoload :ObjectMultipartCopier, 'aws-sdk-s3/object_multipart_copier'
  autoload :PresignedPost, 'aws-sdk-s3/presigned_post'
  autoload :Presigner, 'aws-sdk-s3/presigner'
+ autoload :TransferManager, 'aws-sdk-s3/transfer_manager'

  # s3 express session auth
  autoload :ExpressCredentials, 'aws-sdk-s3/express_credentials'
  autoload :ExpressCredentialsProvider, 'aws-sdk-s3/express_credentials_provider'

  # s3 access grants auth
-
  autoload :AccessGrantsCredentials, 'aws-sdk-s3/access_grants_credentials'
  autoload :AccessGrantsCredentialsProvider, 'aws-sdk-s3/access_grants_credentials_provider'
  end
data/lib/aws-sdk-s3/file_downloader.rb CHANGED
@@ -20,7 +20,12 @@ module Aws
  attr_reader :client

  def download(destination, options = {})
-   @path = destination
+   valid_types = [String, Pathname, File, Tempfile]
+   unless valid_types.include?(destination.class)
+     raise ArgumentError, "Invalid destination, expected #{valid_types.join(', ')} but got: #{destination.class}"
+   end
+
+   @destination = destination
    @mode = options.delete(:mode) || 'auto'
    @thread_count = options.delete(:thread_count) || 10
    @chunk_size = options.delete(:chunk_size)
@@ -42,7 +47,7 @@ module Aws
        raise ArgumentError, "Invalid mode #{@mode} provided, :mode should be single_request, get_range or auto"
      end
    end
-   File.rename(@temp_path, @path) if @temp_path
+   File.rename(@temp_path, @destination) if @temp_path
  ensure
    File.delete(@temp_path) if @temp_path && File.exist?(@temp_path)
  end
@@ -118,7 +123,9 @@ module Aws
  def download_in_threads(pending, total_size)
    threads = []
    progress = MultipartProgress.new(pending, total_size, @progress_callback) if @progress_callback
-   @temp_path = "#{@path}.s3tmp.#{SecureRandom.alphanumeric(8)}"
+   unless [File, Tempfile].include?(@destination.class)
+     @temp_path = "#{@destination}.s3tmp.#{SecureRandom.alphanumeric(8)}"
+   end
    @thread_count.times do
      thread = Thread.new do
        begin
@@ -159,11 +166,12 @@ module Aws
  end

  def write(body, range)
-   File.write(@temp_path, body.read, range.split('-').first.to_i)
+   path = @temp_path || @destination
+   File.write(path, body.read, range.split('-').first.to_i)
  end

  def single_request
-   params = @params.merge(response_target: @path)
+   params = @params.merge(response_target: @destination)
    params[:on_chunk_received] = single_part_progress if @progress_callback
    resp = @client.get_object(params)
    return resp unless @on_checksum_validated
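The widened `destination` parameter means an already-open IO can now receive the bytes directly; with a `File` or `Tempfile` the downloader skips the `.s3tmp` temp file and rename shown above. A sketch of the new usage (bucket and key are placeholders; the caller closes handles it opened):

```ruby
require 'aws-sdk-s3'
require 'tempfile'

tm = Aws::S3::TransferManager.new
file = Tempfile.new('s3-download')
tm.download_file(file, bucket: 'my-bucket', key: 'my-key')
file.rewind
puts file.read(64) # first bytes of the object
file.close
file.unlink

# anything outside String/Pathname/File/Tempfile is rejected up front:
# tm.download_file(StringIO.new, bucket: 'my-bucket', key: 'my-key')
# => ArgumentError (Invalid destination, expected String, Pathname, File, Tempfile but got: StringIO)
```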
data/lib/aws-sdk-s3/file_uploader.rb CHANGED
@@ -7,7 +7,7 @@ module Aws
  # @api private
  class FileUploader

- ONE_HUNDRED_MEGABYTES = 100 * 1024 * 1024
+ DEFAULT_MULTIPART_THRESHOLD = 100 * 1024 * 1024

  # @param [Hash] options
  # @option options [Client] :client
@@ -15,15 +15,13 @@ module Aws
  def initialize(options = {})
    @options = options
    @client = options[:client] || Client.new
-   @multipart_threshold = options[:multipart_threshold] ||
-     ONE_HUNDRED_MEGABYTES
+   @multipart_threshold = options[:multipart_threshold] || DEFAULT_MULTIPART_THRESHOLD
  end

  # @return [Client]
  attr_reader :client

- # @return [Integer] Files larger than or equal to this in bytes are uploaded
- #   using a {MultipartFileUploader}.
+ # @return [Integer] Files larger than or equal to this in bytes are uploaded using a {MultipartFileUploader}.
  attr_reader :multipart_threshold

  # @param [String, Pathname, File, Tempfile] source The file to upload.
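`DEFAULT_MULTIPART_THRESHOLD` is only a rename; the 100 MiB default and the per-call override are unchanged. A sketch of tuning it through the public entry point (bucket, key, and path are placeholders):

```ruby
tm = Aws::S3::TransferManager.new

# force the multipart path for anything over 16 MiB instead of the
# DEFAULT_MULTIPART_THRESHOLD of 100 * 1024 * 1024 bytes
tm.upload_file('/path/to/file',
               bucket: 'my-bucket',
               key: 'my-key',
               multipart_threshold: 16 * 1024 * 1024)
```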
data/lib/aws-sdk-s3/multipart_file_uploader.rb CHANGED
@@ -9,17 +9,11 @@ module Aws
  class MultipartFileUploader

  MIN_PART_SIZE = 5 * 1024 * 1024 # 5MB
-
  MAX_PARTS = 10_000
-
- THREAD_COUNT = 10
-
+ DEFAULT_THREAD_COUNT = 10
  CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
-
  COMPLETE_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
-
  UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)
-
  CHECKSUM_KEYS = Set.new(
    Client.api.operation(:upload_part).input.shape.members.map do |n, s|
      n if s.location == 'header' && s.location_name.start_with?('x-amz-checksum-')
@@ -27,10 +21,10 @@ module Aws
  )

  # @option options [Client] :client
- # @option options [Integer] :thread_count (THREAD_COUNT)
+ # @option options [Integer] :thread_count (DEFAULT_THREAD_COUNT)
  def initialize(options = {})
    @client = options[:client] || Client.new
-   @thread_count = options[:thread_count] || THREAD_COUNT
+   @thread_count = options[:thread_count] || DEFAULT_THREAD_COUNT
  end

  # @return [Client]
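Likewise, `DEFAULT_THREAD_COUNT` only renames the old `THREAD_COUNT`; callers still override it per upload. A short sketch (placeholders as before):

```ruby
# cap the parallelism of a multipart upload at 4 threads
# instead of the DEFAULT_THREAD_COUNT of 10
tm.upload_file('/path/to/large_file',
               bucket: 'my-bucket',
               key: 'my-key',
               thread_count: 4)
```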
data/lib/aws-sdk-s3/multipart_stream_uploader.rb CHANGED
@@ -9,33 +9,19 @@ module Aws
  module S3
  # @api private
  class MultipartStreamUploader
- # api private
- PART_SIZE = 5 * 1024 * 1024 # 5MB

- # api private
- THREAD_COUNT = 10
-
- # api private
- TEMPFILE_PREIX = 'aws-sdk-s3-upload_stream'.freeze
-
- # @api private
- CREATE_OPTIONS =
-   Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
-
- # @api private
- UPLOAD_PART_OPTIONS =
-   Set.new(Client.api.operation(:upload_part).input.shape.member_names)
-
- # @api private
- COMPLETE_UPLOAD_OPTIONS =
-   Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)
+ DEFAULT_PART_SIZE = 5 * 1024 * 1024 # 5MB
+ DEFAULT_THREAD_COUNT = 10
+ CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
+ UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)
+ COMPLETE_UPLOAD_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)

  # @option options [Client] :client
  def initialize(options = {})
    @client = options[:client] || Client.new
    @tempfile = options[:tempfile]
-   @part_size = options[:part_size] || PART_SIZE
-   @thread_count = options[:thread_count] || THREAD_COUNT
+   @part_size = options[:part_size] || DEFAULT_PART_SIZE
+   @thread_count = options[:thread_count] || DEFAULT_THREAD_COUNT
  end

  # @return [Client]
@@ -43,7 +29,7 @@ module Aws

  # @option options [required,String] :bucket
  # @option options [required,String] :key
- # @option options [Integer] :thread_count (THREAD_COUNT)
+ # @option options [Integer] :thread_count (DEFAULT_THREAD_COUNT)
  # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
  def upload(options = {}, &block)
    Aws::Plugins::UserAgent.metric('S3_TRANSFER') do
@@ -61,11 +47,10 @@ module Aws

  def complete_upload(upload_id, parts, options)
    @client.complete_multipart_upload(
-     **complete_opts(options).merge(
-       upload_id: upload_id,
-       multipart_upload: { parts: parts }
-     )
+     **complete_opts(options).merge(upload_id: upload_id, multipart_upload: { parts: parts })
    )
+ rescue StandardError => e
+   abort_upload(upload_id, options, [e])
  end

  def upload_parts(upload_id, options, &block)
@@ -74,9 +59,11 @@ module Aws
    errors = begin
      IO.pipe do |read_pipe, write_pipe|
        threads = upload_in_threads(
-         read_pipe, completed,
+         read_pipe,
+         completed,
          upload_part_opts(options).merge(upload_id: upload_id),
-         thread_errors)
+         thread_errors
+       )
        begin
          block.call(write_pipe)
        ensure
@@ -85,62 +72,53 @@ module Aws
        end
        threads.map(&:value).compact
      end
-   rescue => e
+   rescue StandardError => e
      thread_errors + [e]
    end
+   return ordered_parts(completed) if errors.empty?

-   if errors.empty?
-     Array.new(completed.size) { completed.pop }.sort_by { |part| part[:part_number] }
-   else
-     abort_upload(upload_id, options, errors)
-   end
+   abort_upload(upload_id, options, errors)
  end

  def abort_upload(upload_id, options, errors)
-   @client.abort_multipart_upload(
-     bucket: options[:bucket],
-     key: options[:key],
-     upload_id: upload_id
-   )
+   @client.abort_multipart_upload(bucket: options[:bucket], key: options[:key], upload_id: upload_id)
    msg = "multipart upload failed: #{errors.map(&:message).join('; ')}"
    raise MultipartUploadError.new(msg, errors)
- rescue MultipartUploadError => error
-   raise error
- rescue => error
-   msg = "failed to abort multipart upload: #{error.message}. "\
+ rescue MultipartUploadError => e
+   raise e
+ rescue StandardError => e
+   msg = "failed to abort multipart upload: #{e.message}. "\
        "Multipart upload failed: #{errors.map(&:message).join('; ')}"
-   raise MultipartUploadError.new(msg, errors + [error])
+   raise MultipartUploadError.new(msg, errors + [e])
  end

  def create_opts(options)
-   CREATE_OPTIONS.inject({}) do |hash, key|
+   CREATE_OPTIONS.each_with_object({}) do |key, hash|
      hash[key] = options[key] if options.key?(key)
-     hash
    end
  end

  def upload_part_opts(options)
-   UPLOAD_PART_OPTIONS.inject({}) do |hash, key|
+   UPLOAD_PART_OPTIONS.each_with_object({}) do |key, hash|
      hash[key] = options[key] if options.key?(key)
-     hash
    end
  end

  def complete_opts(options)
-   COMPLETE_UPLOAD_OPTIONS.inject({}) do |hash, key|
+   COMPLETE_UPLOAD_OPTIONS.each_with_object({}) do |key, hash|
      hash[key] = options[key] if options.key?(key)
-     hash
    end
  end

  def read_to_part_body(read_pipe)
    return if read_pipe.closed?
-   temp_io = @tempfile ? Tempfile.new(TEMPFILE_PREIX) : StringIO.new(String.new)
+
+   temp_io = @tempfile ? Tempfile.new('aws-sdk-s3-upload_stream') : StringIO.new(String.new)
    temp_io.binmode
    bytes_copied = IO.copy_stream(read_pipe, temp_io, @part_size)
    temp_io.rewind
-   if bytes_copied == 0
-     if Tempfile === temp_io
+   if bytes_copied.zero?
+     if temp_io.is_a?(Tempfile)
        temp_io.close
        temp_io.unlink
      end
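With `complete_upload` now rescuing into `abort_upload`, a failed CompleteMultipartUpload no longer strands uploaded parts; callers see a single `MultipartUploadError` either way. A sketch of handling it (bucket and key are placeholders):

```ruby
begin
  tm.upload_stream(bucket: 'my-bucket', key: 'my-key') do |write_stream|
    write_stream << 'some bytes'
  end
rescue Aws::S3::MultipartUploadError => e
  # e.errors holds the per-part or completion failures that triggered the abort
  e.errors.each { |err| warn err.message }
end
```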
@@ -155,48 +133,62 @@ module Aws
      part_number = 0
      options.fetch(:thread_count, @thread_count).times.map do
        thread = Thread.new do
-         begin
-           loop do
-             body, thread_part_number = mutex.synchronize do
-               [read_to_part_body(read_pipe), part_number += 1]
-             end
-             break unless (body || thread_part_number == 1)
-             begin
-               part = options.merge(
-                 body: body,
-                 part_number: thread_part_number,
-               )
-               resp = @client.upload_part(part)
-               completed_part = {etag: resp.etag, part_number: part[:part_number]}
-
-               # get the requested checksum from the response
-               if part[:checksum_algorithm]
-                 k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
-                 completed_part[k] = resp[k]
-               end
-               completed.push(completed_part)
-             ensure
-               if Tempfile === body
-                 body.close
-                 body.unlink
-               elsif StringIO === body
-                 body.string.clear
-               end
-             end
+         loop do
+           body, thread_part_number = mutex.synchronize do
+             [read_to_part_body(read_pipe), part_number += 1]
            end
-           nil
-         rescue => error
-           # keep other threads from uploading other parts
-           mutex.synchronize do
-             thread_errors.push(error)
-             read_pipe.close_read unless read_pipe.closed?
+           break unless body || thread_part_number == 1
+
+           begin
+             part = options.merge(body: body, part_number: thread_part_number)
+             resp = @client.upload_part(part)
+             completed_part = create_completed_part(resp, part)
+             completed.push(completed_part)
+           ensure
+             clear_body(body)
            end
-           error
          end
+         nil
+       rescue StandardError => e
+         # keep other threads from uploading other parts
+         mutex.synchronize do
+           thread_errors.push(e)
+           read_pipe.close_read unless read_pipe.closed?
+         end
+         e
        end
        thread
      end
    end
+
+   def create_completed_part(resp, part)
+     completed_part = { etag: resp.etag, part_number: part[:part_number] }
+     return completed_part unless part[:checksum_algorithm]
+
+     # get the requested checksum from the response
+     k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
+     completed_part[k] = resp[k]
+     completed_part
+   end
+
+   def ordered_parts(parts)
+     sorted = []
+     until parts.empty?
+       part = parts.pop
+       index = sorted.bsearch_index { |p| p[:part_number] >= part[:part_number] } || sorted.size
+       sorted.insert(index, part)
+     end
+     sorted
+   end
+
+   def clear_body(body)
+     if body.is_a?(Tempfile)
+       body.close
+       body.unlink
+     elsif body.is_a?(StringIO)
+       body.string.clear
+     end
+   end
  end
  end
  end
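The new `ordered_parts` helper replaces the old drain-then-`sort_by` with a sorted insert driven by `bsearch_index`. A standalone sketch of the same logic, with a plain `Queue` standing in for the completed-parts queue:

```ruby
parts = Queue.new
[{ part_number: 3 }, { part_number: 1 }, { part_number: 2 }].each { |p| parts.push(p) }

sorted = []
until parts.empty?
  part = parts.pop
  # find the first slot whose part_number is >= the popped part's, or append
  index = sorted.bsearch_index { |p| p[:part_number] >= part[:part_number] } || sorted.size
  sorted.insert(index, part)
end

p sorted.map { |p| p[:part_number] } # => [1, 2, 3]
```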
data/lib/aws-sdk-s3/transfer_manager.rb ADDED
@@ -0,0 +1,246 @@
+ # frozen_string_literal: true
+
+ module Aws
+   module S3
+     # A high-level S3 transfer utility that provides enhanced upload and download
+     # capabilities with automatic multipart handling, progress tracking, and
+     # handling of large files. The following features are supported:
+     #
+     # * upload a file with multipart upload
+     # * upload a stream with multipart upload
+     # * download an S3 object with multipart download
+     # * track transfer progress by using a progress listener
+     #
+     class TransferManager
+       # @param [Hash] options
+       # @option options [S3::Client] :client (S3::Client.new)
+       #   The S3 client to use for {TransferManager} operations. If not provided, a new default client
+       #   will be created automatically.
+       def initialize(options = {})
+         @client = options.delete(:client) || Client.new
+       end
+
+       # @return [S3::Client]
+       attr_reader :client
+
+       # Downloads a file in S3 to a path on disk.
+       #
+       #     # small files (< 5MB) are downloaded in a single API call
+       #     tm = TransferManager.new
+       #     tm.download_file('/path/to/file', bucket: 'bucket', key: 'key')
+       #
+       # Files larger than 5MB are downloaded using the multipart method:
+       #
+       #     # large files are split into parts and the parts are downloaded in parallel
+       #     tm.download_file('/path/to/large_file', bucket: 'bucket', key: 'key')
+       #
+       # You can provide a callback to monitor progress of the download:
+       #
+       #     # bytes and part_sizes are each an array with 1 entry per part
+       #     # part_sizes may not be known until the first bytes are retrieved
+       #     progress = proc do |bytes, part_sizes, file_size|
+       #       bytes.map.with_index do |b, i|
+       #         puts "Part #{i + 1}: #{b} / #{part_sizes[i]} Total: #{100.0 * bytes.sum / file_size}%"
+       #       end
+       #     end
+       #     tm.download_file('/path/to/file', bucket: 'bucket', key: 'key', progress_callback: progress)
+       #
+       # @param [String, Pathname, File, Tempfile] destination
+       #   Where to download the file to. This can either be a String or Pathname to the file, an open File object,
+       #   or an open Tempfile object. If you pass an open File or Tempfile object, then you are responsible for
+       #   closing it after the download completes.
+       #
+       # @param [String] bucket
+       #   The name of the S3 bucket to download from.
+       #
+       # @param [String] key
+       #   The object key name in the S3 bucket.
+       #
+       # @param [Hash] options
+       #   Additional options for {Client#get_object} and {Client#head_object} may be provided.
+       #
+       # @option options [String] :mode ("auto") `"auto"`, `"single_request"` or `"get_range"`
+       #
+       #   * `"auto"` mode is enabled by default, which performs a `multipart_download`
+       #   * `"single_request"` mode forces only 1 GET request to be made for the download
+       #   * `"get_range"` mode requires the `:chunk_size` parameter to be configured to customize each range size
+       #
+       # @option options [Integer] :chunk_size required in `"get_range"` mode.
+       #
+       # @option options [Integer] :thread_count (10) Customize threads used in the multipart download.
+       #
+       # @option options [String] :version_id The object version id used to retrieve the object.
+       #
+       # @see https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html ObjectVersioning
+       #
+       # @option options [String] :checksum_mode ("ENABLED")
+       #   When `"ENABLED"` and the object has a stored checksum, it will be used to validate the download and will
+       #   raise an `Aws::Errors::ChecksumError` if checksum validation fails. You may provide an
+       #   `on_checksum_validated` callback if you need to verify that validation occurred and which algorithm was
+       #   used. To disable checksum validation, set `checksum_mode` to `"DISABLED"`.
+       #
+       # @option options [Callable] :on_checksum_validated
+       #   Called each time a request's checksum is validated with the checksum algorithm and the
+       #   response. For multipart downloads, this will be called for each part that is downloaded and validated.
+       #
+       # @option options [Proc] :progress_callback
+       #   A Proc that will be called when each chunk of the download is received. It will be invoked with
+       #   `bytes_read`, `part_sizes`, `file_size`. When the object is downloaded as parts (rather than by ranges),
+       #   the `part_sizes` will not be known ahead of time and will be `nil` in the callback until the first bytes
+       #   in the part are received.
+       #
+       # @raise [MultipartDownloadError] Raised when an object validation fails outside of service errors.
+       #
+       # @return [Boolean] Returns `true` when the file is downloaded without any errors.
+       #
+       # @see Client#get_object
+       # @see Client#head_object
+       def download_file(destination, bucket:, key:, **options)
+         downloader = FileDownloader.new(client: @client)
+         downloader.download(destination, options.merge(bucket: bucket, key: key))
+         true
+       end
+
+       # Uploads a file from disk to S3.
+       #
+       #     # a small file is uploaded with the PutObject API
+       #     tm = TransferManager.new
+       #     tm.upload_file('/path/to/small_file', bucket: 'bucket', key: 'key')
+       #
+       # Files larger than or equal to `:multipart_threshold` are uploaded using the multipart upload APIs.
+       #
+       #     # large files are automatically split into parts and the parts are uploaded in parallel
+       #     tm.upload_file('/path/to/large_file', bucket: 'bucket', key: 'key')
+       #
+       # The response of the S3 upload API is yielded if a block is given.
+       #
+       #     # API response will have etag value of the file
+       #     tm.upload_file('/path/to/file', bucket: 'bucket', key: 'key') do |response|
+       #       etag = response.etag
+       #     end
+       #
+       # You can provide a callback to monitor progress of the upload:
+       #
+       #     # bytes and totals are each an array with 1 entry per part
+       #     progress = proc do |bytes, totals|
+       #       bytes.map.with_index do |b, i|
+       #         puts "Part #{i + 1}: #{b} / #{totals[i]} Total: #{100.0 * bytes.sum / totals.sum}%"
+       #       end
+       #     end
+       #     tm.upload_file('/path/to/file', bucket: 'bucket', key: 'key', progress_callback: progress)
+       #
+       # @param [String, Pathname, File, Tempfile] source
+       #   A file on the local file system that will be uploaded. This can either be a `String` or `Pathname` to the
+       #   file, an open `File` object, or an open `Tempfile` object. If you pass an open `File` or `Tempfile` object,
+       #   then you are responsible for closing it after the upload completes. When using an open Tempfile, rewind it
+       #   before uploading or else the object will be empty.
+       #
+       # @param [String] bucket
+       #   The name of the S3 bucket to upload to.
+       #
+       # @param [String] key
+       #   The object key name for the uploaded file.
+       #
+       # @param [Hash] options
+       #   Additional options for {Client#put_object} when file sizes are below the multipart threshold.
+       #   For files larger than the multipart threshold, options for {Client#create_multipart_upload},
+       #   {Client#complete_multipart_upload}, and {Client#upload_part} can be provided.
+       #
+       # @option options [Integer] :multipart_threshold (104857600)
+       #   Files larger than or equal to `:multipart_threshold` are uploaded using the S3 multipart upload APIs.
+       #   Default threshold is `100MB`.
+       #
+       # @option options [Integer] :thread_count (10)
+       #   The number of parallel multipart uploads. This option is not used if the file is smaller than
+       #   `:multipart_threshold`.
+       #
+       # @option options [Proc] :progress_callback (nil)
+       #   A Proc that will be called when each chunk of the upload is sent.
+       #   It will be invoked with `[bytes_read]` and `[total_sizes]`.
+       #
+       # @raise [MultipartUploadError] If a file is being uploaded in parts and the upload cannot be completed,
+       #   then the upload is aborted and this error is raised. The raised error has a `#errors` method that
+       #   returns the failures that caused the upload to be aborted.
+       #
+       # @return [Boolean] Returns `true` when the file is uploaded without any errors.
+       #
+       # @see Client#put_object
+       # @see Client#create_multipart_upload
+       # @see Client#complete_multipart_upload
+       # @see Client#upload_part
+       def upload_file(source, bucket:, key:, **options)
+         uploading_options = options.dup
+         uploader = FileUploader.new(
+           multipart_threshold: uploading_options.delete(:multipart_threshold),
+           client: @client
+         )
+         response = uploader.upload(source, uploading_options.merge(bucket: bucket, key: key))
+         yield response if block_given?
+         true
+       end
+
+       # Uploads a stream in a streaming fashion to S3.
+       #
+       # Passed chunks are automatically split into multipart upload parts and the parts are uploaded in parallel.
+       # This allows for streaming uploads that never touch the disk.
+       #
+       # **Note**: There are known issues in JRuby until jruby-9.1.15.0, so avoid using this with older JRuby versions.
+       #
+       # @example Streaming chunks of data
+       #   tm = TransferManager.new
+       #   tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
+       #     10.times { write_stream << 'foo' }
+       #   end
+       # @example Streaming chunks of data
+       #   tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
+       #     IO.copy_stream(IO.popen('ls'), write_stream)
+       #   end
+       # @example Streaming chunks of data
+       #   tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
+       #     IO.copy_stream(STDIN, write_stream)
+       #   end
+       #
+       # @param [String] bucket
+       #   The name of the S3 bucket to upload to.
+       #
+       # @param [String] key
+       #   The object key name for the uploaded file.
+       #
+       # @param [Hash] options
+       #   Additional options for {Client#create_multipart_upload}, {Client#complete_multipart_upload}, and
+       #   {Client#upload_part} can be provided.
+       #
+       # @option options [Integer] :thread_count (10)
+       #   The number of parallel multipart uploads.
+       #
+       # @option options [Boolean] :tempfile (false)
+       #   Normally read data is stored in memory when building the parts in order to complete the underlying
+       #   multipart upload. By passing `:tempfile => true`, the data read will be temporarily stored on disk,
+       #   vastly reducing the memory footprint.
+       #
+       # @option options [Integer] :part_size (5242880)
+       #   Define how big each part, except the last, should be. Default `:part_size` is `5 * 1024 * 1024`.
+       #
+       # @raise [MultipartUploadError] If an object is being uploaded in parts and the upload cannot be completed,
+       #   then the upload is aborted and this error is raised. The raised error has a `#errors` method that returns
+       #   the failures that caused the upload to be aborted.
+       #
+       # @return [Boolean] Returns `true` when the object is uploaded without any errors.
+       #
+       # @see Client#create_multipart_upload
+       # @see Client#complete_multipart_upload
+       # @see Client#upload_part
+       def upload_stream(bucket:, key:, **options, &block)
+         uploading_options = options.dup
+         uploader = MultipartStreamUploader.new(
+           client: @client,
+           thread_count: uploading_options.delete(:thread_count),
+           tempfile: uploading_options.delete(:tempfile),
+           part_size: uploading_options.delete(:part_size)
+         )
+         uploader.upload(uploading_options.merge(bucket: bucket, key: key), &block)
+         true
+       end
+     end
+   end
+ end
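Pulling the new file together, an end-to-end sketch of the 1.198.0 surface (bucket, keys, and paths are placeholders):

```ruby
require 'aws-sdk-s3'

tm = Aws::S3::TransferManager.new # builds a default S3::Client

# multipart-aware upload and download with a tuned thread count
tm.upload_file('/tmp/report.csv', bucket: 'my-bucket', key: 'reports/report.csv')
tm.download_file('/tmp/report_copy.csv',
                 bucket: 'my-bucket',
                 key: 'reports/report.csv',
                 thread_count: 4)

# disk-free streaming upload
tm.upload_stream(bucket: 'my-bucket', key: 'logs/stream.log') do |write_stream|
  IO.copy_stream(STDIN, write_stream)
end
```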
data/lib/aws-sdk-s3.rb CHANGED
@@ -75,7 +75,7 @@ module Aws::S3
  autoload :ObjectVersion, 'aws-sdk-s3/object_version'
  autoload :EventStreams, 'aws-sdk-s3/event_streams'

- GEM_VERSION = '1.196.1'
+ GEM_VERSION = '1.198.0'

  end

metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: aws-sdk-s3
  version: !ruby/object:Gem::Version
-   version: 1.196.1
+   version: 1.198.0
  platform: ruby
  authors:
  - Amazon Web Services
@@ -46,7 +46,7 @@ dependencies:
        version: '3'
      - - ">="
        - !ruby/object:Gem::Version
-         version: 3.228.0
+         version: 3.231.0
    type: :runtime
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
@@ -56,7 +56,7 @@ dependencies:
        version: '3'
      - - ">="
        - !ruby/object:Gem::Version
-         version: 3.228.0
+         version: 3.231.0
  description: Official AWS Ruby gem for Amazon Simple Storage Service (Amazon S3).
    This gem is part of the AWS SDK for Ruby.
  email:
@@ -170,6 +170,7 @@ files:
  - lib/aws-sdk-s3/presigned_post.rb
  - lib/aws-sdk-s3/presigner.rb
  - lib/aws-sdk-s3/resource.rb
+ - lib/aws-sdk-s3/transfer_manager.rb
  - lib/aws-sdk-s3/types.rb
  - lib/aws-sdk-s3/waiters.rb
  - sig/bucket.rbs