aws-sdk-s3 1.13.0 → 1.23.1

This diff represents the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
@@ -21,6 +21,7 @@ module Aws::S3
21
21
  AccessControlPolicy = Shapes::StructureShape.new(name: 'AccessControlPolicy')
22
22
  AccessControlTranslation = Shapes::StructureShape.new(name: 'AccessControlTranslation')
23
23
  AccountId = Shapes::StringShape.new(name: 'AccountId')
24
+ AllowQuotedRecordDelimiter = Shapes::BooleanShape.new(name: 'AllowQuotedRecordDelimiter')
24
25
  AllowedHeader = Shapes::StringShape.new(name: 'AllowedHeader')
25
26
  AllowedHeaders = Shapes::ListShape.new(name: 'AllowedHeaders', flattened: true)
26
27
  AllowedMethod = Shapes::StringShape.new(name: 'AllowedMethod')
@@ -117,6 +118,8 @@ module Aws::S3
117
118
  DeleteBucketWebsiteRequest = Shapes::StructureShape.new(name: 'DeleteBucketWebsiteRequest')
118
119
  DeleteMarker = Shapes::BooleanShape.new(name: 'DeleteMarker')
119
120
  DeleteMarkerEntry = Shapes::StructureShape.new(name: 'DeleteMarkerEntry')
121
+ DeleteMarkerReplication = Shapes::StructureShape.new(name: 'DeleteMarkerReplication')
122
+ DeleteMarkerReplicationStatus = Shapes::StringShape.new(name: 'DeleteMarkerReplicationStatus')
120
123
  DeleteMarkerVersionId = Shapes::StringShape.new(name: 'DeleteMarkerVersionId')
121
124
  DeleteMarkers = Shapes::ListShape.new(name: 'DeleteMarkers', flattened: true)
122
125
  DeleteObjectOutput = Shapes::StructureShape.new(name: 'DeleteObjectOutput')
@@ -335,6 +338,7 @@ module Aws::S3
335
338
  OutputSerialization = Shapes::StructureShape.new(name: 'OutputSerialization')
336
339
  Owner = Shapes::StructureShape.new(name: 'Owner')
337
340
  OwnerOverride = Shapes::StringShape.new(name: 'OwnerOverride')
341
+ ParquetInput = Shapes::StructureShape.new(name: 'ParquetInput')
338
342
  Part = Shapes::StructureShape.new(name: 'Part')
339
343
  PartNumber = Shapes::IntegerShape.new(name: 'PartNumber')
340
344
  PartNumberMarker = Shapes::IntegerShape.new(name: 'PartNumberMarker')
@@ -344,6 +348,7 @@ module Aws::S3
344
348
  Permission = Shapes::StringShape.new(name: 'Permission')
345
349
  Policy = Shapes::StringShape.new(name: 'Policy')
346
350
  Prefix = Shapes::StringShape.new(name: 'Prefix')
351
+ Priority = Shapes::IntegerShape.new(name: 'Priority')
347
352
  Progress = Shapes::StructureShape.new(name: 'Progress')
348
353
  ProgressEvent = Shapes::StructureShape.new(name: 'ProgressEvent')
349
354
  Protocol = Shapes::StringShape.new(name: 'Protocol')
@@ -389,6 +394,8 @@ module Aws::S3
389
394
  ReplicaKmsKeyID = Shapes::StringShape.new(name: 'ReplicaKmsKeyID')
390
395
  ReplicationConfiguration = Shapes::StructureShape.new(name: 'ReplicationConfiguration')
391
396
  ReplicationRule = Shapes::StructureShape.new(name: 'ReplicationRule')
397
+ ReplicationRuleAndOperator = Shapes::StructureShape.new(name: 'ReplicationRuleAndOperator')
398
+ ReplicationRuleFilter = Shapes::StructureShape.new(name: 'ReplicationRuleFilter')
392
399
  ReplicationRuleStatus = Shapes::StringShape.new(name: 'ReplicationRuleStatus')
393
400
  ReplicationRules = Shapes::ListShape.new(name: 'ReplicationRules', flattened: true)
394
401
  ReplicationStatus = Shapes::StringShape.new(name: 'ReplicationStatus')
@@ -558,6 +565,7 @@ module Aws::S3
558
565
  CSVInput.add_member(:record_delimiter, Shapes::ShapeRef.new(shape: RecordDelimiter, location_name: "RecordDelimiter"))
559
566
  CSVInput.add_member(:field_delimiter, Shapes::ShapeRef.new(shape: FieldDelimiter, location_name: "FieldDelimiter"))
560
567
  CSVInput.add_member(:quote_character, Shapes::ShapeRef.new(shape: QuoteCharacter, location_name: "QuoteCharacter"))
568
+ CSVInput.add_member(:allow_quoted_record_delimiter, Shapes::ShapeRef.new(shape: AllowQuotedRecordDelimiter, location_name: "AllowQuotedRecordDelimiter"))
561
569
  CSVInput.struct_class = Types::CSVInput
562
570
 
563
571
  CSVOutput.add_member(:quote_fields, Shapes::ShapeRef.new(shape: QuoteFields, location_name: "QuoteFields"))
@@ -772,6 +780,9 @@ module Aws::S3
772
780
  DeleteMarkerEntry.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified"))
773
781
  DeleteMarkerEntry.struct_class = Types::DeleteMarkerEntry
774
782
 
783
+ DeleteMarkerReplication.add_member(:status, Shapes::ShapeRef.new(shape: DeleteMarkerReplicationStatus, location_name: "Status"))
784
+ DeleteMarkerReplication.struct_class = Types::DeleteMarkerReplication
785
+
775
786
  DeleteMarkers.member = Shapes::ShapeRef.new(shape: DeleteMarkerEntry)
776
787
 
777
788
  DeleteObjectOutput.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location: "header", location_name: "x-amz-delete-marker"))
@@ -1135,6 +1146,7 @@ module Aws::S3
1135
1146
  InputSerialization.add_member(:csv, Shapes::ShapeRef.new(shape: CSVInput, location_name: "CSV"))
1136
1147
  InputSerialization.add_member(:compression_type, Shapes::ShapeRef.new(shape: CompressionType, location_name: "CompressionType"))
1137
1148
  InputSerialization.add_member(:json, Shapes::ShapeRef.new(shape: JSONInput, location_name: "JSON"))
1149
+ InputSerialization.add_member(:parquet, Shapes::ShapeRef.new(shape: ParquetInput, location_name: "Parquet"))
1138
1150
  InputSerialization.struct_class = Types::InputSerialization
1139
1151
 
1140
1152
  InventoryConfiguration.add_member(:destination, Shapes::ShapeRef.new(shape: InventoryDestination, required: true, location_name: "Destination"))
@@ -1463,6 +1475,8 @@ module Aws::S3
1463
1475
  Owner.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID"))
1464
1476
  Owner.struct_class = Types::Owner
1465
1477
 
1478
+ ParquetInput.struct_class = Types::ParquetInput
1479
+
1466
1480
  Part.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, location_name: "PartNumber"))
1467
1481
  Part.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified"))
1468
1482
  Part.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag"))
@@ -1714,12 +1728,24 @@ module Aws::S3
1714
1728
  ReplicationConfiguration.struct_class = Types::ReplicationConfiguration
1715
1729
 
1716
1730
  ReplicationRule.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID"))
1717
- ReplicationRule.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, required: true, location_name: "Prefix"))
1731
+ ReplicationRule.add_member(:priority, Shapes::ShapeRef.new(shape: Priority, location_name: "Priority"))
1732
+ ReplicationRule.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, deprecated: true, location_name: "Prefix"))
1733
+ ReplicationRule.add_member(:filter, Shapes::ShapeRef.new(shape: ReplicationRuleFilter, location_name: "Filter"))
1718
1734
  ReplicationRule.add_member(:status, Shapes::ShapeRef.new(shape: ReplicationRuleStatus, required: true, location_name: "Status"))
1719
1735
  ReplicationRule.add_member(:source_selection_criteria, Shapes::ShapeRef.new(shape: SourceSelectionCriteria, location_name: "SourceSelectionCriteria"))
1720
1736
  ReplicationRule.add_member(:destination, Shapes::ShapeRef.new(shape: Destination, required: true, location_name: "Destination"))
1737
+ ReplicationRule.add_member(:delete_marker_replication, Shapes::ShapeRef.new(shape: DeleteMarkerReplication, location_name: "DeleteMarkerReplication"))
1721
1738
  ReplicationRule.struct_class = Types::ReplicationRule
1722
1739
 
1740
+ ReplicationRuleAndOperator.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
1741
+ ReplicationRuleAndOperator.add_member(:tags, Shapes::ShapeRef.new(shape: TagSet, location_name: "Tag", metadata: {"flattened"=>true}))
1742
+ ReplicationRuleAndOperator.struct_class = Types::ReplicationRuleAndOperator
1743
+
1744
+ ReplicationRuleFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
1745
+ ReplicationRuleFilter.add_member(:tag, Shapes::ShapeRef.new(shape: Tag, location_name: "Tag"))
1746
+ ReplicationRuleFilter.add_member(:and, Shapes::ShapeRef.new(shape: ReplicationRuleAndOperator, location_name: "And"))
1747
+ ReplicationRuleFilter.struct_class = Types::ReplicationRuleFilter
1748
+
1723
1749
  ReplicationRules.member = Shapes::ShapeRef.new(shape: ReplicationRule)
1724
1750
 
1725
1751
  RequestPaymentConfiguration.add_member(:payer, Shapes::ShapeRef.new(shape: Payer, required: true, location_name: "Payer"))
@@ -1957,10 +1983,15 @@ module Aws::S3
1957
1983
  api.version = "2006-03-01"
1958
1984
 
1959
1985
  api.metadata = {
1986
+ "apiVersion" => "2006-03-01",
1987
+ "checksumFormat" => "md5",
1960
1988
  "endpointPrefix" => "s3",
1989
+ "globalEndpoint" => "s3.amazonaws.com",
1961
1990
  "protocol" => "rest-xml",
1991
+ "serviceAbbreviation" => "Amazon S3",
1962
1992
  "serviceFullName" => "Amazon Simple Storage Service",
1963
- "timestampFormat" => "rfc822",
1993
+ "serviceId" => "S3",
1994
+ "uid" => "s3-2006-03-01",
1964
1995
  }
1965
1996
 
1966
1997
  api.add_operation(:abort_multipart_upload, Seahorse::Model::Operation.new.tap do |o|
@@ -61,8 +61,8 @@ module Aws
61
61
  # You can pass `virtual_host: true` to use the bucket name as the
62
62
  # host name.
63
63
  #
64
- # bucket = s3.bucket('my.bucket.com', virtual_host: true)
65
- # bucket.url
64
+ # bucket = s3.bucket('my.bucket.com')
65
+ # bucket.url(virtual_host: true)
66
66
  # #=> "http://my.bucket.com"
67
67
  #
68
68
  # @option options [Boolean] :virtual_host (false) When `true`,
@@ -214,6 +214,62 @@ module Aws
214
214
  url.to_s
215
215
  end
216
216
 
217
+ # Uploads a stream in a streaming fashion to the current object in S3.
218
+ #
219
+ # # Passed chunks automatically split into multipart upload parts
220
+ # # and the parts are uploaded in parallel. This allows for streaming uploads
221
+ # # that never touch the disk.
222
+ #
223
+ # Note that this is known to have issues in JRuby until jruby-9.1.15.0, so avoid using this with older versions of JRuby.
224
+ #
225
+ # @example Streaming chunks of data
226
+ # obj.upload_stream do |write_stream|
227
+ # 10.times { write_stream << 'foo' }
228
+ # end
229
+ # @example Streaming chunks of data
230
+ # obj.upload_stream do |write_stream|
231
+ # IO.copy_stream(IO.popen('ls'), write_stream)
232
+ # end
233
+ # @example Streaming chunks of data
234
+ # obj.upload_stream do |write_stream|
235
+ # IO.copy_stream(STDIN, write_stream)
236
+ # end
237
+ #
238
+ # @option options [Integer] :thread_count
239
+ # The number of parallel multipart uploads
240
+ # Default `:thread_count` is `10`.
241
+ #
242
+ # @option options [Boolean] :tempfile
243
+ # Normally read data is stored in memory when building the parts in order to complete
244
+ # the underlying multipart upload. By passing `:tempfile => true` data read will be
245
+ # temporarily stored on disk reducing the memory footprint vastly.
246
+ # Default `:tempfile` is `false`.
247
+ #
248
+ # @option options [Integer] :part_size
249
+ # Define how big each part size but the last should be.
250
+ # Default `:part_size` is `5 * 1024 * 1024`.
251
+ #
252
+ # @raise [MultipartUploadError] If an object is being uploaded in
253
+ # parts, and the upload can not be completed, then the upload is
254
+ # aborted and this error is raised. The raised error has a `#errors`
255
+ # method that returns the failures that caused the upload to be
256
+ # aborted.
257
+ #
258
+ # @return [Boolean] Returns `true` when the object is uploaded
259
+ # without any errors.
260
+ #
261
+ def upload_stream(options = {}, &block)
262
+ uploading_options = options.dup
263
+ uploader = MultipartStreamUploader.new(
264
+ client: client,
265
+ thread_count: uploading_options.delete(:thread_count),
266
+ tempfile: uploading_options.delete(:tempfile),
267
+ part_size: uploading_options.delete(:part_size),
268
+ )
269
+ uploader.upload(uploading_options.merge(bucket: bucket_name, key: key), &block)
270
+ true
271
+ end
272
+
217
273
  # Uploads a file from disk to the current object in S3.
218
274
  #
219
275
  # # small files are uploaded in a single API call
@@ -277,6 +333,10 @@ module Aws
277
333
  # @option options [String] thread_count Customize threads used in multipart
278
334
  # download, if not provided, 10 is default value
279
335
  #
336
+ # @option options [String] version_id The object version id used to retrieve
337
+ # the object, to know more about object versioning, see:
338
+ # https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html
339
+ #
280
340
  # @return [Boolean] Returns `true` when the file is downloaded
281
341
  # without any errors.
282
342
  def download_file(destination, options = {})
@@ -60,6 +60,13 @@ module Aws
60
60
  object.upload_file(source, options)
61
61
  end
62
62
 
63
+ # @options (see Object#upload_stream)
64
+ # @return (see Object#upload_stream)
65
+ # @see Object#upload_stream
66
+ def upload_stream(options = {}, &block)
67
+ object.upload_stream(options, &block)
68
+ end
69
+
63
70
  # @param (see Object#download_file)
64
71
  # @options (see Object#download_file)
65
72
  # @return (see Object#download_file)
@@ -6,6 +6,7 @@ require 'aws-sdk-s3/file_uploader'
6
6
  require 'aws-sdk-s3/file_downloader'
7
7
  require 'aws-sdk-s3/legacy_signer'
8
8
  require 'aws-sdk-s3/multipart_file_uploader'
9
+ require 'aws-sdk-s3/multipart_stream_uploader'
9
10
  require 'aws-sdk-s3/multipart_upload_error'
10
11
  require 'aws-sdk-s3/object_copier'
11
12
  require 'aws-sdk-s3/object_multipart_copier'
@@ -22,8 +22,10 @@ module Aws
22
22
 
23
23
  def write(chunk)
24
24
  chunk = truncate_chunk(chunk)
25
- @bytes_written += chunk.bytesize
26
- @decrypter.write(chunk)
25
+ if chunk.bytesize > 0
26
+ @bytes_written += chunk.bytesize
27
+ @decrypter.write(chunk)
28
+ end
27
29
  end
28
30
 
29
31
  def finalize
@@ -39,8 +41,12 @@ module Aws
39
41
  def truncate_chunk(chunk)
40
42
  if chunk.bytesize + @bytes_written <= @max_bytes
41
43
  chunk
42
- else
44
+ elsif @bytes_written < @max_bytes
43
45
  chunk[0..(@max_bytes - @bytes_written - 1)]
46
+ else
47
+ # If the tag was sent over after the full body has been read,
48
+ # we don't want to accidentally append it.
49
+ ""
44
50
  end
45
51
  end
46
52
 
@@ -24,15 +24,18 @@ module Aws
24
24
  @mode = options[:mode] || "auto"
25
25
  @thread_count = options[:thread_count] || THREAD_COUNT
26
26
  @chunk_size = options[:chunk_size]
27
- @bucket = options[:bucket]
28
- @key = options[:key]
27
+ @params = {
28
+ bucket: options[:bucket],
29
+ key: options[:key],
30
+ }
31
+ @params[:version_id] = options[:version_id] if options[:version_id]
29
32
 
30
33
  case @mode
31
34
  when "auto" then multipart_download
32
35
  when "single_request" then single_request
33
36
  when "get_range"
34
37
  if @chunk_size
35
- resp = @client.head_object(bucket: @bucket, key: @key)
38
+ resp = @client.head_object(@params)
36
39
  multithreaded_get_by_ranges(construct_chunks(resp.content_length))
37
40
  else
38
41
  msg = "In :get_range mode, :chunk_size must be provided"
@@ -48,7 +51,7 @@ module Aws
48
51
  private
49
52
 
50
53
  def multipart_download
51
- resp = @client.head_object(bucket: @bucket, key: @key, part_number: 1)
54
+ resp = @client.head_object(@params.merge(part_number: 1))
52
55
  count = resp.parts_count
53
56
  if count.nil? || count <= 1
54
57
  resp.content_length < MIN_CHUNK_SIZE ?
@@ -56,7 +59,7 @@ module Aws
56
59
  multithreaded_get_by_ranges(construct_chunks(resp.content_length))
57
60
  else
58
61
  # partNumber is an option
59
- resp = @client.head_object(bucket: @bucket, key: @key)
62
+ resp = @client.head_object(@params)
60
63
  resp.content_length < MIN_CHUNK_SIZE ?
61
64
  single_request :
62
65
  compute_mode(resp.content_length, count)
@@ -112,9 +115,7 @@ module Aws
112
115
  batch.each do |chunk|
113
116
  threads << Thread.new do
114
117
  resp = @client.get_object(
115
- :bucket => @bucket,
116
- :key => @key,
117
- param.to_sym => chunk
118
+ @params.merge(param.to_sym => chunk)
118
119
  )
119
120
  write(resp)
120
121
  end
@@ -131,7 +132,7 @@ module Aws
131
132
 
132
133
  def single_request
133
134
  @client.get_object(
134
- bucket: @bucket, key: @key, response_target: @path
135
+ @params.merge(response_target: @path)
135
136
  )
136
137
  end
137
138
  end
@@ -56,14 +56,12 @@ module Aws
56
56
  end
57
57
 
58
58
  def read_from_file(bytes, output_buffer)
59
- if bytes
60
- data = @file.read([remaining_bytes, bytes].min)
61
- data = nil if data == ''
62
- else
63
- data = @file.read(remaining_bytes)
64
- end
59
+ length = [remaining_bytes, *bytes].min
60
+ data = @file.read(length, output_buffer)
61
+
65
62
  @position += data ? data.bytesize : 0
66
- output_buffer ? output_buffer.replace(data || '') : data
63
+
64
+ data.to_s unless bytes && (data.nil? || data.empty?)
67
65
  end
68
66
 
69
67
  def remaining_bytes
@@ -45,9 +45,7 @@ module Aws
45
45
 
46
46
  def open_file(source)
47
47
  if String === source || Pathname === source
48
- file = File.open(source, 'rb')
49
- yield(file)
50
- file.close
48
+ File.open(source, 'rb') { |file| yield(file) }
51
49
  else
52
50
  yield(source)
53
51
  end
@@ -0,0 +1,160 @@
1
+ require 'thread'
2
+ require 'set'
3
+ require 'tempfile'
4
+ require 'stringio'
5
+
6
+ module Aws
7
+ module S3
8
+ # @api private
9
+ class MultipartStreamUploader
10
+ # api private
11
+ PART_SIZE = 5 * 1024 * 1024 # 5MB
12
+
13
+ # api private
14
+ THREAD_COUNT = 10
15
+
16
+ # api private
17
+ TEMPFILE_PREIX = 'aws-sdk-s3-upload_stream'.freeze
18
+
19
+ # @api private
20
+ CREATE_OPTIONS =
21
+ Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)
22
+
23
+ # @api private
24
+ UPLOAD_PART_OPTIONS =
25
+ Set.new(Client.api.operation(:upload_part).input.shape.member_names)
26
+
27
+ # @option options [Client] :client
28
+ def initialize(options = {})
29
+ @client = options[:client] || Client.new
30
+ @tempfile = options[:tempfile]
31
+ @part_size = options[:part_size] || PART_SIZE
32
+ @thread_count = options[:thread_count] || THREAD_COUNT
33
+ end
34
+
35
+ # @return [Client]
36
+ attr_reader :client
37
+
38
+ # @option options [required,String] :bucket
39
+ # @option options [required,String] :key
40
+ # @return [void]
41
+ def upload(options = {}, &block)
42
+ upload_id = initiate_upload(options)
43
+ parts = upload_parts(upload_id, options, &block)
44
+ complete_upload(upload_id, parts, options)
45
+ end
46
+
47
+ private
48
+
49
+ def initiate_upload(options)
50
+ @client.create_multipart_upload(create_opts(options)).upload_id
51
+ end
52
+
53
+ def complete_upload(upload_id, parts, options)
54
+ @client.complete_multipart_upload(
55
+ bucket: options[:bucket],
56
+ key: options[:key],
57
+ upload_id: upload_id,
58
+ multipart_upload: { parts: parts })
59
+ end
60
+
61
+ def upload_parts(upload_id, options, &block)
62
+ completed = Queue.new
63
+ errors = IO.pipe do |read_pipe, write_pipe|
64
+ threads = upload_in_threads(read_pipe, completed, upload_part_opts(options).merge(upload_id: upload_id))
65
+ block.call(write_pipe)
66
+ write_pipe.close
67
+ threads.map(&:value).compact
68
+ end
69
+ if errors.empty?
70
+ Array.new(completed.size) { completed.pop }.sort_by { |part| part[:part_number] }
71
+ else
72
+ abort_upload(upload_id, options, errors)
73
+ end
74
+ end
75
+
76
+ def abort_upload(upload_id, options, errors)
77
+ @client.abort_multipart_upload(
78
+ bucket: options[:bucket],
79
+ key: options[:key],
80
+ upload_id: upload_id
81
+ )
82
+ msg = "multipart upload failed: #{errors.map(&:message).join("; ")}"
83
+ raise MultipartUploadError.new(msg, errors)
84
+ rescue MultipartUploadError => error
85
+ raise error
86
+ rescue => error
87
+ msg = "failed to abort multipart upload: #{error.message}"
88
+ raise MultipartUploadError.new(msg, errors + [error])
89
+ end
90
+
91
+ def create_opts(options)
92
+ CREATE_OPTIONS.inject({}) do |hash, key|
93
+ hash[key] = options[key] if options.key?(key)
94
+ hash
95
+ end
96
+ end
97
+
98
+ def upload_part_opts(options)
99
+ UPLOAD_PART_OPTIONS.inject({}) do |hash, key|
100
+ hash[key] = options[key] if options.key?(key)
101
+ hash
102
+ end
103
+ end
104
+
105
+ def read_to_part_body(read_pipe)
106
+ return if read_pipe.closed?
107
+ temp_io = @tempfile ? Tempfile.new(TEMPFILE_PREIX) : StringIO.new
108
+ temp_io.binmode
109
+ bytes_copied = IO.copy_stream(read_pipe, temp_io, @part_size)
110
+ temp_io.rewind
111
+ if bytes_copied == 0
112
+ if Tempfile === temp_io
113
+ temp_io.close
114
+ temp_io.unlink
115
+ end
116
+ nil
117
+ else
118
+ temp_io
119
+ end
120
+ end
121
+
122
+ def upload_in_threads(read_pipe, completed, options)
123
+ mutex = Mutex.new
124
+ part_number = 0
125
+ @thread_count.times.map do
126
+ thread = Thread.new do
127
+ begin
128
+ loop do
129
+ body, thread_part_number = mutex.synchronize do
130
+ [read_to_part_body(read_pipe), part_number += 1]
131
+ end
132
+ break unless (body || thread_part_number == 1)
133
+ begin
134
+ part = options.merge(
135
+ body: body,
136
+ part_number: thread_part_number,
137
+ )
138
+ resp = @client.upload_part(part)
139
+ completed << {etag: resp.etag, part_number: part[:part_number]}
140
+ ensure
141
+ if Tempfile === body
142
+ body.close
143
+ body.unlink
144
+ end
145
+ end
146
+ end
147
+ nil
148
+ rescue => error
149
+ # keep other threads from uploading other parts
150
+ mutex.synchronize { read_pipe.close_read }
151
+ error
152
+ end
153
+ end
154
+ thread.abort_on_exception = true
155
+ thread
156
+ end
157
+ end
158
+ end
159
+ end
160
+ end
@@ -898,11 +898,14 @@ module Aws::S3
898
898
  # record_delimiter: "RecordDelimiter",
899
899
  # field_delimiter: "FieldDelimiter",
900
900
  # quote_character: "QuoteCharacter",
901
+ # allow_quoted_record_delimiter: false,
901
902
  # },
902
- # compression_type: "NONE", # accepts NONE, GZIP
903
+ # compression_type: "NONE", # accepts NONE, GZIP, BZIP2
903
904
  # json: {
904
905
  # type: "DOCUMENT", # accepts DOCUMENT, LINES
905
906
  # },
907
+ # parquet: {
908
+ # },
906
909
  # },
907
910
  # expression_type: "SQL", # required, accepts SQL
908
911
  # expression: "Expression", # required
@@ -741,11 +741,14 @@ module Aws::S3
741
741
  # record_delimiter: "RecordDelimiter",
742
742
  # field_delimiter: "FieldDelimiter",
743
743
  # quote_character: "QuoteCharacter",
744
+ # allow_quoted_record_delimiter: false,
744
745
  # },
745
- # compression_type: "NONE", # accepts NONE, GZIP
746
+ # compression_type: "NONE", # accepts NONE, GZIP, BZIP2
746
747
  # json: {
747
748
  # type: "DOCUMENT", # accepts DOCUMENT, LINES
748
749
  # },
750
+ # parquet: {
751
+ # },
749
752
  # },
750
753
  # expression_type: "SQL", # required, accepts SQL
751
754
  # expression: "Expression", # required
@@ -45,7 +45,7 @@ module Aws
45
45
  end
46
46
 
47
47
  def update_in_chunks(digest, io)
48
- while chunk = io.read(CHUNK_SIZE)
48
+ while chunk = io.read(CHUNK_SIZE, buffer ||= "")
49
49
  digest.update(chunk)
50
50
  end
51
51
  io.rewind
@@ -18,7 +18,9 @@ by Amazon S3.
18
18
  response = @handler.call(context)
19
19
  if context.http_response.status_code == 307
20
20
  endpoint = context.http_response.headers['location']
21
- context.http_request.endpoint = endpoint
21
+ unless context.http_request.endpoint.host.include?('fips')
22
+ context.http_request.endpoint = endpoint
23
+ end
22
24
  context.http_response.body.truncate(0)
23
25
  @handler.call(context)
24
26
  else
@@ -113,7 +113,7 @@ module Aws
113
113
  private
114
114
 
115
115
  def handle_region_errors(response)
116
- if wrong_sigv4_region?(response)
116
+ if wrong_sigv4_region?(response) && !fips_region?(response)
117
117
  get_region_and_retry(response.context)
118
118
  else
119
119
  response
@@ -133,6 +133,10 @@ module Aws
133
133
  S3::BUCKET_REGIONS[context.params[:bucket]] = actual_region
134
134
  end
135
135
 
136
+ def fips_region?(resp)
137
+ resp.context.http_request.endpoint.host.include?('fips')
138
+ end
139
+
136
140
  def wrong_sigv4_region?(resp)
137
141
  resp.context.http_response.status_code == 400 &&
138
142
  (
@@ -98,6 +98,7 @@ module Aws
98
98
  req.handlers.remove(Seahorse::Client::Plugins::ContentLength::Handler)
99
99
 
100
100
  signer = build_signer(req.context.config)
101
+ req.context[:presigned_url] = true
101
102
 
102
103
  req.handle(step: :send) do |context|
103
104