aws-sdk-resources 2.11.549

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. checksums.yaml +7 -0
  2. data/lib/aws-sdk-resources.rb +91 -0
  3. data/lib/aws-sdk-resources/batch.rb +143 -0
  4. data/lib/aws-sdk-resources/builder.rb +85 -0
  5. data/lib/aws-sdk-resources/builder_sources.rb +105 -0
  6. data/lib/aws-sdk-resources/collection.rb +107 -0
  7. data/lib/aws-sdk-resources/definition.rb +331 -0
  8. data/lib/aws-sdk-resources/documenter.rb +70 -0
  9. data/lib/aws-sdk-resources/documenter/base_operation_documenter.rb +279 -0
  10. data/lib/aws-sdk-resources/documenter/data_operation_documenter.rb +25 -0
  11. data/lib/aws-sdk-resources/documenter/has_many_operation_documenter.rb +69 -0
  12. data/lib/aws-sdk-resources/documenter/has_operation_documenter.rb +66 -0
  13. data/lib/aws-sdk-resources/documenter/operation_documenter.rb +20 -0
  14. data/lib/aws-sdk-resources/documenter/resource_operation_documenter.rb +53 -0
  15. data/lib/aws-sdk-resources/documenter/waiter_operation_documenter.rb +77 -0
  16. data/lib/aws-sdk-resources/errors.rb +15 -0
  17. data/lib/aws-sdk-resources/operation_methods.rb +83 -0
  18. data/lib/aws-sdk-resources/operations.rb +280 -0
  19. data/lib/aws-sdk-resources/options.rb +17 -0
  20. data/lib/aws-sdk-resources/request.rb +39 -0
  21. data/lib/aws-sdk-resources/request_params.rb +140 -0
  22. data/lib/aws-sdk-resources/resource.rb +243 -0
  23. data/lib/aws-sdk-resources/services/ec2.rb +21 -0
  24. data/lib/aws-sdk-resources/services/ec2/instance.rb +29 -0
  25. data/lib/aws-sdk-resources/services/iam.rb +19 -0
  26. data/lib/aws-sdk-resources/services/s3.rb +20 -0
  27. data/lib/aws-sdk-resources/services/s3/bucket.rb +131 -0
  28. data/lib/aws-sdk-resources/services/s3/encryption.rb +21 -0
  29. data/lib/aws-sdk-resources/services/s3/encryption/client.rb +369 -0
  30. data/lib/aws-sdk-resources/services/s3/encryption/decrypt_handler.rb +174 -0
  31. data/lib/aws-sdk-resources/services/s3/encryption/default_cipher_provider.rb +63 -0
  32. data/lib/aws-sdk-resources/services/s3/encryption/default_key_provider.rb +38 -0
  33. data/lib/aws-sdk-resources/services/s3/encryption/encrypt_handler.rb +50 -0
  34. data/lib/aws-sdk-resources/services/s3/encryption/errors.rb +13 -0
  35. data/lib/aws-sdk-resources/services/s3/encryption/io_auth_decrypter.rb +56 -0
  36. data/lib/aws-sdk-resources/services/s3/encryption/io_decrypter.rb +29 -0
  37. data/lib/aws-sdk-resources/services/s3/encryption/io_encrypter.rb +69 -0
  38. data/lib/aws-sdk-resources/services/s3/encryption/key_provider.rb +29 -0
  39. data/lib/aws-sdk-resources/services/s3/encryption/kms_cipher_provider.rb +71 -0
  40. data/lib/aws-sdk-resources/services/s3/encryption/materials.rb +58 -0
  41. data/lib/aws-sdk-resources/services/s3/encryption/utils.rb +79 -0
  42. data/lib/aws-sdk-resources/services/s3/file_downloader.rb +169 -0
  43. data/lib/aws-sdk-resources/services/s3/file_part.rb +75 -0
  44. data/lib/aws-sdk-resources/services/s3/file_uploader.rb +58 -0
  45. data/lib/aws-sdk-resources/services/s3/multipart_file_uploader.rb +187 -0
  46. data/lib/aws-sdk-resources/services/s3/multipart_upload.rb +42 -0
  47. data/lib/aws-sdk-resources/services/s3/multipart_upload_error.rb +16 -0
  48. data/lib/aws-sdk-resources/services/s3/object.rb +290 -0
  49. data/lib/aws-sdk-resources/services/s3/object_copier.rb +99 -0
  50. data/lib/aws-sdk-resources/services/s3/object_multipart_copier.rb +180 -0
  51. data/lib/aws-sdk-resources/services/s3/object_summary.rb +73 -0
  52. data/lib/aws-sdk-resources/services/s3/presigned_post.rb +651 -0
  53. data/lib/aws-sdk-resources/services/sns.rb +7 -0
  54. data/lib/aws-sdk-resources/services/sns/message_verifier.rb +171 -0
  55. data/lib/aws-sdk-resources/services/sqs.rb +7 -0
  56. data/lib/aws-sdk-resources/services/sqs/queue_poller.rb +521 -0
  57. data/lib/aws-sdk-resources/source.rb +39 -0
  58. metadata +118 -0
@@ -0,0 +1,58 @@
1
require 'pathname'

module Aws
  module S3
    # Uploads files to Amazon S3, dispatching between a single
    # `PutObject` request and the multipart upload APIs based on the
    # size of the source file.
    # @api private
    class FileUploader

      # Default size cutoff for switching to multipart uploads.
      FIFTEEN_MEGABYTES = 15 * 1024 * 1024

      # @option options [Client] :client S3 client to use; a new
      #   {Client} is constructed when omitted.
      # @option options [Integer] :multipart_threshold (15728640) Files
      #   greater than or equal to `:multipart_threshold` bytes are
      #   uploaded using S3 multipart APIs.
      def initialize(options = {})
        # Retain the full options hash so it can be forwarded to
        # MultipartFileUploader for large files.
        @options = options
        @client = options[:client] || Client.new
        @multipart_threshold = options[:multipart_threshold] || FIFTEEN_MEGABYTES
      end

      # @return [Client]
      attr_reader :client

      # @return [Integer] Files larger than this in bytes are uploaded
      #   using a {MultipartFileUploader}.
      attr_reader :multipart_threshold

      # @param [String,Pathname,File,Tempfile] source A path on disk or
      #   an open IO-like object to upload.
      # @option options [required,String] :bucket
      # @option options [required,String] :key
      # @return [void]
      def upload(source, options = {})
        if File.size(source) >= multipart_threshold
          MultipartFileUploader.new(@options).upload(source, options)
        else
          put_object(source, options)
        end
      end

      private

      # Performs a single PutObject call, streaming the file as the
      # request body.
      def put_object(source, options)
        open_file(source) do |file|
          @client.put_object(options.merge(body: file))
        end
      end

      # Yields an open, binary-mode file handle for `source`. Paths and
      # Pathnames are opened here and reliably closed; IO-like objects
      # are yielded as-is and left open for the caller to manage.
      def open_file(source, &block)
        if String === source || Pathname === source
          # Block form of File.open guarantees the handle is closed
          # even when the block raises (the previous implementation
          # leaked the handle if put_object raised before file.close).
          File.open(source, 'rb', &block)
        else
          yield(source)
        end
      end

    end
  end
end
@@ -0,0 +1,187 @@
1
require 'pathname'
require 'thread'
require 'set'

module Aws
  module S3
    # Uploads a single file to Amazon S3 using the multipart upload
    # APIs. The file is split into parts that are uploaded in parallel
    # by a pool of threads; failure of any part aborts the upload and
    # raises a {MultipartUploadError}.
    # @api private
    class MultipartFileUploader

      # Amazon S3 requires every part except the last to be at least 5MB.
      MIN_PART_SIZE = 5 * 1024 * 1024 # 5MB

      FILE_TOO_SMALL = "unable to multipart upload files smaller than 5MB"

      # Amazon S3 allows at most 10,000 parts per multipart upload.
      MAX_PARTS = 10_000

      # Default number of concurrent upload threads.
      THREAD_COUNT = 10

      # Request parameter names accepted by CreateMultipartUpload;
      # used to filter the caller's options hash.
      # @api private
      CREATE_OPTIONS =
        Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)

      # Request parameter names accepted by UploadPart.
      # @api private
      UPLOAD_PART_OPTIONS =
        Set.new(Client.api.operation(:upload_part).input.shape.member_names)

      # @option options [Client] :client
      # @option options [Integer] :thread_count (10) Number of threads
      #   used to upload parts concurrently.
      def initialize(options = {})
        @client = options[:client] || Client.new
        @thread_count = options[:thread_count] || THREAD_COUNT
      end

      # @return [Client]
      attr_reader :client

      # @param [String,Pathname,File,Tempfile] source
      # @option options [required,String] :bucket
      # @option options [required,String] :key
      # @raise [ArgumentError] When the source is smaller than 5MB.
      # @raise [MultipartUploadError] When any part fails to upload;
      #   the multipart upload is aborted before raising.
      # @return [void]
      def upload(source, options = {})
        if File.size(source) < MIN_PART_SIZE
          raise ArgumentError, FILE_TOO_SMALL
        else
          upload_id = initiate_upload(options)
          parts = upload_parts(upload_id, source, options)
          complete_upload(upload_id, parts, options)
        end
      end

      private

      # Starts the multipart upload and returns its upload id.
      def initiate_upload(options)
        @client.create_multipart_upload(create_opts(options)).upload_id
      end

      # Finalizes the upload with the list of completed part numbers
      # and ETags.
      def complete_upload(upload_id, parts, options)
        @client.complete_multipart_upload(
          bucket: options[:bucket],
          key: options[:key],
          upload_id: upload_id,
          multipart_upload: { parts: parts })
      end

      # Uploads all parts in parallel. Returns the completed parts
      # sorted by part number, or aborts the upload (raising) if any
      # thread reported an error.
      def upload_parts(upload_id, source, options)
        pending = PartList.new(compute_parts(upload_id, source, options))
        completed = PartList.new
        errors = upload_in_threads(pending, completed)
        if errors.empty?
          completed.to_a.sort_by { |part| part[:part_number] }
        else
          abort_upload(upload_id, options, errors)
        end
      end

      # Aborts the multipart upload and raises a MultipartUploadError
      # wrapping the per-part errors. The raise in the method body is
      # itself subject to the method-level rescue clauses, hence the
      # explicit re-raise of MultipartUploadError; if the abort call
      # fails, that error is appended to the list instead.
      def abort_upload(upload_id, options, errors)
        @client.abort_multipart_upload(
          bucket: options[:bucket],
          key: options[:key],
          upload_id: upload_id
        )
        msg = "multipart upload failed: #{errors.map(&:message).join("; ")}"
        raise MultipartUploadError.new(msg, errors)
      rescue MultipartUploadError => error
        raise error
      rescue => error
        msg = "failed to abort multipart upload: #{error.message}"
        raise MultipartUploadError.new(msg, errors + [error])
      end

      # Builds the per-part request hashes. Each part wraps a FilePart
      # that reads `size` bytes of `source` starting at `offset`.
      def compute_parts(upload_id, source, options)
        size = File.size(source)
        default_part_size = compute_default_part_size(size)
        offset = 0
        part_number = 1
        parts = []
        while offset < size
          parts << upload_part_opts(options).merge({
            upload_id: upload_id,
            part_number: part_number,
            body: FilePart.new(
              source: source,
              offset: offset,
              size: part_size(size, default_part_size, offset)
            )
          })
          part_number += 1
          offset += default_part_size
        end
        parts
      end

      # Filters `options` down to the keys accepted by
      # CreateMultipartUpload.
      def create_opts(options)
        CREATE_OPTIONS.inject({}) do |hash, key|
          hash[key] = options[key] if options.key?(key)
          hash
        end
      end

      # Filters `options` down to the keys accepted by UploadPart.
      def upload_part_opts(options)
        UPLOAD_PART_OPTIONS.inject({}) do |hash, key|
          hash[key] = options[key] if options.key?(key)
          hash
        end
      end

      # Spawns the worker threads. Each thread pops parts from
      # `pending` until it is empty, pushing results onto `completed`.
      # Errors are captured as the thread's return value, so
      # `Thread#value` joins and collects them; `compact` drops the
      # `nil` returned by threads that finished cleanly.
      def upload_in_threads(pending, completed)
        threads = []
        @thread_count.times do
          thread = Thread.new do
            begin
              while part = pending.shift
                resp = @client.upload_part(part)
                part[:body].close
                completed.push(etag: resp.etag, part_number: part[:part_number])
              end
              nil
            rescue => error
              # keep other threads from uploading other parts
              pending.clear!
              error
            end
          end
          thread.abort_on_exception = true
          threads << thread
        end
        threads.map(&:value).compact
      end

      # Default part size: an even split across MAX_PARTS, but never
      # below the 5MB API minimum.
      def compute_default_part_size(source_size)
        [(source_size.to_f / MAX_PARTS).ceil, MIN_PART_SIZE].max.to_i
      end

      # Size of the part starting at `offset`; the final part may be
      # smaller than the default part size.
      def part_size(total_size, part_size, offset)
        if offset + part_size > total_size
          total_size - offset
        else
          part_size
        end
      end

      # Thread-safe list of part hashes shared by the worker threads.
      # @api private
      class PartList

        def initialize(parts = [])
          @parts = parts
          @mutex = Mutex.new
        end

        def push(part)
          @mutex.synchronize { @parts.push(part) }
        end

        def shift
          @mutex.synchronize { @parts.shift }
        end

        # Empties the list; used to stop the other threads after a
        # part upload fails.
        def clear!
          @mutex.synchronize { @parts.clear }
        end

        def to_a
          @mutex.synchronize { @parts.dup }
        end

      end
    end
  end
end
@@ -0,0 +1,42 @@
1
module Aws
  module S3
    class MultipartUpload

      alias_method :basic_complete, :complete

      # Completes the upload. Requires a list of completed parts, given
      # as `:part_number` / `:etag` pairs:
      #
      #     upload.complete(multipart_upload: { parts: [
      #       { part_number: 1, etag:'etag1' },
      #       { part_number: 2, etag:'etag2' },
      #       ...
      #     ]})
      #
      # Alternatively, pass **`compute_parts: true`** and the part list
      # is built for you by calling {Client#list_parts}.
      #
      #     upload.complete(compute_parts: true)
      #
      # @option options [Boolean] :compute_parts (false) When `true`,
      #   {Client#list_parts} is called to determine the list of
      #   required part numbers and their ETags.
      #
      def complete(options = {})
        # Note: :compute_parts is intentionally deleted from the
        # caller-supplied hash before delegating.
        compute = options.delete(:compute_parts)
        options[:multipart_upload] = { parts: compute_parts } if compute
        basic_complete(options)
      end

      private

      # Lists the uploaded parts and maps them to the
      # `{ part_number:, etag: }` hashes required by
      # CompleteMultipartUpload, ordered by part number.
      def compute_parts
        parts.sort_by(&:part_number).map do |part|
          { part_number: part.part_number, etag: part.etag }
        end
      end

    end
  end
end
@@ -0,0 +1,16 @@
1
module Aws
  module S3
    # Error raised by the multipart upload helpers when one or more
    # part uploads (or the subsequent abort request) fail.
    class MultipartUploadError < StandardError

      # @param [String] message Description of the overall failure.
      # @param [Array<StandardError>] errors The individual errors
      #   encountered while uploading or aborting the upload.
      def initialize(message, errors)
        super(message)
        @errors = errors
      end

      # @return [Array<StandardError>] The list of errors encountered
      #   when uploading or aborting the upload.
      attr_reader :errors

    end
  end
end
@@ -0,0 +1,290 @@
1
module Aws
  module S3
    class Object

      # `#size` reads more naturally than `#content_length` when
      # treating the object as a file.
      alias size content_length

      # Copies another object to this object. Use `multipart_copy: true`
      # for large objects. This is required for objects that exceed 5GB.
      #
      # @param [S3::Object, S3::ObjectVersion, S3::ObjectSummary, String, Hash] source
      #   Where to copy object data from. `source` must be one of the following:
      #
      #   * {Aws::S3::Object}
      #   * {Aws::S3::ObjectSummary}
      #   * {Aws::S3::ObjectVersion}
      #   * Hash - with `:bucket` and `:key` and optional `:version_id`
      #   * String - formatted like `"source-bucket-name/uri-escaped-key"`
      #     or `"source-bucket-name/uri-escaped-key?versionId=version-id"`
      #
      # @option options [Boolean] :multipart_copy (false) When `true`,
      #   the object will be copied using the multipart APIs. This is
      #   necessary for objects larger than 5GB and can provide
      #   performance improvements on large objects. Amazon S3 does
      #   not accept multipart copies for objects smaller than 5MB.
      #
      # @option options [Integer] :content_length Only used when
      #   `:multipart_copy` is `true`. Passing this option avoids a HEAD
      #   request to query the source object size.
      #
      # @option options [S3::Client] :copy_source_client Only used when
      #   `:multipart_copy` is `true` and the source object is in a
      #   different region. You do not need to specify this option
      #   if you have provided `:content_length`.
      #
      # @option options [String] :copy_source_region Only used when
      #   `:multipart_copy` is `true` and the source object is in a
      #   different region. You do not need to specify this option
      #   if you have provided a `:copy_source_client` or a `:content_length`.
      #
      # @example Basic object copy
      #
      #   bucket = Aws::S3::Bucket.new('target-bucket')
      #   object = bucket.object('target-key')
      #
      #   # source as String
      #   object.copy_from('source-bucket/source-key')
      #
      #   # source as Hash
      #   object.copy_from(bucket:'source-bucket', key:'source-key')
      #
      #   # source as Aws::S3::Object
      #   object.copy_from(bucket.object('source-key'))
      #
      # @example Managed copy of large objects
      #
      #   # uses multipart upload APIs to copy object
      #   object.copy_from('src-bucket/src-key', multipart_copy: true)
      #
      # @see #copy_to
      #
      def copy_from(source, options = {})
        if Hash === source && source[:copy_source]
          # for backwards compatibility: a Hash with :copy_source is
          # passed straight through to Client#copy_object.
          @client.copy_object(source.merge(bucket: bucket_name, key: key))
        else
          ObjectCopier.new(self, options).copy_from(source, options)
        end
      end

      # Copies this object to another object. Use `multipart_copy: true`
      # for large objects. This is required for objects that exceed 5GB.
      #
      # @note If you need to copy to a bucket in a different region, use
      #   {#copy_from}.
      #
      # @param [S3::Object, String, Hash] target Where to copy the object
      #   data to. `target` must be one of the following:
      #
      #   * {Aws::S3::Object}
      #   * Hash - with `:bucket` and `:key`
      #   * String - formatted like `"target-bucket-name/target-key"`
      #
      # @example Basic object copy
      #
      #   bucket = Aws::S3::Bucket.new('source-bucket')
      #   object = bucket.object('source-key')
      #
      #   # target as String
      #   object.copy_to('target-bucket/target-key')
      #
      #   # target as Hash
      #   object.copy_to(bucket: 'target-bucket', key: 'target-key')
      #
      #   # target as Aws::S3::Object
      #   object.copy_to(bucket.object('target-key'))
      #
      # @example Managed copy of large objects
      #
      #   # uses multipart upload APIs to copy object
      #   object.copy_to('src-bucket/src-key', multipart_copy: true)
      #
      def copy_to(target, options = {})
        ObjectCopier.new(self, options).copy_to(target, options)
      end

      # Copies and deletes the current object. The object will only be
      # deleted if the copy operation succeeds.
      # @param (see Object#copy_to)
      # @option (see Object#copy_to)
      # @return [void]
      # @see Object#copy_to
      # @see Object#delete
      def move_to(target, options = {})
        copy_to(target, options)
        delete
      end

      # Creates a {PresignedPost} that makes it easy to upload a file from
      # a web browser direct to Amazon S3 using an HTML post form with
      # a file field.
      #
      # See the {PresignedPost} documentation for more information.
      #
      # @option (see PresignedPost#initialize)
      # @return [PresignedPost]
      # @see PresignedPost
      def presigned_post(options = {})
        PresignedPost.new(
          client.config.credentials,
          client.config.region,
          bucket_name,
          {
            key: key,
            url: bucket.url,
          }.merge(options)
        )
      end

      # Generates a pre-signed URL for this object.
      #
      # @example Pre-signed GET URL, valid for one hour
      #
      #   obj.presigned_url(:get, expires_in: 3600)
      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
      #
      # @example Pre-signed PUT with a canned ACL
      #
      #   # the object uploaded using this URL will be publicly accessible
      #   obj.presigned_url(:put, acl: 'public-read')
      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
      #
      # @param [Symbol] http_method
      #   The HTTP method to generate a presigned URL for. Valid values
      #   are `:get`, `:put`, `:head`, and `:delete`.
      #
      # @param [Hash] params
      #   Additional request parameters to use when generating the pre-signed
      #   URL. See the related documentation in {Client} for accepted
      #   params.
      #
      #   | HTTP Method   | Client Method          |
      #   |---------------|------------------------|
      #   | `:get`        | {Client#get_object}    |
      #   | `:put`        | {Client#put_object}    |
      #   | `:head`       | {Client#head_object}   |
      #   | `:delete`     | {Client#delete_object} |
      #
      # @option params [Boolean] :virtual_host (false) When `true` the
      #   presigned URL will use the bucket name as a virtual host.
      #
      #     bucket = Aws::S3::Bucket.new('my.bucket.com')
      #     bucket.object('key').presigned_url(virtual_host: true)
      #     #=> "http://my.bucket.com/key?..."
      #
      # @option params [Integer] :expires_in (900) Number of seconds before
      #   the pre-signed URL expires. This may not exceed one week (604800
      #   seconds). Note that the pre-signed URL is also only valid as long as
      #   credentials used to sign it are. For example, when using IAM roles,
      #   temporary tokens generated for signing also have a default expiration
      #   which will affect the effective expiration of the pre-signed URL.
      #
      # @raise [ArgumentError] Raised if `:expires_in` exceeds one week
      #   (604800 seconds).
      #
      # @return [String]
      #
      def presigned_url(http_method, params = {})
        presigner = Presigner.new(client: client)
        # e.g. :get becomes "get_object" — the Client method to presign.
        presigner.presigned_url("#{http_method.downcase}_object", params.merge(
          bucket: bucket_name,
          key: key,
        ))
      end

      # Returns the public (un-signed) URL for this object.
      #
      #     s3.bucket('bucket-name').object('obj-key').public_url
      #     #=> "https://bucket-name.s3.amazonaws.com/obj-key"
      #
      # To use virtual hosted bucket url (disables https):
      #
      #     s3.bucket('my.bucket.com').object('key').public_url(virtual_host: true)
      #     #=> "http://my.bucket.com/key"
      #
      # @option options [Boolean] :virtual_host (false) When `true`, the bucket
      #   name will be used as the host name. This is useful when you have
      #   a CNAME configured for the bucket.
      #
      # @return [String]
      def public_url(options = {})
        url = URI.parse(bucket.url(options))
        url.path += '/' unless url.path[-1] == '/'
        # URI-escape each path segment of the key, preserving the '/'
        # separators themselves.
        url.path += key.gsub(/[^\/]+/) { |s| Seahorse::Util.uri_escape(s) }
        url.to_s
      end

      # Uploads a file from disk to the current object in S3.
      #
      #     # small files are uploaded in a single API call
      #     obj.upload_file('/path/to/file')
      #
      # Files larger than `:multipart_threshold` are uploaded using the
      # Amazon S3 multipart upload APIs.
      #
      #     # large files are automatically split into parts
      #     # and the parts are uploaded in parallel
      #     obj.upload_file('/path/to/very_large_file')
      #
      # @param [String,Pathname,File,Tempfile] source A file or path to a file
      #   on the local file system that should be uploaded to this object.
      #   If you pass an open file object, then it is your responsibility
      #   to close the file object once the upload completes.
      #
      # @option options [Integer] :multipart_threshold (15728640) Files larger
      #   than `:multipart_threshold` are uploaded using the S3 multipart APIs.
      #   Default threshold is 15MB.
      #
      # @raise [MultipartUploadError] If an object is being uploaded in
      #   parts, and the upload can not be completed, then the upload is
      #   aborted and this error is raised. The raised error has a `#errors`
      #   method that returns the failures that caused the upload to be
      #   aborted.
      #
      # @return [Boolean] Returns `true` when the object is uploaded
      #   without any errors.
      #
      def upload_file(source, options = {})
        # Dup so deleting :multipart_threshold does not mutate the
        # caller's hash; remaining options flow through to the client.
        uploading_options = options.dup
        uploader = FileUploader.new(
          multipart_threshold: uploading_options.delete(:multipart_threshold),
          client: client)
        uploader.upload(source, uploading_options.merge(bucket: bucket_name, key: key))
        true
      end

      # Downloads a file in S3 to a path on disk.
      #
      #     # small files (< 5MB) are downloaded in a single API call
      #     obj.download_file('/path/to/file')
      #
      # Files larger than 5MB are downloaded using multipart method
      #
      #     # large files are split into parts
      #     # and the parts are downloaded in parallel
      #     obj.download_file('/path/to/very_large_file')
      #
      # @param [String] destination Where to download the file to
      #
      # @option options [String] :mode `auto`, `single_request`, `get_range`
      #   `single_request` mode forces only 1 GET request is made in download,
      #   `get_range` mode allows `chunk_size` parameter to configured in
      #   customizing each range size in multipart_download,
      #   By default, `auto` mode is enabled, which performs multipart_download
      #
      # @option options [Integer] :chunk_size required in get_range mode
      #
      # @option options [Integer] :thread_count Customize threads used in
      #   multipart download, if not provided, 10 is default value
      #
      # @return [Boolean] Returns `true` when the file is downloaded
      #   without any errors.
      def download_file(destination, options = {})
        downloader = FileDownloader.new(client: client)
        downloader.download(
          destination, options.merge(bucket: bucket_name, key: key))
        true
      end
    end
  end
end