aws-sdk-s3 1.75.0
- checksums.yaml +7 -0
- data/lib/aws-sdk-s3.rb +73 -0
- data/lib/aws-sdk-s3/bucket.rb +861 -0
- data/lib/aws-sdk-s3/bucket_acl.rb +277 -0
- data/lib/aws-sdk-s3/bucket_cors.rb +262 -0
- data/lib/aws-sdk-s3/bucket_lifecycle.rb +264 -0
- data/lib/aws-sdk-s3/bucket_lifecycle_configuration.rb +283 -0
- data/lib/aws-sdk-s3/bucket_logging.rb +251 -0
- data/lib/aws-sdk-s3/bucket_notification.rb +293 -0
- data/lib/aws-sdk-s3/bucket_policy.rb +242 -0
- data/lib/aws-sdk-s3/bucket_region_cache.rb +81 -0
- data/lib/aws-sdk-s3/bucket_request_payment.rb +236 -0
- data/lib/aws-sdk-s3/bucket_tagging.rb +251 -0
- data/lib/aws-sdk-s3/bucket_versioning.rb +312 -0
- data/lib/aws-sdk-s3/bucket_website.rb +292 -0
- data/lib/aws-sdk-s3/client.rb +11818 -0
- data/lib/aws-sdk-s3/client_api.rb +3014 -0
- data/lib/aws-sdk-s3/customizations.rb +34 -0
- data/lib/aws-sdk-s3/customizations/bucket.rb +162 -0
- data/lib/aws-sdk-s3/customizations/multipart_upload.rb +44 -0
- data/lib/aws-sdk-s3/customizations/object.rb +389 -0
- data/lib/aws-sdk-s3/customizations/object_summary.rb +85 -0
- data/lib/aws-sdk-s3/customizations/types/list_object_versions_output.rb +13 -0
- data/lib/aws-sdk-s3/encryption.rb +21 -0
- data/lib/aws-sdk-s3/encryption/client.rb +375 -0
- data/lib/aws-sdk-s3/encryption/decrypt_handler.rb +190 -0
- data/lib/aws-sdk-s3/encryption/default_cipher_provider.rb +65 -0
- data/lib/aws-sdk-s3/encryption/default_key_provider.rb +40 -0
- data/lib/aws-sdk-s3/encryption/encrypt_handler.rb +61 -0
- data/lib/aws-sdk-s3/encryption/errors.rb +15 -0
- data/lib/aws-sdk-s3/encryption/io_auth_decrypter.rb +58 -0
- data/lib/aws-sdk-s3/encryption/io_decrypter.rb +36 -0
- data/lib/aws-sdk-s3/encryption/io_encrypter.rb +71 -0
- data/lib/aws-sdk-s3/encryption/key_provider.rb +31 -0
- data/lib/aws-sdk-s3/encryption/kms_cipher_provider.rb +75 -0
- data/lib/aws-sdk-s3/encryption/materials.rb +60 -0
- data/lib/aws-sdk-s3/encryption/utils.rb +81 -0
- data/lib/aws-sdk-s3/encryptionV2/client.rb +388 -0
- data/lib/aws-sdk-s3/encryptionV2/decrypt_handler.rb +198 -0
- data/lib/aws-sdk-s3/encryptionV2/default_cipher_provider.rb +103 -0
- data/lib/aws-sdk-s3/encryptionV2/default_key_provider.rb +38 -0
- data/lib/aws-sdk-s3/encryptionV2/encrypt_handler.rb +66 -0
- data/lib/aws-sdk-s3/encryptionV2/errors.rb +13 -0
- data/lib/aws-sdk-s3/encryptionV2/io_auth_decrypter.rb +56 -0
- data/lib/aws-sdk-s3/encryptionV2/io_decrypter.rb +35 -0
- data/lib/aws-sdk-s3/encryptionV2/io_encrypter.rb +71 -0
- data/lib/aws-sdk-s3/encryptionV2/key_provider.rb +29 -0
- data/lib/aws-sdk-s3/encryptionV2/kms_cipher_provider.rb +99 -0
- data/lib/aws-sdk-s3/encryptionV2/materials.rb +58 -0
- data/lib/aws-sdk-s3/encryptionV2/utils.rb +116 -0
- data/lib/aws-sdk-s3/encryption_v2.rb +20 -0
- data/lib/aws-sdk-s3/errors.rb +115 -0
- data/lib/aws-sdk-s3/event_streams.rb +69 -0
- data/lib/aws-sdk-s3/file_downloader.rb +142 -0
- data/lib/aws-sdk-s3/file_part.rb +78 -0
- data/lib/aws-sdk-s3/file_uploader.rb +70 -0
- data/lib/aws-sdk-s3/legacy_signer.rb +189 -0
- data/lib/aws-sdk-s3/multipart_file_uploader.rb +227 -0
- data/lib/aws-sdk-s3/multipart_stream_uploader.rb +173 -0
- data/lib/aws-sdk-s3/multipart_upload.rb +401 -0
- data/lib/aws-sdk-s3/multipart_upload_error.rb +18 -0
- data/lib/aws-sdk-s3/multipart_upload_part.rb +423 -0
- data/lib/aws-sdk-s3/object.rb +1422 -0
- data/lib/aws-sdk-s3/object_acl.rb +333 -0
- data/lib/aws-sdk-s3/object_copier.rb +101 -0
- data/lib/aws-sdk-s3/object_multipart_copier.rb +182 -0
- data/lib/aws-sdk-s3/object_summary.rb +1181 -0
- data/lib/aws-sdk-s3/object_version.rb +550 -0
- data/lib/aws-sdk-s3/plugins/accelerate.rb +87 -0
- data/lib/aws-sdk-s3/plugins/bucket_arn.rb +212 -0
- data/lib/aws-sdk-s3/plugins/bucket_dns.rb +91 -0
- data/lib/aws-sdk-s3/plugins/bucket_name_restrictions.rb +45 -0
- data/lib/aws-sdk-s3/plugins/dualstack.rb +74 -0
- data/lib/aws-sdk-s3/plugins/expect_100_continue.rb +28 -0
- data/lib/aws-sdk-s3/plugins/get_bucket_location_fix.rb +25 -0
- data/lib/aws-sdk-s3/plugins/http_200_errors.rb +55 -0
- data/lib/aws-sdk-s3/plugins/iad_regional_endpoint.rb +62 -0
- data/lib/aws-sdk-s3/plugins/location_constraint.rb +35 -0
- data/lib/aws-sdk-s3/plugins/md5s.rb +84 -0
- data/lib/aws-sdk-s3/plugins/redirects.rb +45 -0
- data/lib/aws-sdk-s3/plugins/s3_host_id.rb +30 -0
- data/lib/aws-sdk-s3/plugins/s3_signer.rb +222 -0
- data/lib/aws-sdk-s3/plugins/sse_cpk.rb +70 -0
- data/lib/aws-sdk-s3/plugins/streaming_retry.rb +118 -0
- data/lib/aws-sdk-s3/plugins/url_encoded_keys.rb +97 -0
- data/lib/aws-sdk-s3/presigned_post.rb +686 -0
- data/lib/aws-sdk-s3/presigner.rb +253 -0
- data/lib/aws-sdk-s3/resource.rb +117 -0
- data/lib/aws-sdk-s3/types.rb +13154 -0
- data/lib/aws-sdk-s3/waiters.rb +243 -0
- metadata +184 -0
data/lib/aws-sdk-s3/customizations.rb
@@ -0,0 +1,34 @@
# frozen_string_literal: true

# utility classes
require 'aws-sdk-s3/bucket_region_cache'
require 'aws-sdk-s3/encryption'
require 'aws-sdk-s3/encryption_v2'
require 'aws-sdk-s3/file_part'
require 'aws-sdk-s3/file_uploader'
require 'aws-sdk-s3/file_downloader'
require 'aws-sdk-s3/legacy_signer'
require 'aws-sdk-s3/multipart_file_uploader'
require 'aws-sdk-s3/multipart_stream_uploader'
require 'aws-sdk-s3/multipart_upload_error'
require 'aws-sdk-s3/object_copier'
require 'aws-sdk-s3/object_multipart_copier'
require 'aws-sdk-s3/presigned_post'
require 'aws-sdk-s3/presigner'

# customizations to generated classes
require 'aws-sdk-s3/customizations/bucket'
require 'aws-sdk-s3/customizations/object'
require 'aws-sdk-s3/customizations/object_summary'
require 'aws-sdk-s3/customizations/multipart_upload'
require 'aws-sdk-s3/customizations/types/list_object_versions_output'

[
  Aws::S3::Object::Collection,
  Aws::S3::ObjectSummary::Collection,
  Aws::S3::ObjectVersion::Collection,
].each do |klass|
  klass.send(:alias_method, :delete, :batch_delete!)
  klass.send(:extend, Aws::Deprecations)
  klass.send(:deprecated, :delete, use: :batch_delete!)
end
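
The loop above gives each batch collection a deprecated `#delete` alias for `#batch_delete!`. A minimal sketch of the effect, assuming configured credentials; the bucket and prefix names are hypothetical:

    bucket = Aws::S3::Bucket.new('my-bucket')

    # Preferred: explicit batch delete of every object under a prefix.
    bucket.objects(prefix: 'tmp/').batch_delete!

    # Still works, but emits a deprecation warning and delegates to #batch_delete!.
    bucket.objects(prefix: 'tmp/').delete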
data/lib/aws-sdk-s3/customizations/bucket.rb
@@ -0,0 +1,162 @@
# frozen_string_literal: true

require 'uri'

module Aws
  module S3
    class Bucket
      # Save the old initialize method so that we can call 'super'.
      old_initialize = instance_method(:initialize)
      # Make the method redefinable
      alias_method :initialize, :initialize
      # Define a new initialize method that extracts out a bucket ARN.
      define_method(:initialize) do |*args|
        old_initialize.bind(self).call(*args)
        bucket_name, region, arn = Plugins::BucketARN.resolve_arn!(
          name,
          client.config.region,
          client.config.s3_use_arn_region
        )
        @name = bucket_name
        @client.config.region = region
        @arn = arn
      end

      # Deletes all objects and versioned objects from this bucket
      #
      # @example
      #
      #   bucket.clear!
      #
      # @return [void]
      def clear!
        object_versions.batch_delete!
      end

      # Deletes all objects and versioned objects from this bucket and
      # then deletes the bucket.
      #
      # @example
      #
      #   bucket.delete!
      #
      # @option options [Integer] :max_attempts (3) Maximum number of times to
      #   attempt to delete the empty bucket before raising
      #   `Aws::S3::Errors::BucketNotEmpty`.
      #
      # @option options [Float] :initial_wait (1.3) Seconds to wait before
      #   retrying the call to delete the bucket, exponentially increased for
      #   each attempt.
      #
      # @return [void]
      def delete!(options = {})
        options = {
          initial_wait: 1.3,
          max_attempts: 3
        }.merge(options)

        attempts = 0
        begin
          clear!
          delete
        rescue Errors::BucketNotEmpty
          attempts += 1
          raise if attempts >= options[:max_attempts]

          Kernel.sleep(options[:initial_wait]**attempts)
          retry
        end
      end
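
      # (Illustrative note, not part of the gem source.) With the defaults
      # above, the wait between attempts is initial_wait**attempts: 1.3s
      # after the first BucketNotEmpty, 1.69s after the second, and the
      # third failure re-raises. A hedged sketch of a more patient setup:
      #
      #   bucket.delete!(max_attempts: 5, initial_wait: 2.0)
      #   # sleeps 2s, 4s, 8s, 16s between attempts before giving up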

      # Returns a public URL for this bucket.
      #
      # @example
      #
      #   bucket = s3.bucket('bucket-name')
      #   bucket.url
      #   #=> "https://bucket-name.s3.amazonaws.com"
      #
      # It will also work when provided an Access Point ARN.
      #
      # @example
      #
      #   bucket = s3.bucket(
      #     'arn:aws:s3:us-east-1:123456789012:accesspoint:myendpoint'
      #   )
      #   bucket.url
      #   #=> "https://myendpoint-123456789012.s3-accesspoint.us-east-1.amazonaws.com"
      #
      # You can pass `virtual_host: true` to use the bucket name as the
      # host name.
      #
      #     bucket = s3.bucket('my.bucket.com')
      #     bucket.url(virtual_host: true)
      #     #=> "http://my.bucket.com"
      #
      # @option options [Boolean] :virtual_host (false) When `true`,
      #   the bucket name will be used as the host name. This is useful
      #   when you have a CNAME configured for this bucket.
      #
      # @return [String] the URL for this bucket.
      def url(options = {})
        if options[:virtual_host]
          "http://#{name}"
        elsif @arn
          Plugins::BucketARN.resolve_url!(URI.parse(s3_bucket_url), @arn).to_s
        else
          s3_bucket_url
        end
      end

      # Creates a {PresignedPost} that makes it easy to upload a file from
      # a web browser direct to Amazon S3 using an HTML post form with
      # a file field.
      #
      # See the {PresignedPost} documentation for more information.
      # @note You must specify `:key` or `:key_starts_with`. All other options
      #   are optional.
      # @option (see PresignedPost#initialize)
      # @return [PresignedPost]
      # @see PresignedPost
      def presigned_post(options = {})
        PresignedPost.new(
          client.config.credentials,
          client.config.region,
          name,
          { url: url }.merge(options)
        )
      end
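
      # (Illustrative sketch, not part of the gem source.) Building a browser
      # upload form target; the key pattern and size range are hypothetical:
      #
      #   post = bucket.presigned_post(
      #     key: 'uploads/${filename}',
      #     content_length_range: 0..10_485_760
      #   )
      #   post.url     #=> the form's action URL
      #   post.fields  #=> hidden fields to embed in the POST form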

      # @api private
      def load
        @data = client.list_buckets.buckets.find { |b| b.name == name }
        raise "unable to load bucket #{name}" if @data.nil?

        self
      end

      private

      def s3_bucket_url
        url = client.config.endpoint.dup
        if bucket_as_hostname?(url.scheme == 'https')
          url.host = "#{name}.#{url.host}"
        else
          url.path += '/' unless url.path[-1] == '/'
          url.path += Seahorse::Util.uri_escape(name)
        end
        if (client.config.region == 'us-east-1') &&
           (client.config.s3_us_east_1_regional_endpoint == 'legacy')
          url.host = Plugins::IADRegionalEndpoint.legacy_host(url.host)
        end
        url.to_s
      end

      def bucket_as_hostname?(https)
        Plugins::BucketDns.dns_compatible?(name, https) &&
          !client.config.force_path_style
      end

    end
  end
end
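
Worth noting about `#load` above: it calls ListBuckets and scans for this
bucket's entry, which can be slow for accounts with many buckets. A hedged
sketch of a cheaper existence check via the resource interface's
HeadBucket-based helper:

    bucket = Aws::S3::Bucket.new('my-bucket')
    bucket.exists? #=> true or false, via a HeadBucket request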
data/lib/aws-sdk-s3/customizations/multipart_upload.rb
@@ -0,0 +1,44 @@
# frozen_string_literal: true

module Aws
  module S3
    class MultipartUpload

      alias_method :basic_complete, :complete

      # Completes the upload; requires a list of completed parts. You can
      # provide the list of parts with `:part_number` and `:etag` values.
      #
      #     upload.complete(multipart_upload: { parts: [
      #       { part_number: 1, etag: 'etag1' },
      #       { part_number: 2, etag: 'etag2' },
      #       ...
      #     ] })
      #
      # Alternatively, you can pass **`compute_parts: true`** and the part
      # list will be computed by calling {Client#list_parts}.
      #
      #     upload.complete(compute_parts: true)
      #
      # @option options [Boolean] :compute_parts (false) When `true`,
      #   the {Client#list_parts} method will be called to determine
      #   the list of required part numbers and their ETags.
      #
      def complete(options = {})
        if options.delete(:compute_parts)
          options[:multipart_upload] = { parts: compute_parts }
        end
        basic_complete(options)
      end

      private

      def compute_parts
        parts.sort_by(&:part_number).each.with_object([]) do |part, part_list|
          part_list << { part_number: part.part_number, etag: part.etag }
        end
      end

    end
  end
end
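
A hedged end-to-end sketch of the `compute_parts: true` path; the bucket,
key, and body values are illustrative:

    obj = Aws::S3::Bucket.new('my-bucket').object('big-file')
    upload = obj.initiate_multipart_upload

    # every part except the last must be at least 5MB
    upload.part(1).upload(body: 'a' * 5 * 1024 * 1024)
    upload.part(2).upload(body: 'tail')

    # no manual ETag bookkeeping; ListParts supplies the part list
    upload.complete(compute_parts: true)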
data/lib/aws-sdk-s3/customizations/object.rb
@@ -0,0 +1,389 @@
# frozen_string_literal: true

module Aws
  module S3
    class Object
      alias size content_length

      # Make the method redefinable
      alias_method :copy_from, :copy_from

      # Copies another object to this object. Use `multipart_copy: true`
      # for large objects. This is required for objects that exceed 5GB.
      #
      # @param [S3::Object, S3::ObjectVersion, S3::ObjectSummary, String, Hash]
      #   source Where to copy object data from. `source` must be one of the
      #   following:
      #
      #   * {Aws::S3::Object}
      #   * {Aws::S3::ObjectSummary}
      #   * {Aws::S3::ObjectVersion}
      #   * Hash - with `:bucket` and `:key` and optional `:version_id`
      #   * String - formatted like `"source-bucket-name/uri-escaped-key"`
      #     or `"source-bucket-name/uri-escaped-key?versionId=version-id"`
      #
      # @option options [Boolean] :multipart_copy (false) When `true`,
      #   the object will be copied using the multipart APIs. This is
      #   necessary for objects larger than 5GB and can provide
      #   performance improvements on large objects. Amazon S3 does
      #   not accept multipart copies for objects smaller than 5MB.
      #
      # @option options [Integer] :content_length Only used when
      #   `:multipart_copy` is `true`. Passing this option avoids a HEAD
      #   request to query the source object size. Raises an `ArgumentError` if
      #   this option is provided when `:multipart_copy` is `false` or not set.
      #
      # @option options [S3::Client] :copy_source_client Only used when
      #   `:multipart_copy` is `true` and the source object is in a
      #   different region. You do not need to specify this option
      #   if you have provided `:content_length`.
      #
      # @option options [String] :copy_source_region Only used when
      #   `:multipart_copy` is `true` and the source object is in a
      #   different region. You do not need to specify this option
      #   if you have provided a `:copy_source_client` or a `:content_length`.
      #
      # @example Basic object copy
      #
      #   bucket = Aws::S3::Bucket.new('target-bucket')
      #   object = bucket.object('target-key')
      #
      #   # source as String
      #   object.copy_from('source-bucket/source-key')
      #
      #   # source as Hash
      #   object.copy_from(bucket: 'source-bucket', key: 'source-key')
      #
      #   # source as Aws::S3::Object
      #   object.copy_from(bucket.object('source-key'))
      #
      # @example Managed copy of large objects
      #
      #   # uses multipart upload APIs to copy object
      #   object.copy_from('src-bucket/src-key', multipart_copy: true)
      #
      # @see #copy_to
      #
      def copy_from(source, options = {})
        if Hash === source && source[:copy_source]
          # for backwards compatibility
          @client.copy_object(source.merge(bucket: bucket_name, key: key))
        else
          ObjectCopier.new(self, options).copy_from(source, options)
        end
      end
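
      # (Illustrative sketch, not part of the gem source.) A cross-region
      # multipart copy; the region and bucket names are hypothetical:
      #
      #   src_client = Aws::S3::Client.new(region: 'eu-west-1')
      #   object.copy_from('eu-bucket/huge-key',
      #                    multipart_copy: true,
      #                    copy_source_client: src_client)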

      # Copies this object to another object. Use `multipart_copy: true`
      # for large objects. This is required for objects that exceed 5GB.
      #
      # @note If you need to copy to a bucket in a different region, use
      #   {#copy_from}.
      #
      # @param [S3::Object, String, Hash] target Where to copy the object
      #   data to. `target` must be one of the following:
      #
      #   * {Aws::S3::Object}
      #   * Hash - with `:bucket` and `:key`
      #   * String - formatted like `"target-bucket-name/target-key"`
      #
      # @example Basic object copy
      #
      #   bucket = Aws::S3::Bucket.new('source-bucket')
      #   object = bucket.object('source-key')
      #
      #   # target as String
      #   object.copy_to('target-bucket/target-key')
      #
      #   # target as Hash
      #   object.copy_to(bucket: 'target-bucket', key: 'target-key')
      #
      #   # target as Aws::S3::Object
      #   object.copy_to(bucket.object('target-key'))
      #
      # @example Managed copy of large objects
      #
      #   # uses multipart upload APIs to copy object
      #   object.copy_to('target-bucket/target-key', multipart_copy: true)
      #
      def copy_to(target, options = {})
        ObjectCopier.new(self, options).copy_to(target, options)
      end

      # Copies and deletes the current object. The object will only be deleted
      # if the copy operation succeeds.
      #
      # @param (see Object#copy_to)
      # @option (see Object#copy_to)
      # @return [void]
      # @see Object#copy_to
      # @see Object#delete
      def move_to(target, options = {})
        copy_to(target, options)
        delete
      end
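
      # (Illustrative sketch, not part of the gem source.) Renaming a key
      # within a bucket; the names are hypothetical:
      #
      #   obj = Aws::S3::Bucket.new('my-bucket').object('old/key')
      #   obj.move_to('my-bucket/new/key') # 'old/key' is deleted only if the copy succeeds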

      # Creates a {PresignedPost} that makes it easy to upload a file from
      # a web browser direct to Amazon S3 using an HTML post form with
      # a file field.
      #
      # See the {PresignedPost} documentation for more information.
      #
      # @option (see PresignedPost#initialize)
      # @return [PresignedPost]
      # @see PresignedPost
      def presigned_post(options = {})
        PresignedPost.new(
          client.config.credentials,
          client.config.region,
          bucket_name,
          { key: key, url: bucket.url }.merge(options)
        )
      end

      # Generates a pre-signed URL for this object.
      #
      # @example Pre-signed GET URL, valid for one hour
      #
      #   obj.presigned_url(:get, expires_in: 3600)
      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
      #
      # @example Pre-signed PUT with a canned ACL
      #
      #   # the object uploaded using this URL will be publicly accessible
      #   obj.presigned_url(:put, acl: 'public-read')
      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
      #
      # @param [Symbol] http_method
      #   The HTTP method to generate a presigned URL for. Valid values
      #   are `:get`, `:put`, `:head`, and `:delete`.
      #
      # @param [Hash] params
      #   Additional request parameters to use when generating the pre-signed
      #   URL. See the related documentation in {Client} for accepted
      #   params.
      #
      #   | HTTP Method   | Client Method          |
      #   |---------------|------------------------|
      #   | `:get`        | {Client#get_object}    |
      #   | `:put`        | {Client#put_object}    |
      #   | `:head`       | {Client#head_object}   |
      #   | `:delete`     | {Client#delete_object} |
      #
      # @option params [Boolean] :virtual_host (false) When `true` the
      #   presigned URL will use the bucket name as a virtual host.
      #
      #     bucket = Aws::S3::Bucket.new('my.bucket.com')
      #     bucket.object('key').presigned_url(:get, virtual_host: true)
      #     #=> "http://my.bucket.com/key?..."
      #
      # @option params [Integer] :expires_in (900) Number of seconds before
      #   the pre-signed URL expires. This may not exceed one week (604800
      #   seconds). Note that the pre-signed URL is also only valid as long as
      #   the credentials used to sign it are. For example, when using IAM
      #   roles, temporary tokens generated for signing also have a default
      #   expiration which will affect the effective expiration of the
      #   pre-signed URL.
      #
      # @raise [ArgumentError] Raised if `:expires_in` exceeds one week
      #   (604800 seconds).
      #
      # @return [String]
      #
      def presigned_url(http_method, params = {})
        presigner = Presigner.new(client: client)
        presigner.presigned_url(
          "#{http_method.downcase}_object",
          params.merge(bucket: bucket_name, key: key)
        )
      end
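
      # (Illustrative sketch, not part of the gem source.) Consuming a
      # pre-signed PUT URL with Ruby's standard library, so the uploader
      # needs no SDK; the file name is hypothetical:
      #
      #   require 'net/http'
      #
      #   url = URI(obj.presigned_url(:put, expires_in: 300))
      #   Net::HTTP.start(url.host, url.port, use_ssl: true) do |http|
      #     http.send_request('PUT', url.request_uri, File.read('local.txt'))
      #   end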

      # Returns the public (un-signed) URL for this object.
      #
      #     s3.bucket('bucket-name').object('obj-key').public_url
      #     #=> "https://bucket-name.s3.amazonaws.com/obj-key"
      #
      # To use a virtual hosted bucket URL (disables https):
      #
      #     s3.bucket('my.bucket.com').object('key')
      #       .public_url(virtual_host: true)
      #     #=> "http://my.bucket.com/key"
      #
      # @option options [Boolean] :virtual_host (false) When `true`, the bucket
      #   name will be used as the host name. This is useful when you have
      #   a CNAME configured for the bucket.
      #
      # @return [String]
      def public_url(options = {})
        url = URI.parse(bucket.url(options))
        url.path += '/' unless url.path[-1] == '/'
        url.path += key.gsub(/[^\/]+/) { |s| Seahorse::Util.uri_escape(s) }
        url.to_s
      end

      # Uploads a stream in a streaming fashion to the current object in S3.
      #
      # Passed chunks are automatically split into multipart upload parts
      # and the parts are uploaded in parallel. This allows for streaming
      # uploads that never touch the disk.
      #
      # Note that this is known to have issues in JRuby until jruby-9.1.15.0,
      # so avoid using this with older versions of JRuby.
      #
      # @example Streaming chunks of data
      #   obj.upload_stream do |write_stream|
      #     10.times { write_stream << 'foo' }
      #   end
      # @example Streaming a command's output
      #   obj.upload_stream do |write_stream|
      #     IO.copy_stream(IO.popen('ls'), write_stream)
      #   end
      # @example Streaming from STDIN
      #   obj.upload_stream do |write_stream|
      #     IO.copy_stream(STDIN, write_stream)
      #   end
      #
      # @option options [Integer] :thread_count (10) The number of parallel
      #   multipart uploads
      #
      # @option options [Boolean] :tempfile (false) Normally read data is
      #   stored in memory when building the parts in order to complete the
      #   underlying multipart upload. By passing `tempfile: true`, the data
      #   read will be temporarily stored on disk, greatly reducing the
      #   memory footprint.
      #
      # @option options [Integer] :part_size (5242880)
      #   Defines how large each part, except the last, should be.
      #   The default `:part_size` is `5 * 1024 * 1024`.
      #
      # @raise [MultipartUploadError] If an object is being uploaded in
      #   parts, and the upload can not be completed, then the upload is
      #   aborted and this error is raised. The raised error has a `#errors`
      #   method that returns the failures that caused the upload to be
      #   aborted.
      #
      # @return [Boolean] Returns `true` when the object is uploaded
      #   without any errors.
      #
      def upload_stream(options = {}, &block)
        uploading_options = options.dup
        uploader = MultipartStreamUploader.new(
          client: client,
          thread_count: uploading_options.delete(:thread_count),
          tempfile: uploading_options.delete(:tempfile),
          part_size: uploading_options.delete(:part_size)
        )
        uploader.upload(
          uploading_options.merge(bucket: bucket_name, key: key),
          &block
        )
        true
      end
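
      # (Illustrative sketch, not part of the gem source.) A large streaming
      # upload buffered through tempfiles to cap memory use; the path and
      # part size are hypothetical:
      #
      #   obj.upload_stream(tempfile: true, part_size: 10 * 1024 * 1024) do |ws|
      #     IO.copy_stream(File.open('/var/log/huge.log'), ws)
      #   end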

      # Uploads a file from disk to the current object in S3.
      #
      #     # small files are uploaded in a single API call
      #     obj.upload_file('/path/to/file')
      #
      # Files larger than `:multipart_threshold` are uploaded using the
      # Amazon S3 multipart upload APIs.
      #
      #     # large files are automatically split into parts
      #     # and the parts are uploaded in parallel
      #     obj.upload_file('/path/to/very_large_file')
      #
      # The response of the S3 upload API is yielded if a block is given.
      #
      #     # API response will have etag value of the file
      #     obj.upload_file('/path/to/file') do |response|
      #       etag = response.etag
      #     end
      #
      # You can provide a callback to monitor progress of the upload:
      #
      #     # bytes and totals are each an array with 1 entry per part
      #     progress = Proc.new do |bytes, totals|
      #       parts = bytes.map.with_index do |b, i|
      #         "Part #{i + 1}: #{b} / #{totals[i]}"
      #       end
      #       puts parts.join(' ') + " Total: #{100.0 * bytes.sum / totals.sum}%"
      #     end
      #     obj.upload_file('/path/to/file', progress_callback: progress)
      #
      # @param [String, Pathname, File, Tempfile] source A file on the local
      #   file system that will be uploaded as this object. This can either be
      #   a String or Pathname to the file, an open File object, or an open
      #   Tempfile object. If you pass an open File or Tempfile object, then
      #   you are responsible for closing it after the upload completes. When
      #   using an open Tempfile, rewind it before uploading or else the object
      #   will be empty.
      #
      # @option options [Integer] :multipart_threshold (15728640) Files larger
      #   than `:multipart_threshold` are uploaded using the S3 multipart APIs.
      #   The default threshold is 15MB.
      #
      # @option options [Integer] :thread_count (10) The number of parallel
      #   multipart uploads. This option is not used if the file is smaller
      #   than `:multipart_threshold`.
      #
      # @option options [Proc] :progress_callback
      #   A Proc that will be called when each chunk of the upload is sent.
      #   It will be invoked with `[bytes_read]` and `[total_sizes]`.
      #
      # @raise [MultipartUploadError] If an object is being uploaded in
      #   parts, and the upload can not be completed, then the upload is
      #   aborted and this error is raised. The raised error has a `#errors`
      #   method that returns the failures that caused the upload to be
      #   aborted.
      #
      # @return [Boolean] Returns `true` when the object is uploaded
      #   without any errors.
      def upload_file(source, options = {})
        uploading_options = options.dup
        uploader = FileUploader.new(
          multipart_threshold: uploading_options.delete(:multipart_threshold),
          client: client
        )
        response = uploader.upload(
          source,
          uploading_options.merge(bucket: bucket_name, key: key)
        )
        yield response if block_given?
        true
      end

      # Downloads a file in S3 to a path on disk.
      #
      #     # small files (< 5MB) are downloaded in a single API call
      #     obj.download_file('/path/to/file')
      #
      # Files larger than 5MB are downloaded using the multipart method.
      #
      #     # large files are split into parts
      #     # and the parts are downloaded in parallel
      #     obj.download_file('/path/to/very_large_file')
      #
      # @param [String] destination Where to download the file to.
      #
      # @option options [String] mode ("auto") One of `auto`, `single_request`,
      #   or `get_range`. `single_request` mode forces the download to be made
      #   with a single GET request; `get_range` mode allows a `chunk_size`
      #   option to configure the size of each range in the multipart
      #   download. The default `auto` mode performs a multipart download
      #   for large objects.
      #
      # @option options [Integer] chunk_size Required in `get_range` mode;
      #   the size, in bytes, of each range request.
      #
      # @option options [Integer] thread_count (10) Customize threads used in
      #   the multipart download.
      #
      # @option options [String] version_id The object version id used to
      #   retrieve the object. For more about object versioning, see:
      #   https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html
      #
      # @return [Boolean] Returns `true` when the file is downloaded without
      #   any errors.
      def download_file(destination, options = {})
        downloader = FileDownloader.new(client: client)
        downloader.download(
          destination,
          options.merge(bucket: bucket_name, key: key)
        )
        true
      end
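
      # (Illustrative sketch, not part of the gem source.) The three download
      # modes side by side; the paths and 8MB chunk size are arbitrary:
      #
      #   obj.download_file('/tmp/a')                          # auto
      #   obj.download_file('/tmp/b', mode: 'single_request')  # one GET
      #   obj.download_file('/tmp/c', mode: 'get_range',
      #                     chunk_size: 8 * 1024 * 1024,
      #                     thread_count: 4)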
    end
  end
end