aws-sdk-s3 1.0.0.rc1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/lib/aws-sdk-s3.rb +66 -0
- data/lib/aws-sdk-s3/bucket.rb +595 -0
- data/lib/aws-sdk-s3/bucket_acl.rb +168 -0
- data/lib/aws-sdk-s3/bucket_cors.rb +146 -0
- data/lib/aws-sdk-s3/bucket_lifecycle.rb +164 -0
- data/lib/aws-sdk-s3/bucket_logging.rb +142 -0
- data/lib/aws-sdk-s3/bucket_notification.rb +187 -0
- data/lib/aws-sdk-s3/bucket_policy.rb +138 -0
- data/lib/aws-sdk-s3/bucket_region_cache.rb +79 -0
- data/lib/aws-sdk-s3/bucket_request_payment.rb +128 -0
- data/lib/aws-sdk-s3/bucket_tagging.rb +143 -0
- data/lib/aws-sdk-s3/bucket_versioning.rb +188 -0
- data/lib/aws-sdk-s3/bucket_website.rb +177 -0
- data/lib/aws-sdk-s3/client.rb +3171 -0
- data/lib/aws-sdk-s3/client_api.rb +1991 -0
- data/lib/aws-sdk-s3/customizations.rb +29 -0
- data/lib/aws-sdk-s3/customizations/bucket.rb +127 -0
- data/lib/aws-sdk-s3/customizations/multipart_upload.rb +42 -0
- data/lib/aws-sdk-s3/customizations/object.rb +257 -0
- data/lib/aws-sdk-s3/customizations/object_summary.rb +65 -0
- data/lib/aws-sdk-s3/customizations/types/list_object_versions_output.rb +11 -0
- data/lib/aws-sdk-s3/encryption.rb +19 -0
- data/lib/aws-sdk-s3/encryption/client.rb +369 -0
- data/lib/aws-sdk-s3/encryption/decrypt_handler.rb +178 -0
- data/lib/aws-sdk-s3/encryption/default_cipher_provider.rb +63 -0
- data/lib/aws-sdk-s3/encryption/default_key_provider.rb +38 -0
- data/lib/aws-sdk-s3/encryption/encrypt_handler.rb +50 -0
- data/lib/aws-sdk-s3/encryption/errors.rb +13 -0
- data/lib/aws-sdk-s3/encryption/io_auth_decrypter.rb +50 -0
- data/lib/aws-sdk-s3/encryption/io_decrypter.rb +29 -0
- data/lib/aws-sdk-s3/encryption/io_encrypter.rb +69 -0
- data/lib/aws-sdk-s3/encryption/key_provider.rb +29 -0
- data/lib/aws-sdk-s3/encryption/kms_cipher_provider.rb +71 -0
- data/lib/aws-sdk-s3/encryption/materials.rb +58 -0
- data/lib/aws-sdk-s3/encryption/utils.rb +79 -0
- data/lib/aws-sdk-s3/errors.rb +23 -0
- data/lib/aws-sdk-s3/file_part.rb +75 -0
- data/lib/aws-sdk-s3/file_uploader.rb +58 -0
- data/lib/aws-sdk-s3/legacy_signer.rb +186 -0
- data/lib/aws-sdk-s3/multipart_file_uploader.rb +187 -0
- data/lib/aws-sdk-s3/multipart_upload.rb +287 -0
- data/lib/aws-sdk-s3/multipart_upload_error.rb +16 -0
- data/lib/aws-sdk-s3/multipart_upload_part.rb +314 -0
- data/lib/aws-sdk-s3/object.rb +942 -0
- data/lib/aws-sdk-s3/object_acl.rb +214 -0
- data/lib/aws-sdk-s3/object_copier.rb +99 -0
- data/lib/aws-sdk-s3/object_multipart_copier.rb +179 -0
- data/lib/aws-sdk-s3/object_summary.rb +794 -0
- data/lib/aws-sdk-s3/object_version.rb +406 -0
- data/lib/aws-sdk-s3/plugins/accelerate.rb +92 -0
- data/lib/aws-sdk-s3/plugins/bucket_dns.rb +89 -0
- data/lib/aws-sdk-s3/plugins/bucket_name_restrictions.rb +23 -0
- data/lib/aws-sdk-s3/plugins/dualstack.rb +70 -0
- data/lib/aws-sdk-s3/plugins/expect_100_continue.rb +29 -0
- data/lib/aws-sdk-s3/plugins/get_bucket_location_fix.rb +23 -0
- data/lib/aws-sdk-s3/plugins/http_200_errors.rb +47 -0
- data/lib/aws-sdk-s3/plugins/location_constraint.rb +33 -0
- data/lib/aws-sdk-s3/plugins/md5s.rb +79 -0
- data/lib/aws-sdk-s3/plugins/redirects.rb +41 -0
- data/lib/aws-sdk-s3/plugins/s3_signer.rb +208 -0
- data/lib/aws-sdk-s3/plugins/sse_cpk.rb +68 -0
- data/lib/aws-sdk-s3/plugins/url_encoded_keys.rb +94 -0
- data/lib/aws-sdk-s3/presigned_post.rb +647 -0
- data/lib/aws-sdk-s3/presigner.rb +160 -0
- data/lib/aws-sdk-s3/resource.rb +96 -0
- data/lib/aws-sdk-s3/types.rb +5750 -0
- data/lib/aws-sdk-s3/waiters.rb +178 -0
- metadata +154 -0
# Utility classes that back the hand-written (non-generated) features of
# the SDK, such as presigned URLs, managed uploads, and client-side
# encryption.
require 'aws-sdk-s3/bucket_region_cache'
require 'aws-sdk-s3/encryption'
require 'aws-sdk-s3/file_part'
require 'aws-sdk-s3/file_uploader'
require 'aws-sdk-s3/legacy_signer'
require 'aws-sdk-s3/multipart_file_uploader'
require 'aws-sdk-s3/multipart_upload_error'
require 'aws-sdk-s3/object_copier'
require 'aws-sdk-s3/object_multipart_copier'
require 'aws-sdk-s3/presigned_post'
require 'aws-sdk-s3/presigner'

# Hand-written extensions layered on top of the generated resource classes.
require 'aws-sdk-s3/customizations/bucket'
require 'aws-sdk-s3/customizations/object'
require 'aws-sdk-s3/customizations/object_summary'
require 'aws-sdk-s3/customizations/multipart_upload'
require 'aws-sdk-s3/customizations/types/list_object_versions_output'

# On each batch-capable collection, expose `#delete` as a deprecated
# alias of `#batch_delete!` so older code keeps working while steering
# callers toward the explicit bang method.
[
  Aws::S3::Object::Collection,
  Aws::S3::ObjectSummary::Collection,
  Aws::S3::ObjectVersion::Collection,
].each do |collection_class|
  collection_class.send(:alias_method, :delete, :batch_delete!)
  collection_class.send(:extend, Aws::Deprecations)
  collection_class.send(:deprecated, :delete, use: :batch_delete!)
end
require 'uri'

module Aws
  module S3
    # Hand-written customizations for the generated Bucket resource class.
    class Bucket

      # Deletes all objects and versioned objects from this bucket.
      #
      # @example
      #
      #   bucket.clear!
      #
      # @return [void]
      def clear!
        # Deleting every object *version* also removes delete markers and
        # un-versioned objects, emptying the bucket completely.
        object_versions.batch_delete!
      end

      # Deletes all objects and versioned objects from this bucket and
      # then deletes the bucket.
      #
      # @example
      #
      #   bucket.delete!
      #
      # @option options [Integer] :max_attempts (3) Maximum number of times to
      #   attempt to delete the empty bucket before raising
      #   `Aws::S3::Errors::BucketNotEmpty`.
      #
      # @option options [Float] :initial_wait (1.3) Seconds to wait before
      #   retrying the call to delete the bucket, exponentially increased for
      #   each attempt.
      #
      # @return [void]
      def delete! options = { }
        options = {
          initial_wait: 1.3,
          max_attempts: 3,
        }.merge(options)

        attempts = 0
        begin
          clear!
          delete
        rescue Errors::BucketNotEmpty
          # New objects may land between clear! and delete; retry with
          # exponential backoff (initial_wait ** attempt_number seconds).
          attempts += 1
          if attempts >= options[:max_attempts]
            raise
          else
            Kernel.sleep(options[:initial_wait] ** attempts)
            retry
          end
        end
      end

      # Returns a public URL for this bucket.
      #
      #     bucket = s3.bucket('bucket-name')
      #     bucket.url
      #     #=> "https://bucket-name.s3.amazonaws.com"
      #
      # You can pass `virtual_host: true` to use the bucket name as the
      # host name.
      #
      #     bucket = s3.bucket('my.bucket.com', virtual_host: true)
      #     bucket.url
      #     #=> "http://my.bucket.com"
      #
      # @option options [Boolean] :virtual_host (false) When `true`,
      #   the bucket name will be used as the host name. This is useful
      #   when you have a CNAME configured for this bucket.
      #
      # @return [String] the URL for this bucket.
      def url(options = {})
        if options[:virtual_host]
          # CNAME-style hosting is plain HTTP only.
          "http://#{name}"
        else
          s3_bucket_url
        end
      end

      # Creates a {PresignedPost} that makes it easy to upload a file from
      # a web browser direct to Amazon S3 using an HTML post form with
      # a file field.
      #
      # See the {PresignedPost} documentation for more information.
      # @note You must specify `:key` or `:key_starts_with`. All other options
      #   are optional.
      # @option (see PresignedPost#initialize)
      # @return [PresignedPost]
      # @see PresignedPost
      def presigned_post(options = {})
        PresignedPost.new(
          client.config.credentials,
          client.config.region,
          name,
          {url: url}.merge(options)
        )
      end

      # Loads resource data by scanning ListBuckets output for this
      # bucket's name (S3 has no HeadBucket-style "describe" that returns
      # bucket attributes).
      # @api private
      def load
        @data = client.list_buckets.buckets.find { |b| b.name == name }
        raise "unable to load bucket #{name}" if @data.nil?
        self
      end

      private

      # Builds the non-virtual-host URL from the client endpoint, using
      # the bucket as a subdomain when it is DNS-compatible, otherwise
      # falling back to path-style addressing.
      def s3_bucket_url
        url = client.config.endpoint.dup
        if bucket_as_hostname?(url.scheme == 'https')
          url.host = "#{name}.#{url.host}"
        else
          url.path += '/' unless url.path[-1] == '/'
          url.path += Seahorse::Util.uri_escape(name)
        end
        url.to_s
      end

      # True when the bucket name can be used as a hostname label
      # (per BucketDns rules; stricter over HTTPS because dots break
      # wildcard TLS certificates) and path-style is not forced.
      def bucket_as_hostname?(https)
        Plugins::BucketDns.dns_compatible?(name, https) &&
          !client.config.force_path_style
      end

    end
  end
end
module Aws
  module S3
    # Hand-written customizations for the generated MultipartUpload
    # resource class.
    class MultipartUpload

      alias_method :basic_complete, :complete

      # Completes the upload, requires a list of completed parts. You can
      # provide the list of parts with `:part_number` and `:etag` values.
      #
      #     upload.complete(multipart_upload: { parts: [
      #       { part_number: 1, etag:'etag1' },
      #       { part_number: 2, etag:'etag2' },
      #       ...
      #     ]})
      #
      # Alternatively, you can pass **`compute_parts: true`** and the part
      # list will be computed by calling {Client#list_parts}.
      #
      #     upload.complete(compute_parts: true)
      #
      # @option options [Boolean] :compute_parts (false) When `true`,
      #   the {Client#list_parts} method will be called to determine
      #   the list of required part numbers and their ETags.
      #
      def complete(options = {})
        # :compute_parts is a local convenience flag; remove it before
        # delegating so it is never sent to the API.
        if options.delete(:compute_parts)
          options[:multipart_upload] = { parts: compute_parts }
        end
        basic_complete(options)
      end

      private

      # Queries the uploaded parts and maps them, ordered by part number,
      # into the hash structure CompleteMultipartUpload expects.
      def compute_parts
        parts.sort_by(&:part_number).map do |uploaded_part|
          { part_number: uploaded_part.part_number, etag: uploaded_part.etag }
        end
      end

    end
  end
end
module Aws
  module S3
    # Hand-written customizations for the generated Object resource class.
    class Object

      # Convenience alias: `obj.size` reads the Content-Length attribute.
      alias size content_length

      # Copies another object to this object. Use `multipart_copy: true`
      # for large objects. This is required for objects that exceed 5GB.
      #
      # @param [S3::Object, S3::ObjectVersion, S3::ObjectSummary, String, Hash] source
      #   Where to copy object data from. `source` must be one of the following:
      #
      #   * {Aws::S3::Object}
      #   * {Aws::S3::ObjectSummary}
      #   * {Aws::S3::ObjectVersion}
      #   * Hash - with `:bucket` and `:key` and optional `:version_id`
      #   * String - formatted like `"source-bucket-name/uri-escaped-key"`
      #     or `"source-bucket-name/uri-escaped-key?versionId=version-id"`
      #
      # @option options [Boolean] :multipart_copy (false) When `true`,
      #   the object will be copied using the multipart APIs. This is
      #   necessary for objects larger than 5GB and can provide
      #   performance improvements on large objects. Amazon S3 does
      #   not accept multipart copies for objects smaller than 5MB.
      #
      # @option options [Integer] :content_length Only used when
      #   `:multipart_copy` is `true`. Passing this options avoids a HEAD
      #   request to query the source object size.
      #
      # @option options [S3::Client] :copy_source_client Only used when
      #   `:multipart_copy` is `true` and the source object is in a
      #   different region. You do not need to specify this option
      #   if you have provided `:content_length`.
      #
      # @option options [String] :copy_source_region Only used when
      #   `:multipart_copy` is `true` and the source object is in a
      #   different region. You do not need to specify this option
      #   if you have provided a `:source_client` or a `:content_length`.
      #
      # @example Basic object copy
      #
      #   bucket = Aws::S3::Bucket.new('target-bucket')
      #   object = bucket.object('target-key')
      #
      #   # source as String
      #   object.copy_from('source-bucket/source-key')
      #
      #   # source as Hash
      #   object.copy_from(bucket:'source-bucket', key:'source-key')
      #
      #   # source as Aws::S3::Object
      #   object.copy_from(bucket.object('source-key'))
      #
      # @example Managed copy of large objects
      #
      #   # uses multipart upload APIs to copy object
      #   object.copy_from('src-bucket/src-key', multipart_copy: true)
      #
      # @see #copy_to
      #
      def copy_from(source, options = {})
        if Hash === source && source[:copy_source]
          # for backwards compatibility
          @client.copy_object(source.merge(bucket: bucket_name, key: key))
        else
          ObjectCopier.new(self, options).copy_from(source, options)
        end
      end

      # Copies this object to another object. Use `multipart_copy: true`
      # for large objects. This is required for objects that exceed 5GB.
      #
      # @note If you need to copy to a bucket in a different region, use
      #   {#copy_from}.
      #
      # @param [S3::Object, String, Hash] target Where to copy the object
      #   data to. `target` must be one of the following:
      #
      #   * {Aws::S3::Object}
      #   * Hash - with `:bucket` and `:key`
      #   * String - formatted like `"target-bucket-name/target-key"`
      #
      # @example Basic object copy
      #
      #   bucket = Aws::S3::Bucket.new('source-bucket')
      #   object = bucket.object('source-key')
      #
      #   # target as String
      #   object.copy_to('target-bucket/target-key')
      #
      #   # target as Hash
      #   object.copy_to(bucket: 'target-bucket', key: 'target-key')
      #
      #   # target as Aws::S3::Object
      #   object.copy_to(bucket.object('target-key'))
      #
      # @example Managed copy of large objects
      #
      #   # uses multipart upload APIs to copy object
      #   object.copy_to('src-bucket/src-key', multipart_copy: true)
      #
      def copy_to(target, options = {})
        ObjectCopier.new(self, options).copy_to(target, options)
      end

      # Copies and deletes the current object. The object will only be
      # deleted if the copy operation succeeds.
      # @param (see Object#copy_to)
      # @option (see Object#copy_to)
      # @return [void]
      # @see Object#copy_to
      # @see Object#delete
      def move_to(target, options = {})
        copy_to(target, options)
        delete
      end

      # Creates a {PresignedPost} that makes it easy to upload a file from
      # a web browser direct to Amazon S3 using an HTML post form with
      # a file field.
      #
      # See the {PresignedPost} documentation for more information.
      #
      # @option (see PresignedPost#initialize)
      # @return [PresignedPost]
      # @see PresignedPost
      def presigned_post(options = {})
        PresignedPost.new(
          client.config.credentials,
          client.config.region,
          bucket_name,
          {
            key: key,
            url: bucket.url,
          }.merge(options)
        )
      end

      # Generates a pre-signed URL for this object.
      #
      # @example Pre-signed GET URL, valid for one hour
      #
      #   obj.presigned_url(:get, expires_in: 3600)
      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
      #
      # @example Pre-signed PUT with a canned ACL
      #
      #   # the object uploaded using this URL will be publicly accessible
      #   obj.presigned_url(:put, acl: 'public-read')
      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
      #
      # @param [Symbol] http_method
      #   The HTTP method to generate a presigned URL for. Valid values
      #   are `:get`, `:put`, `:head`, and `:delete`.
      #
      # @param [Hash] params
      #   Additional request parameters to use when generating the pre-signed
      #   URL. See the related documentation in {Client} for accepted
      #   params.
      #
      #   | HTTP Method   | Client Method          |
      #   |---------------|------------------------|
      #   | `:get`        | {Client#get_object}    |
      #   | `:put`        | {Client#put_object}    |
      #   | `:head`       | {Client#head_object}   |
      #   | `:delete`     | {Client#delete_object} |
      #
      # @option params [Boolean] :virtual_host (false) When `true` the
      #   presigned URL will use the bucket name as a virtual host.
      #
      #     bucket = Aws::S3::Bucket.new('my.bucket.com')
      #     bucket.object('key').presigned_url(virtual_host: true)
      #     #=> "http://my.bucket.com/key?..."
      #
      # @option params [Integer] :expires_in (900) Number of seconds before
      #   the pre-signed URL expires. This may not exceed one week (604800
      #   seconds). Note that the pre-signed URL is also only valid as long as
      #   credentials used to sign it are. For example, when using IAM roles,
      #   temporary tokens generated for signing also have a default expiration
      #   which will affect the effective expiration of the pre-signed URL.
      #
      # @raise [ArgumentError] Raised if `:expires_in` exceeds one week
      #   (604800 seconds).
      #
      # @return [String]
      #
      def presigned_url(http_method, params = {})
        presigner = Presigner.new(client: client)
        # Maps :get -> "get_object", :put -> "put_object", etc.
        presigner.presigned_url("#{http_method.downcase}_object", params.merge(
          bucket: bucket_name,
          key: key,
        ))
      end

      # Returns the public (un-signed) URL for this object.
      #
      #     s3.bucket('bucket-name').object('obj-key').public_url
      #     #=> "https://bucket-name.s3.amazonaws.com/obj-key"
      #
      # To use virtual hosted bucket url (disables https):
      #
      #     s3.bucket('my.bucket.com').object('key').public_url(virtual_host: true)
      #     #=> "http://my.bucket.com/key"
      #
      # @option options [Boolean] :virtual_host (false) When `true`, the bucket
      #   name will be used as the host name. This is useful when you have
      #   a CNAME configured for the bucket.
      #
      # @return [String]
      def public_url(options = {})
        url = URI.parse(bucket.url(options))
        url.path += '/' unless url.path[-1] == '/'
        # Escape each path segment but preserve the '/' separators.
        url.path += key.gsub(/[^\/]+/) { |s| Seahorse::Util.uri_escape(s) }
        url.to_s
      end

      # Uploads a file from disk to the current object in S3.
      #
      #     # small files are uploaded in a single API call
      #     obj.upload_file('/path/to/file')
      #
      # Files larger than `:multipart_threshold` are uploaded using the
      # Amazon S3 multipart upload APIs.
      #
      #     # large files are automatically split into parts
      #     # and the parts are uploaded in parallel
      #     obj.upload_file('/path/to/very_large_file')
      #
      # @param [String,Pathname,File,Tempfile] source A file or path to a file
      #   on the local file system that should be uploaded to this object.
      #   If you pass an open file object, then it is your responsibility
      #   to close the file object once the upload completes.
      #
      # @option options [Integer] :multipart_threshold (15728640) Files larger
      #   than `:multipart_threshold` are uploaded using the S3 multipart APIs.
      #   Default threshold is 15MB.
      #
      # @raise [MultipartUploadError] If an object is being uploaded in
      #   parts, and the upload can not be completed, then the upload is
      #   aborted and this error is raised. The raised error has a `#errors`
      #   method that returns the failures that caused the upload to be
      #   aborted.
      #
      # @return [Boolean] Returns `true` when the object is uploaded
      #   without any errors.
      #
      def upload_file(source, options = {})
        uploader = FileUploader.new(
          multipart_threshold: options.delete(:multipart_threshold),
          client: client)
        uploader.upload(source, options.merge(bucket: bucket_name, key: key))
        true
      end

    end
  end
end