yam-aws-s3 0.6.2.2
Sign up to get free protection for your applications and to get access to all the features.
- data/CHANGELOG +105 -0
- data/COPYING +19 -0
- data/INSTALL +55 -0
- data/README.erb +58 -0
- data/bin/s3sh +6 -0
- data/bin/setup.rb +10 -0
- data/lib/aws/s3/acl.rb +636 -0
- data/lib/aws/s3/authentication.rb +221 -0
- data/lib/aws/s3/base.rb +240 -0
- data/lib/aws/s3/bittorrent.rb +58 -0
- data/lib/aws/s3/bucket.rb +319 -0
- data/lib/aws/s3/connection.rb +278 -0
- data/lib/aws/s3/error.rb +69 -0
- data/lib/aws/s3/exceptions.rb +133 -0
- data/lib/aws/s3/extensions.rb +356 -0
- data/lib/aws/s3/logging.rb +314 -0
- data/lib/aws/s3/object.rb +612 -0
- data/lib/aws/s3/owner.rb +44 -0
- data/lib/aws/s3/parsing.rb +99 -0
- data/lib/aws/s3/response.rb +180 -0
- data/lib/aws/s3/service.rb +51 -0
- data/lib/aws/s3/version.rb +5 -0
- data/lib/aws/s3.rb +60 -0
- metadata +136 -0
@@ -0,0 +1,612 @@
|
|
1
|
+
module AWS
|
2
|
+
module S3
|
3
|
+
# S3Objects represent the data you store on S3. They have a key (their name) and a value (their data). All objects belong to a
|
4
|
+
# bucket.
|
5
|
+
#
|
6
|
+
# You can store an object on S3 by specifying a key, its data and the name of the bucket you want to put it in:
|
7
|
+
#
|
8
|
+
# S3Object.store('me.jpg', open('headshot.jpg'), 'photos')
|
9
|
+
#
|
10
|
+
# The content type of the object will be inferred by its extension. If the appropriate content type can not be inferred, S3 defaults
|
11
|
+
# to <tt>binary/octet-stream</tt>.
|
12
|
+
#
|
13
|
+
# If you want to override this, you can explicitly indicate what content type the object should have with the <tt>:content_type</tt> option:
|
14
|
+
#
|
15
|
+
# file = 'black-flowers.m4a'
|
16
|
+
# S3Object.store(
|
17
|
+
# file,
|
18
|
+
# open(file),
|
19
|
+
# 'jukebox',
|
20
|
+
# :content_type => 'audio/mp4a-latm'
|
21
|
+
# )
|
22
|
+
#
|
23
|
+
# You can read more about storing files on S3 in the documentation for S3Object.store.
|
24
|
+
#
|
25
|
+
# If you just want to fetch an object you've stored on S3, you just specify its name and its bucket:
|
26
|
+
#
|
27
|
+
# picture = S3Object.find 'headshot.jpg', 'photos'
|
28
|
+
#
|
29
|
+
# N.B. The actual data for the file is not downloaded in both the example where the file appeared in the bucket and when fetched directly.
|
30
|
+
# You get the data for the file like this:
|
31
|
+
#
|
32
|
+
# picture.value
|
33
|
+
#
|
34
|
+
# You can fetch just the object's data directly:
|
35
|
+
#
|
36
|
+
# S3Object.value 'headshot.jpg', 'photos'
|
37
|
+
#
|
38
|
+
# Or stream it by passing a block to <tt>stream</tt>:
|
39
|
+
#
|
40
|
+
# open('song.mp3', 'w') do |file|
|
41
|
+
# S3Object.stream('song.mp3', 'jukebox') do |chunk|
|
42
|
+
# file.write chunk
|
43
|
+
# end
|
44
|
+
# end
|
45
|
+
#
|
46
|
+
# The data of the file, once downloaded, is cached, so subsequent calls to <tt>value</tt> won't redownload the file unless you
|
47
|
+
# tell the object to reload its <tt>value</tt>:
|
48
|
+
#
|
49
|
+
# # Redownloads the file's data
|
50
|
+
# song.value(:reload)
|
51
|
+
#
|
52
|
+
# Other functionality includes:
|
53
|
+
#
|
54
|
+
# # Check if an object exists?
|
55
|
+
# S3Object.exists? 'headshot.jpg', 'photos'
|
56
|
+
#
|
57
|
+
# # Copying an object
|
58
|
+
# S3Object.copy 'headshot.jpg', 'headshot2.jpg', 'photos'
|
59
|
+
#
|
60
|
+
# # Renaming an object
|
61
|
+
# S3Object.rename 'headshot.jpg', 'portrait.jpg', 'photos'
|
62
|
+
#
|
63
|
+
# # Deleting an object
|
64
|
+
# S3Object.delete 'headshot.jpg', 'photos'
|
65
|
+
#
|
66
|
+
# ==== More about objects and their metadata
|
67
|
+
#
|
68
|
+
# You can find out the content type of your object with the <tt>content_type</tt> method:
|
69
|
+
#
|
70
|
+
# song.content_type
|
71
|
+
# # => "audio/mpeg"
|
72
|
+
#
|
73
|
+
# You can change the content type as well if you like:
|
74
|
+
#
|
75
|
+
# song.content_type = 'application/pdf'
|
76
|
+
# song.store
|
77
|
+
#
|
78
|
+
# (Keep in mind that due to limitations in S3's exposed API, the only way to change things like the content_type
|
79
|
+
# is to PUT the object onto S3 again. In the case of large files, this will result in fully re-uploading the file.)
|
80
|
+
#
|
81
|
+
# A bevy of information about an object can be had using the <tt>about</tt> method:
|
82
|
+
#
|
83
|
+
# pp song.about
|
84
|
+
# {"last-modified" => "Sat, 28 Oct 2006 21:29:26 GMT",
|
85
|
+
# "content-type" => "binary/octet-stream",
|
86
|
+
# "etag" => "\"dc629038ffc674bee6f62eb64ff3a\"",
|
87
|
+
# "date" => "Sat, 28 Oct 2006 21:30:41 GMT",
|
88
|
+
# "x-amz-request-id" => "B7BC68F55495B1C8",
|
89
|
+
# "server" => "AmazonS3",
|
90
|
+
# "content-length" => "3418766"}
|
91
|
+
#
|
92
|
+
# You can get and set metadata for an object:
|
93
|
+
#
|
94
|
+
# song.metadata
|
95
|
+
# # => {}
|
96
|
+
# song.metadata[:album] = "A River Ain't Too Much To Love"
|
97
|
+
# # => "A River Ain't Too Much To Love"
|
98
|
+
# song.metadata[:released] = 2005
|
99
|
+
# pp song.metadata
|
100
|
+
# {"x-amz-meta-released" => 2005,
|
101
|
+
# "x-amz-meta-album" => "A River Ain't Too Much To Love"}
|
102
|
+
# song.store
|
103
|
+
#
|
104
|
+
# That metadata will be saved in S3 and is henceforth available from that object:
|
105
|
+
#
|
106
|
+
# song = S3Object.find('black-flowers.mp3', 'jukebox')
|
107
|
+
# pp song.metadata
|
108
|
+
# {"x-amz-meta-released" => "2005",
|
109
|
+
# "x-amz-meta-album" => "A River Ain't Too Much To Love"}
|
110
|
+
# song.metadata[:released]
|
111
|
+
# # => "2005"
|
112
|
+
# song.metadata[:released] = 2006
|
113
|
+
# pp song.metadata
|
114
|
+
# {"x-amz-meta-released" => 2006,
|
115
|
+
# "x-amz-meta-album" => "A River Ain't Too Much To Love"}
|
116
|
+
class S3Object < Base
|
117
|
+
class << self
|
118
|
+
# Returns the value of the object with <tt>key</tt> in the specified bucket.
|
119
|
+
#
|
120
|
+
# === Conditional GET options
|
121
|
+
#
|
122
|
+
# * <tt>:if_modified_since</tt> - Return the object only if it has been modified since the specified time,
|
123
|
+
# otherwise return a 304 (not modified).
|
124
|
+
# * <tt>:if_unmodified_since</tt> - Return the object only if it has not been modified since the specified time,
|
125
|
+
# otherwise raise PreconditionFailed.
|
126
|
+
# * <tt>:if_match</tt> - Return the object only if its entity tag (ETag) is the same as the one specified,
|
127
|
+
# otherwise raise PreconditionFailed.
|
128
|
+
# * <tt>:if_none_match</tt> - Return the object only if its entity tag (ETag) is different from the one specified,
|
129
|
+
# otherwise return a 304 (not modified).
|
130
|
+
#
|
131
|
+
# === Other options
|
132
|
+
# * <tt>:range</tt> - Return only the bytes of the object in the specified range.
|
133
|
+
def value(key, bucket = nil, options = {}, &block)
  # GET the object and wrap the response body in S3Object::Value — a String
  # subclass that also keeps a handle on the full response object.
  Value.new(get(path!(bucket, key, options), options, &block))
end
|
136
|
+
|
137
|
+
# Streams the data of the object with <tt>key</tt>, yielding it to the given
# block chunk by chunk rather than buffering the whole body in memory.
def stream(key, bucket = nil, options = {}, &block)
  value(key, bucket, options) { |response| response.read_body(&block) }
end
|
142
|
+
|
143
|
+
# Returns the object whose key is <tt>name</tt> in the specified bucket. If the specified key does not
|
144
|
+
# exist, a NoSuchKey exception will be raised.
|
145
|
+
def find(key, bucket = nil)
  # S3 has no call that returns the bucket-listing entry for a single object
  # (GET returns only the data; HEAD only the headers). So we fetch a
  # one-item bucket listing whose <tt>:marker</tt> sorts immediately before
  # the desired key (String#previous, from this library's extensions).
  # Since <tt>marker</tt> is exclusive, the first entry of that listing is
  # the object itself whenever it exists. A heuristic, but a reliable one.
  #
  # We ensure the key doesn't have extended characters but do not uri-escape
  # it before the lookup and comparison: if the object exists, the key on S3
  # will already have been normalized.
  key    = key.remove_extended unless key.valid_utf8?
  bucket = Bucket.find(bucket_name(bucket), :marker => key.previous, :max_keys => 1)
  # If our heuristic failed, trigger a NoSuchKey exception
  if (object = bucket.objects.first) && object.key == key
    object
  else
    raise NoSuchKey.new("No such key `#{key}'", bucket)
  end
end
|
180
|
+
|
181
|
+
# Makes a copy of the object with <tt>key</tt> to <tt>copy_key</tt>, preserving the ACL of the existing object if the <tt>:copy_acl</tt> option is true (default false).
|
182
|
+
# Server-side copy: a PUT to the target key carrying the
# <tt>x-amz-copy-source</tt> header, so the object data is never
# retransmitted through the client. The ACL of the source is only carried
# over when <tt>:copy_acl => true</tt> (default false — the copy gets S3's
# default ACL otherwise).
def copy(key, copy_key, bucket = nil, options = {})
  bucket          = bucket_name(bucket)
  source_key      = path!(bucket, key)
  default_options = {'x-amz-copy-source' => source_key}
  target_key      = path!(bucket, copy_key)
  # `returning` (extensions.rb) yields the PUT response and returns it.
  returning put(target_key, default_options) do
    acl(copy_key, bucket, acl(key, bucket)) if options[:copy_acl]
  end
end
|
191
|
+
|
192
|
+
# Rename the object with key <tt>from</tt> to have key in <tt>to</tt>.
|
193
|
+
# Rename the object with key <tt>from</tt> to have key <tt>to</tt>.
# Implemented as a server-side copy followed by a delete of the original key;
# pass <tt>:copy_acl => true</tt> to carry the source ACL over to the new key.
def rename(from, to, bucket = nil, options = {})
  copy(from, to, bucket, options)
  delete(from, bucket)
end
|
197
|
+
|
198
|
+
# Fetch information about the object with <tt>key</tt> from <tt>bucket</tt>. Information includes content type, content length,
|
199
|
+
# last modified time, and others.
|
200
|
+
#
|
201
|
+
# If the specified key does not exist, NoSuchKey is raised.
|
202
|
+
# Fetch information (content type, content length, last modified, custom
# metadata, ...) about the object with <tt>key</tt> via a HEAD request.
# Raises NoSuchKey if the key does not exist.
def about(key, bucket = nil, options = {})
  response = head(path!(bucket, key, options), options)
  # HEAD has no body, so a missing key surfaces only through the status code.
  raise NoSuchKey.new("No such key `#{key}'", bucket) if response.code == 404
  About.new(response.headers)
end
|
207
|
+
|
208
|
+
# Checks if the object with <tt>key</tt> in <tt>bucket</tt> exists.
|
209
|
+
#
|
210
|
+
# S3Object.exists? 'kiss.jpg', 'marcel'
|
211
|
+
# # => true
|
212
|
+
# Checks if the object with <tt>key</tt> in <tt>bucket</tt> exists.
# Implemented by attempting a HEAD (via +about+) and mapping the NoSuchKey
# failure onto +false+.
#
#   S3Object.exists? 'kiss.jpg', 'marcel'
#   # => true
def exists?(key, bucket = nil)
  begin
    about(key, bucket)
    true
  rescue NoSuchKey
    false
  end
end
|
218
|
+
|
219
|
+
# Delete object with <tt>key</tt> from <tt>bucket</tt>.
|
220
|
+
# Delete object with <tt>key</tt> from <tt>bucket</tt>. Returns true when
# the HTTP DELETE succeeded.
def delete(key, bucket = nil, options = {})
  # A bit confusing. Calling super actually makes an HTTP DELETE request.
  # The delete method is defined in the Base class. It happens to have the
  # same name.
  super(path!(bucket, key, options), options).success?
end
|
225
|
+
|
226
|
+
# When storing an object on the S3 servers using S3Object.store, the <tt>data</tt> argument can be a string or an I/O stream.
|
227
|
+
# If <tt>data</tt> is an I/O stream it will be read in segments and written to the socket incrementally. This approach
|
228
|
+
# may be desirable for very large files so they are not read into memory all at once.
|
229
|
+
#
|
230
|
+
# # Non streamed upload
|
231
|
+
# S3Object.store('greeting.txt', 'hello world!', 'marcel')
|
232
|
+
#
|
233
|
+
# # Streamed upload
|
234
|
+
# S3Object.store('roots.mpeg', open('roots.mpeg'), 'marcel')
|
235
|
+
# PUT <tt>data</tt> (a String or an I/O stream — streams are written to the
# socket incrementally) under <tt>key</tt> in <tt>bucket</tt>. Raises
# InvalidKeyName for a nil or over-long key. Returns the raw PUT response.
def store(key, data, bucket = nil, options = {})
  validate_key!(key)
  # Must build the path before inferring content type, in case the bucket
  # argument slot is actually being used for the options hash.
  path = path!(bucket, key, options)
  infer_content_type!(key, options)

  put(path, options, data) # Don't call .success? on response. We want to get the etag.
end
alias_method :create, :store
alias_method :save, :store
|
245
|
+
|
246
|
+
# All private objects are accessible via an authenticated GET request to the S3 servers. You can generate an
|
247
|
+
# authenticated url for an object like this:
|
248
|
+
#
|
249
|
+
# S3Object.url_for('beluga_baby.jpg', 'marcel_molina')
|
250
|
+
#
|
251
|
+
# By default authenticated urls expire 5 minutes after they were generated.
|
252
|
+
#
|
253
|
+
# Expiration options can be specified either with an absolute time since the epoch with the <tt>:expires</tt> options,
|
254
|
+
# or with a number of seconds relative to now with the <tt>:expires_in</tt> options:
|
255
|
+
#
|
256
|
+
# # Absolute expiration date
|
257
|
+
# # (Expires January 18th, 2038)
|
258
|
+
# doomsday = Time.mktime(2038, 1, 18).to_i
|
259
|
+
# S3Object.url_for('beluga_baby.jpg',
|
260
|
+
# 'marcel',
|
261
|
+
# :expires => doomsday)
|
262
|
+
#
|
263
|
+
# # Expiration relative to now specified in seconds
|
264
|
+
# # (Expires in 3 hours)
|
265
|
+
# S3Object.url_for('beluga_baby.jpg',
|
266
|
+
# 'marcel',
|
267
|
+
# :expires_in => 60 * 60 * 3)
|
268
|
+
#
|
269
|
+
# You can specify whether the url should go over SSL with the <tt>:use_ssl</tt> option:
|
270
|
+
#
|
271
|
+
# # Url will use https protocol
|
272
|
+
# S3Object.url_for('beluga_baby.jpg',
|
273
|
+
# 'marcel',
|
274
|
+
# :use_ssl => true)
|
275
|
+
#
|
276
|
+
# By default, the ssl settings for the current connection will be used.
|
277
|
+
#
|
278
|
+
# If you have an object handy, you can use its <tt>url</tt> method with the same objects:
|
279
|
+
#
|
280
|
+
# song.url(:expires_in => 30)
|
281
|
+
#
|
282
|
+
# To get an unauthenticated url for the object, such as in the case
|
283
|
+
# when the object is publicly readable, pass the
|
284
|
+
# <tt>:authenticated</tt> option with a value of <tt>false</tt>.
|
285
|
+
#
|
286
|
+
# S3Object.url_for('beluga_baby.jpg',
|
287
|
+
# 'marcel',
|
288
|
+
# :authenticated => false)
|
289
|
+
# # => http://s3.amazonaws.com/marcel/beluga_baby.jpg
|
290
|
+
# Generates a (by default authenticated, 5-minute-expiry) url for the object
# named <tt>name</tt>. See the class-level RDoc above for the supported
# options (:expires, :expires_in, :use_ssl, :authenticated).
def url_for(name, bucket = nil, options = {})
  connection.url_for(path!(bucket, name, options), options) # Do not normalize options
end
|
293
|
+
|
294
|
+
# Builds the request path '/<bucket>/<name>'. Supports the shorthand of
# passing the options hash in the bucket slot.
#
# NOTE: when +bucket+ is a Hash, the caller's +options+ hash is mutated in
# place via +replace+ — callers (e.g. +value+, +store+) rely on this.
def path!(bucket, name, options = {}) #:nodoc:
  # We're using the second argument for options
  if bucket.is_a?(Hash)
    options.replace(bucket)
    bucket = nil
  end
  '/' << File.join(bucket_name(bucket), name)
end
|
302
|
+
|
303
|
+
private
|
304
|
+
|
305
|
+
# Rejects nil keys and keys longer than S3's 1024-byte limit.
# NOTE(review): +size+ counts characters on Ruby >= 1.9, so a multibyte key
# of <= 1024 chars but > 1024 bytes could slip past — confirm target Ruby /
# consider bytesize.
def validate_key!(key)
  raise InvalidKeyName.new(key) unless key && key.size <= 1024
end
|
308
|
+
|
309
|
+
# Fills in options[:content_type] from the key's file extension (via the
# mime-types gem) unless the caller supplied one explicitly. When no type
# can be inferred, nothing is set and S3 defaults to binary/octet-stream.
def infer_content_type!(key, options)
  return if options.has_key?(:content_type)
  if mime_type = MIME::Types.type_for(key).first
    options[:content_type] = mime_type.content_type
  end
end
|
315
|
+
end
|
316
|
+
|
317
|
+
# A String subclass holding an object's data while retaining the full HTTP
# response it was extracted from.
class Value < String #:nodoc:
  # The raw response this value was built from.
  attr_reader :response

  # Wraps the body of +response+; the response itself stays reachable
  # through #response.
  def initialize(response)
    @response = response
    super(response.body)
  end
end
|
324
|
+
|
325
|
+
# Hash of response headers describing an object, with lookups normalized
# through String#to_header. x-amz-meta-* entries are split out into a
# memoized Metadata hash.
class About < Hash #:nodoc:
  # +headers+ now defaults to an empty hash: S3Object#about calls
  # `About.new` with no arguments for objects not yet stored on S3, which
  # previously raised ArgumentError (initialize required one argument).
  def initialize(headers = {})
    super()
    replace(headers)
    # Force extraction of x-amz-meta-* keys out of self into Metadata.
    metadata
  end

  # Header lookup, normalized (e.g. :content_type -> 'content-type').
  def [](header)
    super(header.to_header)
  end

  def []=(header, value)
    super(header.to_header, value)
  end

  # Headers to send when re-storing the object: self plus validated metadata.
  def to_headers
    self.merge(metadata.to_headers)
  end

  # The object's custom metadata (x-amz-meta-*), built once and memoized.
  def metadata
    Metadata.new(self)
  end
  memoized :metadata
end
|
349
|
+
|
350
|
+
# Hash of an object's custom metadata. Keys are transparently prefixed with
# 'x-amz-meta-' on read and write, so callers can use short names like
# :album. Built from (and destructively extracted out of) an About hash.
class Metadata < Hash #:nodoc:
  HEADER_PREFIX = 'x-amz-meta-'
  SIZE_LIMIT    = 2048 # 2 kilobytes

  # +headers+ is the About hash; all x-amz-meta-* entries are moved from it
  # into self.
  def initialize(headers)
    @headers = headers
    super()
    extract_metadata!
  end

  # Write with the x-amz-meta- prefix added when missing.
  def []=(header, value)
    super(header_name(header.to_header), value)
  end

  # Read with the x-amz-meta- prefix added when missing.
  def [](header)
    super(header_name(header.to_header))
  end

  # Returns self after asserting every value fits S3's metadata size limit.
  def to_headers
    validate!
    self
  end

  private
    attr_reader :headers

    # Move every x-amz-meta-* entry out of the source headers into self.
    def extract_metadata!
      headers.keys.grep(Regexp.new(HEADER_PREFIX)).each do |metadata_header|
        self[metadata_header] = headers.delete(metadata_header)
      end
    end

    # Prefix +name+ with x-amz-meta- unless it already carries the prefix.
    def header_name(name)
      name =~ Regexp.new(HEADER_PREFIX) ? name : [HEADER_PREFIX, name].join
    end

    # Raises InvalidMetadataValue listing every over-sized or nil value.
    def validate!
      invalid_headers = inject([]) do |invalid, (name, value)|
        invalid << name unless valid?(value)
        invalid
      end

      raise InvalidMetadataValue.new(invalid_headers) unless invalid_headers.empty?
    end

    # A value is valid when present and under SIZE_LIMIT (size in bytes for
    # strings; note non-string values rely on responding to +size+).
    def valid?(value)
      value && value.size < SIZE_LIMIT
    end
end
|
399
|
+
|
400
|
+
# Writer for the object's cached data; reading goes through the #value
# method, which lazily downloads and memoizes.
attr_writer :value #:nodoc:
|
401
|
+
|
402
|
+
# Provides readers and writers for all valid header settings listed in <tt>valid_header_settings</tt>.
|
403
|
+
# Subsequent saves to the object after setting any of the valid headers settings will be reflected in
|
404
|
+
# information about the object.
|
405
|
+
#
|
406
|
+
# some_s3_object.content_type
|
407
|
+
# => nil
|
408
|
+
# some_s3_object.content_type = 'text/plain'
|
409
|
+
# => "text/plain"
|
410
|
+
# some_s3_object.content_type
|
411
|
+
# => "text/plain"
|
412
|
+
# some_s3_object.store
|
413
|
+
# S3Object.about(some_s3_object.key, some_s3_object.bucket.name)['content-type']
|
414
|
+
# => "text/plain"
|
415
|
+
# Exposes header-backed accessors (content_type, content_length, ...) that
# read and write through #about; the allowed set is valid_header_settings.
include SelectiveAttributeProxy #:nodoc:

proxy_to :about, :exclusively => false
|
418
|
+
|
419
|
+
# Initializes a new S3Object.
|
420
|
+
# Initializes a new S3Object. :value and :bucket are pulled out of
# +attributes+ before the rest is handed to Base#initialize (via +super+);
# the optional block receives the new object for further configuration.
def initialize(attributes = {}, &block)
  super
  self.value  = attributes.delete(:value)
  self.bucket = attributes.delete(:bucket)
  yield self if block_given?
end
|
426
|
+
|
427
|
+
# The current object's bucket. If no bucket has been set, a NoBucketSpecified exception will be raised. For
|
428
|
+
# cases where you are not sure if the bucket has been set, you can use the belongs_to_bucket? method.
|
429
|
+
# The current object's bucket. Raises NoBucketSpecified when no bucket has
# been assigned yet; use belongs_to_bucket? when you are not sure.
def bucket
  raise NoBucketSpecified unless @bucket
  @bucket
end

# Assigns the bucket this object belongs to; returns self.
def bucket=(new_bucket)
  @bucket = new_bucket
  self
end
|
438
|
+
|
439
|
+
# Returns true if the current object has been assigned to a bucket yet. Objects must belong to a bucket before they
|
440
|
+
# can be saved onto S3.
|
441
|
+
# Returns true once the object has been assigned to a bucket. Objects must
# belong to a bucket before they can be saved onto S3.
def belongs_to_bucket?
  !@bucket.nil?
end
# NOTE(review): an "orphan" is presumably an object *without* a bucket, yet
# orphan? is aliased to belongs_to_bucket? (same truth value, not the
# negation). This looks inverted — confirm intended semantics before
# relying on orphan?.
alias_method :orphan?, :belongs_to_bucket?
|
445
|
+
|
446
|
+
# Returns the key of the object. If the key is not set, a NoKeySpecified exception will be raised. For cases
|
447
|
+
# where you are not sure if the key has been set, you can use the key_set? method. Objects must have a key
|
448
|
+
# set to be saved onto S3. Objects which have already been saved onto S3 will always have their key set.
|
449
|
+
# Returns the key of the object, raising NoKeySpecified when it has not
# been set. Objects already saved onto S3 always have their key set; use
# key_set? when unsure.
def key
  attributes['key'] or raise NoKeySpecified
end

# Sets the key for the current object.
def key=(value)
  attributes['key'] = value
end

# True once the object's key has been assigned (always true for objects
# that have been fetched from or saved to S3).
#
#   object.store if object.key_set? && object.belongs_to_bucket?
def key_set?
  !attributes['key'].nil?
end
|
466
|
+
|
467
|
+
# Lazily loads object data.
|
468
|
+
#
|
469
|
+
# Force a reload of the data by passing <tt>:reload</tt>.
|
470
|
+
#
|
471
|
+
# object.value(:reload)
|
472
|
+
#
|
473
|
+
# When loading the data for the first time you can optionally yield to a block which will
|
474
|
+
# allow you to stream the data in segments.
|
475
|
+
#
|
476
|
+
# object.value do |segment|
|
477
|
+
# send_data segment
|
478
|
+
# end
|
479
|
+
#
|
480
|
+
# The full list of options are listed in the documentation for its class method counter part, S3Object::value.
|
481
|
+
# Lazily loads and caches the object's data. Accepts either an options hash
# of conditional-GET options or the symbol :reload (any non-hash truthy
# argument) to force a redownload. A block, on first load, streams the data
# in segments. See S3Object::value for the full option list.
def value(options = {}, &block)
  if options.is_a?(Hash)
    # Any conditional-GET option implies the cached copy may be stale, so
    # passing a non-empty hash also forces a reload.
    reload  = !options.empty?
  else
    # Called as value(:reload): the argument is the reload flag itself.
    reload  = options
    options = {}
  end
  # expirable_memoize (extensions.rb) caches the result unless reload is set.
  expirable_memoize(reload) do
    self.class.stream(key, bucket.name, options, &block)
  end
end
|
492
|
+
|
493
|
+
# Interface to information about the current object. Information is read only, though some of its data
|
494
|
+
# can be modified through specific methods, such as content_type and content_type=.
|
495
|
+
#
|
496
|
+
# pp some_object.about
|
497
|
+
# {"last-modified" => "Sat, 28 Oct 2006 21:29:26 GMT",
|
498
|
+
# "x-amz-id-2" => "LdcQRk5qLwxJQiZ8OH50HhoyKuqyWoJ67B6i+rOE5MxpjJTWh1kCkL+I0NQzbVQn",
|
499
|
+
# "content-type" => "binary/octet-stream",
|
500
|
+
# "etag" => "\"dc629038ffc674bee6f62eb68454ff3a\"",
|
501
|
+
# "date" => "Sat, 28 Oct 2006 21:30:41 GMT",
|
502
|
+
# "x-amz-request-id" => "B7BC68F55495B1C8",
|
503
|
+
# "server" => "AmazonS3",
|
504
|
+
# "content-length" => "3418766"}
|
505
|
+
#
|
506
|
+
# some_object.content_type
|
507
|
+
# # => "binary/octet-stream"
|
508
|
+
# some_object.content_type = 'audio/mpeg'
|
509
|
+
# some_object.content_type
|
510
|
+
# # => 'audio/mpeg'
|
511
|
+
# some_object.store
|
512
|
+
# Read-mostly interface to information about the current object (headers
# from a HEAD request). Unstored objects get an empty About. Memoized.
def about
  stored? ? self.class.about(key, bucket.name) : About.new
end
memoized :about
|
516
|
+
|
517
|
+
# Interface to viewing and editing metadata for the current object. To be treated like a Hash.
|
518
|
+
#
|
519
|
+
# some_object.metadata
|
520
|
+
# # => {}
|
521
|
+
# some_object.metadata[:author] = 'Dave Thomas'
|
522
|
+
# some_object.metadata
|
523
|
+
# # => {"x-amz-meta-author" => "Dave Thomas"}
|
524
|
+
# some_object.metadata[:author]
|
525
|
+
# # => "Dave Thomas"
|
526
|
+
# Hash-like interface to the object's x-amz-meta-* metadata, delegating to
# About#metadata. Memoized.
#
#   some_object.metadata[:author] = 'Dave Thomas'
def metadata
  about.metadata
end
memoized :metadata
|
530
|
+
|
531
|
+
# Saves the current object with the specified <tt>options</tt>. Valid options are listed in the documentation for S3Object::store.
|
532
|
+
# Saves the current object with the specified <tt>options</tt> (see
# S3Object::store). Raises DeletedObject if the object was deleted (frozen).
# For an already-stored object the existing headers/metadata are merged in
# so they survive the re-PUT. Returns true on success.
def store(options = {})
  raise DeletedObject if frozen?
  options  = about.to_headers.merge(options) if stored?
  response = self.class.store(key, value, bucket.name, options)
  bucket.update(:stored, self)
  response.success?
end
alias_method :create, :store
alias_method :save, :store
|
541
|
+
|
542
|
+
# Deletes the current object. Trying to save an object after it has been deleted with
|
543
|
+
# raise a DeletedObject exception.
|
544
|
+
# Deletes the current object from S3 and from its bucket's local listing.
# The object is frozen so a later store raises DeletedObject.
# NOTE(review): the object is frozen *before* the remote DELETE is issued;
# if that request fails, the local object is already frozen — confirm this
# ordering is intended.
def delete
  bucket.update(:deleted, self)
  freeze
  self.class.delete(key, bucket.name)
end
|
549
|
+
|
550
|
+
# Copies the current object, given it the name <tt>copy_name</tt>. Keep in mind that due to limitations in
|
551
|
+
# S3's API, this operation requires retransmitting the entire object to S3.
|
552
|
+
# Copies the current object within its bucket under the name <tt>copy_name</tt>.
# (The copy happens server-side via S3Object.copy's x-amz-copy-source PUT.)
def copy(copy_name, options = {})
  self.class.copy(key, copy_name, bucket.name, options)
end
|
555
|
+
|
556
|
+
# Rename the current object. Keep in mind that due to limitations in S3's API, this operation requires
|
557
|
+
# retransmitting the entire object to S3.
|
558
|
+
# Renames the current object to <tt>to</tt> (a copy to the new key followed
# by a delete of the old one; see S3Object.rename).
def rename(to, options = {})
  self.class.rename(key, to, bucket.name, options)
end
|
561
|
+
|
562
|
+
# The object's ETag with its surrounding quotes stripped ([1...-1]), or nil
# for unstored objects. Pass a truthy +reload+ to re-HEAD the object; the
# reload argument reaches #about through the wrapper that `memoized`
# installs around it. Memoized via expirable_memoize.
def etag(reload = false)
  return nil unless stored?
  expirable_memoize(reload) do
    reload ? about(reload)['etag'][1...-1] : attributes['e_tag'][1...-1]
  end
end
|
568
|
+
|
569
|
+
# Returns the owner of the current object.
|
570
|
+
# Returns the Owner of the current object, built from the 'owner'
# attributes captured in the bucket listing. Memoized.
def owner
  Owner.new(attributes['owner'])
end
memoized :owner
|
574
|
+
|
575
|
+
# Generates an authenticated url for the current object. Accepts the same options as its class method
|
576
|
+
# counter part S3Object.url_for.
|
577
|
+
# Generates an authenticated url for the current object. Accepts the same
# options as its class method counterpart, S3Object.url_for.
def url(options = {})
  self.class.url_for(key, bucket.name, options)
end
|
580
|
+
|
581
|
+
# Returns true if the current object has been stored on S3 yet.
|
582
|
+
# True once the object has been stored on S3 — detected by the presence of
# the e_tag attribute, which only a bucket listing / successful PUT sets.
def stored?
  !attributes['e_tag'].nil?
end
|
585
|
+
|
586
|
+
# Two S3Objects are equal when they resolve to the same '/bucket/key' path.
def ==(s3object) #:nodoc:
  path == s3object.path
end
|
589
|
+
|
590
|
+
# The object's request path, with placeholder segments when the bucket or
# key has not been assigned yet (so inspect/== never raise).
def path #:nodoc:
  self.class.path!(
    belongs_to_bucket? ? bucket.name : '(no bucket)',
    key_set? ? key : '(no key)'
  )
end
|
596
|
+
|
597
|
+
# Don't dump binary data :)
|
598
|
+
def inspect #:nodoc:
|
599
|
+
"#<%s:0x%s '%s'>" % [self.class, object_id, path]
|
600
|
+
end
|
601
|
+
|
602
|
+
private
|
603
|
+
# Hook for SelectiveAttributeProxy: only whitelisted header settings are
# proxied through to #about.
def proxiable_attribute?(name)
  valid_header_settings.include?(name)
end
|
606
|
+
|
607
|
+
# The header attributes that may be read/written through the attribute
# proxy (and hence re-sent to S3 on store).
def valid_header_settings
  ['cache_control', 'content_type', 'content_length', 'content_md5',
   'content_disposition', 'content_encoding', 'expires']
end
|
610
|
+
end
|
611
|
+
end
|
612
|
+
end
|
data/lib/aws/s3/owner.rb
ADDED
@@ -0,0 +1,44 @@
|
|
1
|
+
module AWS
  module S3
    # Entities in S3 have an associated owner (the person who created them).
    # The owner is a canonical representation of an entity in the S3 system,
    # with an <tt>id</tt> and a <tt>display_name</tt> (both exposed through
    # SelectiveAttributeProxy).
    #
    # These attributes can be used when specifying an ACL::Grantee for an
    # ACL::Grant. Retrieve the owner of the current account with
    # Owner.current.
    class Owner
      # Object#id (deprecated object_id alias on old Rubies) would shadow
      # the proxied 'id' attribute, so remove it when present.
      undef_method :id if method_defined?(:id) # Get rid of Object#id
      include SelectiveAttributeProxy

      class << self
        # The owner of the current account, parsed out of a GET on the
        # service root. Returns nil if no owner is present. Memoized.
        def current
          response = Service.get('/')
          new(response.parsed['owner']) if response.parsed['owner']
        end
        memoized :current
      end

      def initialize(attributes = {}) #:nodoc:
        @attributes = attributes
      end

      # Equality is by hash, i.e. identical id + display_name.
      def ==(other_owner) #:nodoc:
        hash == other_owner.hash
      end

      def hash #:nodoc:
        [id, display_name].join.hash
      end

      private
        # Hook for SelectiveAttributeProxy: only id and display_name are
        # exposed as readers/writers.
        def proxiable_attribute?(name)
          valid_attributes.include?(name)
        end

        def valid_attributes
          %w(id display_name)
        end
    end
  end
end
|