sml-aws-s3 0.5.1.1225474505

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,628 @@
1
+ module AWS
2
+ module S3
3
+ # S3Objects represent the data you store on S3. They have a key (their name) and a value (their data). All objects belong to a
4
+ # bucket.
5
+ #
6
+ # You can store an object on S3 by specifying a key, its data and the name of the bucket you want to put it in:
7
+ #
8
+ # S3Object.store('me.jpg', open('headshot.jpg'), 'photos')
9
+ #
10
+ # The content type of the object will be inferred by its extension. If the appropriate content type can not be inferred, S3 defaults
11
+ # to <tt>binary/octet-stream</tt>.
12
+ #
13
+ # If you want to override this, you can explicitly indicate what content type the object should have with the <tt>:content_type</tt> option:
14
+ #
15
+ # file = 'black-flowers.m4a'
16
+ # S3Object.store(
17
+ # file,
18
+ # open(file),
19
+ # 'jukebox',
20
+ # :content_type => 'audio/mp4a-latm'
21
+ # )
22
+ #
23
+ # You can read more about storing files on S3 in the documentation for S3Object.store.
24
+ #
25
+ # If you just want to fetch an object you've stored on S3, you just specify its name and its bucket:
26
+ #
27
+ # picture = S3Object.find 'headshot.jpg', 'photos'
28
+ #
29
+ # N.B. The actual data for the file is not downloaded in either case — whether the object came from a bucket listing or was fetched directly.
30
+ # You get the data for the file like this:
31
+ #
32
+ # picture.value
33
+ #
34
+ # You can fetch just the object's data directly:
35
+ #
36
+ # S3Object.value 'headshot.jpg', 'photos'
37
+ #
38
+ # Or stream it by passing a block to <tt>stream</tt>:
39
+ #
40
+ # open('song.mp3', 'w') do |file|
41
+ # S3Object.stream('song.mp3', 'jukebox') do |chunk|
42
+ # file.write chunk
43
+ # end
44
+ # end
45
+ #
46
+ # The data of the file, once downloaded, is cached, so subsequent calls to <tt>value</tt> won't redownload the file unless you
47
+ # tell the object to reload its <tt>value</tt>:
48
+ #
49
+ # # Redownloads the file's data
50
+ # song.value(:reload)
51
+ #
52
+ # Other functionality includes:
53
+ #
54
+ # # Check if an object exists?
55
+ # S3Object.exists? 'headshot.jpg', 'photos'
56
+ #
57
+ # # Copying an object
58
+ # S3Object.copy 'headshot.jpg', 'headshot2.jpg', 'photos'
59
+ #
60
+ # # Renaming an object
61
+ # S3Object.rename 'headshot.jpg', 'portrait.jpg', 'photos'
62
+ #
63
+ # # Deleting an object
64
+ # S3Object.delete 'headshot.jpg', 'photos'
65
+ #
66
+ # ==== More about objects and their metadata
67
+ #
68
+ # You can find out the content type of your object with the <tt>content_type</tt> method:
69
+ #
70
+ # song.content_type
71
+ # # => "audio/mpeg"
72
+ #
73
+ # You can change the content type as well if you like:
74
+ #
75
+ # song.content_type = 'application/pdf'
76
+ # song.store
77
+ #
78
+ # (Keep in mind that due to limitations in S3's exposed API, the only way to change things like the content_type
79
+ # is to PUT the object onto S3 again. In the case of large files, this will result in fully re-uploading the file.)
80
+ #
81
+ # A bevy of information about an object can be had using the <tt>about</tt> method:
82
+ #
83
+ # pp song.about
84
+ # {"last-modified" => "Sat, 28 Oct 2006 21:29:26 GMT",
85
+ # "content-type" => "binary/octet-stream",
86
+ # "etag" => "\"dc629038ffc674bee6f62eb64ff3a\"",
87
+ # "date" => "Sat, 28 Oct 2006 21:30:41 GMT",
88
+ # "x-amz-request-id" => "B7BC68F55495B1C8",
89
+ # "server" => "AmazonS3",
90
+ # "content-length" => "3418766"}
91
+ #
92
+ # You can get and set metadata for an object:
93
+ #
94
+ # song.metadata
95
+ # # => {}
96
+ # song.metadata[:album] = "A River Ain't Too Much To Love"
97
+ # # => "A River Ain't Too Much To Love"
98
+ # song.metadata[:released] = 2005
99
+ # pp song.metadata
100
+ # {"x-amz-meta-released" => 2005,
101
+ # "x-amz-meta-album" => "A River Ain't Too Much To Love"}
102
+ # song.store
103
+ #
104
+ # That metadata will be saved in S3 and is henceforth available from that object:
105
+ #
106
+ # song = S3Object.find('black-flowers.mp3', 'jukebox')
107
+ # pp song.metadata
108
+ # {"x-amz-meta-released" => "2005",
109
+ # "x-amz-meta-album" => "A River Ain't Too Much To Love"}
110
+ # song.metadata[:released]
111
+ # # => "2005"
112
+ # song.metadata[:released] = 2006
113
+ # pp song.metadata
114
+ # {"x-amz-meta-released" => 2006,
115
+ # "x-amz-meta-album" => "A River Ain't Too Much To Love"}
116
+ class S3Object < Base
117
+ class << self
118
+ # Returns the value of the object with <tt>key</tt> in the specified bucket.
119
+ #
120
+ # === Conditional GET options
121
+ #
122
+ # * <tt>:if_modified_since</tt> - Return the object only if it has been modified since the specified time,
123
+ # otherwise return a 304 (not modified).
124
+ # * <tt>:if_unmodified_since</tt> - Return the object only if it has not been modified since the specified time,
125
+ # otherwise raise PreconditionFailed.
126
+ # * <tt>:if_match</tt> - Return the object only if its entity tag (ETag) is the same as the one specified,
127
+ # otherwise raise PreconditionFailed.
128
+ # * <tt>:if_none_match</tt> - Return the object only if its entity tag (ETag) is different from the one specified,
129
+ # otherwise return a 304 (not modified).
130
+ #
131
+ # === Other options
132
+ # * <tt>:range</tt> - Return only the bytes of the object in the specified range.
133
+ def value(key, bucket = nil, options = {}, &block)
134
+ Value.new(get(path!(bucket, key, options), options, &block))
135
+ end
136
+
137
+ def stream(key, bucket = nil, options = {}, &block)
138
+ value(key, bucket, options) do |response|
139
+ response.read_body(&block)
140
+ end
141
+ end
142
+
143
+ # Returns the object whose key is <tt>name</tt> in the specified bucket. If the specified key does not
144
+ # exist, a NoSuchKey exception will be raised.
145
+ def find(key, bucket = nil)
146
+ # N.B. This is arguably a hack. From what the current S3 API exposes, when you retrieve a bucket, it
147
+ # provides a listing of all the files in that bucket (assuming you haven't limited the scope of what it returns).
148
+ # Each file in the listing contains information about that file. It is from this information that an S3Object is built.
149
+ #
150
+ # If you know the specific file that you want, S3 allows you to make a get request for that specific file and it returns
151
+ # the value of that file in its response body. This response body is used to build an S3Object::Value object.
152
+ # If you want information about that file, you can make a head request and the headers of the response will contain
153
+ # information about that file. There is no way, though, to say, give me the representation of just this given file the same
154
+ # way that it would appear in a bucket listing.
155
+ #
156
+ # When fetching a bucket, you can provide options which narrow the scope of what files should be returned in that listing.
157
+ # Of those options, one is <tt>marker</tt> which is a string and instructs the bucket to return only object's who's key comes after
158
+ # the specified marker according to alphabetic order. Another option is <tt>max-keys</tt> which defaults to 1000 but allows you
159
+ # to dictate how many objects should be returned in the listing. With a combination of <tt>marker</tt> and <tt>max-keys</tt> you can
160
+ # *almost* specify exactly which file you'd like it to return, but <tt>marker</tt> is not inclusive. In other words, if there is a bucket
161
+ # which contains three objects who's keys are respectively 'a', 'b' and 'c', then fetching a bucket listing with marker set to 'b' will only
162
+ # return 'c', not 'b'.
163
+ #
164
+ # Given all that, my hack to fetch a bucket with only one specific file, is to set the marker to the result of calling String#previous on
165
+ # the desired object's key, which functionally makes the key ordered one degree higher than the desired object key according to
166
+ # alphabetic ordering. This is a hack, but it should work around 99% of the time. I can't think of a scenario where it would return
167
+ # something incorrect.
168
+
169
+ # We need to ensure the key doesn't have extended characters but not uri escape it before doing the lookup and comparing since if the object exists,
170
+ # the key on S3 will have been normalized
171
+ key = key.remove_extended unless key.utf8?
172
+ bucket = Bucket.find(bucket_name(bucket), :marker => key.previous, :max_keys => 1)
173
+ # If our heuristic failed, trigger a NoSuchKey exception
174
+ if (object = bucket.objects.first) && object.key == key
175
+ object
176
+ else
177
+ raise NoSuchKey.new("No such key `#{key}'", bucket)
178
+ end
179
+ end
180
+
181
+ # Makes a copy of the object with <tt>key</tt> to <tt>copy_key</tt>, preserving the ACL of the existing object if the <tt>:copy_acl</tt> option is true (default false).
182
+ # If the <tt>:replace_meta<tt> option is true you can pass metadata (such as 'Content-type', 'Cache-Control') in options and the metadata associated with the
183
+ # object will be replaced. If the <tt>:dest_bucket</tt> option exists the value will be the bucket where we copy to and copy_key can be left blank to simulate a move.
184
+ def copy(key, copy_key = nil, bucket = nil, options = {})
185
+ copy_key ||= key
186
+ copy_acl = options.delete(:copy_acl)
187
+ replace_meta = options.delete(:replace_meta)
188
+
189
+ dest_bucket = bucket_name(options.delete(:dest_bucket) || bucket)
190
+ bucket = bucket_name(bucket)
191
+ source_key = path!(bucket, key)
192
+ target_key = path!(dest_bucket, copy_key)
193
+
194
+ default_options = {}
195
+ default_options['x-amz-copy-source'] = source_key
196
+ default_options['x-amz-metadata-directive'] = 'REPLACE' if replace_meta
197
+ options = default_options.merge(options)
198
+
199
+ returning put(target_key, options) do
200
+ acl(copy_key, dest_bucket, acl(key, bucket)) if copy_acl
201
+ end
202
+ end
203
+
204
+ # Rename the object with key <tt>from</tt> to have key in <tt>to</tt>.
205
+ def rename(from, to, bucket = nil, options = {})
206
+ copy(from, to, bucket, options)
207
+ delete(from, bucket)
208
+ end
209
+
210
+ # Fetch information about the object with <tt>key</tt> from <tt>bucket</tt>. Information includes content type, content length,
211
+ # last modified time, and others.
212
+ #
213
+ # If the specified key does not exist, NoSuchKey is raised.
214
+ def about(key, bucket = nil, options = {})
215
+ response = head(path!(bucket, key, options), options)
216
+ raise NoSuchKey.new("No such key `#{key}'", bucket) if response.code == 404
217
+ About.new(response.headers)
218
+ end
219
+
220
+ # Checks if the object with <tt>key</tt> in <tt>bucket</tt> exists.
221
+ #
222
+ # S3Object.exists? 'kiss.jpg', 'marcel'
223
+ # # => true
224
+ def exists?(key, bucket = nil)
225
+ about(key, bucket)
226
+ true
227
+ rescue NoSuchKey
228
+ false
229
+ end
230
+
231
+ # Delete object with <tt>key</tt> from <tt>bucket</tt>.
232
+ def delete(key, bucket = nil, options = {})
233
+ # A bit confusing. Calling super actually makes an HTTP DELETE request. The delete method is
234
+ # defined in the Base class. It happens to have the same name.
235
+ super(path!(bucket, key, options), options).success?
236
+ end
237
+
238
+ # When storing an object on the S3 servers using S3Object.store, the <tt>data</tt> argument can be a string or an I/O stream.
239
+ # If <tt>data</tt> is an I/O stream it will be read in segments and written to the socket incrementally. This approach
240
+ # may be desirable for very large files so they are not read into memory all at once.
241
+ #
242
+ # # Non streamed upload
243
+ # S3Object.store('greeting.txt', 'hello world!', 'marcel')
244
+ #
245
+ # # Streamed upload
246
+ # S3Object.store('roots.mpeg', open('roots.mpeg'), 'marcel')
247
+ def store(key, data, bucket = nil, options = {})
248
+ validate_key!(key)
249
+ # Must build path before infering content type in case bucket is being used for options
250
+ path = path!(bucket, key, options)
251
+ infer_content_type!(key, options)
252
+
253
+ put(path, options, data) # Don't call .success? on response. We want to get the etag.
254
+ end
255
+ alias_method :create, :store
256
+ alias_method :save, :store
257
+
258
+ # All private objects are accessible via an authenticated GET request to the S3 servers. You can generate an
259
+ # authenticated url for an object like this:
260
+ #
261
+ # S3Object.url_for('beluga_baby.jpg', 'marcel_molina')
262
+ #
263
+ # By default authenticated urls expire 5 minutes after they were generated.
264
+ #
265
+ # Expiration options can be specified either with an absolute time since the epoch with the <tt>:expires</tt> options,
266
+ # or with a number of seconds relative to now with the <tt>:expires_in</tt> options:
267
+ #
268
+ # # Absolute expiration date
269
+ # # (Expires January 18th, 2038)
270
+ # doomsday = Time.mktime(2038, 1, 18).to_i
271
+ # S3Object.url_for('beluga_baby.jpg',
272
+ # 'marcel',
273
+ # :expires => doomsday)
274
+ #
275
+ # # Expiration relative to now specified in seconds
276
+ # # (Expires in 3 hours)
277
+ # S3Object.url_for('beluga_baby.jpg',
278
+ # 'marcel',
279
+ # :expires_in => 60 * 60 * 3)
280
+ #
281
+ # You can specify whether the url should go over SSL with the <tt>:use_ssl</tt> option:
282
+ #
283
+ # # Url will use https protocol
284
+ # S3Object.url_for('beluga_baby.jpg',
285
+ # 'marcel',
286
+ # :use_ssl => true)
287
+ #
288
+ # By default, the ssl settings for the current connection will be used.
289
+ #
290
+ # If you have an object handy, you can use its <tt>url</tt> method with the same objects:
291
+ #
292
+ # song.url(:expires_in => 30)
293
+ #
294
+ # To get an unauthenticated url for the object, such as in the case
295
+ # when the object is publicly readable, pass the
296
+ # <tt>:authenticated</tt> option with a value of <tt>false</tt>.
297
+ #
298
+ # S3Object.url_for('beluga_baby.jpg',
299
+ # 'marcel',
300
+ # :authenticated => false)
301
+ # # => http://s3.amazonaws.com/marcel/beluga_baby.jpg
302
+ def url_for(name, bucket = nil, options = {})
303
+ connection.url_for(path!(bucket, name, options), options) # Do not normalize options
304
+ end
305
+
306
+ def path!(bucket, name, options = {}) #:nodoc:
307
+ # We're using the second argument for options
308
+ if bucket.is_a?(Hash)
309
+ options.replace(bucket)
310
+ bucket = nil
311
+ end
312
+ '/' << File.join(bucket_name(bucket), name)
313
+ end
314
+
315
+ private
316
+
317
+ def validate_key!(key)
318
+ raise InvalidKeyName.new(key) unless key && key.size <= 1024
319
+ end
320
+
321
+ def infer_content_type!(key, options)
322
+ return if options.has_key?(:content_type)
323
+ if mime_type = MIME::Types.type_for(key).first
324
+ options[:content_type] = mime_type.content_type
325
+ end
326
+ end
327
+ end
328
+
329
+ class Value < String #:nodoc:
330
+ attr_reader :response
331
+ def initialize(response)
332
+ super(response.body)
333
+ @response = response
334
+ end
335
+ end
336
+
337
+ class About < Hash #:nodoc:
338
+ def initialize(headers)
339
+ super()
340
+ replace(headers)
341
+ metadata
342
+ end
343
+
344
+ def [](header)
345
+ super(header.to_header)
346
+ end
347
+
348
+ def []=(header, value)
349
+ super(header.to_header, value)
350
+ end
351
+
352
+ def to_headers
353
+ self.merge(metadata.to_headers)
354
+ end
355
+
356
+ def metadata
357
+ Metadata.new(self)
358
+ end
359
+ memoized :metadata
360
+ end
361
+
362
+ class Metadata < Hash #:nodoc:
363
+ HEADER_PREFIX = 'x-amz-meta-'
364
+ SIZE_LIMIT = 2048 # 2 kilobytes
365
+
366
+ def initialize(headers)
367
+ @headers = headers
368
+ super()
369
+ extract_metadata!
370
+ end
371
+
372
+ def []=(header, value)
373
+ super(header_name(header.to_header), value)
374
+ end
375
+
376
+ def [](header)
377
+ super(header_name(header.to_header))
378
+ end
379
+
380
+ def to_headers
381
+ validate!
382
+ self
383
+ end
384
+
385
+ private
386
+ attr_reader :headers
387
+
388
+ def extract_metadata!
389
+ headers.keys.grep(Regexp.new(HEADER_PREFIX)).each do |metadata_header|
390
+ self[metadata_header] = headers.delete(metadata_header)
391
+ end
392
+ end
393
+
394
+ def header_name(name)
395
+ name =~ Regexp.new(HEADER_PREFIX) ? name : [HEADER_PREFIX, name].join
396
+ end
397
+
398
+ def validate!
399
+ invalid_headers = inject([]) do |invalid, (name, value)|
400
+ invalid << name unless valid?(value)
401
+ invalid
402
+ end
403
+
404
+ raise InvalidMetadataValue.new(invalid_headers) unless invalid_headers.empty?
405
+ end
406
+
407
+ def valid?(value)
408
+ value && value.size < SIZE_LIMIT
409
+ end
410
+ end
411
+
412
+ attr_writer :value #:nodoc:
413
+
414
+ # Provides readers and writers for all valid header settings listed in <tt>valid_header_settings</tt>.
415
+ # Subsequent saves to the object after setting any of the valid headers settings will be reflected in
416
+ # information about the object.
417
+ #
418
+ # some_s3_object.content_type
419
+ # => nil
420
+ # some_s3_object.content_type = 'text/plain'
421
+ # => "text/plain"
422
+ # some_s3_object.content_type
423
+ # => "text/plain"
424
+ # some_s3_object.store
425
+ # S3Object.about(some_s3_object.key, some_s3_object.bucket.name)['content-type']
426
+ # => "text/plain"
427
+ include SelectiveAttributeProxy #:nodoc:
428
+
429
+ proxy_to :about, :exclusively => false
430
+
431
+ # Initializes a new S3Object.
432
+ def initialize(attributes = {}, &block)
433
+ super
434
+ self.value = attributes.delete(:value)
435
+ self.bucket = attributes.delete(:bucket)
436
+ yield self if block_given?
437
+ end
438
+
439
+ # The current object's bucket. If no bucket has been set, a NoBucketSpecified exception will be raised. For
440
+ # cases where you are not sure if the bucket has been set, you can use the belongs_to_bucket? method.
441
+ def bucket
442
+ @bucket or raise NoBucketSpecified
443
+ end
444
+
445
+ # Sets the bucket that the object belongs to.
446
+ def bucket=(bucket)
447
+ @bucket = bucket
448
+ self
449
+ end
450
+
451
+ # Returns true if the current object has been assigned to a bucket yet. Objects must belong to a bucket before they
452
+ # can be saved onto S3.
453
+ def belongs_to_bucket?
454
+ !@bucket.nil?
455
+ end
456
+ alias_method :orphan?, :belongs_to_bucket?
457
+
458
+ # Returns the key of the object. If the key is not set, a NoKeySpecified exception will be raised. For cases
459
+ # where you are not sure if the key has been set, you can use the key_set? method. Objects must have a key
460
+ # set to be saved onto S3. Objects which have already been saved onto S3 will always have their key set.
461
+ def key
462
+ attributes['key'] or raise NoKeySpecified
463
+ end
464
+
465
+ # Sets the key for the current object.
466
+ def key=(value)
467
+ attributes['key'] = value
468
+ end
469
+
470
+ # Returns true if the current object has had its key set yet. Objects which have already been saved will
471
+ # always return true. This method is useful for objects which have not been saved yet so you know if you
472
+ # need to set the object's key since you can not save an object unless its key has been set.
473
+ #
474
+ # object.store if object.key_set? && object.belongs_to_bucket?
475
+ def key_set?
476
+ !attributes['key'].nil?
477
+ end
478
+
479
+ # Lazily loads object data.
480
+ #
481
+ # Force a reload of the data by passing <tt>:reload</tt>.
482
+ #
483
+ # object.value(:reload)
484
+ #
485
+ # When loading the data for the first time you can optionally yield to a block which will
486
+ # allow you to stream the data in segments.
487
+ #
488
+ # object.value do |segment|
489
+ # send_data segment
490
+ # end
491
+ #
492
+ # The full list of options are listed in the documentation for its class method counter part, S3Object::value.
493
+ def value(options = {}, &block)
494
+ if options.is_a?(Hash)
495
+ reload = !options.empty?
496
+ else
497
+ reload = options
498
+ options = {}
499
+ end
500
+ memoize(reload) do
501
+ self.class.stream(key, bucket.name, options, &block)
502
+ end
503
+ end
504
+
505
+ # Interface to information about the current object. Information is read only, though some of its data
506
+ # can be modified through specific methods, such as content_type and content_type=.
507
+ #
508
+ # pp some_object.about
509
+ # {"last-modified" => "Sat, 28 Oct 2006 21:29:26 GMT",
510
+ # "x-amz-id-2" => "LdcQRk5qLwxJQiZ8OH50HhoyKuqyWoJ67B6i+rOE5MxpjJTWh1kCkL+I0NQzbVQn",
511
+ # "content-type" => "binary/octet-stream",
512
+ # "etag" => "\"dc629038ffc674bee6f62eb68454ff3a\"",
513
+ # "date" => "Sat, 28 Oct 2006 21:30:41 GMT",
514
+ # "x-amz-request-id" => "B7BC68F55495B1C8",
515
+ # "server" => "AmazonS3",
516
+ # "content-length" => "3418766"}
517
+ #
518
+ # some_object.content_type
519
+ # # => "binary/octet-stream"
520
+ # some_object.content_type = 'audio/mpeg'
521
+ # some_object.content_type
522
+ # # => 'audio/mpeg'
523
+ # some_object.store
524
+ def about
525
+ stored? ? self.class.about(key, bucket.name) : About.new
526
+ end
527
+ memoized :about
528
+
529
+ # Interface to viewing and editing metadata for the current object. To be treated like a Hash.
530
+ #
531
+ # some_object.metadata
532
+ # # => {}
533
+ # some_object.metadata[:author] = 'Dave Thomas'
534
+ # some_object.metadata
535
+ # # => {"x-amz-meta-author" => "Dave Thomas"}
536
+ # some_object.metadata[:author]
537
+ # # => "Dave Thomas"
538
+ def metadata
539
+ about.metadata
540
+ end
541
+ memoized :metadata
542
+
543
+ # Saves the current object with the specified <tt>options</tt>. Valid options are listed in the documentation for S3Object::store.
544
+ def store(options = {})
545
+ raise DeletedObject if frozen?
546
+ options = about.to_headers.merge(options) if stored?
547
+ response = self.class.store(key, value, bucket.name, options)
548
+ bucket.update(:stored, self)
549
+ response.success?
550
+ end
551
+ alias_method :create, :store
552
+ alias_method :save, :store
553
+
554
+ # Deletes the current object. Trying to save an object after it has been deleted with
555
+ # raise a DeletedObject exception.
556
+ def delete
557
+ bucket.update(:deleted, self)
558
+ freeze
559
+ self.class.delete(key, bucket.name)
560
+ end
561
+
562
+ # Copies current object to a new bucket
563
+ def copy_to_bucket(dest_bucket)
564
+ copy(nil, :dest_bucket => dest_bucket, :copy_acl => true)
565
+ end
566
+
567
+ # Copies the current object, given it the name <tt>copy_name</tt>.
568
+ def copy(copy_name=nil, options = {})
569
+ self.class.copy(key, copy_name, bucket.name, options)
570
+ end
571
+
572
+ # Rename the current object. Keep in mind that due to limitations in S3's API, this operation requires
573
+ # retransmitting the entire object to S3.
574
+ def rename(to, options = {})
575
+ self.class.rename(key, to, bucket.name, options)
576
+ end
577
+
578
+ def etag(reload = false)
579
+ return nil unless stored?
580
+ memoize(reload) do
581
+ reload ? about(reload)['etag'][1...-1] : attributes['e_tag'][1...-1]
582
+ end
583
+ end
584
+
585
+ # Returns the owner of the current object.
586
+ def owner
587
+ Owner.new(attributes['owner'])
588
+ end
589
+ memoized :owner
590
+
591
+ # Generates an authenticated url for the current object. Accepts the same options as its class method
592
+ # counter part S3Object.url_for.
593
+ def url(options = {})
594
+ self.class.url_for(key, bucket.name, options)
595
+ end
596
+
597
+ # Returns true if the current object has been stored on S3 yet.
598
+ def stored?
599
+ !attributes['e_tag'].nil?
600
+ end
601
+
602
+ def ==(s3object) #:nodoc:
603
+ path == s3object.path
604
+ end
605
+
606
+ def path #:nodoc:
607
+ self.class.path!(
608
+ belongs_to_bucket? ? bucket.name : '(no bucket)',
609
+ key_set? ? key : '(no key)'
610
+ )
611
+ end
612
+
613
+ # Don't dump binary data :)
614
+ def inspect #:nodoc:
615
+ "#<%s:0x%s '%s'>" % [self.class, object_id, path]
616
+ end
617
+
618
+ private
619
+ def proxiable_attribute?(name)
620
+ valid_header_settings.include?(name)
621
+ end
622
+
623
+ def valid_header_settings
624
+ %w(cache_control content_type content_length content_md5 content_disposition content_encoding expires)
625
+ end
626
+ end
627
+ end
628
+ end
@@ -0,0 +1,44 @@
1
+ module AWS
2
+ module S3
3
+ # Entities in S3 have an associated owner (the person who created them). The owner is a canonical representation of an
4
+ # entity in the S3 system. It has an <tt>id</tt> and a <tt>display_name</tt>.
5
+ #
6
+ # These attributes can be used when specifying a ACL::Grantee for an ACL::Grant.
7
+ #
8
+ # You can retrieve the owner of the current account by calling Owner.current.
9
+ class Owner
10
+ undef_method :id if method_defined?(:id) # Get rid of Object#id
11
+ include SelectiveAttributeProxy
12
+
13
+ class << self
14
+ # The owner of the current account.
15
+ def current
16
+ response = Service.get('/')
17
+ new(response.parsed['owner']) if response.parsed['owner']
18
+ end
19
+ memoized :current
20
+ end
21
+
22
+ def initialize(attributes = {}) #:nodoc:
23
+ @attributes = attributes
24
+ end
25
+
26
+ def ==(other_owner) #:nodoc:
27
+ hash == other_owner.hash
28
+ end
29
+
30
+ def hash #:nodoc
31
+ [id, display_name].join.hash
32
+ end
33
+
34
+ private
35
+ def proxiable_attribute?(name)
36
+ valid_attributes.include?(name)
37
+ end
38
+
39
+ def valid_attributes
40
+ %w(id display_name)
41
+ end
42
+ end
43
+ end
44
+ end