aws-s3 0.1.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (55) hide show
  1. data/COPYING +19 -0
  2. data/INSTALL +35 -0
  3. data/README +529 -0
  4. data/Rakefile +284 -0
  5. data/bin/s3sh +4 -0
  6. data/bin/setup.rb +10 -0
  7. data/lib/aws/s3.rb +64 -0
  8. data/lib/aws/s3/acl.rb +631 -0
  9. data/lib/aws/s3/authentication.rb +218 -0
  10. data/lib/aws/s3/base.rb +232 -0
  11. data/lib/aws/s3/bittorrent.rb +58 -0
  12. data/lib/aws/s3/bucket.rb +323 -0
  13. data/lib/aws/s3/connection.rb +212 -0
  14. data/lib/aws/s3/error.rb +69 -0
  15. data/lib/aws/s3/exceptions.rb +130 -0
  16. data/lib/aws/s3/extensions.rb +186 -0
  17. data/lib/aws/s3/logging.rb +163 -0
  18. data/lib/aws/s3/object.rb +565 -0
  19. data/lib/aws/s3/owner.rb +44 -0
  20. data/lib/aws/s3/parsing.rb +138 -0
  21. data/lib/aws/s3/response.rb +180 -0
  22. data/lib/aws/s3/service.rb +43 -0
  23. data/lib/aws/s3/version.rb +12 -0
  24. data/support/faster-xml-simple/lib/faster_xml_simple.rb +115 -0
  25. data/support/faster-xml-simple/test/regression_test.rb +16 -0
  26. data/support/faster-xml-simple/test/xml_simple_comparison_test.rb +22 -0
  27. data/support/rdoc/code_info.rb +211 -0
  28. data/test/acl_test.rb +243 -0
  29. data/test/authentication_test.rb +96 -0
  30. data/test/base_test.rb +143 -0
  31. data/test/bucket_test.rb +48 -0
  32. data/test/connection_test.rb +120 -0
  33. data/test/error_test.rb +75 -0
  34. data/test/extensions_test.rb +282 -0
  35. data/test/fixtures.rb +89 -0
  36. data/test/fixtures/buckets.yml +102 -0
  37. data/test/fixtures/errors.yml +34 -0
  38. data/test/fixtures/headers.yml +3 -0
  39. data/test/fixtures/logging.yml +15 -0
  40. data/test/fixtures/policies.yml +16 -0
  41. data/test/logging_test.rb +36 -0
  42. data/test/mocks/base.rb +89 -0
  43. data/test/object_test.rb +177 -0
  44. data/test/parsing_test.rb +82 -0
  45. data/test/remote/acl_test.rb +117 -0
  46. data/test/remote/bittorrent_test.rb +45 -0
  47. data/test/remote/bucket_test.rb +127 -0
  48. data/test/remote/logging_test.rb +82 -0
  49. data/test/remote/object_test.rb +267 -0
  50. data/test/remote/test_file.data +0 -0
  51. data/test/remote/test_helper.rb +30 -0
  52. data/test/response_test.rb +70 -0
  53. data/test/service_test.rb +26 -0
  54. data/test/test_helper.rb +82 -0
  55. metadata +125 -0
module AWS
  module S3
    # S3Objects represent the data you store on S3. They have a key (their name) and a value (their data). All objects belong to a
    # bucket.
    #
    # You can store an object on S3 by specifying a key, its data and the name of the bucket you want to put it in:
    #
    #   S3Object.store(
    #     'headshot.jpg',
    #     File.open('headshot.jpg'),
    #     'photos',
    #     :content_type => 'image/jpg'
    #   )
    #
    # You can read more about storing files on S3 in the documentation for S3Object.store.
    #
    # If you just want to fetch an object you've stored on S3, you just specify its name and its bucket:
    #
    #   picture = S3Object.find 'headshot.jpg', 'photos'
    #
    # N.B. The actual data for the file is not downloaded either when the file appears in a bucket listing or when it is
    # fetched directly. You get the data for the file like this:
    #
    #   picture.value
    #
    # You can fetch just the object's data directly:
    #
    #   S3Object.value 'headshot.jpg', 'photos'
    #
    # Or stream it by passing a block to <tt>stream</tt>:
    #
    #   File.open('song.mp3', 'w') do |file|
    #     S3Object.stream('song.mp3', 'jukebox') do |chunk|
    #       file.write chunk
    #     end
    #   end
    #
    # The data of the file, once downloaded, is cached, so subsequent calls to <tt>value</tt> won't redownload the file unless you
    # tell the object to reload its <tt>value</tt>:
    #
    #   # Redownloads the file's data
    #   song.value(:reload)
    #
    # Other functionality includes:
    #
    #   # Copying an object
    #   S3Object.copy 'headshot.jpg', 'headshot2.jpg', 'photos'
    #
    #   # Renaming an object
    #   S3Object.rename 'headshot.jpg', 'portrait.jpg', 'photos'
    #
    #   # Deleting an object
    #   S3Object.delete 'headshot.jpg', 'photos'
    #
    # ==== More about objects and their metadata
    #
    # You can find out the content type of your object with the <tt>content_type</tt> method:
    #
    #   song.content_type
    #   # => "audio/mpeg"
    #
    # You can change the content type as well if you like:
    #
    #   song.content_type = 'application/octet-stream'
    #   song.store
    #
    # (Keep in mind that due to limitations in S3's exposed API, the only way to change things like the content_type
    # is to PUT the object onto S3 again. In the case of large files, this will result in fully re-uploading the file.)
    #
    # A bevy of information about an object can be had using the <tt>about</tt> method:
    #
    #   pp song.about
    #   {"last-modified"    => "Sat, 28 Oct 2006 21:29:26 GMT",
    #    "content-type"     => "binary/octect-stream",
    #    "etag"             => "\"dc629038ffc674bee6f62eb64ff3a\"",
    #    "date"             => "Sat, 28 Oct 2006 21:30:41 GMT",
    #    "x-amz-request-id" => "B7BC68F55495B1C8",
    #    "server"           => "AmazonS3",
    #    "content-length"   => "3418766"}
    #
    # You can get and set metadata for an object:
    #
    #   song.metadata
    #   # => {}
    #   song.metadata[:album] = "A River Ain't Too Much To Love"
    #   # => "A River Ain't Too Much To Love"
    #   song.metadata[:released] = 2005
    #   pp song.metadata
    #   {"x-amz-meta-released" => 2005,
    #    "x-amz-meta-album"    => "A River Ain't Too Much To Love"}
    #   song.store
    #
    # That metadata will be saved in S3 and is henceforth available from that object:
    #
    #   song = S3Object.find('black-flowers.mp3', 'jukebox')
    #   pp song.metadata
    #   {"x-amz-meta-released" => "2005",
    #    "x-amz-meta-album"    => "A River Ain't Too Much To Love"}
    #   song.metadata[:released]
    #   # => "2005"
    #   song.metadata[:released] = 2006
    #   pp song.metadata
    #   {"x-amz-meta-released" => 2006,
    #    "x-amz-meta-album"    => "A River Ain't Too Much To Love"}
    class S3Object < Base
      class << self
        # Returns the value of the object with <tt>key</tt> in the specified bucket.
        #
        # === Conditional GET options
        #
        # * <tt>:if_modified_since</tt> - Return the object only if it has been modified since the specified time,
        #   otherwise return a 304 (not modified).
        # * <tt>:if_unmodified_since</tt> - Return the object only if it has not been modified since the specified time,
        #   otherwise raise PreconditionFailed.
        # * <tt>:if_match</tt> - Return the object only if its entity tag (ETag) is the same as the one specified,
        #   otherwise raise PreconditionFailed.
        # * <tt>:if_none_match</tt> - Return the object only if its entity tag (ETag) is different from the one specified,
        #   otherwise return a 304 (not modified).
        #
        # === Other options
        # * <tt>:range</tt> - Return only the bytes of the object in the specified range.
        def value(key, bucket = nil, options = {}, &block)
          Value.new(get(path!(bucket, key, options), options, &block))
        end

        # Fetches the object's value, yielding each chunk of the response body to the given block as it is read
        # off the socket, so large objects need not be held in memory all at once.
        def stream(key, bucket = nil, options = {}, &block)
          value(key, bucket, options) do |response|
            response.read_body(&block)
          end
        end

        # Returns the object whose key is <tt>name</tt> in the specified bucket. If the specified key does not
        # exist, a NoSuchKey exception will be raised.
        def find(key, bucket = nil)
          # N.B. This is arguably a hack. From what the current S3 API exposes, when you retrieve a bucket, it
          # provides a listing of all the files in that bucket (assuming you haven't limited the scope of what it returns).
          # Each file in the listing contains information about that file. It is from this information that an S3Object is built.
          #
          # If you know the specific file that you want, S3 allows you to make a get request for that specific file and it returns
          # the value of that file in its response body. This response body is used to build an S3Object::Value object.
          # If you want information about that file, you can make a head request and the headers of the response will contain
          # information about that file. There is no way, though, to say, give me the representation of just this given file the same
          # way that it would appear in a bucket listing.
          #
          # When fetching a bucket, you can provide options which narrow the scope of what files should be returned in that listing.
          # Of those options, one is <tt>marker</tt> which is a string and instructs the bucket to return only objects whose key comes after
          # the specified marker according to alphabetic order. Another option is <tt>max-keys</tt> which defaults to 1000 but allows you
          # to dictate how many objects should be returned in the listing. With a combination of <tt>marker</tt> and <tt>max-keys</tt> you can
          # *almost* specify exactly which file you'd like it to return, but <tt>marker</tt> is not inclusive. In other words, if there is a bucket
          # which contains three objects whose keys are respectively 'a', 'b' and 'c', then fetching a bucket listing with marker set to 'b' will only
          # return 'c', not 'b'.
          #
          # Given all that, my hack to fetch a bucket with only one specific file, is to set the marker to the result of calling String#previous on
          # the desired object's key, which functionally makes the key ordered one degree higher than the desired object key according to
          # alphabetic ordering. This is a hack, but it should work around 99% of the time. I can't think of a scenario where it would return
          # something incorrect.
          bucket = Bucket.find(bucket_name(bucket), :marker => key.previous, :max_keys => 1)
          # If our heuristic failed, trigger a NoSuchKey exception
          if (object = bucket.objects.first) && object.key == key
            object
          else
            raise NoSuchKey.new("No such key `#{key}'", bucket)
          end
        end

        # Makes a copy of the object with <tt>key</tt> to <tt>copy_name</tt>. The original object's content type is
        # carried over to the copy unless overridden in <tt>options</tt>.
        def copy(key, copy_key, bucket = nil, options = {})
          bucket   = bucket_name(bucket)
          original = find(key, bucket)
          default_options = {:content_type => original.content_type}
          store(copy_key, original.value, bucket, default_options.merge(options)).success?
        end

        # Rename the object with key <tt>from</tt> to have key in <tt>to</tt>. Implemented as a copy followed by a delete,
        # since S3 exposes no atomic rename.
        def rename(from, to, bucket = nil, options = {})
          copy(from, to, bucket, options)
          delete(from, bucket)
        end

        # Fetch information about the key with <tt>name</tt> from <tt>bucket</tt>. Information includes content type, content length,
        # last modified time, and others.
        def about(key, bucket = nil, options = {})
          About.new(head(path!(bucket, key, options), options).headers)
        end

        # Delete object with <tt>key</tt> from <tt>bucket</tt>. Returns true if the request succeeded.
        def delete(key, bucket = nil, options = {})
          # A bit confusing. Calling super actually makes an HTTP DELETE request. The delete method is
          # defined in the Base class. It happens to have the same name.
          super(path!(bucket, key, options), options).success?
        end

        # When storing an object on the S3 servers using S3Object.store, the <tt>data</tt> argument can be a string or an I/O stream.
        # If <tt>data</tt> is an I/O stream it will be read in segments and written to the socket incrementally. This approach
        # may be desirable for very large files so they are not read into memory all at once.
        #
        #   # Non streamed upload
        #   S3Object.store('simple-text-file.txt',
        #                  'hello world!',
        #                  'marcel',
        #                  :content_type => 'text/plain')
        #
        #   # Streamed upload
        #   S3Object.store('roots.mpeg',
        #                  File.open('roots.mpeg'),
        #                  'marcel',
        #                  :content_type => 'audio/mpeg')
        def store(key, data, bucket = nil, options = {})
          validate_key!(key)
          put(path!(bucket, key, options), options, data) # Don't call .success? on response. We want to get the etag.
        end
        alias_method :create, :store
        alias_method :save, :store

        # All private objects are accessible via an authenticated GET request to the S3 servers. You can generate an
        # authenticated url for an object like this:
        #
        #   S3Object.url_for('beluga_baby.jpg', 'marcel_molina')
        #
        # By default authenticated urls expire 5 minutes after they were generated.
        #
        # Expiration options can be specified either with an absolute time since the epoch with the <tt>:expires</tt> options,
        # or with a number of seconds relative to now with the <tt>:expires_in</tt> options:
        #
        #   # Absolute expiration date
        #   # (Expires January 18th, 2038)
        #   doomsday = Time.mktime(2038, 1, 18).to_i
        #   S3Object.url_for('beluga_baby.jpg',
        #                    'marcel',
        #                    :expires => doomsday)
        #
        #   # Expiration relative to now specified in seconds
        #   # (Expires in 3 hours)
        #   S3Object.url_for('beluga_baby.jpg',
        #                    'marcel',
        #                    :expires_in => 60 * 60 * 3)
        #
        # You can specify whether the url should go over SSL with the <tt>:use_ssl</tt> option:
        #
        #   # Url will use https protocol
        #   S3Object.url_for('beluga_baby.jpg',
        #                    'marcel',
        #                    :use_ssl => true)
        #
        # By default, the ssl settings for the current connection will be used.
        #
        # If you have an object handy, you can use its <tt>url</tt> method with the same objects:
        #
        #   song.url(:expires_in => 30)
        def url_for(name, bucket = nil, options = {})
          connection.url_for(path!(bucket, name, options), options) # Do not normalize options
        end

        # Builds the request path for <tt>name</tt> in <tt>bucket</tt>. The bucket argument may be omitted
        # entirely (with options passed in its place), in which case the default bucket is used.
        def path!(bucket, name, options = {}) #:nodoc:
          # We're using the second argument for options
          if bucket.is_a?(Hash)
            options.replace(bucket)
            bucket = nil
          end
          '/' << File.join(bucket_name(bucket), name)
        end

        private

          # S3 limits keys to 1024 bytes; reject missing or over-long keys before issuing a request.
          def validate_key!(key)
            raise InvalidKeyName.new(key) unless key && key.size <= 1024
          end
      end

      class Value < String #:nodoc:
        attr_reader :response
        def initialize(response)
          super(response.body)
          @response = response
        end
      end

      class About < Hash #:nodoc:
        def initialize(headers)
          super()
          replace(headers)
          metadata
        end

        def [](header)
          super(header.to_header)
        end

        def []=(header, value)
          super(header.to_header, value)
        end

        def to_headers
          self.merge(metadata.to_headers)
        end

        def metadata
          Metadata.new(self)
        end
        memoized :metadata
      end

      class Metadata < Hash #:nodoc:
        HEADER_PREFIX = 'x-amz-meta-'
        SIZE_LIMIT    = 2048 # 2 kilobytes

        def initialize(headers)
          @headers = headers
          super()
          extract_metadata!
        end

        def []=(header, value)
          super(header_name(header.to_header), value)
        end

        def [](header)
          super(header_name(header.to_header))
        end

        def to_headers
          validate!
          self
        end

        private
          attr_reader :headers

          # Moves every 'x-amz-meta-*' entry out of the raw header hash into this metadata hash.
          def extract_metadata!
            headers.keys.grep(Regexp.new(HEADER_PREFIX)).each do |metadata_header|
              self[metadata_header] = headers.delete(metadata_header)
            end
          end

          # Ensures the metadata header prefix is present exactly once on the given name.
          def header_name(name)
            name =~ Regexp.new(HEADER_PREFIX) ? name : [HEADER_PREFIX, name].join
          end

          def validate!
            invalid_headers = inject([]) do |invalid, (name, value)|
              invalid << name unless valid?(value)
              invalid
            end

            raise InvalidMetadataValue.new(invalid_headers) unless invalid_headers.empty?
          end

          def valid?(value)
            value && value.size < SIZE_LIMIT
          end
      end

      attr_writer :value #:nodoc:

      # Provides readers and writers for all valid header settings listed in <tt>valid_header_settings</tt>.
      # Subsequent saves to the object after setting any of the valid headers settings will be reflected in
      # information about the object.
      #
      #   some_s3_object.content_type
      #   => nil
      #   some_s3_object.content_type = 'text/plain'
      #   => "text/plain"
      #   some_s3_object.content_type
      #   => "text/plain"
      #   some_s3_object.store
      #   S3Object.about(some_s3_object.key, some_s3_object.bucket.name)['content-type']
      #   => "text/plain"
      include SelectiveAttributeProxy #:nodoc:

      proxy_to :about, :exclusively => false

      # Initializes a new S3Object.
      def initialize(attributes = {}, &block)
        super
        self.value  = attributes.delete(:value)
        self.bucket = attributes.delete(:bucket)
        yield self if block_given?
      end

      # The current object's bucket. If no bucket has been set, a NoBucketSpecified exception will be raised. For
      # cases where you are not sure if the bucket has been set, you can use the belongs_to_bucket? method.
      def bucket
        @bucket or raise NoBucketSpecified
      end

      # Sets the bucket that the object belongs to.
      def bucket=(bucket)
        @bucket = bucket
        self
      end

      # Returns true if the current object has been assigned to a bucket yet. Objects must belong to a bucket before they
      # can be saved onto S3.
      def belongs_to_bucket?
        !@bucket.nil?
      end

      # Returns true if the current object has *not* been assigned to a bucket yet.
      # NOTE(review): this was previously `alias_method :orphan?, :belongs_to_bucket?`, which inverted the
      # predicate's meaning — an orphan is precisely an object without a bucket.
      def orphan?
        !belongs_to_bucket?
      end

      # Returns the key of the object. If the key is not set, a NoKeySpecified exception will be raised. For cases
      # where you are not sure if the key has been set, you can use the key_set? method. Objects must have a key
      # set to be saved onto S3. Objects which have already been saved onto S3 will always have their key set.
      def key
        attributes['key'] or raise NoKeySpecified
      end

      # Sets the key for the current object.
      def key=(value)
        attributes['key'] = value
      end

      # Returns true if the current object has had its key set yet. Objects which have already been saved will
      # always return true. This method is useful for objects which have not been saved yet so you know if you
      # need to set the object's key since you can not save an object unless its key has been set.
      #
      #   object.store if object.key_set? && object.belongs_to_bucket?
      def key_set?
        !attributes['key'].nil?
      end

      # Lazily loads object data.
      #
      # Force a reload of the data by passing <tt>:reload</tt>.
      #
      #   object.value(:reload)
      #
      # When loading the data for the first time you can optionally yield to a block which will
      # allow you to stream the data in segments.
      #
      #   object.value do |segment|
      #     send_data segment
      #   end
      #
      # The full list of options are listed in the documentation for its class method counter part, S3Object::value.
      def value(options = {}, &block)
        if options.is_a?(Hash)
          reload = !options.empty?
        else
          reload  = options
          options = {}
        end
        memoize(reload) do
          self.class.stream(key, bucket.name, options, &block)
        end
      end

      # Interface to information about the current object. Information is read only, though some of its data
      # can be modified through specific methods, such as content_type and content_type=.
      #
      #   pp some_object.about
      #   {"last-modified"    => "Sat, 28 Oct 2006 21:29:26 GMT",
      #    "x-amz-id-2"       => "LdcQRk5qLwxJQiZ8OH50HhoyKuqyWoJ67B6i+rOE5MxpjJTWh1kCkL+I0NQzbVQn",
      #    "content-type"     => "binary/octect-stream",
      #    "etag"             => "\"dc629038ffc674bee6f62eb68454ff3a\"",
      #    "date"             => "Sat, 28 Oct 2006 21:30:41 GMT",
      #    "x-amz-request-id" => "B7BC68F55495B1C8",
      #    "server"           => "AmazonS3",
      #    "content-length"   => "3418766"}
      #
      #   some_object.content_type
      #   # => "binary/octect-stream"
      #   some_object.content_type = 'audio/mpeg'
      #   some_object.content_type
      #   # => 'audio/mpeg'
      #   some_object.store
      def about
        stored? ? self.class.about(key, bucket.name) : About.new
      end
      memoized :about

      # Interface to viewing and editing metadata for the current object. To be treated like a Hash.
      #
      #   some_object.metadata
      #   # => {}
      #   some_object.metadata[:author] = 'Dave Thomas'
      #   some_object.metadata
      #   # => {"x-amz-meta-author" => "Dave Thomas"}
      #   some_object.metadata[:author]
      #   # => "Dave Thomas"
      def metadata
        about.metadata
      end
      memoized :metadata

      # Saves the current object with the specified <tt>options</tt>. Valid options are listed in the documentation for S3Object::store.
      def store(options = {})
        raise DeletedObject if frozen?
        options  = about.to_headers.merge(options) if stored?
        response = self.class.store(key, value, bucket.name, options)
        bucket.update(:stored, self)
        response.success?
      end
      alias_method :create, :store
      alias_method :save, :store

      # Deletes the current object. Trying to save an object after it has been deleted will
      # raise a DeletedObject exception.
      def delete
        bucket.update(:deleted, self)
        freeze
        self.class.delete(key, bucket.name)
      end

      # Copies the current object, given it the name <tt>copy_name</tt>. Keep in mind that due to limitations in
      # S3's API, this operation requires retransmitting the entire object to S3.
      def copy(copy_name, options = {})
        self.class.copy(key, copy_name, bucket.name, options)
      end

      # Rename the current object. Keep in mind that due to limitations in S3's API, this operation requires
      # retransmitting the entire object to S3.
      def rename(to, options = {})
        self.class.rename(key, to, bucket.name, options)
      end

      # Returns the object's ETag with the surrounding quote characters stripped. Returns nil for objects
      # that have not yet been stored on S3. Pass true to re-fetch the ETag from S3.
      def etag(reload = false)
        return nil unless stored?
        memoize(reload) do
          reload ? about(reload)['etag'][1...-1] : attributes['e_tag'][1...-1]
        end
      end

      # Returns the owner of the current object.
      def owner
        Owner.new(attributes['owner'])
      end
      memoized :owner

      # Generates an authenticated url for the current object. Accepts the same options as its class method
      # counter part S3Object.url_for.
      def url(options = {})
        self.class.url_for(key, bucket.name, options)
      end

      # Returns true if the current object has been stored on S3 yet.
      def stored?
        !attributes['e_tag'].nil?
      end

      def ==(s3object) #:nodoc:
        path == s3object.path
      end

      def path #:nodoc:
        self.class.path!(
          belongs_to_bucket? ? bucket.name : '(no bucket)',
          key_set?           ? key         : '(no key)'
        )
      end

      # Don't dump binary data :)
      def inspect #:nodoc:
        "#<AWS::S3::S3Object:0x#{object_id} '#{path}'>"
      end

      private
        def proxiable_attribute?(name)
          valid_header_settings.include?(name)
        end

        def valid_header_settings
          %w(cache_control content_type content_length content_md5 content_disposition content_encoding expires)
        end
    end
  end
end