net-amazon-s3 0.1.0

data/LICENSE ADDED
@@ -0,0 +1,25 @@
+ Copyright (c) 2006 Ryan Grove <ryan@wonko.com>
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+   * Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright notice,
+     this list of conditions and the following disclaimer in the documentation
+     and/or other materials provided with the distribution.
+   * Neither the name of this project nor the names of its contributors may be
+     used to endorse or promote products derived from this software without
+     specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
data/lib/net/amazon/s3.rb ADDED
@@ -0,0 +1,361 @@
+ require 'base64'
+ require 'digest/md5'
+ require 'net/https'
+ require 'net/amazon/s3/bucket'
+ require 'net/amazon/s3/errors'
+ require 'net/amazon/s3/object'
+ require 'openssl'
+ require 'rexml/document'
+
+ module Net; module Amazon
+
+ # = Net::Amazon::S3
+ #
+ # This library implements the Amazon S3 REST API (Rubyfied for your pleasure).
+ # Its usage is hopefully pretty straightforward. See below for examples.
+ #
+ # Author:: Ryan Grove (mailto:ryan@wonko.com)
+ # Version:: 0.1.0
+ # Copyright:: Copyright (c) 2006 Ryan Grove. All rights reserved.
+ # License:: New BSD License (http://opensource.org/licenses/bsd-license.php)
+ # Website:: http://wonko.com/software/net-amazon-s3
+ #
+ # == A Brief Overview of Amazon S3
+ #
+ # Amazon S3 stores arbitrary values (objects) identified by keys and organized
+ # into buckets. An S3 bucket is essentially a glorified Hash. Object values
+ # can be up to 5 GB in size, and objects can also have up to 2 KB of metadata
+ # associated with them.
+ #
+ # Bucket names share a global namespace and must be unique across all of
+ # S3, but object keys only have to be unique within the bucket in which they
+ # are stored.
+ #
+ # For more details, visit http://s3.amazonaws.com
+ #
+ # == Installation
+ #
+ #   gem install net-amazon-s3
+ #
+ # == Examples
+ #
+ # === Create an instance of the S3 client
+ #
+ #   require 'rubygems'
+ #   require 'net/amazon/s3'
+ #
+ #   access_key_id = 'DXM37ARQ25519H34E6W2'
+ #   secret_access_key = '43HM88c+8kYr/UeFp+shjTnzFgisO5AZzpEO06FU'
+ #
+ #   s3 = Net::Amazon::S3.new(access_key_id, secret_access_key)
+ #
+ # === Create a bucket and add an object to it
+ #
+ #   foo = s3.create_bucket('foo')
+ #   foo['bar'] = 'baz' # create object 'bar' and assign it the
+ #                      # value 'baz'
+ #
+ # === Upload a large file to the bucket
+ #
+ #   File.open('mybigmovie.avi', 'rb') do |file|
+ #     foo['mybigmovie.avi'] = file
+ #   end
+ #
+ # === Download a large file from the bucket
+ #
+ #   File.open('mybigmovie.avi', 'wb') do |file|
+ #     foo['mybigmovie.avi'].value {|chunk| file.write(chunk) }
+ #   end
+ #
+ # === Get a hash containing all objects in the bucket
+ #
+ #   objects = foo.get_objects
+ #
+ # === Get all objects in the bucket whose keys begin with "my"
+ #
+ #   my_objects = foo.get_objects('my')
+ #
+ # === Delete the bucket and everything in it
+ #
+ #   s3.delete_bucket('foo', true)
+ #
+ # == TODO
+ #
+ # * Owner support
+ # * Object metadata support
+ # * ACLs
+ # * Logging configuration
+ #
+ class S3
+   include Enumerable
+
+   REST_ENDPOINT = 's3.amazonaws.com'
+
+   attr_accessor :access_key_id, :secret_access_key
+   attr_reader :options
+
+   # Creates and returns a new S3 client. The following options are available:
+   #
+   # [<tt>:enable_cache</tt>] Set to +true+ to enable intelligent caching of
+   #                          frequently-used requests. This can improve
+   #                          performance, but may result in staleness if other
+   #                          clients are simultaneously modifying the buckets
+   #                          and objects in this S3 account. Default is
+   #                          +true+.
+   # [<tt>:ssl</tt>] Set to +true+ to use SSL for all requests. No verification
+   #                 is performed on the server's certificate when SSL is used.
+   #                 Default is +true+.
+   def initialize(access_key_id, secret_access_key, options = {})
+     @access_key_id = access_key_id
+     @secret_access_key = secret_access_key
+
+     @options = {
+       :enable_cache => true,
+       :ssl => true
+     }
+
+     @options.merge!(options)
+
+     @cache = {}
+   end
+
+   # Returns +true+ if a bucket with the specified +bucket_name+ exists in this
+   # S3 account, +false+ otherwise.
+   def bucket_exist?(bucket_name)
+     return get_buckets.has_key?(bucket_name)
+   end
+
+   alias has_bucket? bucket_exist?
+
+   # Creates a new bucket with the specified +bucket_name+ and returns a
+   # Bucket object representing it.
+   def create_bucket(bucket_name)
+     error?(request_put("/#{bucket_name}"))
+     @cache.delete(:buckets)
+     return get_bucket(bucket_name)
+   end
+
+   # Deletes the bucket with the specified +bucket_name+. If +recursive+ is
+   # +true+, all objects contained in the bucket will also be deleted. If
+   # +recursive+ is +false+ and the bucket is not empty, a
+   # S3Error::BucketNotEmpty error will be raised.
+   def delete_bucket(bucket_name, recursive = false)
+     unless bucket = get_bucket(bucket_name)
+       raise S3Error::NoSuchBucket, 'The specified bucket does not exist'
+     end
+
+     if recursive
+       bucket.each {|object| bucket.delete_object(object.name) }
+     end
+
+     @cache.delete(:buckets)
+
+     return true unless error?(request_delete("/#{bucket_name}"))
+   end
+
+   # Iterates through the list of buckets.
+   def each
+     get_buckets.each {|key, value| yield key, value }
+   end
+
+   # Raises the appropriate error if the specified Net::HTTPResponse object
+   # contains an Amazon S3 error; returns +false+ otherwise.
+   def error?(response)
+     return false if response.is_a?(Net::HTTPSuccess)
+
+     xml = REXML::Document.new(response.body)
+
+     unless xml.root.name == 'Error'
+       raise S3Error, "Unknown error: #{response.body}"
+     end
+
+     error_code = xml.root.elements['Code'].text
+     error_message = xml.root.elements['Message'].text
+
+     if S3Error.const_defined?(error_code)
+       raise S3Error.const_get(error_code), error_message
+     else
+       raise S3Error, "#{error_code}: #{error_message}"
+     end
+   end
+
+   # Returns a Bucket object representing the specified bucket, or +nil+ if the
+   # bucket doesn't exist.
+   def get_bucket(bucket_name)
+     return nil unless bucket_exist?(bucket_name)
+     return @cache[:buckets][bucket_name]
+   end
+
+   alias [] get_bucket
+
+   # Gets a list of all buckets owned by this account. Returns a Hash of
+   # Bucket objects indexed by bucket name.
+   def get_buckets
+     if @options[:enable_cache] && !@cache[:buckets].nil?
+       return @cache[:buckets]
+     end
+
+     response = request_get('/')
+     error?(response)
+
+     xml = REXML::Document.new(response.body)
+
+     buckets = {}
+
+     xml.root.elements.each('Buckets/Bucket') do |element|
+       bucket_name = element.elements['Name'].text
+       creation_date = Time.parse(element.elements['CreationDate'].text)
+
+       buckets[bucket_name] = Bucket.new(self, bucket_name, creation_date)
+     end
+
+     return @cache[:buckets] = buckets
+   end
+
+   # Sends a properly-signed DELETE request to the specified S3 path and
+   # returns a Net::HTTPResponse object.
+   def request_delete(path, headers = {})
+     http = Net::HTTP.new(REST_ENDPOINT, @options[:ssl] ? 443 : 80)
+
+     http.use_ssl = @options[:ssl]
+     http.verify_mode = OpenSSL::SSL::VERIFY_NONE
+
+     http.start do |http|
+       req = sign_request(Net::HTTP::Delete.new(path), nil, headers)
+       return http.request(req)
+     end
+   end
+
+   # Sends a properly-signed GET request to the specified S3 path and returns
+   # a Net::HTTPResponse object.
+   #
+   # When called with a block, yields a Net::HTTPResponse object whose body has
+   # not been read; the caller can process it using
+   # Net::HTTPResponse.read_body.
+   def request_get(path, headers = {})
+     http = Net::HTTP.new(REST_ENDPOINT, @options[:ssl] ? 443 : 80)
+
+     http.use_ssl = @options[:ssl]
+     http.verify_mode = OpenSSL::SSL::VERIFY_NONE
+
+     http.start do |http|
+       req = sign_request(Net::HTTP::Get.new(path), nil, headers)
+
+       if block_given?
+         http.request(req) {|response| yield response }
+       else
+         return http.request(req)
+       end
+     end
+   end
+
+   # Sends a properly-signed HEAD request to the specified S3 path and returns
+   # a Net::HTTPResponse object.
+   def request_head(path, headers = {})
+     http = Net::HTTP.new(REST_ENDPOINT, @options[:ssl] ? 443 : 80)
+
+     http.use_ssl = @options[:ssl]
+     http.verify_mode = OpenSSL::SSL::VERIFY_NONE
+
+     http.start do |http|
+       req = sign_request(Net::HTTP::Head.new(path), nil, headers)
+       return http.request(req)
+     end
+   end
+
+   # Sends a properly-signed PUT request to the specified S3 path and returns a
+   # Net::HTTPResponse object.
+   #
+   # If +content+ is an open IO stream, the body of the request will be read
+   # from the stream.
+   def request_put(path, content = nil, headers = {})
+     http = Net::HTTP.new(REST_ENDPOINT, @options[:ssl] ? 443 : 80)
+
+     http.use_ssl = @options[:ssl]
+     http.verify_mode = OpenSSL::SSL::VERIFY_NONE
+
+     http.start do |http|
+       req = sign_request(Net::HTTP::Put.new(path), content, headers)
+
+       if content.is_a?(IO)
+         req.body_stream = content
+       else
+         req.body = content
+       end
+
+       response = http.request(req)
+
+       return response
+     end
+   end
+
+   private
+
+   # Adds an appropriately-signed +Authorization+ header to the
+   # Net::HTTPRequest +request+.
+   #
+   # If +content+ is an open IO stream, the body of the request will be read
+   # from the stream.
+   def sign_request(request, content = nil, headers = {})
+     unless request.is_a?(Net::HTTPRequest)
+       raise ArgumentError,
+           "Expected Net::HTTPRequest, not #{request.class}"
+     end
+
+     unless request.path =~ /^(\/.*?)(?:\?.*)?$/i
+       raise S3Error, "Invalid request path: #{request.path}"
+     end
+
+     path = $1
+
+     request['Host'] = REST_ENDPOINT
+     request['Date'] = Time.new.httpdate()
+
+     if content.nil?
+       request['Content-Length'] = 0
+     elsif content.is_a?(IO)
+       # Generate an MD5 hash of the stream's contents.
+       md5 = Digest::MD5.new
+       content.rewind
+
+       while buffer = content.read(65536) do
+         md5 << buffer
+       end
+
+       content.rewind
+
+       # Set headers.
+       request['Content-Type'] = 'binary/octet-stream'
+       request['Content-Length'] = content.stat.size
+       request['Content-MD5'] = Base64.encode64(md5.digest).strip
+     else
+       request['Content-Type'] = 'binary/octet-stream'
+       request['Content-Length'] = content.length
+       request['Content-MD5'] = Base64.encode64(Digest::MD5.digest(
+           content)).strip
+     end
+
+     headers.each {|key, value| request[key] = value }
+
+     hmac = OpenSSL::HMAC.new(@secret_access_key, OpenSSL::Digest::SHA1.new)
+     hmac << "#{request.method}\n#{request['Content-MD5']}\n".toutf8 +
+         "#{request['Content-Type']}\n#{request['Date']}\n".toutf8
+
+     request.to_hash.keys.sort.each do |key|
+       if key =~ /^x-amz-/i
+         hmac << "#{key.downcase.strip}:#{request[key].strip}\n".toutf8
+       end
+     end
+
+     hmac << path.toutf8
+
+     signature = Base64.encode64(hmac.digest).strip
+
+     request['Authorization'] = "AWS #{@access_key_id}:#{signature}"
+
+     return request
+   end
+
+ end
+
+ end; end
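
For reference, here is a standalone sketch of the signature that sign_request builds: an HMAC-SHA1 over the request method, Content-MD5, Content-Type, Date, any x-amz-* headers, and the resource path, Base64-encoded into an "AWS access_key:signature" Authorization header. The credentials are the placeholder values from the RDoc example above; the GET of /foo/bar and the bare path are hypothetical.

  require 'base64'
  require 'openssl'
  require 'time'

  access_key_id     = 'DXM37ARQ25519H34E6W2'                     # placeholder
  secret_access_key = '43HM88c+8kYr/UeFp+shjTnzFgisO5AZzpEO06FU' # placeholder

  verb         = 'GET'
  content_md5  = ''                # empty: this request has no body
  content_type = ''
  date         = Time.now.httpdate
  path         = '/foo/bar'        # hypothetical bucket/key resource path

  # The same string sign_request feeds to the HMAC: verb, MD5, type, date,
  # then any x-amz-* headers (none here), then the resource path.
  string_to_sign = "#{verb}\n#{content_md5}\n#{content_type}\n#{date}\n#{path}"

  hmac = OpenSSL::HMAC.new(secret_access_key, OpenSSL::Digest::SHA1.new)
  hmac << string_to_sign
  signature = Base64.encode64(hmac.digest).strip

  puts "Authorization: AWS #{access_key_id}:#{signature}"
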
data/lib/net/amazon/s3/bucket.rb ADDED
@@ -0,0 +1,141 @@
+ require 'rexml/document'
+ require 'time'
+ require 'uri'
+
+ module Net; module Amazon; class S3
+
+ # Represents an Amazon S3 bucket. This class should only be instantiated
+ # through one of the methods in the S3 class.
+ class Bucket
+   include Comparable
+   include Enumerable
+
+   attr_reader :name, :creation_date
+
+   # Creates and returns a new Bucket object. You should never create new
+   # Bucket objects directly. Instead, use one of the methods in the S3 class.
+   def initialize(s3, bucket_name, creation_date)
+     @s3 = s3
+     @name = bucket_name
+     @creation_date = creation_date
+
+     @cache = {}
+   end
+
+   # Compares two buckets by name.
+   def <=>(bucket)
+     return @name <=> bucket.name
+   end
+
+   # Creates and returns a new S3::Object with the specified +object_key+ and
+   # +value+. If this bucket already contains an object with the specified key,
+   # that object will be overwritten.
+   #
+   # If +value+ is an open IO stream, the value of the object will be read from
+   # the stream.
+   def create_object(object_key, value, metadata = {})
+     object_key_escaped = S3::Object.escape_key(object_key)
+
+     headers = {}
+     metadata.each {|key, value| headers["x-amz-meta-#{key}"] = value }
+
+     response = @s3.request_put("/#{@name}/#{object_key_escaped}", value,
+         headers)
+     @s3.error?(response)
+
+     @cache.delete(:objects)
+
+     return get_object(object_key)
+   end
+
+   alias []= create_object
+
+   # Deletes the specified object from this bucket.
+   def delete_object(object_key)
+     object_key_escaped = S3::Object.escape_key(object_key)
+
+     unless object = get_object(object_key)
+       raise S3Error::NoSuchKey, 'The specified key does not exist'
+     end
+
+     @cache.delete(:objects)
+
+     return true unless @s3.error?(@s3.request_delete(
+         "/#{@name}/#{object_key_escaped}"))
+   end
+
+   # Iterates through the list of objects.
+   def each
+     get_objects.each {|key, value| yield key, value }
+   end
+
+   # Returns a S3::Object representing the specified +object_key+, or +nil+ if
+   # the object doesn't exist in this bucket.
+   def get_object(object_key)
+     return get_objects(object_key)[object_key]
+   end
+
+   alias [] get_object
+
+   # Gets a list of all objects in this bucket whose keys begin with +prefix+.
+   # Returns a Hash of S3::Object objects indexed by object key.
+   def get_objects(prefix = '')
+     prefix = prefix.toutf8
+
+     if @s3.options[:enable_cache] && !@cache[:objects].nil? &&
+         !@cache[:objects][prefix].nil?
+       return @cache[:objects][prefix]
+     end
+
+     if @cache[:objects].nil?
+       @cache[:objects] = {}
+     end
+
+     objects = {}
+     request_uri = "/#{@name}?prefix=#{URI.escape(prefix)}"
+     is_truncated = true
+
+     # The request is made in a loop because the S3 API limits results to pages
+     # of 1,000 objects by default, so if there are more than 1,000 objects,
+     # we'll have to send more requests to get them all.
+     while is_truncated do
+       response = @s3.request_get(request_uri)
+       @s3.error?(response)
+
+       xml = REXML::Document.new(response.body)
+
+       if xml.root.elements['IsTruncated'].text == 'false'
+         is_truncated = false
+       else
+         request_uri = "/#{@name}?prefix=#{URI.escape(prefix)}&marker=" +
+             xml.root.elements.to_a('Contents').last.elements['Key'].text
+       end
+
+       next if xml.root.elements['Contents'].nil?
+
+       xml.root.elements.each('Contents') do |element|
+         object_key = element.elements['Key'].text
+         object_size = element.elements['Size'].text
+         object_etag = element.elements['ETag'].text
+         object_last_modified = Time.parse(
+             element.elements['LastModified'].text)
+
+         objects[object_key] = S3::Object.new(@s3, self, object_key,
+             object_size, object_etag, object_last_modified)
+       end
+     end
+
+     return @cache[:objects][prefix] = objects
+   end
+
+   # Returns +true+ if an object with the specified +object_key+ exists in
+   # this bucket, +false+ otherwise.
+   def object_exist?(object_key)
+     return get_objects.has_key?(object_key)
+   end
+
+   alias has_object? object_exist?
+
+ end
+
+ end; end; end
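
As a usage sketch for the Bucket API above: create_object turns each metadata pair into an x-amz-meta-* request header, and Bucket#each yields [key, S3::Object] pairs built from the bucket listing. The bucket name, key, metadata, and credentials below are placeholders.

  require 'rubygems'
  require 'net/amazon/s3'

  s3  = Net::Amazon::S3.new('DXM37ARQ25519H34E6W2',
                            '43HM88c+8kYr/UeFp+shjTnzFgisO5AZzpEO06FU')
  foo = s3.create_bucket('foo')

  # The metadata hash becomes x-amz-meta-author and x-amz-meta-department
  # headers on the PUT request.
  foo.create_object('report.txt', 'quarterly numbers',
                    'author' => 'ryan', 'department' => 'ops')

  # Each iteration yields the object key and its S3::Object record.
  foo.each do |key, object|
    puts "#{key}: #{object.size} bytes, ETag #{object.etag}"
  end

  foo.delete_object('report.txt')
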
data/lib/net/amazon/s3/errors.rb ADDED
@@ -0,0 +1,47 @@
+ module Net; module Amazon; class S3; class S3Error < StandardError
+
+   class AccessDenied < S3Error; end
+   class AccountProblem < S3Error; end
+   class AllAccessDisabled < S3Error; end
+   class AmbiguousGrantByEmailAddress < S3Error; end
+   class OperationAborted < S3Error; end
+   class BadDigest < S3Error; end
+   class BucketAlreadyExists < S3Error; end
+   class BucketNotEmpty < S3Error; end
+   class CredentialsNotSupported < S3Error; end
+   class EntityTooLarge < S3Error; end
+   class IncompleteBody < S3Error; end
+   class InternalError < S3Error; end
+   class InvalidAccessKeyId < S3Error; end
+   class InvalidAddressingHeader < S3Error; end
+   class InvalidArgument < S3Error; end
+   class InvalidBucketName < S3Error; end
+   class InvalidDigest < S3Error; end
+   class InvalidRange < S3Error; end
+   class InvalidSecurity < S3Error; end
+   class InvalidStorageClass < S3Error; end
+   class InvalidTargetBucketForLogging < S3Error; end
+   class KeyTooLong < S3Error; end
+   class InvalidURI < S3Error; end
+   class MalformedACLError < S3Error; end
+   class MalformedXMLError < S3Error; end
+   class MaxMessageLengthExceeded < S3Error; end
+   class MetadataTooLarge < S3Error; end
+   class MethodNotAllowed < S3Error; end
+   class MissingContentLength < S3Error; end
+   class MissingSecurityHeader < S3Error; end
+   class NoLoggingStatusForKey < S3Error; end
+   class NoSuchBucket < S3Error; end
+   class NoSuchKey < S3Error; end
+   class NotImplemented < S3Error; end
+   class NotSignedUp < S3Error; end
+   class PreconditionFailed < S3Error; end
+   class RequestTimeout < S3Error; end
+   class RequestTimeTooSkewed < S3Error; end
+   class RequestTorrentOfBucketError < S3Error; end
+   class SignatureDoesNotMatch < S3Error; end
+   class TooManyBuckets < S3Error; end
+   class UnexpectedContent < S3Error; end
+   class UnresolvableGrantByEmailAddress < S3Error; end
+
+ end; end; end; end
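
S3#error? maps the <Code> element of an S3 error response onto these subclasses via const_get, so callers can rescue specific failures. A short sketch, with placeholder credentials and a hypothetical bucket name:

  require 'rubygems'
  require 'net/amazon/s3'

  s3 = Net::Amazon::S3.new('DXM37ARQ25519H34E6W2',
                           '43HM88c+8kYr/UeFp+shjTnzFgisO5AZzpEO06FU')

  begin
    s3.delete_bucket('some-bucket')
  rescue Net::Amazon::S3::S3Error::NoSuchBucket
    puts 'Nothing to delete; the bucket does not exist.'
  rescue Net::Amazon::S3::S3Error::BucketNotEmpty
    puts 'The bucket still has objects; pass recursive = true.'
  rescue Net::Amazon::S3::S3Error => e
    puts "S3 request failed: #{e.message}"
  end
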
data/lib/net/amazon/s3/object.rb ADDED
@@ -0,0 +1,77 @@
+ module Net; module Amazon; class S3
+
+ # Represents an Amazon S3 object. This class should only be instantiated
+ # through one of the methods in Bucket.
+ class Object
+   include Comparable
+
+   #--
+   # Public Class Methods
+   #++
+
+   # Escapes an object key for use in an S3 request path. This method should
+   # not be used to escape object keys for use in URL query parameters. Use
+   # URI.escape for that.
+   def self.escape_key(object_key)
+     return object_key.gsub(' ', '+').toutf8
+   end
+
+   #--
+   # Public Instance Methods
+   #++
+
+   attr_reader :name, :size, :etag, :last_modified
+
+   def initialize(s3, bucket, object_key, size, etag, last_modified)
+     @s3 = s3
+     @bucket = bucket
+     @key = object_key.toutf8
+     @size = size
+     @etag = etag
+     @last_modified = last_modified
+
+     @cache = {}
+   end
+
+   # Compares two objects by key.
+   def <=>(object)
+     return @key <=> object.key
+   end
+
+   # Gets this object's value.
+   #
+   # When called with a block, yields the value in chunks as it is read in from
+   # the socket.
+   def value
+     key_escaped = Object.escape_key(@key)
+
+     if block_given?
+       @s3.request_get("/#{@bucket.name}/#{key_escaped}") do |response|
+         @s3.error?(response)
+         response.read_body {|chunk| yield chunk }
+       end
+     else
+       response = @s3.request_get("/#{@bucket.name}/#{key_escaped}")
+       @s3.error?(response)
+
+       return response.body
+     end
+   end
+
+   # Sets this object's value.
+   #
+   # If +new_value+ is an open IO stream, the value will be read from the
+   # stream.
+   def value=(new_value)
+     key_escaped = Object.escape_key(@key)
+
+     response = @s3.request_put("/#{@bucket.name}/" +
+         "#{key_escaped}", new_value)
+     @s3.error?(response)
+
+     return new_value
+   end
+
+ end
+
+ end; end; end
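
Object#value yields the response body in chunks when given a block (via Net::HTTPResponse#read_body), so large downloads never have to sit in memory. A sketch that streams an object to disk and sanity-checks it against its ETag, reusing the bucket and key from the RDoc examples with placeholder credentials; the ETag-equals-MD5 check is an assumption that only holds for objects uploaded in a single PUT.

  require 'digest/md5'
  require 'rubygems'
  require 'net/amazon/s3'

  s3    = Net::Amazon::S3.new('DXM37ARQ25519H34E6W2',
                              '43HM88c+8kYr/UeFp+shjTnzFgisO5AZzpEO06FU')
  movie = s3['foo']['mybigmovie.avi']

  md5 = Digest::MD5.new
  File.open('mybigmovie.avi', 'wb') do |file|
    movie.value do |chunk|     # chunks arrive as the body is read
      md5 << chunk
      file.write(chunk)
    end
  end

  # Assumption: the listing's ETag is the quoted hex MD5 for simple uploads.
  puts 'download looks intact' if movie.etag.delete('"') == md5.hexdigest
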
metadata ADDED
@@ -0,0 +1,57 @@
+ --- !ruby/object:Gem::Specification
+ rubygems_version: 0.9.0
+ specification_version: 1
+ name: net-amazon-s3
+ version: !ruby/object:Gem::Version
+   version: 0.1.0
+ date: 2006-11-25 00:00:00 -08:00
+ summary: Amazon S3 library.
+ require_paths:
+ - lib
+ email: ryan@wonko.com
+ homepage: http://wonko.com/software/net-amazon-s3
+ rubyforge_project: net-amazon-s3
+ description:
+ autorequire:
+ default_executable:
+ bindir: bin
+ has_rdoc: true
+ required_ruby_version: !ruby/object:Gem::Version::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: 1.8.4
+   version:
+ platform: ruby
+ signing_key:
+ cert_chain:
+ post_install_message:
+ authors:
+ - Ryan Grove
+ files:
+ - lib/net
+ - lib/net/amazon
+ - lib/net/amazon/s3
+ - lib/net/amazon/s3.rb
+ - lib/net/amazon/s3/bucket.rb
+ - lib/net/amazon/s3/errors.rb
+ - lib/net/amazon/s3/object.rb
+ - LICENSE
+ test_files: []
+
+ rdoc_options:
+ - --title
+ - Net::Amazon::S3 Documentation
+ - --main
+ - Net::Amazon::S3
+ - --line-numbers
+ extra_rdoc_files: []
+
+ executables: []
+
+ extensions: []
+
+ requirements: []
+
+ dependencies: []
+