seanwalbran-scashin133-s3 0.3.11

@@ -0,0 +1,31 @@
+ module S3
+   # Class responsible for sending chunked requests
+   # properly. Net::HTTPGenericRequest has hardcoded chunk_size, so we
+   # inherit the class and override chunk_size.
+   class Request < Net::HTTPGenericRequest
+     def initialize(chunk_size, m, reqbody, resbody, path, initheader = nil)
+       @chunk_size = chunk_size
+       super(m, reqbody, resbody, path, initheader)
+     end
+
+     private
+
+     def send_request_with_body_stream(sock, ver, path, f)
+       unless content_length() or chunked?
+         raise ArgumentError, "Content-Length not given and Transfer-Encoding is not `chunked'"
+       end
+       supply_default_content_type
+       write_header sock, ver, path
+       if chunked?
+         while s = f.read(@chunk_size)
+           sock.write(sprintf("%x\r\n", s.length) << s << "\r\n")
+         end
+         sock.write "0\r\n\r\n"
+       else
+         while s = f.read(@chunk_size)
+           sock.write s
+         end
+       end
+     end
+   end
+ end
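A minimal sketch of how this chunked request class could be driven on its own; the chunk size, verb, path and header values below are illustrative placeholders (in the gem the request is built and signed by the connection layer, not by hand):

    # Hypothetical usage sketch; all values are placeholders, not part of the gem.
    request = S3::Request.new(1024 * 1024,                   # chunk size used by send_request_with_body_stream
                              "PUT",                         # HTTP verb
                              true,                          # request carries a body
                              true,                          # response carries a body
                              "/data-bucket/backup.tar",     # illustrative resource path
                              "content-type" => "application/octet-stream")
    request["transfer-encoding"] = "chunked"                 # or set content_length instead
    request.body_stream = File.open("backup.tar", "rb")      # streamed in @chunk_size pieces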
@@ -0,0 +1,87 @@
+ module S3
+   class Service
+     include Parser
+     include Proxies
+
+     attr_reader :access_key_id, :secret_access_key, :use_ssl, :proxy
+
+     # Compares the service to +other+ by <tt>access_key_id</tt> and
+     # <tt>secret_access_key</tt>
+     def ==(other)
+       self.access_key_id == other.access_key_id and self.secret_access_key == other.secret_access_key
+     end
+
+     # Creates a new service.
+     #
+     # ==== Options
+     # * <tt>:access_key_id</tt> - Access key id (REQUIRED)
+     # * <tt>:secret_access_key</tt> - Secret access key (REQUIRED)
+     # * <tt>:use_ssl</tt> - Use https or http protocol (false by
+     #   default)
+     # * <tt>:debug</tt> - Display debug information on STDOUT
+     #   (false by default)
+     # * <tt>:timeout</tt> - Timeout used by the Net::HTTP object
+     #   (60 by default)
+     def initialize(options)
+       @access_key_id = options.fetch(:access_key_id)
+       @secret_access_key = options.fetch(:secret_access_key)
+       @use_ssl = options.fetch(:use_ssl, false)
+       @timeout = options.fetch(:timeout, 60)
+       @debug = options.fetch(:debug, false)
+
+       raise ArgumentError, "Missing proxy settings. Must specify at least :host." if options[:proxy] && !options[:proxy][:host]
+       @proxy = options.fetch(:proxy, nil)
+     end
+
+     # Returns all buckets in the service and caches the result (see
+     # +reload+)
+     def buckets
+       Proxy.new(lambda { list_all_my_buckets }, :owner => self, :extend => BucketsExtension)
+     end
+
+     # Returns the bucket with the given name. Does not check whether the
+     # bucket exists, but also does not issue any HTTP requests, so it's
+     # much faster than buckets.find
+     def bucket(name)
+       Bucket.send(:new, self, name)
+     end
+
+     # Returns "http://" or "https://", depending on the <tt>:use_ssl</tt>
+     # value from the initializer
+     def protocol
+       use_ssl ? "https://" : "http://"
+     end
+
+     # Returns 443 or 80, depending on the <tt>:use_ssl</tt> value from
+     # the initializer
+     def port
+       use_ssl ? 443 : 80
+     end
+
+     def inspect #:nodoc:
+       "#<#{self.class}:#@access_key_id>"
+     end
+
+     private
+
+     def list_all_my_buckets
+       response = service_request(:get)
+       names = parse_list_all_my_buckets_result(response.body)
+       names.map { |name| Bucket.send(:new, self, name) }
+     end
+
+     def service_request(method, options = {})
+       connection.request(method, options.merge(:path => "/#{options[:path]}"))
+     end
+
+     def connection
+       return @connection if defined?(@connection)
+       @connection = Connection.new(:access_key_id => @access_key_id,
+                                    :secret_access_key => @secret_access_key,
+                                    :use_ssl => @use_ssl,
+                                    :timeout => @timeout,
+                                    :debug => @debug,
+                                    :proxy => @proxy)
+     end
+   end
+ end
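As a quick illustration of the options documented above, a service object might be created and used along these lines; the credentials and bucket name are placeholders:

    require "s3"

    service = S3::Service.new(:access_key_id     => "AKIA...",   # placeholder
                              :secret_access_key => "secret",    # placeholder
                              :use_ssl           => true,
                              :timeout           => 120)

    service.buckets              # fetches and caches the bucket list lazily
    service.bucket("my-bucket")  # builds a bucket object without issuing any HTTP request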
@@ -0,0 +1,245 @@
+ module S3
+
+   # Class responsible for generating signatures for requests.
+   #
+   # Implements the algorithm defined by Amazon Web Services to sign
+   # requests with secret private credentials.
+   #
+   # === See
+   # http://docs.amazonwebservices.com/AmazonS3/latest/index.html?RESTAuthentication.html
+
+   class Signature
+
+     # Generates a signature for the given parameters
+     #
+     # ==== Options
+     # * <tt>:host</tt> - Hostname
+     # * <tt>:request</tt> - Net::HTTPRequest object with correct
+     #   headers
+     # * <tt>:access_key_id</tt> - Access key id
+     # * <tt>:secret_access_key</tt> - Secret access key
+     #
+     # ==== Returns
+     # Generated signature string for the given hostname and request
+     def self.generate(options)
+       request = options[:request]
+       access_key_id = options[:access_key_id]
+
+       options.merge!(:headers => request, :method => request.method, :resource => request.path)
+
+       signature = canonicalized_signature(options)
+
+       "AWS #{access_key_id}:#{signature}"
+     end
+
+     # Generates a temporary URL signature for the given resource
+     #
+     # ==== Options
+     # * <tt>:bucket</tt> - Bucket in which the resource resides
+     # * <tt>:resource</tt> - Path to the resource you want to create
+     #   a temporary link to
+     # * <tt>:secret_access_key</tt> - Secret access key
+     # * <tt>:expires_at</tt> - Unix timestamp of when the resource
+     #   link will expire
+     # * <tt>:method</tt> - HTTP request method you want to use on
+     #   the resource, defaults to GET
+     # * <tt>:headers</tt> - Any additional HTTP headers you intend
+     #   to use when requesting the resource
+     def self.generate_temporary_url_signature(options)
+       bucket = options[:bucket]
+       resource = options[:resource]
+       secret_access_key = options[:secret_access_key]
+       expires = options[:expires_at]
+
+       headers = options[:headers] || {}
+       headers.merge!("date" => expires.to_i.to_s)
+
+       options.merge!(:resource => "/#{bucket}/#{resource}",
+                      :method => options[:method] || :get,
+                      :headers => headers)
+       signature = canonicalized_signature(options)
+
+       CGI.escape(signature)
+     end
+
+     # Generates a temporary URL for the given resource
+     #
+     # ==== Options
+     # * <tt>:bucket</tt> - Bucket in which the resource resides
+     # * <tt>:resource</tt> - Path to the resource you want to create
+     #   a temporary link to
+     # * <tt>:access_key</tt> - Access key
+     # * <tt>:secret_access_key</tt> - Secret access key
+     # * <tt>:expires_at</tt> - Unix timestamp of when the resource
+     #   link will expire
+     # * <tt>:method</tt> - HTTP request method you want to use on
+     #   the resource, defaults to GET
+     # * <tt>:headers</tt> - Any additional HTTP headers you intend
+     #   to use when requesting the resource
+     def self.generate_temporary_url(options)
+       bucket = options[:bucket]
+       resource = options[:resource]
+       access_key = options[:access_key]
+       expires = options[:expires_at].to_i
+       resource = File.join(bucket, resource) unless S3::Bucket.vhost?(bucket)
+       signature = generate_temporary_url_signature(options)
+       protocol = options[:use_ssl] ? 'https' : 'http'
+
+       url = "#{protocol}://#{(bucket + ".") if S3::Bucket.vhost?(bucket)}#{S3::HOST}/#{resource}"
+       url << "?AWSAccessKeyId=#{access_key}"
+       url << "&Expires=#{expires}"
+       url << "&Signature=#{signature}"
+     end
+
+     private
+
+     def self.canonicalized_signature(options)
+       headers = options[:headers] || {}
+       host = options[:host] || ""
+       resource = options[:resource]
+       access_key_id = options[:access_key_id]
+       secret_access_key = options[:secret_access_key]
+
+       http_verb = options[:method].to_s.upcase
+       content_md5 = headers["content-md5"] || ""
+       content_type = headers["content-type"] || ""
+       date = headers["x-amz-date"].nil? ? headers["date"] : ""
+       canonicalized_resource = canonicalized_resource(host, resource)
+       canonicalized_amz_headers = canonicalized_amz_headers(headers)
+
+       string_to_sign = ""
+       string_to_sign << http_verb
+       string_to_sign << "\n"
+       string_to_sign << content_md5
+       string_to_sign << "\n"
+       string_to_sign << content_type
+       string_to_sign << "\n"
+       string_to_sign << date
+       string_to_sign << "\n"
+       string_to_sign << canonicalized_amz_headers
+       string_to_sign << canonicalized_resource
+
+       digest = OpenSSL::Digest::Digest.new("sha1")
+       hmac = OpenSSL::HMAC.digest(digest, secret_access_key, string_to_sign)
+       base64 = Base64.encode64(hmac)
+       base64.chomp
+     end
+
+     # Helper method for extracting header fields from Net::HTTPRequest
+     # and preparing them for signing in the #generate method
+     #
+     # ==== Parameters
+     # * <tt>request</tt> - Net::HTTPRequest object with header fields
+     #   filled in
+     #
+     # ==== Returns
+     # String containing the interesting header fields in suitable order
+     # and form
+     def self.canonicalized_amz_headers(request)
+       headers = []
+
+       # 1. Convert each HTTP header name to lower-case. For example,
+       #    "X-Amz-Date" becomes "x-amz-date".
+       request.each { |key, value| headers << [key.downcase, value] if key =~ /\Ax-amz-/io }
+       #=> [["c", 0], ["a", 1], ["a", 2], ["b", 3]]
+
+       # 2. Sort the collection of headers lexicographically by header
+       #    name.
+       headers.sort!
+       #=> [["a", 1], ["a", 2], ["b", 3], ["c", 0]]
+
+       # 3. Combine header fields with the same name into one
+       #    "header-name:comma-separated-value-list" pair as prescribed by
+       #    RFC 2616, section 4.2, without any white-space between
+       #    values. For example, the two metadata headers
+       #    "x-amz-meta-username: fred" and "x-amz-meta-username: barney"
+       #    would be combined into the single header "x-amz-meta-username:
+       #    fred,barney".
+       combined_headers = headers.inject([]) do |new_headers, header|
+         existing_header = new_headers.find { |h| h.first == header.first }
+         if existing_header
+           existing_header.last << ",#{header.last}"
+         else
+           new_headers << header
+         end
+       end
+       #=> [["a", "1,2"], ["b", "3"], ["c", "0"]]
+
+       # 4. "Un-fold" long headers that span multiple lines (as allowed
+       #    by RFC 2616, section 4.2) by replacing the folding white-space
+       #    (including new-line) with a single space.
+       unfolded_headers = combined_headers.map do |header|
+         key = header.first
+         value = header.last
+         value.gsub!(/\s+/, " ")
+         [key, value]
+       end
+
+       # 5. Trim any white-space around the colon in the header. For
+       #    example, the header "x-amz-meta-username: fred,barney" would
+       #    become "x-amz-meta-username:fred,barney"
+       joined_headers = unfolded_headers.map do |header|
+         key = header.first.strip
+         value = header.last.strip
+         "#{key}:#{value}"
+       end
+
+       # 6. Finally, append a new-line (U+000A) to each canonicalized
+       #    header in the resulting list. Construct the
+       #    CanonicalizedAmzHeaders element by concatenating all headers in
+       #    this list into a single string.
+       joined_headers << "" unless joined_headers.empty?
+       joined_headers.join("\n")
+     end
+
+     # Helper method for extracting the canonicalized resource address
+     #
+     # ==== Parameters
+     # * <tt>host</tt> - Hostname
+     # * <tt>resource</tt> - Path of the resource, including any query string
+     #
+     # ==== Returns
+     # String containing the extracted canonicalized resource
+     def self.canonicalized_resource(host, resource)
+       # 1. Start with the empty string ("").
+       string = ""
+
+       # 2. If the request specifies a bucket using the HTTP Host
+       #    header (virtual hosted-style), append the bucket name preceded
+       #    by a "/" (e.g., "/bucketname"). For path-style requests and
+       #    requests that don't address a bucket, do nothing. For more
+       #    information on virtual hosted-style requests, see Virtual
+       #    Hosting of Buckets.
+       bucket_name = host.sub(/\.?s3\.amazonaws\.com\Z/, "")
+       string << "/#{bucket_name}" unless bucket_name.empty?
+
+       # 3. Append the path part of the un-decoded HTTP Request-URI,
+       #    up-to but not including the query string.
+       uri = URI.parse(resource)
+       string << uri.path
+
+       # 4. If the request addresses a sub-resource, like ?location,
+       #    ?acl, or ?torrent, append the sub-resource including the
+       #    question mark.
+       sub_resources = [
+         "acl",
+         "location",
+         "logging",
+         "notification",
+         "partNumber",
+         "policy",
+         "requestPayment",
+         "torrent",
+         "uploadId",
+         "uploads",
+         "versionId",
+         "versioning",
+         "versions",
+         "website"
+       ]
+       string << "?#{$1}" if uri.query =~ /&?(#{sub_resources.join("|")})(?:&|=|\Z)/
+       string
+     end
+   end
+ end
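For orientation, a sketch of generating a time-limited link with the options listed above; the bucket, key, credentials and expiry are placeholders:

    require "s3"

    url = S3::Signature.generate_temporary_url(
      :bucket            => "data-bucket",             # placeholder
      :resource          => "reports/2011-01.csv",     # placeholder
      :access_key        => "AKIA...",                 # placeholder
      :secret_access_key => "secret",                  # placeholder
      :expires_at        => Time.now.to_i + 3600,      # link valid for one hour
      :use_ssl           => true
    )
    # => "https://data-bucket.s3.amazonaws.com/reports/2011-01.csv?AWSAccessKeyId=...&Expires=...&Signature=..."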
@@ -0,0 +1,3 @@
+ module S3
+   VERSION = "0.3.11"
+ end
@@ -0,0 +1,29 @@
+ # -*- encoding: utf-8 -*-
+
+ # Load the version by requiring the canonical "s3/version", otherwise Ruby will
+ # think it is a different file and complain about a double declaration of S3::VERSION.
+ $LOAD_PATH.unshift File.expand_path("../lib", __FILE__)
+ require "s3/version"
+
+ Gem::Specification.new do |s|
+   s.name        = "seanwalbran-scashin133-s3"
+   s.version     = S3::VERSION
+   s.platform    = Gem::Platform::RUBY
+   s.authors     = ["Jakub Kuźma", "Sean Cashin"]
+   s.email       = ["qoobaa@gmail.com", "scashin133@gmail.com"]
+   s.homepage    = "http://jah.pl/projects/s3.html"
+   s.summary     = "Library for accessing S3 objects and buckets"
+   s.description = "S3 library provides access to Amazon's Simple Storage Service. It supports both European and US buckets through the REST API."
+
+   s.required_rubygems_version = ">= 1.3.6"
+   s.rubyforge_project         = "scashin133-s3"
+
+   s.add_dependency "proxies", "~> 0.2.0"
+   s.add_development_dependency "test-unit", ">= 2.0"
+   s.add_development_dependency "mocha"
+   s.add_development_dependency "bundler", ">= 1.0.0"
+
+   s.files        = `git ls-files`.split("\n")
+   s.executables  = `git ls-files`.split("\n").map { |f| f =~ /^bin\/(.*)/ ? $1 : nil }.compact
+   s.require_path = "lib"
+ end
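To pull this release into a project, the usual Bundler entry would look roughly like this; the :require option is an assumption based on the lib/ require path above:

    # Gemfile
    gem "seanwalbran-scashin133-s3", "0.3.11", :require => "s3"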
@@ -0,0 +1,215 @@
+ require "test_helper"
+
+ class BucketTest < Test::Unit::TestCase
+   def setup
+     @bucket_vhost = S3::Bucket.send(:new, nil, "Data-Bucket")
+     @bucket_path = S3::Bucket.send(:new, nil, "Data_Bucket")
+     @bucket = @bucket_vhost
+
+     @bucket_location = "EU"
+     @bucket_location_body = <<-EOLocation
+     <?xml version="1.0" encoding="UTF-8"?>\n<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">EU</LocationConstraint>
+     EOLocation
+
+     @response_location = Net::HTTPOK.new("1.1", "200", "OK")
+     @response_location.stubs(:body).returns(@bucket_location_body)
+
+     @bucket_owned_by_you_body = <<-EOOwnedByYou
+     <?xml version="1.0" encoding="UTF-8"?>\n<Error> <Code>BucketAlreadyOwnedByYou</Code> <Message>Your previous request to create the named bucket succeeded and you already own it.</Message> <BucketName>bucket</BucketName> <RequestId>117D08EA0EC6E860</RequestId> <HostId>4VpMSvmJ+G5+DLtVox6O5cZNgdPlYcjCu3l0n4HjDe01vPxxuk5eTAtcAkUynRyV</HostId> </Error>
+     EOOwnedByYou
+
+     @response_owned_by_you = Net::HTTPConflict.new("1.1", "409", "Conflict")
+     @response_owned_by_you.stubs(:body).returns(@bucket_owned_by_you_body)
+
+     @bucket_already_exists_body = <<-EOAlreadyExists
+     <?xml version="1.0" encoding="UTF-8"?>\n<Error> <Code>BucketAlreadyExists</Code> <Message>The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.</Message> <BucketName>bucket</BucketName> <RequestId>4C154D32807C92BD</RequestId> <HostId>/xyHQgXcUXTZQhoO+NUBzbaxbFrIhKlyuaRHFnmcId0bMePvY9Zwg+dyk2LYE4g5</HostId> </Error>
+     EOAlreadyExists
+
+     @response_already_exists = Net::HTTPConflict.new("1.1", "409", "Conflict")
+     @response_already_exists.stubs(:body).returns(@bucket_already_exists_body)
+
+     @objects_list_empty = []
+     @objects_list = [
+       S3::Object.send(:new, @bucket, :key => "obj1"),
+       S3::Object.send(:new, @bucket, :key => "obj2")
+     ]
+
+     @response_objects_list_empty_body = <<-EOEmpty
+     <?xml version="1.0" encoding="UTF-8"?>\n<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Name>bucket</Name> <Prefix></Prefix> <Marker></Marker> <MaxKeys>1000</MaxKeys> <IsTruncated>false</IsTruncated> </ListBucketResult>
+     EOEmpty
+
+     @response_objects_list_empty = Net::HTTPOK.new("1.1", "200", "OK")
+     @response_objects_list_empty.stubs(:body).returns(@response_objects_list_empty_body)
+
+     @response_objects_list_body = <<-EOObjects
+     <?xml version="1.0" encoding="UTF-8"?>\n<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Name>bucket</Name> <Prefix></Prefix> <Marker></Marker> <MaxKeys>1000</MaxKeys> <IsTruncated>false</IsTruncated> <Contents> <Key>obj1</Key> <LastModified>2009-07-03T10:17:33.000Z</LastModified> <ETag>&quot;99519cdf14c255e580e1b7bca85a458c&quot;</ETag> <Size>1729</Size> <Owner> <ID>df864aeb6f42be43f1d9e60aaabe3f15e245b035a4b79d1cfe36c4deaec67205</ID> <DisplayName>owner</DisplayName> </Owner> <StorageClass>STANDARD</StorageClass> </Contents> <Contents> <Key>obj2</Key> <LastModified>2009-07-03T11:17:33.000Z</LastModified> <ETag>&quot;99519cdf14c255e586e1b12bca85a458c&quot;</ETag> <Size>179</Size> <Owner> <ID>df864aeb6f42be43f1d9e60aaabe3f17e247b037a4b79d1cfe36c4deaec67205</ID> <DisplayName>owner</DisplayName> </Owner> <StorageClass>STANDARD</StorageClass> </Contents> </ListBucketResult>
+     EOObjects
+
+     @response_objects_list = Net::HTTPOK.new("1.1", "200", "OK")
+     @response_objects_list.stubs(:body).returns(@response_objects_list_body)
+   end
+
+   test "name valid" do
+     assert_raise ArgumentError do S3::Bucket.send(:new, nil, "") end # should not be valid with empty name
+     assert_raise ArgumentError do S3::Bucket.send(:new, nil, "10.0.0.1") end # should not be valid with IP as name
+     assert_raise ArgumentError do S3::Bucket.send(:new, nil, "as") end # should not be valid with name shorter than 3 characters
+     assert_raise ArgumentError do S3::Bucket.send(:new, nil, "a" * 256) end # should not be valid with name longer than 255 characters
+     assert_raise ArgumentError do S3::Bucket.send(:new, nil, ".asdf") end # should not allow special characters as first character
+     assert_raise ArgumentError do S3::Bucket.send(:new, nil, "-asdf") end # should not allow special characters as first character
+     assert_raise ArgumentError do S3::Bucket.send(:new, nil, "_asdf") end # should not allow special characters as first character
+
+     assert_nothing_raised do
+       S3::Bucket.send(:new, nil, "a-a-")
+       S3::Bucket.send(:new, nil, "a.a.")
+       S3::Bucket.send(:new, nil, "a_a_")
+     end
+   end
+
+   test "path prefix" do
+     expected = ""
+     actual = @bucket_vhost.path_prefix
+     assert_equal expected, actual
+
+     expected = "Data_Bucket/"
+     actual = @bucket_path.path_prefix
+     assert_equal expected, actual
+   end
+
+   test "host" do
+     expected = "Data-Bucket.s3.amazonaws.com"
+     actual = @bucket_vhost.host
+     assert_equal expected, actual
+
+     expected = "s3.amazonaws.com"
+     actual = @bucket_path.host
+     assert_equal expected, actual
+   end
+
+   test "vhost" do
+     assert @bucket_vhost.vhost?
+     assert ! @bucket_path.vhost?
+   end
+
+   test "exists" do
+     @bucket.expects(:retrieve).returns(@bucket_vhost)
+     assert @bucket.exists?
+
+     @bucket.expects(:retrieve).raises(S3::Error::NoSuchBucket.new(nil, nil))
+     assert ! @bucket.exists?
+   end
+
+   test "location and parse location" do
+     @bucket.expects(:bucket_request).with(:get, { :params => { :location => nil } }).returns(@response_location)
+
+     expected = @bucket_location
+     actual = @bucket.location
+     assert_equal expected, actual
+
+     @bucket.stubs(:bucket_request).with(:get, { :params => { :location => nil } })
+     actual = @bucket.location
+     assert_equal expected, actual
+   end
+
+   test "save" do
+     @bucket.expects(:bucket_request).with(:put, { :headers => {} })
+     assert @bucket.save
+     # mock ensures that bucket_request was called
+   end
+
+   test "save failure owned by you" do
+     @bucket.expects(:bucket_request).with(:put, { :headers => {} }).raises(S3::Error::BucketAlreadyOwnedByYou.new(409, @response_owned_by_you))
+     assert_raise S3::Error::BucketAlreadyOwnedByYou do
+       @bucket.save
+     end
+
+     @bucket.expects(:bucket_request).with(:put, { :headers => {} }).raises(S3::Error::BucketAlreadyExists.new(409, @response_already_exists))
+     assert_raise S3::Error::BucketAlreadyExists do
+       @bucket.save
+     end
+   end
+
+   test "objects" do
+     @bucket.expects(:list_bucket).returns(@objects_list_empty)
+     expected = @objects_list_empty
+     actual = @bucket.objects
+     assert_equal expected, actual
+
+     @bucket.stubs(:list_bucket).returns(@objects_list_empty)
+     actual = @bucket.objects
+     assert_equal expected, actual
+
+     @bucket.stubs(:list_bucket).returns(@objects_list)
+
+     expected = @objects_list
+     actual = @bucket.objects
+     assert_equal expected, actual
+   end
+
+   test "list bucket and parse objects" do
+     @bucket.expects(:bucket_request).with(:get, :params => { :test => true }).returns(@response_objects_list_empty)
+     expected = @objects_list_empty
+     actual = @bucket.objects.find_all(:test => true)
+     assert_equal expected, actual
+
+     @bucket.expects(:bucket_request).with(:get, :params => { :test => true }).returns(@response_objects_list)
+     expected = @objects_list
+     actual = @bucket.objects.find_all(:test => true)
+     assert_equal expected, actual
+   end
+
+   test "destroy" do
+     @bucket.expects(:bucket_request).with(:delete)
+     assert @bucket.destroy
+   end
+
+   test "objects build" do
+     @bucket.stubs(:bucket_request)
+
+     expected = "object_name"
+     actual = @bucket.objects.build("object_name")
+     assert_kind_of S3::Object, actual
+     assert_equal expected, actual.key
+   end
+
+   test "objects find first" do
+     assert_nothing_raised do
+       S3::Object.any_instance.stubs(:retrieve).returns(S3::Object.send(:new, nil, :key => "obj2"))
+       expected = "obj2"
+       actual = @bucket.objects.find_first("obj2")
+       assert_equal expected, actual.key
+     end
+   end
+
+   test "objects find first fail" do
+     assert_raise S3::Error::NoSuchKey do
+       S3::Object.any_instance.stubs(:retrieve).raises(S3::Error::NoSuchKey.new(404, nil))
+       @bucket.objects.find_first("obj3")
+     end
+   end
+
+   test "objects find all on empty list" do
+     @bucket.stubs(:list_bucket).returns(@objects_list_empty)
+     assert_nothing_raised do
+       expected = @objects_list_empty
+       actual = @bucket.objects.find_all
+       assert_equal expected, actual
+     end
+   end
+
+   test "objects find all" do
+     @bucket.stubs(:list_bucket).returns(@objects_list)
+     assert_nothing_raised do
+       expected = @objects_list
+       actual = @bucket.objects.find_all
+       assert_equal expected, actual
+     end
+   end
+
+   test "objects destroy all" do
+     @bucket.stubs(:list_bucket).returns(@objects_list)
+     @bucket.objects.each do |obj|
+       obj.expects(:destroy)
+     end
+     @bucket.objects.destroy_all
+   end
+ end