steamcannon-s3 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/s3/object.rb ADDED
@@ -0,0 +1,249 @@
+ module S3
+
+   # Class responsible for handling objects stored in S3 buckets
+   class Object
+     include Parser
+     extend Forwardable
+
+     attr_accessor :content_type, :content_disposition, :content_encoding
+     attr_reader :last_modified, :etag, :size, :bucket, :key, :acl, :storage_class
+     attr_writer :content
+
+     def_instance_delegators :bucket, :name, :service, :bucket_request, :vhost?, :host, :path_prefix
+     def_instance_delegators :service, :protocol, :port, :secret_access_key
+     private_class_method :new
+
+     # Compares the object with another object. Returns true if the
+     # keys of the objects are the same and both belong to the same
+     # bucket (see Bucket equality)
+     def ==(other)
+       other.equal?(self) || (other.instance_of?(self.class) && self.key == other.key && self.bucket == other.bucket)
+     end
+
+     # Returns the full key of the object, e.g. <tt>bucket-name/object/key.ext</tt>
+     def full_key
+       [name, key].join("/")
+     end
+
+     # Assigns a new +key+ to the object, raises ArgumentError if the
+     # given key is not a valid key name
+     def key=(key)
+       raise ArgumentError.new("Invalid key name: #{key}") unless key_valid?(key)
+       @key ||= key
+     end
+
+     # Assigns a new ACL to the object. Note that the ACL is not
+     # retrieved from the server; it is set to "public-read" by default.
+     #
+     # ==== Example
+     #   object.acl = :public_read
+     def acl=(acl)
+       @acl = acl.to_s.gsub("_", "-") if acl
+     end
+
+     # Assigns a new storage class (RRS) to the object. Note that the
+     # storage class is not retrieved from the server; it is set to
+     # "STANDARD" by default.
+     #
+     # ==== Example
+     #   object.storage_class = :reduced_redundancy
+     def storage_class=(storage_class)
+       @storage_class = storage_class.to_s.upcase if storage_class
+     end
+
+     # Retrieves the object from the server. This method downloads
+     # object metadata only (content type, size and so on). It does
+     # NOT download the content of the object (use the #content method
+     # for that).
+     def retrieve
+       object_headers
+       self
+     end
+
+     # Retrieves the object from the server, returns true if the object
+     # exists or false otherwise. Uses the #retrieve method, but catches
+     # the S3::Error::NoSuchKey exception and returns false when it happens
+     def exists?
+       retrieve
+       true
+     rescue Error::NoSuchKey
+       false
+     end
+
+     # Downloads the content of the object and caches it. Pass true
+     # to clear the cache and download the object again.
+     def content(reload = false)
+       if reload or @content.nil?
+         get_object
+       end
+       @content
+     end
+
+     # Saves the object, returns true if successful.
+     def save
+       put_object
+       true
+     end
+
+     # Copies the file to another key and/or bucket.
+     #
+     # ==== Options
+     # * <tt>:key</tt> - New key to store object in
+     # * <tt>:bucket</tt> - New bucket to store object in (instance of
+     #   S3::Bucket)
+     # * <tt>:acl</tt> - ACL of the copied object (default:
+     #   "public-read")
+     # * <tt>:content_type</tt> - Content type of the copied object
+     #   (default: "application/octet-stream")
+     def copy(options = {})
+       copy_object(options)
+     end
+
+     # Destroys the file on the server
+     def destroy
+       delete_object
+       true
+     end
+
+     # Returns the object's URL using the protocol specified in the service,
+     # e.g. <tt>http://domain.com.s3.amazonaws.com/key/with/path.extension</tt>
+     def url
+       URI.escape("#{protocol}#{host}/#{path_prefix}#{key}")
+     end
+
+     # Returns a temporary URL to the object that expires at the given
+     # timestamp. Defaults to expiring one hour from now.
+     def temporary_url(expires_at = Time.now + 3600)
+       signature = Signature.generate_temporary_url_signature(:bucket => name,
+                                                              :resource => key,
+                                                              :expires_at => expires_at,
+                                                              :secret_access_key => secret_access_key)
+
+       "#{url}?AWSAccessKeyId=#{self.bucket.service.access_key_id}&Expires=#{expires_at.to_i.to_s}&Signature=#{signature}"
+     end
+
+     # Returns the object's CNAME URL (without the <tt>s3.amazonaws.com</tt>
+     # suffix) using the protocol specified in the service,
+     # e.g. <tt>http://domain.com/key/with/path.extension</tt>. (You
+     # have to set up the CNAME in your DNS before using the CNAME URL
+     # scheme.)
+     def cname_url
+       URI.escape("#{protocol}#{name}/#{key}") if bucket.vhost?
+     end
+
+     def inspect #:nodoc:
+       "#<#{self.class}:/#{name}/#{key}>"
+     end
+
+     private
+
+     attr_writer :last_modified, :etag, :size, :original_key, :bucket
+
+     def copy_object(options = {})
+       key = options[:key] or raise ArgumentError, "No key given"
+       raise ArgumentError.new("Invalid key name: #{key}") unless key_valid?(key)
+       bucket = options[:bucket] || self.bucket
+
+       headers = {}
+
+       headers[:x_amz_acl] = options[:acl] || acl || "public-read"
+       headers[:content_type] = options[:content_type] || content_type || "application/octet-stream"
+       headers[:content_encoding] = options[:content_encoding] if options[:content_encoding]
+       headers[:content_disposition] = options[:content_disposition] if options[:content_disposition]
+       headers[:x_amz_copy_source] = full_key
+       headers[:x_amz_metadata_directive] = "REPLACE"
+       headers[:x_amz_copy_source_if_match] = options[:if_match] if options[:if_match]
+       headers[:x_amz_copy_source_if_none_match] = options[:if_none_match] if options[:if_none_match]
+       headers[:x_amz_copy_source_if_modified_since] = options[:if_modified_since] if options[:if_modified_since]
+       headers[:x_amz_copy_source_if_unmodified_since] = options[:if_unmodified_since] if options[:if_unmodified_since]
+
+       response = bucket.send(:bucket_request, :put, :path => key, :headers => headers)
+       object_attributes = parse_copy_object_result(response.body)
+
+       object = Object.send(:new, bucket, object_attributes.merge(:key => key, :size => size))
+       object.acl = response["x-amz-acl"]
+       object.content_type = response["content-type"]
+       object.content_encoding = response["content-encoding"]
+       object.content_disposition = response["content-disposition"]
+       object
+     end
+
+     def get_object(options = {})
+       response = object_request(:get, options)
+       parse_headers(response)
+     end
+
+     def object_headers(options = {})
+       response = object_request(:head, options)
+       parse_headers(response)
+     rescue Error::ResponseError => e
+       if e.response.code.to_i == 404
+         raise Error::ResponseError.exception("NoSuchKey").new("The specified key does not exist.", nil)
+       else
+         raise e
+       end
+     end
+
+     def put_object
+       body = content.is_a?(IO) ? content.read : content
+       response = object_request(:put, :body => body, :headers => dump_headers)
+       parse_headers(response)
+     end
+
+     def delete_object(options = {})
+       object_request(:delete)
+     end
+
+     def initialize(bucket, options = {})
+       self.bucket = bucket
+       self.key = options[:key]
+       self.last_modified = options[:last_modified]
+       self.etag = options[:etag]
+       self.size = options[:size]
+     end
+
+     def object_request(method, options = {})
+       bucket_request(method, options.merge(:path => key))
+     end
+
+     def last_modified=(last_modified)
+       @last_modified = Time.parse(last_modified) if last_modified
+     end
+
+     def etag=(etag)
+       @etag = etag[1..-2] if etag
+     end
+
+     def key_valid?(key)
+       if (key.nil? or key.empty? or key =~ %r#//#)
+         false
+       else
+         true
+       end
+     end
+
+     def dump_headers
+       headers = {}
+       headers[:x_amz_acl] = @acl || "public-read"
+       headers[:x_amz_storage_class] = @storage_class || "STANDARD"
+       headers[:content_type] = @content_type || "application/octet-stream"
+       headers[:content_encoding] = @content_encoding if @content_encoding
+       headers[:content_disposition] = @content_disposition if @content_disposition
+       headers
+     end
+
+     def parse_headers(response)
+       self.etag = response["etag"]
+       self.content_type = response["content-type"]
+       self.content_disposition = response["content-disposition"]
+       self.content_encoding = response["content-encoding"]
+       self.last_modified = response["last-modified"]
+       if response["content-range"]
+         self.size = response["content-range"].sub(/[^\/]+\//, "").to_i
+       else
+         self.size = response["content-length"]
+         self.content = response.body
+       end
+     end
+   end
+ end
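
The class above is normally reached through a bucket rather than instantiated directly (new is private). A minimal usage sketch, assuming a service and a bucket already exist; the credentials, bucket and key names are placeholders, and buckets.find / bucket.objects come from this gem's bucket and extension files, which are not part of this hunk:

  require "s3"

  service = S3::Service.new(:access_key_id => "AKIA...", :secret_access_key => "...")
  bucket  = service.buckets.find("my-bucket")

  object = bucket.objects.build("docs/readme.txt")    # unsaved S3::Object
  object.content      = "hello world"
  object.content_type = "text/plain"
  object.save                                         # PUT to S3, returns true on success
  object.temporary_url(Time.now + 600)                # signed URL valid for ten minutes
  object.copy(:key => "docs/readme-copy.txt")         # server-side copy
  object.destroy                                      # DELETE from S3
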
data/lib/s3/objects_extension.rb ADDED
@@ -0,0 +1,37 @@
+ module S3
+   module ObjectsExtension
+     # Builds an object in the bucket with the given key
+     def build(key)
+       Object.send(:new, proxy_owner, :key => key)
+     end
+
+     # Finds the first object with the given name or raises an
+     # exception if it is not found
+     def find_first(name)
+       object = build(name)
+       object.retrieve
+     end
+     alias :find :find_first
+
+     # Finds the objects in the bucket.
+     #
+     # ==== Options
+     # * <tt>:prefix</tt> - Limits the response to keys which begin
+     #   with the indicated prefix
+     # * <tt>:marker</tt> - Indicates where in the bucket to begin
+     #   listing
+     # * <tt>:max_keys</tt> - The maximum number of keys you'd like
+     #   to see
+     # * <tt>:delimiter</tt> - Causes keys that contain the same
+     #   string between the prefix and the first occurrence of the
+     #   delimiter to be rolled up into a single result element
+     def find_all(options = {})
+       proxy_owner.send(:list_bucket, options)
+     end
+
+     # Destroys all keys in the bucket
+     def destroy_all
+       proxy_target.each { |object| object.destroy }
+     end
+   end
+ end
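
This extension is mixed into the proxy returned by Bucket#objects (defined in the bucket file of this gem, not shown in this hunk). A short sketch with placeholder key names:

  objects = bucket.objects                           # proxy extended with ObjectsExtension
  objects.build("logs/2010-09-01.log")               # new, unsaved object with the given key
  objects.find("logs/2010-09-01.log")                # HEADs the key; raises S3::Error::NoSuchKey if it does not exist
  objects.find_all(:prefix => "logs/", :max_keys => 100)
  objects.destroy_all                                # deletes every object in the bucket
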
data/lib/s3/parser.rb ADDED
@@ -0,0 +1,48 @@
+ module S3
+   module Parser
+     include REXML
+
+     def rexml_document(xml)
+       xml.force_encoding(Encoding::UTF_8) if xml.respond_to? :force_encoding
+       Document.new(xml)
+     end
+
+     def parse_list_all_my_buckets_result(xml)
+       names = []
+       rexml_document(xml).elements.each("ListAllMyBucketsResult/Buckets/Bucket/Name") { |e| names << e.text }
+       names
+     end
+
+     def parse_location_constraint(xml)
+       rexml_document(xml).elements["LocationConstraint"].text
+     end
+
+     def parse_list_bucket_result(xml)
+       objects_attributes = []
+       rexml_document(xml).elements.each("ListBucketResult/Contents") do |e|
+         object_attributes = {}
+         object_attributes[:key] = e.elements["Key"].text
+         object_attributes[:etag] = e.elements["ETag"].text
+         object_attributes[:last_modified] = e.elements["LastModified"].text
+         object_attributes[:size] = e.elements["Size"].text
+         objects_attributes << object_attributes
+       end
+       objects_attributes
+     end
+
+     def parse_copy_object_result(xml)
+       object_attributes = {}
+       document = rexml_document(xml)
+       object_attributes[:etag] = document.elements["CopyObjectResult/ETag"].text
+       object_attributes[:last_modified] = document.elements["CopyObjectResult/LastModified"].text
+       object_attributes
+     end
+
+     def parse_error(xml)
+       document = rexml_document(xml)
+       code = document.elements["Error/Code"].text
+       message = document.elements["Error/Message"].text
+       [code, message]
+     end
+   end
+ end
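
For illustration, a sketch of what the list parser returns when fed a hand-written response body shaped like the ListAllMyBucketsResult XPath used above (the bucket names are made up):

  require "s3"
  include S3::Parser

  xml = <<-XML
    <ListAllMyBucketsResult>
      <Buckets>
        <Bucket><Name>first-bucket</Name></Bucket>
        <Bucket><Name>second-bucket</Name></Bucket>
      </Buckets>
    </ListAllMyBucketsResult>
  XML

  parse_list_all_my_buckets_result(xml)   #=> ["first-bucket", "second-bucket"]
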
data/lib/s3/service.rb ADDED
@@ -0,0 +1,82 @@
+ module S3
+   class Service
+     include Parser
+     include Proxies
+
+     attr_reader :access_key_id, :secret_access_key, :use_ssl, :proxy
+
+     # Compares the service to another one, by <tt>access_key_id</tt> and
+     # <tt>secret_access_key</tt>
+     def ==(other)
+       self.access_key_id == other.access_key_id and self.secret_access_key == other.secret_access_key
+     end
+
+     # Creates a new service.
+     #
+     # ==== Options
+     # * <tt>:access_key_id</tt> - Access key id (REQUIRED)
+     # * <tt>:secret_access_key</tt> - Secret access key (REQUIRED)
+     # * <tt>:use_ssl</tt> - Use https or http protocol (false by
+     #   default)
+     # * <tt>:debug</tt> - Display debug information on STDOUT
+     #   (false by default)
+     # * <tt>:timeout</tt> - Timeout used by the Net::HTTP object
+     #   (60 by default)
+     def initialize(options)
+       @access_key_id = options.fetch(:access_key_id)
+       @secret_access_key = options.fetch(:secret_access_key)
+       @use_ssl = options.fetch(:use_ssl, false)
+       @timeout = options.fetch(:timeout, 60)
+       @debug = options.fetch(:debug, false)
+
+       raise ArgumentError, "Missing proxy settings. Must specify at least :host." if options[:proxy] && !options[:proxy][:host]
+       @proxy = options.fetch(:proxy, nil)
+     end
+
+     # Returns all buckets in the service and caches the result (see
+     # +reload+)
+     def buckets
+       MethodProxy.new(self, :list_all_my_buckets, :extend => BucketsExtension)
+     end
+
+     # Returns "http://" or "https://", depending on the <tt>:use_ssl</tt>
+     # value from the initializer
+     def protocol
+       use_ssl ? "https://" : "http://"
+     end
+
+     # Returns 443 or 80, depending on the <tt>:use_ssl</tt> value from
+     # the initializer
+     def port
+       use_ssl ? 443 : 80
+     end
+
+     def inspect #:nodoc:
+       "#<#{self.class}:#@access_key_id>"
+     end
+
+     private
+
+     def list_all_my_buckets
+       response = service_request(:get)
+       names = parse_list_all_my_buckets_result(response.body)
+       names.map { |name| Bucket.send(:new, self, name) }
+     end
+
+     def service_request(method, options = {})
+       connection.request(method, options.merge(:path => "/#{options[:path]}"))
+     end
+
+     def connection
+       if @connection.nil?
+         @connection = Connection.new(:access_key_id => @access_key_id,
+                                      :secret_access_key => @secret_access_key,
+                                      :use_ssl => @use_ssl,
+                                      :timeout => @timeout,
+                                      :debug => @debug,
+                                      :proxy => @proxy)
+       end
+       @connection
+     end
+   end
+ end
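
A construction sketch; the credentials and proxy host are placeholders, and Bucket#name comes from the bucket file of this gem, not shown here:

  service = S3::Service.new(:access_key_id     => "AKIA...",
                            :secret_access_key => "...",
                            :use_ssl           => true,
                            :timeout           => 30,
                            :proxy             => { :host => "proxy.example.com" })
  service.protocol                     #=> "https://"
  service.port                         #=> 443
  service.buckets.map { |b| b.name }   # the MethodProxy issues GET / lazily on first use
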
data/lib/s3/signature.rb ADDED
@@ -0,0 +1,229 @@
+ module S3
+
+   # Class responsible for generating signatures for requests.
+   #
+   # Implements the algorithm defined by Amazon Web Services to sign
+   # requests with secret private credentials
+   #
+   # === See
+   # http://docs.amazonwebservices.com/AmazonS3/latest/index.html?RESTAuthentication.html
+
+   class Signature
+
+     # Generates a signature for the given parameters
+     #
+     # ==== Options
+     # * <tt>:host</tt> - Hostname
+     # * <tt>:request</tt> - Net::HTTPRequest object with correct
+     #   headers
+     # * <tt>:access_key_id</tt> - Access key id
+     # * <tt>:secret_access_key</tt> - Secret access key
+     #
+     # ==== Returns
+     # Generated signature string for the given hostname and request
+     def self.generate(options)
+       request = options[:request]
+       access_key_id = options[:access_key_id]
+
+       options.merge!(:headers => request,
+                      :method => request.method,
+                      :resource => request.path)
+
+       signature = canonicalized_signature(options)
+
+       "AWS #{access_key_id}:#{signature}"
+     end
+
+     # Generates a temporary URL signature for the given resource
+     #
+     # ==== Options
+     # * <tt>:bucket</tt> - Bucket in which the resource resides
+     # * <tt>:resource</tt> - Path to the resource you want to create
+     #   a temporary link to
+     # * <tt>:secret_access_key</tt> - Secret access key
+     # * <tt>:expires_at</tt> - Unix timestamp of when the resource
+     #   link will expire
+     # * <tt>:method</tt> - HTTP request method you want to use on
+     #   the resource, defaults to GET
+     # * <tt>:headers</tt> - Any additional HTTP headers you intend
+     #   to use when requesting the resource
+     def self.generate_temporary_url_signature(options)
+       bucket = options[:bucket]
+       resource = options[:resource]
+       secret_access_key = options[:secret_access_key]
+       expires = options[:expires_at]
+
+       headers = options[:headers] || {}
+       headers.merge!('date' => expires.to_i.to_s)
+
+       options.merge!(:resource => "/#{bucket}/#{resource}",
+                      :method => options[:method] || :get,
+                      :headers => headers)
+       signature = canonicalized_signature(options)
+
+       CGI.escape(signature)
+     end
+
+     # Generates a temporary URL for the given resource
+     #
+     # ==== Options
+     # * <tt>:bucket</tt> - Bucket in which the resource resides
+     # * <tt>:resource</tt> - Path to the resource you want to create
+     #   a temporary link to
+     # * <tt>:access_key</tt> - Access key
+     # * <tt>:secret_access_key</tt> - Secret access key
+     # * <tt>:expires_at</tt> - Unix timestamp of when the resource
+     #   link will expire
+     # * <tt>:method</tt> - HTTP request method you want to use on
+     #   the resource, defaults to GET
+     # * <tt>:headers</tt> - Any additional HTTP headers you intend
+     #   to use when requesting the resource
+     def self.generate_temporary_url(options)
+       bucket = options[:bucket]
+       resource = options[:resource]
+       access_key = options[:access_key]
+       expires = options[:expires_at].to_i
+       signature = generate_temporary_url_signature(options)
+
+       url = "http://#{S3::HOST}/#{bucket}/#{resource}"
+       url << "?AWSAccessKeyId=#{access_key}"
+       url << "&Expires=#{expires}"
+       url << "&Signature=#{signature}"
+     end
+
+     private
+
+     def self.canonicalized_signature(options)
+       headers = options[:headers] || {}
+       host = options[:host] || ""
+       resource = options[:resource]
+       access_key_id = options[:access_key_id]
+       secret_access_key = options[:secret_access_key]
+
+       http_verb = options[:method].to_s.upcase
+       content_md5 = headers["content-md5"] || ""
+       content_type = headers["content-type"] || ""
+       date = headers["x-amz-date"].nil? ? headers["date"] : ""
+       canonicalized_resource = canonicalized_resource(host, resource)
+       canonicalized_amz_headers = canonicalized_amz_headers(headers)
+
+       string_to_sign = ""
+       string_to_sign << http_verb
+       string_to_sign << "\n"
+       string_to_sign << content_md5
+       string_to_sign << "\n"
+       string_to_sign << content_type
+       string_to_sign << "\n"
+       string_to_sign << date
+       string_to_sign << "\n"
+       string_to_sign << canonicalized_amz_headers
+       string_to_sign << canonicalized_resource
+
+       digest = OpenSSL::Digest::Digest.new('sha1')
+       hmac = OpenSSL::HMAC.digest(digest, secret_access_key, string_to_sign)
+       base64 = Base64.encode64(hmac)
+       base64.chomp
+     end
+
+     # Helper method for extracting header fields from a Net::HTTPRequest
+     # and preparing them for signing in the #generate method
+     #
+     # ==== Parameters
+     # * <tt>request</tt> - Net::HTTPRequest object with header fields
+     #   filled in
+     #
+     # ==== Returns
+     # String containing the interesting header fields in a suitable
+     # order and form
+     def self.canonicalized_amz_headers(request)
+       headers = []
+
+       # 1. Convert each HTTP header name to lower-case. For example,
+       # 'X-Amz-Date' becomes 'x-amz-date'.
+       request.each { |key, value| headers << [key.downcase, value] if key =~ /\Ax-amz-/io }
+       #=> [["c", 0], ["a", 1], ["a", 2], ["b", 3]]
+
+       # 2. Sort the collection of headers lexicographically by header
+       # name.
+       headers.sort!
+       #=> [["a", 1], ["a", 2], ["b", 3], ["c", 0]]
+
+       # 3. Combine header fields with the same name into one
+       # "header-name:comma-separated-value-list" pair as prescribed by
+       # RFC 2616, section 4.2, without any white-space between
+       # values. For example, the two metadata headers
+       # 'x-amz-meta-username: fred' and 'x-amz-meta-username: barney'
+       # would be combined into the single header 'x-amz-meta-username:
+       # fred,barney'.
+       combined_headers = headers.inject([]) do |new_headers, header|
+         existing_header = new_headers.find { |h| h.first == header.first }
+         if existing_header
+           existing_header.last << ",#{header.last}"
+         else
+           new_headers << header
+         end
+       end
+       #=> [["a", "1,2"], ["b", "3"], ["c", "0"]]
+
+       # 4. "Un-fold" long headers that span multiple lines (as allowed
+       # by RFC 2616, section 4.2) by replacing the folding white-space
+       # (including new-line) by a single space.
+       unfolded_headers = combined_headers.map do |header|
+         key = header.first
+         value = header.last
+         value.gsub!(/\s+/, " ")
+         [key, value]
+       end
+
+       # 5. Trim any white-space around the colon in the header. For
+       # example, the header 'x-amz-meta-username: fred,barney' would
+       # become 'x-amz-meta-username:fred,barney'
+       joined_headers = unfolded_headers.map do |header|
+         key = header.first.strip
+         value = header.last.strip
+         "#{key}:#{value}"
+       end
+
+       # 6. Finally, append a new-line (U+000A) to each canonicalized
+       # header in the resulting list. Construct the
+       # CanonicalizedAmzHeaders element by concatenating all headers in
+       # this list into a single string.
+       joined_headers << "" unless joined_headers.empty?
+       joined_headers.join("\n")
+     end
+
+     # Helper method for extracting the canonicalized resource address
+     #
+     # ==== Parameters
+     # * <tt>host</tt> - Hostname
+     # * <tt>request</tt> - Net::HTTPRequest object with header fields
+     #   filled in
+     #
+     # ==== Returns
+     # String containing the extracted canonicalized resource
+     def self.canonicalized_resource(host, resource)
+       # 1. Start with the empty string ("").
+       string = ""
+
+       # 2. If the request specifies a bucket using the HTTP Host
+       # header (virtual hosted-style), append the bucket name preceded
+       # by a "/" (e.g., "/bucketname"). For path-style requests and
+       # requests that don't address a bucket, do nothing. For more
+       # information on virtual hosted-style requests, see Virtual
+       # Hosting of Buckets.
+       bucket_name = host.sub(/\.?s3\.amazonaws\.com\Z/, "")
+       string << "/#{bucket_name}" unless bucket_name.empty?
+
+       # 3. Append the path part of the un-decoded HTTP Request-URI,
+       # up-to but not including the query string.
+       uri = URI.parse(resource)
+       string << uri.path
+
+       # 4. If the request addresses a sub-resource, like ?location,
+       # ?acl, or ?torrent, append the sub-resource including the
+       # question mark.
+       string << "?#{$1}" if uri.query =~ /&?(acl|torrent|logging|location)(?:&|=|\Z)/
+       string
+     end
+   end
+ end
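
A sketch of the two public entry points with placeholder values; this mirrors what Object#temporary_url does internally:

  signature = S3::Signature.generate_temporary_url_signature(
    :bucket            => "my-bucket",
    :resource          => "docs/readme.txt",
    :expires_at        => Time.now + 3600,
    :secret_access_key => "...")

  S3::Signature.generate_temporary_url(
    :bucket            => "my-bucket",
    :resource          => "docs/readme.txt",
    :access_key        => "AKIA...",
    :secret_access_key => "...",
    :expires_at        => Time.now + 3600)
  #=> "http://s3.amazonaws.com/my-bucket/docs/readme.txt?AWSAccessKeyId=AKIA...&Expires=...&Signature=..."

Signature.generate, which returns the "AWS access_key_id:signature" Authorization value, is presumably consumed by the Connection class (not part of this hunk) when signing each request.
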
data/lib/s3/version.rb ADDED
@@ -0,0 +1,3 @@
+ module S3
+   VERSION = "0.3.2"
+ end
data/lib/s3.rb ADDED
@@ -0,0 +1,26 @@
+ require "base64"
+ require "cgi"
+ require "digest/md5"
+ require "forwardable"
+ require "net/http"
+ require "net/https"
+ require "openssl"
+ require "rexml/document"
+ require "time"
+
+ require "proxies"
+ require "s3/objects_extension"
+ require "s3/buckets_extension"
+ require "s3/parser"
+ require "s3/bucket"
+ require "s3/connection"
+ require "s3/exceptions"
+ require "s3/object"
+ require "s3/service"
+ require "s3/signature"
+ require "s3/version"
+
+ module S3
+   # Default (and only) host serving S3 stuff
+   HOST = "s3.amazonaws.com"
+ end