radosgw-s3 0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +7 -0
- data/Gemfile +2 -0
- data/LICENSE +202 -0
- data/README.rdoc +126 -0
- data/Rakefile +21 -0
- data/lib/ceph/radosgw.rb +35 -0
- data/lib/radosgw-s3.rb +29 -0
- data/lib/s3/bucket.rb +217 -0
- data/lib/s3/buckets_extension.rb +29 -0
- data/lib/s3/connection.rb +225 -0
- data/lib/s3/exceptions.rb +111 -0
- data/lib/s3/object.rb +262 -0
- data/lib/s3/objects_extension.rb +37 -0
- data/lib/s3/parser.rb +90 -0
- data/lib/s3/request.rb +31 -0
- data/lib/s3/service.rb +98 -0
- data/lib/s3/signature.rb +261 -0
- data/lib/s3/version.rb +3 -0
- data/radosgw-s3.gemspec +30 -0
- data/test/bucket_test.rb +245 -0
- data/test/connection_test.rb +215 -0
- data/test/object_test.rb +223 -0
- data/test/service_test.rb +133 -0
- data/test/signature_test.rb +228 -0
- data/test/test_helper.rb +3 -0
- metadata +157 -0
@@ -0,0 +1,37 @@
+module S3
+  module ObjectsExtension
+    # Builds the object in the bucket with given key
+    def build(key)
+      Object.send(:new, proxy_owner, :key => key)
+    end
+
+    # Finds first object with given name or raises the exception if
+    # not found
+    def find_first(name)
+      object = build(name)
+      object.retrieve
+    end
+    alias :find :find_first
+
+    # Finds the objects in the bucket.
+    #
+    # ==== Options
+    # * <tt>:prefix</tt> - Limits the response to keys which begin
+    #   with the indicated prefix
+    # * <tt>:marker</tt> - Indicates where in the bucket to begin
+    #   listing
+    # * <tt>:max_keys</tt> - The maximum number of keys you'd like
+    #   to see
+    # * <tt>:delimiter</tt> - Causes keys that contain the same
+    #   string between the prefix and the first occurrence of the
+    #   delimiter to be rolled up into a single result element
+    def find_all(options = {})
+      proxy_owner.send(:list_bucket, options)
+    end
+
+    # Destroys all keys in the bucket
+    def destroy_all
+      proxy_target.each { |object| object.destroy }
+    end
+  end
+end
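A hedged usage sketch of the methods this extension adds: `service` is assumed to be an S3::Service instance (see data/lib/s3/service.rb below), and Bucket#objects is assumed to return a proxy extended with ObjectsExtension, as in the s3 gem this code appears to be based on; the bucket and key names are placeholders.

    bucket = service.bucket("backups")            # placeholder bucket name

    draft  = bucket.objects.build("db/dump.sql")  # no HTTP request yet
    stored = bucket.objects.find("db/dump.sql")   # GETs the object, raises if it does not exist
    listed = bucket.objects.find_all(:prefix => "db/", :max_keys => 10)
    bucket.objects.destroy_all                    # removes every key in the bucket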
data/lib/s3/parser.rb
ADDED
@@ -0,0 +1,90 @@
+module S3
+  module Parser
+    include REXML
+
+    def rexml_document(xml)
+      xml.force_encoding(::Encoding::UTF_8) if xml.respond_to? :force_encoding
+      Document.new(xml)
+    end
+
+    def parse_list_all_my_buckets_result(xml)
+      names = []
+      rexml_document(xml).elements.each("ListAllMyBucketsResult/Buckets/Bucket/Name") { |e| names << e.text }
+      names
+    end
+
+    def parse_location_constraint(xml)
+      rexml_document(xml).elements["LocationConstraint"].text
+    end
+
+    def parse_list_bucket_result(xml)
+      objects_attributes = []
+      rexml_document(xml).elements.each("ListBucketResult/Contents") do |e|
+        object_attributes = {}
+        object_attributes[:key] = e.elements["Key"].text
+        object_attributes[:etag] = e.elements["ETag"].text
+        object_attributes[:last_modified] = e.elements["LastModified"].text
+        object_attributes[:size] = e.elements["Size"].text
+        objects_attributes << object_attributes
+      end
+      objects_attributes
+    end
+
+    def parse_copy_object_result(xml)
+      object_attributes = {}
+      document = rexml_document(xml)
+      object_attributes[:etag] = document.elements["CopyObjectResult/ETag"].text
+      object_attributes[:last_modified] = document.elements["CopyObjectResult/LastModified"].text
+      object_attributes
+    end
+
+    def parse_error(xml)
+      document = rexml_document(xml)
+      code = document.elements["Error/Code"].text
+      message = document.elements["Error/Message"].text
+      [code, message]
+    end
+
+    def parse_is_truncated(xml)
+      rexml_document(xml).elements["ListBucketResult/IsTruncated"].text == 'true'
+    end
+
+
+    # Parse acl response and return hash with grantee and their permissions
+    def parse_acl(xml)
+      grants = {}
+      rexml_document(xml).elements.each("AccessControlPolicy/AccessControlList/Grant") do |grant|
+        grants.merge!(extract_grantee(grant))
+      end
+      grants
+    end
+
+    private
+
+    def convert_uri_to_group_name(uri)
+      case uri
+      when "http://acs.amazonaws.com/groups/global/AllUsers"
+        return "Everyone"
+      when "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
+        return "Authenticated Users"
+      when "http://acs.amazonaws.com/groups/s3/LogDelivery"
+        return "Log Delivery"
+      else
+        return uri
+      end
+    end
+
+    def extract_grantee(grant)
+      grants = {}
+      grant.each_element_with_attribute("xsi:type", "Group") do |grantee|
+        group_name = convert_uri_to_group_name(grantee.get_text("URI").value)
+        grants[group_name] = grant.get_text("Permission").value
+      end
+      grant.each_element_with_attribute("xsi:type", "CanonicalUser") do |grantee|
+        user_name = grantee.get_text("DisplayName").value
+        grants[user_name] = grant.get_text("Permission").value
+      end
+      grants
+    end
+  end
+end
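For reference, a minimal sketch of how the parsing helpers behave when the module is mixed into a class of your own; the XML below is a hand-written sample, the class name ListingReader and all values are illustrative, and `require "radosgw-s3"` is assumed to load S3::Parser together with its REXML dependency.

    require "radosgw-s3"

    class ListingReader
      include S3::Parser
    end

    xml = <<-XML
    <ListBucketResult>
      <IsTruncated>false</IsTruncated>
      <Contents>
        <Key>db/dump.sql</Key>
        <ETag>"abc123"</ETag>
        <LastModified>2013-01-01T00:00:00.000Z</LastModified>
        <Size>1024</Size>
      </Contents>
    </ListBucketResult>
    XML

    reader = ListingReader.new
    p reader.parse_list_bucket_result(xml)
    #=> [{:key=>"db/dump.sql", :etag=>"\"abc123\"", :last_modified=>"2013-01-01T00:00:00.000Z", :size=>"1024"}]
    p reader.parse_is_truncated(xml)
    #=> false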
data/lib/s3/request.rb
ADDED
@@ -0,0 +1,31 @@
+module S3
+  # Class responsible for sending chunked requests
+  # properly. Net::HTTPGenericRequest has hardcoded chunk_size, so we
+  # inherit the class and override chunk_size.
+  class Request < Net::HTTPGenericRequest
+    def initialize(chunk_size, m, reqbody, resbody, path, initheader = nil)
+      @chunk_size = chunk_size
+      super(m, reqbody, resbody, path, initheader)
+    end
+
+    private
+
+    def send_request_with_body_stream(sock, ver, path, f)
+      unless content_length() or chunked?
+        raise ArgumentError, "Content-Length not given and Transfer-Encoding is not `chunked'"
+      end
+      supply_default_content_type
+      write_header sock, ver, path
+      if chunked?
+        while s = f.read(@chunk_size)
+          sock.write(sprintf("%x\r\n", s.length) << s << "\r\n")
+        end
+        sock.write "0\r\n\r\n"
+      else
+        while s = f.read(@chunk_size)
+          sock.write s
+        end
+      end
+    end
+  end
+end
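In normal use this chunked request is built and signed by S3::Connection (see data/lib/s3/connection.rb in the file list), but a hedged sketch of driving it directly looks roughly like this; the host, path, file name and 1 MiB chunk size are placeholder assumptions, and no request signing is shown.

    require "net/http"
    require "radosgw-s3"   # assumed to define S3::Request

    File.open("dump.sql", "rb") do |file|
      # args: chunk_size, method, request-has-body, response-has-body, path
      request = S3::Request.new(1_048_576, "PUT", true, true, "/backups/dump.sql")
      request["content-type"]      = "application/octet-stream"
      request["transfer-encoding"] = "chunked"   # makes chunked? true, so the body streams in 1 MiB chunks
      request.body_stream = file

      Net::HTTP.start("rgw.example.com", 80) do |http|
        response = http.request(request)        # uses the overridden send_request_with_body_stream
        puts response.code
      end
    end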
data/lib/s3/service.rb
ADDED
@@ -0,0 +1,98 @@
+module S3
+  class Service
+    include Parser
+    include Proxies
+
+    attr_reader :access_key_id, :secret_access_key, :use_ssl, :use_vhost, :proxy, :host
+
+    # Compares service to other, by <tt>access_key_id</tt> and
+    # <tt>secret_access_key</tt>
+    def ==(other)
+      self.access_key_id == other.access_key_id and self.secret_access_key == other.secret_access_key
+    end
+
+    # Creates new service.
+    #
+    # ==== Options
+    # * <tt>:access_key_id</tt> - Access key id (REQUIRED)
+    # * <tt>:secret_access_key</tt> - Secret access key (REQUIRED)
+    # * <tt>:use_ssl</tt> - Use https or http protocol (false by
+    #   default)
+    # * <tt>:use_vhost</tt> - Use bucket.s3.amazonaws.com or s3.amazonaws.com/bucket (true by
+    #   default)
+    # * <tt>:debug</tt> - Display debug information on the STDOUT
+    #   (false by default)
+    # * <tt>:timeout</tt> - Timeout to use by the Net::HTTP object
+    #   (60 by default)
+    def initialize(options)
+      # The keys for these required options might exist in the options hash, but
+      # they might be set to something like `nil`. If this is the case, we want
+      # to fail early.
+      raise ArgumentError, "Missing :access_key_id." if !options[:access_key_id]
+      raise ArgumentError, "Missing :secret_access_key." if !options[:secret_access_key]
+
+      @access_key_id = options.fetch(:access_key_id)
+      @secret_access_key = options.fetch(:secret_access_key)
+      @host = options.fetch(:host)
+      @use_ssl = options.fetch(:use_ssl, false)
+      @use_vhost = options.fetch(:use_vhost, true)
+      @timeout = options.fetch(:timeout, 60)
+      @debug = options.fetch(:debug, false)
+
+      raise ArgumentError, "Missing proxy settings. Must specify at least :host." if options[:proxy] && !options[:proxy][:host]
+      @proxy = options.fetch(:proxy, nil)
+    end
+
+    # Returns all buckets in the service and caches the result (see
+    # +reload+)
+    def buckets
+      Proxy.new(lambda { list_all_my_buckets }, :owner => self, :extend => BucketsExtension)
+    end
+
+    # Returns the bucket with the given name. Does not check whether the
+    # bucket exists. But also does not issue any HTTP requests, so it's
+    # much faster than buckets.find
+    def bucket(name)
+      Bucket.send(:new, self, name)
+    end
+
+    # Returns "http://" or "https://", depends on <tt>:use_ssl</tt>
+    # value from initializer
+    def protocol
+      use_ssl ? "https://" : "http://"
+    end
+
+    # Returns 443 or 80, depends on <tt>:use_ssl</tt> value from
+    # initializer
+    def port
+      use_ssl ? 443 : 80
+    end
+
+    def inspect #:nodoc:
+      "#<#{self.class}:#@access_key_id>"
+    end
+
+    private
+
+    def list_all_my_buckets
+      response = service_request(:get)
+      names = parse_list_all_my_buckets_result(response.body)
+      names.map { |name| Bucket.send(:new, self, name) }
+    end
+
+    def service_request(method, options = {})
+      connection.request(method, options.merge(:path => "/#{options[:path]}"))
+    end
+
+    def connection
+      return @connection if defined?(@connection)
+      @connection = Connection.new(:access_key_id => @access_key_id,
+                                   :secret_access_key => @secret_access_key,
+                                   :host => @host,
+                                   :use_ssl => @use_ssl,
+                                   :timeout => @timeout,
+                                   :debug => @debug,
+                                   :proxy => @proxy)
+    end
+  end
+end
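A minimal usage sketch based on the options accepted by Service#initialize above; the credentials and radosgw endpoint are placeholders, and Bucket#name plus the buckets Proxy behaviour are assumed to work as in the upstream s3 gem (bucket.rb and the Proxies module are listed above but not shown in this section).

    require "radosgw-s3"

    service = S3::Service.new(
      :access_key_id     => "RADOSGW_ACCESS_KEY",   # placeholder
      :secret_access_key => "RADOSGW_SECRET_KEY",   # placeholder
      :host              => "rgw.example.com",      # placeholder radosgw endpoint
      :use_ssl           => false,                  # the default shown above
      :use_vhost         => false                   # path-style requests
    )

    service.buckets.each { |bucket| puts bucket.name }   # GET / is issued lazily through the proxy
    backups = service.bucket("backups")                  # no HTTP request is made here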
data/lib/s3/signature.rb
ADDED
@@ -0,0 +1,261 @@
+module S3
+
+  # Class responsible for generating signatures to requests.
+  #
+  # Implements algorithm defined by Amazon Web Services to sign
+  # request with secret private credentials
+  #
+  # === See
+  # http://docs.amazonwebservices.com/AmazonS3/latest/index.html?RESTAuthentication.html
+
+  class Signature
+
+    # Generates signature for given parameters
+    #
+    # ==== Options
+    # * <tt>:host</tt> - Hostname
+    # * <tt>:request</tt> - Net::HTTPRequest object with correct
+    #   headers
+    # * <tt>:access_key_id</tt> - Access key id
+    # * <tt>:secret_access_key</tt> - Secret access key
+    #
+    # ==== Returns
+    # Generated signature string for given hostname and request
+    def self.generate(options)
+      request = options[:request]
+      access_key_id = options[:access_key_id]
+
+      options.merge!(:headers => request,
+                     :method => request.method,
+                     :resource => request.path)
+
+      signature = canonicalized_signature(options)
+
+      "AWS #{access_key_id}:#{signature}"
+    end
+
+    # Generates temporary URL signature for given resource
+    #
+    # ==== Options
+    # * <tt>:bucket</tt> - Bucket in which the resource resides
+    # * <tt>:resource</tt> - Path to the resource you want to create
+    #   a temporary link to
+    # * <tt>:secret_access_key</tt> - Secret access key
+    # * <tt>:expires_at</tt> - Unix time stamp of when the resource
+    #   link will expire
+    # * <tt>:method</tt> - HTTP request method you want to use on
+    #   the resource, defaults to GET
+    # * <tt>:headers</tt> - Any additional HTTP headers you intend
+    #   to use when requesting the resource
+    # * <tt>:add_bucket_to_host</tt> - Use in case of virtual-host style,
+    #   defaults to false
+    def self.generate_temporary_url_signature(options)
+      bucket = options[:bucket]
+      resource = options[:resource]
+      secret_access_key = options[:secret_access_key]
+      expires = options[:expires_at]
+
+      headers = options[:headers] || {}
+      headers.merge!("date" => expires.to_i.to_s)
+
+      resource = "/#{URI.escape(resource, /[^#{URI::REGEXP::PATTERN::UNRESERVED}\/]/)}"
+      resource = "/#{bucket}" + resource unless options[:add_bucket_to_host]
+
+      options.merge!(:resource => resource,
+                     :method => options[:method] || :get,
+                     :headers => headers)
+      signature = canonicalized_signature(options)
+
+      CGI.escape(signature)
+    end
+
+    # Generates temporary URL for given resource
+    #
+    # ==== Options
+    # * <tt>:bucket</tt> - Bucket in which the resource resides
+    # * <tt>:resource</tt> - Path to the resource you want to create
+    #   a temporary link to
+    # * <tt>:access_key</tt> - Access key
+    # * <tt>:secret_access_key</tt> - Secret access key
+    # * <tt>:expires_at</tt> - Unix time stamp of when the resource
+    #   link will expire
+    # * <tt>:method</tt> - HTTP request method you want to use on
+    #   the resource, defaults to GET
+    # * <tt>:headers</tt> - Any additional HTTP headers you intend
+    #   to use when requesting the resource
+    # * <tt>:add_bucket_to_host</tt> - Use in case of virtual-host style,
+    #   defaults to false
+    def self.generate_temporary_url(options)
+      bucket = options[:bucket]
+      resource = options[:resource]
+      access_key = options[:access_key]
+      expires = options[:expires_at].to_i
+      host = options[:host]
+
+      if options[:add_bucket_to_host]
+        host = bucket + '.' + host
+        url = "http://#{host}/#{resource}"
+      else
+        url = "http://#{host}/#{bucket}/#{resource}"
+      end
+
+      options[:host] = host
+      signature = generate_temporary_url_signature(options)
+
+      url << "?AWSAccessKeyId=#{access_key}"
+      url << "&Expires=#{expires}"
+      url << "&Signature=#{signature}"
+    end
+
+    private
+
+    def self.canonicalized_signature(options)
+      headers = options[:headers] || {}
+      host = options[:host] || ""
+      resource = options[:resource]
+      access_key_id = options[:access_key_id]
+      secret_access_key = options[:secret_access_key]
+
+      http_verb = options[:method].to_s.upcase
+      content_md5 = headers["content-md5"] || ""
+      content_type = headers["content-type"] || ""
+      date = headers["x-amz-date"].nil? ? headers["date"] : ""
+      canonicalized_resource = canonicalized_resource(host, resource)
+      canonicalized_amz_headers = canonicalized_amz_headers(headers)
+
+      string_to_sign = ""
+      string_to_sign << http_verb
+      string_to_sign << "\n"
+      string_to_sign << content_md5
+      string_to_sign << "\n"
+      string_to_sign << content_type
+      string_to_sign << "\n"
+      string_to_sign << date
+      string_to_sign << "\n"
+      string_to_sign << canonicalized_amz_headers
+      string_to_sign << canonicalized_resource
+
+      digest = OpenSSL::Digest.new("sha1")
+      hmac = OpenSSL::HMAC.digest(digest, secret_access_key, string_to_sign)
+      base64 = Base64.encode64(hmac)
+      base64.chomp
+    end
+
+    # Helper method for extracting header fields from Net::HTTPRequest
+    # and preparing them for signing in #generate method
+    #
+    # ==== Parameters
+    # * <tt>request</tt> - Net::HTTPRequest object with header fields
+    #   filled in
+    #
+    # ==== Returns
+    # String containing interesting header fields in suitable order
+    # and form
+    def self.canonicalized_amz_headers(request)
+      headers = []
+
+      # 1. Convert each HTTP header name to lower-case. For example,
+      # "X-Amz-Date" becomes "x-amz-date".
+      request.each { |key, value| headers << [key.downcase, value] if key =~ /\Ax-amz-/io }
+      #=> [["c", 0], ["a", 1], ["a", 2], ["b", 3]]
+
+      # 2. Sort the collection of headers lexicographically by header
+      # name.
+      headers.sort!
+      #=> [["a", 1], ["a", 2], ["b", 3], ["c", 0]]
+
+      # 3. Combine header fields with the same name into one
+      # "header-name:comma-separated-value-list" pair as prescribed by
+      # RFC 2616, section 4.2, without any white-space between
+      # values. For example, the two metadata headers
+      # "x-amz-meta-username: fred" and "x-amz-meta-username: barney"
+      # would be combined into the single header "x-amz-meta-username:
+      # fred,barney".
+      combined_headers = headers.inject([]) do |new_headers, header|
+        existing_header = new_headers.find { |h| h.first == header.first }
+        if existing_header
+          existing_header.last << ",#{header.last}"
+        else
+          new_headers << header
+        end
+      end
+      #=> [["a", "1,2"], ["b", "3"], ["c", "0"]]
+
+      # 4. "Un-fold" long headers that span multiple lines (as allowed
+      # by RFC 2616, section 4.2) by replacing the folding white-space
+      # (including new-line) by a single space.
+      unfolded_headers = combined_headers.map do |header|
+        key = header.first
+        value = header.last
+        value.gsub!(/\s+/, " ")
+        [key, value]
+      end
+
+      # 5. Trim any white-space around the colon in the header. For
+      # example, the header "x-amz-meta-username: fred,barney" would
+      # become "x-amz-meta-username:fred,barney"
+      joined_headers = unfolded_headers.map do |header|
+        key = header.first.strip
+        value = header.last.strip
+        "#{key}:#{value}"
+      end
+
+      # 6. Finally, append a new-line (U+000A) to each canonicalized
+      # header in the resulting list. Construct the
+      # CanonicalizedResource element by concatenating all headers in
+      # this list into a single string.
+      joined_headers << "" unless joined_headers.empty?
+      joined_headers.join("\n")
+    end
+
+    # Helper method for extracting canonicalized resource address
+    #
+    # ==== Parameters
+    # * <tt>host</tt> - Hostname
+    # * <tt>request</tt> - Net::HTTPRequest object with header fields
+    #   filled in
+    #
+    # ==== Returns
+    # String containing extracted canonicalized resource
+    def self.canonicalized_resource(host, resource)
+      # 1. Start with the empty string ("").
+      string = ""
+
+      # 2. If the request specifies a bucket using the HTTP Host
+      # header (virtual hosted-style), append the bucket name preceded
+      # by a "/" (e.g., "/bucketname"). For path-style requests and
+      # requests that don't address a bucket, do nothing. For more
+      # information on virtual hosted-style requests, see Virtual
+      # Hosting of Buckets.
+      bucket_name = host.sub(/\.?#{host}\Z/, "")
+      string << "/#{bucket_name}" unless bucket_name.empty?
+
+      # 3. Append the path part of the un-decoded HTTP Request-URI,
+      # up-to but not including the query string.
+      uri = URI.parse(resource)
+      string << uri.path
+
+      # 4. If the request addresses a sub-resource, like ?location,
+      # ?acl, or ?torrent, append the sub-resource including question
+      # mark.
+      sub_resources = [
+        "acl",
+        "location",
+        "logging",
+        "notification",
+        "partNumber",
+        "policy",
+        "requestPayment",
+        "torrent",
+        "uploadId",
+        "uploads",
+        "versionId",
+        "versioning",
+        "versions",
+        "website"
+      ]
+      string << "?#{$1}" if uri.query =~ /&?(#{sub_resources.join("|")})(?:&|=|\Z)/
+      string
+    end
+  end
+end
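A hedged sketch of Signature.generate_temporary_url using the options documented above; every value below is a placeholder, and because the code relies on URI.escape (removed in Ruby 3.0) these files target older Ruby versions.

    require "radosgw-s3"

    url = S3::Signature.generate_temporary_url(
      :bucket            => "backups",              # placeholder bucket
      :resource          => "db/dump.sql",          # placeholder key
      :host              => "rgw.example.com",      # placeholder radosgw endpoint
      :access_key        => "RADOSGW_ACCESS_KEY",   # placeholder
      :secret_access_key => "RADOSGW_SECRET_KEY",   # placeholder
      :expires_at        => Time.now + 3600         # link valid for roughly one hour
    )
    puts url
    #=> http://rgw.example.com/backups/db/dump.sql?AWSAccessKeyId=...&Expires=...&Signature=...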