s3restful 0.2.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGELOG.md +24 -0
- data/Gemfile +10 -0
- data/LICENSE.txt +13 -0
- data/README.md +230 -0
- data/Rakefile +47 -0
- data/VERSION +1 -0
- data/benchmark/right_aws.rb +106 -0
- data/lib/s3restful/aws.rb +65 -0
- data/lib/s3restful/log.rb +40 -0
- data/lib/s3restful/s3/item.rb +136 -0
- data/lib/s3restful/s3/request.rb +111 -0
- data/lib/s3restful/s3.rb +16 -0
- data/lib/s3restful/utils.rb +26 -0
- data/lib/s3restful.rb +23 -0
- data/s3restful.gemspec +69 -0
- data/test/aws_test.rb +41 -0
- data/test/s3/item_test.rb +531 -0
- data/test/s3/request_test.rb +109 -0
- data/test/s3_test.rb +32 -0
- data/test/test_helper.rb +94 -0
- metadata +148 -0
data/CHANGELOG.md
ADDED
@@ -0,0 +1,24 @@
Changelog
=============

- re-license under Apache 2.0

0.2.5

- fix building

0.2.3

- yank older version

0.2.2

- Refactor version and build code

0.2.1

- Return the response object on get so that the data can be streamed [carlism]

0.2.0

- Initial release
data/Gemfile
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,13 @@
Copyright 2010-2011 Peritor GmbH

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
data/README.md
ADDED
@@ -0,0 +1,230 @@
Important Note
==============
s3restful is a modified version of the Happening gem written by Jonathan Weiss (https://github.com/peritor/happening) under the Apache 2.0 license.
s3restful introduces compatibility patches that enable the original Happening gem to communicate with other S3-API-based storage systems such as Cumulus and Walrus.
It has also been improved to implement streaming uploads (credit goes to Viktors Buls' Happening fork at https://github.com/krukid/happening).

This README has been adapted from the original Happening gem. The license was maintained with its original authors.

--------------

Amazon S3 Ruby library that leverages [EventMachine](http://rubyeventmachine.com/) and [em-http-request](http://github.com/igrigorik/em-http-request).

By using EventMachine, s3restful does not block on S3 downloads/uploads, thus allowing for higher concurrency.

s3restful was developed by [Peritor](http://www.peritor.com) for usage inside Nanite/EventMachine.
Alternatives like RightAws block during the HTTP calls, thus blocking the Nanite agent.

For now it only supports GET, PUT and DELETE operations on S3 items. The PUT operations support S3 ACLs/permissions.
s3restful will handle redirects and retries on errors by default.

s3restful also supports other S3-compatible APIs, like Eucalyptus Walrus and Nimbus Cumulus. See the instructions below
for more details.

Installation
============

    gem install s3restful

Usage
=============

    require 's3restful'

    EM.run do
      item = S3restful::S3::Item.new('bucket', 'item_id')
      item.get # non-authenticated download, works only for public-read content

      item = S3restful::S3::Item.new('bucket', 'item_id', :aws_access_key_id => 'Your-ID', :aws_secret_access_key => 'secret')
      item.get # authenticated download

      item.put("The new content")

      item.delete
    end

The above examples are a bit useless, as you never get any content back.
You need to specify a callback that interacts with the HTTP response:

    EM.run do
      item = S3restful::S3::Item.new('bucket', 'item_id', :aws_access_key_id => 'Your-ID', :aws_secret_access_key => 'secret')
      item.get do |response|
        puts "the response content is: #{response.response}"
        EM.stop
      end
    end

This will enqueue your download and run it in the EventMachine event loop.

You can also react to errors:

    EM.run do
      on_error = Proc.new {|response| puts "An error occurred: #{response.response_header.status}"; EM.stop }
      item = S3restful::S3::Item.new('bucket', 'item_id', :aws_access_key_id => 'Your-ID', :aws_secret_access_key => 'secret')
      item.get(:on_error => on_error) do |response|
        puts "the response content is: #{response.response}"
        EM.stop
      end
    end

If you don't supply an error handler yourself, s3restful will by default raise an Exception.

Downloading many files could look like this:

    EM.run do
      count = 100
      on_error = Proc.new {|http| puts "An error occurred: #{http.response_header.status}"; EM.stop if count <= 0}
      on_success = Proc.new {|http| puts "the response is: #{http.response}"; EM.stop if count <= 0}

      count.times do |i|
        item = S3restful::S3::Item.new('bucket', "item_#{i}", :aws_access_key_id => 'Your-ID', :aws_secret_access_key => 'secret')
        item.get(:on_success => on_success, :on_error => on_error)
      end
    end

Upload
=============

s3restful supports the simple S3 PUT upload:

    EM.run do
      on_error = Proc.new {|http| puts "An error occurred: #{http.response_header.status}"; EM.stop }
      item = S3restful::S3::Item.new('bucket', 'item_id', :aws_access_key_id => 'Your-ID', :aws_secret_access_key => 'secret')
      item.put( File.read('/etc/passwd'), :on_error => on_error ) do |response|
        puts "Upload finished!"; EM.stop
      end
    end

Setting permissions looks like this:

    EM.run do
      on_error = Proc.new {|http| puts "An error occurred: #{http.response_header.status}"; EM.stop }
      on_success = Proc.new {|http| puts "the response is: #{http.response}"; EM.stop }
      item = S3restful::S3::Item.new('bucket', 'item_id', :aws_access_key_id => 'Your-ID', :aws_secret_access_key => 'secret', :permissions => 'public-write')
      item.get(:on_success => on_success, :on_error => on_error)
    end

Custom headers:

    EM.run do
      on_error = Proc.new {|http| puts "An error occurred: #{http.response_header.status}"; EM.stop }
      on_success = Proc.new {|http| puts "the response is: #{http.response}"; EM.stop }
      item = S3restful::S3::Item.new('bucket', 'item_id', :aws_access_key_id => 'Your-ID', :aws_secret_access_key => 'secret', :permissions => 'public-write')
      item.put(File.read('/etc/passwd'),
               :on_success => on_success,
               :on_error => on_error,
               :headers => {
                 'Cache-Control' => "max-age=252460800",
                 'Content-Type' => 'text/html',
                 'Expires' => 'Fri, 16 Nov 2018 22:09:29 GMT',
                 'x-amz-meta-abc' => 'ABC'
               })
    end


Deleting
=============

s3restful supports the simple S3 DELETE operation:

    EM.run do
      on_error = Proc.new {|response| puts "An error occurred: #{response.response_header.status}"; EM.stop }
      item = S3restful::S3::Item.new('bucket', 'item_id', :aws_access_key_id => 'Your-ID', :aws_secret_access_key => 'secret')
      item.delete(:on_error => on_error) do |response|
        puts "Deleted!"
        EM.stop
      end
    end

Amazon returns no content on delete, so having a success handler is usually not needed for delete operations.

Head
=============

You can also just load the headers of an S3 item:

    EM.run do
      on_error = Proc.new {|response| puts "An error occurred: #{response.response_header.status}"; EM.stop }
      item = S3restful::S3::Item.new('bucket', 'item_id', :aws_access_key_id => 'Your-ID', :aws_secret_access_key => 'secret')
      item.head(:on_error => on_error) do |response|
        puts "Headers: #{response.inspect}"
        EM.stop
      end
    end


Streaming
=============

The response data can also be streamed:

    EM.run do
      item = S3restful::S3::Item.new( bucket...
      item.get(:on_error => on_error, :on_success => on_success ).stream do |chunk|
        # .. handle the individual chunk
      end
    end


SSL Support
=============

s3restful will use SSL/HTTPS by default. What it cannot do by default is verify the SSL certificate. This means
that traffic is encrypted, but nobody can say whether the SSL endpoint is the one you expect. In order to verify the
SSL certificate you need to provide s3restful with the path to a certificate CA collection in PEM format:

    S3restful::S3.ssl_options[:cert_chain_file] = '/etc/ca-bundle.crt'

You can also set this option on each item:

    S3restful::S3::Item.new('bucket', 'item_id',
      :aws_access_key_id => 'A',
      :aws_secret_access_key => 'B',
      :ssl => {
        :cert_chain_file => '/etc/ca-bundle.crt'
      })

Or even on the request:

    item.get(:ssl => {:cert_chain_file => '/etc/ca-bundle.crt'})

The SSL options are passed directly to EventMachine; see the [EventMachine documentation](http://eventmachine.rubyforge.org/EventMachine/Connection.html#M000296) for more information on the SSL support.


Walrus
=============

s3restful also supports interacting with Walrus storage from the Eucalyptus cloud. The only difference is in the Item
initialization. Here you should prepend the bucket name with the 'services/Walrus/' string, as this is the base path
used by the Walrus API and it must be signed along with the bucket name. You should also specify the host address and
port where Walrus listens for incoming requests.

    S3restful::S3::Item.new('services/Walrus/bucket', 'item_id', :server => 'Walrus address', :port => 8773, :protocol => 'http',
      :aws_access_key_id => 'Walrus ID', :aws_secret_access_key => 'Walrus secret')


Cumulus
=============

s3restful also supports interacting with Cumulus storage from the Nimbus cloud. The only difference is in the Item
initialization, where you should specify the host address and port where Cumulus listens for incoming requests.

    S3restful::S3::Item.new('bucket', 'item_id', :server => 'cumulus address', :port => 8888, :protocol => 'http',
      :aws_access_key_id => 'cumulus ID', :aws_secret_access_key => 'cumulus secret')


Credits
=============

The AWS signing and canonical request description are based on [RightAws](http://github.com/rightscale/right_aws).


License
=============

Happening is licensed under the Apache 2.0 license. See LICENSE.txt

About
=============

Happening was written by [Jonathan Weiss](http://twitter.com/jweiss) for [Peritor](http://www.peritor.com).
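The README above does not show the file-based upload helper shipped in this release: `Item#store` (defined in data/lib/s3restful/s3/item.rb further down in this diff) takes a file path instead of an in-memory string. A minimal usage sketch in the README's style, assuming the request layer streams the `:file` option from disk (that part of the code is not shown in this section):

    EM.run do
      on_error = Proc.new {|http| puts "An error occurred: #{http.response_header.status}"; EM.stop }
      item = S3restful::S3::Item.new('bucket', 'item_id', :aws_access_key_id => 'Your-ID', :aws_secret_access_key => 'secret')
      # store passes :file => file_path to the request instead of :data => string
      item.store('/path/to/large_file', :on_error => on_error) do |response|
        puts "Upload finished!"
        EM.stop
      end
    end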
data/Rakefile
ADDED
@@ -0,0 +1,47 @@
# encoding: utf-8

require 'rubygems'
require 'bundler'
begin
  Bundler.setup(:default, :development)
rescue Bundler::BundlerError => e
  $stderr.puts e.message
  $stderr.puts "Run `bundle install` to install missing gems"
  exit e.status_code
end
require 'rake'

require 'jeweler'
require File.expand_path("./lib/s3restful")
Jeweler::Tasks.new do |gem|
  # gem is a Gem::Specification... see http://docs.rubygems.org/read/chapter/20 for more options
  gem.name = "s3restful"
  gem.homepage = "http://github.com/peritor/s3restful"
  gem.license = "BSD"
  gem.summary = %Q{An EventMachine based S3 client }
  gem.description = %Q{An EventMachine based S3 client }
  gem.email = "jw@innerewut.de"
  gem.authors = ["Jonathan Weiss"]
  gem.version = S3restful::VERSION
  # dependencies defined in Gemfile
end
Jeweler::RubygemsDotOrgTasks.new

require 'rake/testtask'
Rake::TestTask.new(:test) do |t|
  t.libs << 'test'
  t.pattern = "test/**/*_test.rb"
  t.verbose = true
end

task :default => :test

require 'rake/rdoctask'
Rake::RDocTask.new do |rdoc|
  version = File.exist?('VERSION') ? File.read('VERSION') : ""

  rdoc.rdoc_dir = 'rdoc'
  rdoc.title = "hello-gem #{version}"
  rdoc.rdoc_files.include('README*')
  rdoc.rdoc_files.include('lib/**/*.rb')
end
data/VERSION
ADDED
@@ -0,0 +1 @@
0.2.7
data/benchmark/right_aws.rb
ADDED
@@ -0,0 +1,106 @@
require File.dirname(__FILE__) + '/../s3restful'

require 'benchmark'
require 'right_aws'

AWS_ACCESS_KEY_ID = ENV['AWS_ACCESS_KEY_ID'] or raise "please set AWS_ACCESS_KEY_ID='your-key'"
AWS_SECRET_ACCESS_KEY = ENV['AWS_SECRET_ACCESS_KEY'] or raise "please set AWS_SECRET_ACCESS_KEY='your-scret'"

BUCKET = 's3restful-benchmark'
FILE = 'the_file_name'
PROTOCOL = 'https'

COUNT = 100
CONTENT = File.read('/tmp/VzLinuxUG.pdf')

command = ARGV.first || 'get'

puts "running command: #{command}"

if command == 'get'
  Benchmark.bm(7) do |x|
    x.report("RightAWS - Get an item") do
      count = COUNT
      s3 = RightAws::S3Interface.new(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, :protocol => PROTOCOL)
      count.times do |i|
        s3.get_object(BUCKET, FILE)
        print '.'; $stdout.flush
      end
    end

    puts ""
    x.report("S3restful - Get an item") do
      puts ""
      count = COUNT
      on_success = Proc.new do |http|
        print '.'; $stdout.flush
        count = count - 1
        EM.stop if count <= 0
      end

      on_error = Proc.new do |http|
        puts "Status: #{http.response_header.status}"
        puts "Header: #{http.response_header.inspect}"
        puts "Content:"
        puts http.response.inspect + "\n"
        count = count - 1
        EM.stop if count <= 0
      end

      EM.run do
        count.times do |i|
          item = S3restful::S3::Item.new(BUCKET, FILE, :protocol => PROTOCOL, :on_success => on_success, :on_error => on_error)
          item.get
        end
      end
    end
  end

elsif command == 'put'
  Benchmark.bm(7) do |x|
    x.report("RightAWS - Put an item") do
      count = COUNT
      s3 = RightAws::S3Interface.new(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, :protocol => PROTOCOL)
      count.times do |i|
        s3.put(BUCKET, "upload_test_right_aws_#{i}", CONTENT)
        print '.'; $stdout.flush
      end
    end

    puts ""
    x.report("S3restful - Put an item") do
      puts ""
      count = COUNT
      on_success = Proc.new do |http|
        #puts "Success"
        puts "Status: #{http.response_header.status}" unless http.response_header.status == 200
        #puts "Header: #{http.response_header.inspect}"
        #puts "Content:"
        #puts http.response.inspect + "\n"
        print '.'; $stdout.flush
        count = count - 1
        EM.stop if count <= 0
      end

      on_error = Proc.new do |http|
        puts "Error"
        puts "Status: #{http.response_header.status}"
        puts "Header: #{http.response_header.inspect}"
        puts "Content:"
        puts http.response.inspect + "\n"
        count = count - 1
        EM.stop if count <= 0
      end

      EM.run do
        count.times do |i|
          item = S3restful::S3::Item.new(BUCKET, "upload_test_s3restful_#{i}", :protocol => PROTOCOL, :on_success => on_success, :on_error => on_error, :aws_access_key_id => AWS_ACCESS_KEY_ID, :aws_secret_access_key => AWS_SECRET_ACCESS_KEY)
          item.put(CONTENT)
        end
      end
    end
  end

else
  puts "unknown command: #{command}"
end
data/lib/s3restful/aws.rb
ADDED
@@ -0,0 +1,65 @@
require 'time'
module S3restful
  class AWS
    include Utils

    AMAZON_HEADER_PREFIX = 'x-amz-'
    AMAZON_METADATA_PREFIX = 'x-amz-meta-'
    DIGEST = OpenSSL::Digest.new('sha1')

    attr_accessor :aws_access_key_id, :aws_secret_access_key

    def initialize(aws_access_key_id, aws_secret_access_key)
      @aws_access_key_id = aws_access_key_id
      @aws_secret_access_key = aws_secret_access_key
      raise ArgumentError, "need AWS Access Key Id and AWS Secret Key" if blank?(aws_access_key_id) || blank?(aws_secret_access_key)
    end

    def sign(method, path, headers={})
      headers = {
        'date' => utc_httpdate
      }.update(headers)

      request_description = canonical_request_description(method, path, headers)
      headers.update("Authorization" => "AWS #{aws_access_key_id}:#{generate_signature(request_description)}")
    end

    protected

    def utc_httpdate
      Time.now.utc.httpdate
    end

    def generate_signature(request_description)
      Base64.encode64(OpenSSL::HMAC.digest(DIGEST, aws_secret_access_key, request_description)).strip
    end

    def canonical_request_description(method, path, headers = {}, expires = nil)
      s3_attributes = {}
      headers.each do |key, value|
        key = key.downcase
        s3_attributes[key] = value.to_s.strip if key.match(/^#{AMAZON_HEADER_PREFIX}|^content-md5$|^content-type$|^date$/o)
      end
      s3_attributes['content-type'] ||= ''
      s3_attributes['content-md5'] ||= ''
      s3_attributes['date'] = '' if s3_attributes.has_key?('x-amz-date')
      s3_attributes['date'] = expires if expires

      description = "#{method}\n"
      s3_attributes.sort { |a, b| a[0] <=> b[0] }.each do |key, value|
        description << (key.match(/^#{AMAZON_HEADER_PREFIX}/o) ? "#{key}:#{value}\n" : "#{value}\n")
      end

      # ignore all parameters by default
      description << path.gsub(/\?.*$/, '')

      # handle amazon parameters
      description << '?acl' if path[/[&?]acl($|&|=)/]
      description << '?torrent' if path[/[&?]torrent($|&|=)/]
      description << '?location' if path[/[&?]location($|&|=)/]
      description << '?logging' if path[/[&?]logging($|&|=)/]
      description
    end

  end
end
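For orientation, a minimal sketch of how the signer above is driven (illustrative only, not part of the packaged file; the Item class later in this diff calls it the same way):

    require 's3restful'

    aws = S3restful::AWS.new('Your-ID', 'secret')
    headers = aws.sign('GET', '/bucket/item_id', { 'x-amz-meta-abc' => 'ABC' })
    # headers now contains the original entries plus a 'date' header (UTC HTTP-date)
    # and an Authorization header of the form
    #   "AWS Your-ID:<Base64 HMAC-SHA1 of the canonical request description>"
    # as produced by generate_signature above.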
data/lib/s3restful/log.rb
ADDED
@@ -0,0 +1,40 @@
module S3restful
  class Log

    @@logger = Logger.new(STDOUT)
    @@logger.level = Logger::ERROR

    def self.logger=(log)
      @@logger = log
    end

    def self.logger
      @@logger
    end

    def self.level=(lev)
      logger.level = lev
    end

    def self.level
      logger.level
    end

    def self.debug(msg)
      logger.debug("S3restful: #{msg}")
    end

    def self.info(msg)
      logger.debug("S3restful: #{msg}")
    end

    def self.warn(msg)
      logger.debug("S3restful: #{msg}")
    end

    def self.error(msg)
      logger.debug("S3restful: #{msg}")
    end

  end
end
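The logger above writes to STDOUT at ERROR level by default; both the target and the level can be swapped at runtime. A brief usage sketch (illustrative only, not part of the packaged file):

    require 'logger'
    require 's3restful'

    S3restful::Log.logger = Logger.new('s3restful.log')  # replace the default STDOUT logger
    S3restful::Log.level  = Logger::DEBUG                # default level is Logger::ERROR
    S3restful::Log.debug('issuing request')              # emitted as "S3restful: issuing request"

Note that `info`, `warn`, and `error` above all delegate to `logger.debug`, so their messages only appear once the level has been lowered to DEBUG.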
data/lib/s3restful/s3/item.rb
ADDED
@@ -0,0 +1,136 @@
require 'uri'
require 'cgi'

module S3restful
  module S3
    class Item
      include Utils

      REQUIRED_FIELDS = [:server]
      VALID_HEADERS = ['Cache-Control', 'Content-Disposition', 'Content-Encoding', 'Content-Length', 'Content-MD5', 'Content-Type', 'Expect', 'Expires']

      attr_accessor :bucket, :aws_id, :options

      def initialize(bucket, aws_id, options = {})
        @options = {
          :timeout => 10,
          :server => 's3.amazonaws.com',
          :protocol => 'https',
          :aws_access_key_id => nil,
          :aws_secret_access_key => nil,
          :retry_count => 4,
          :permissions => 'private',
          :ssl => S3restful::S3.ssl_options
        }.update(symbolize_keys(options))
        assert_valid_keys(options, :timeout, :server, :port, :protocol, :aws_access_key_id, :aws_secret_access_key, :retry_count, :permissions, :ssl)
        @aws_id = aws_id.to_s
        @bucket = bucket.to_s

        validate
      end

      def head(request_options = {}, &blk)
        headers = needs_to_sign? ? aws.sign("HEAD", path) : {}
        request_options[:on_success] = blk if blk
        request_options.update(:headers => headers)
        S3restful::S3::Request.new(:head, url, {:ssl => options[:ssl]}.update(request_options)).execute
      end

      def get(request_options = {}, &blk)
        headers = needs_to_sign? ? aws.sign("GET", path) : {}
        request_options[:on_success] = blk if blk
        request_options.update(:headers => headers)
        S3restful::S3::Request.new(:get, url, {:ssl => options[:ssl]}.update(request_options)).execute
      end

      def aget(request_options = {}, &blk)
        headers = needs_to_sign? ? aws.sign("GET", path) : {}
        request_options[:on_success] = blk if blk
        request_options.update(:headers => headers)
        S3restful::S3::Request.new(:aget, url, {:ssl => options[:ssl]}.update(request_options)).execute
      end

      def put(data, request_options = {}, &blk)
        headers = construct_aws_headers('PUT', request_options.delete(:headers) || {})
        request_options[:on_success] = blk if blk
        request_options.update(:headers => headers, :data => data)
        S3restful::S3::Request.new(:put, url, {:ssl => options[:ssl]}.update(request_options)).execute
      end

      def store(file_path, request_options = {}, &blk)
        headers = construct_aws_headers('PUT', request_options.delete(:headers) || {})
        request_options[:on_success] = blk if blk
        request_options.update(:headers => headers, :file => file_path)
        S3restful::S3::Request.new(:put, url, {:ssl => options[:ssl]}.update(request_options)).execute
      end

      def delete(request_options = {}, &blk)
        headers = needs_to_sign? ? aws.sign("DELETE", path, {'url' => path}) : {}
        request_options[:on_success] = blk if blk
        request_options.update(:headers => headers)
        S3restful::S3::Request.new(:delete, url, {:ssl => options[:ssl]}.update(request_options)).execute
      end

      def url
        uri = options[:port] ? path : path(!dns_bucket?)
        URI::Generic.new(options[:protocol], nil, server, port, nil, uri, nil, nil, nil).to_s
      end

      def server
        return options[:server] if options[:port]
        dns_bucket? ? "#{bucket}.#{options[:server]}" : options[:server]
      end

      def path(with_bucket=true)
        with_bucket ? "/#{bucket}/#{CGI::escape(aws_id)}" : "/#{CGI::escape(aws_id)}"
      end

      protected

      def needs_to_sign?
        present?(options[:aws_access_key_id])
      end

      def dns_bucket?
        # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
        return false unless (3..63) === bucket.size
        bucket.split('.').each do |component|
          return false unless component[/^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/]
        end
        true
      end

      def port
        options[:port] || (options[:protocol].to_s == 'https' ? 443 : 80)
      end

      def validate
        raise ArgumentError, "need a bucket name" unless present?(bucket)
        raise ArgumentError, "need a AWS Key" unless present?(aws_id)

        REQUIRED_FIELDS.each do |field|
          raise ArgumentError, "need field #{field}" unless present?(options[field])
        end

        raise ArgumentError, "unknown protocoll #{options[:protocol]}" unless ['http', 'https'].include?(options[:protocol])
      end

      def aws
        @aws ||= S3restful::AWS.new(options[:aws_access_key_id], options[:aws_secret_access_key])
      end

      def construct_aws_headers(http_method, headers = {})
        unless headers.keys.all?{|header| VALID_HEADERS.include?(header) || header.to_s.match(/\Ax-amz-/) }
          raise ArgumentError, "invalid headers. All headers must either one of #{VALID_HEADERS} or start with 'x-amz-'"
        end

        permissions = options[:permissions] != 'private' ? {'x-amz-acl' => options[:permissions] } : {}
        headers.update(permissions)
        headers.update({'url' => path})

        headers = needs_to_sign? ? aws.sign(http_method, path, headers) : headers
      end

    end
  end
end
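To make the URL construction above concrete, a short sketch of what `url`, `server`, and `path` return (illustrative only; the host and credentials are placeholders, and the exact strings are inferred from the code above):

    require 's3restful'

    item = S3restful::S3::Item.new('bucket', 'item_id',
      :aws_access_key_id => 'Your-ID', :aws_secret_access_key => 'secret')
    item.server  # => "bucket.s3.amazonaws.com" -- DNS-safe bucket names use virtual-host style
    item.url     # => "https://bucket.s3.amazonaws.com:443/item_id"

    # With an explicit :server/:port (the Walrus/Cumulus case from the README),
    # the bucket stays in the request path instead:
    walrus = S3restful::S3::Item.new('bucket', 'item_id',
      :server => '192.168.1.1', :port => 8773, :protocol => 'http',
      :aws_access_key_id => 'Walrus ID', :aws_secret_access_key => 'Walrus secret')
    walrus.url   # => "http://192.168.1.1:8773/bucket/item_id"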