aws 1.10.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/History.txt +246 -0
- data/Manifest.txt +19 -0
- data/README.txt +168 -0
- data/Rakefile +17 -0
- data/lib/acf/right_acf_interface.rb +379 -0
- data/lib/awsbase/benchmark_fix.rb +39 -0
- data/lib/awsbase/right_awsbase.rb +797 -0
- data/lib/awsbase/support.rb +111 -0
- data/lib/ec2/right_ec2.rb +1737 -0
- data/lib/right_aws.rb +69 -0
- data/lib/s3/right_s3.rb +1094 -0
- data/lib/s3/right_s3_interface.rb +1175 -0
- data/lib/sdb/active_sdb.rb +936 -0
- data/lib/sdb/right_sdb_interface.rb +701 -0
- data/lib/sqs/right_sqs.rb +388 -0
- data/lib/sqs/right_sqs_gen2.rb +286 -0
- data/lib/sqs/right_sqs_gen2_interface.rb +444 -0
- data/lib/sqs/right_sqs_interface.rb +596 -0
- metadata +96 -0
@@ -0,0 +1,1175 @@
|
|
1
|
+
#
|
2
|
+
# Copyright (c) 2007-2008 RightScale Inc
|
3
|
+
#
|
4
|
+
# Permission is hereby granted, free of charge, to any person obtaining
|
5
|
+
# a copy of this software and associated documentation files (the
|
6
|
+
# "Software"), to deal in the Software without restriction, including
|
7
|
+
# without limitation the rights to use, copy, modify, merge, publish,
|
8
|
+
# distribute, sublicense, and/or sell copies of the Software, and to
|
9
|
+
# permit persons to whom the Software is furnished to do so, subject to
|
10
|
+
# the following conditions:
|
11
|
+
#
|
12
|
+
# The above copyright notice and this permission notice shall be
|
13
|
+
# included in all copies or substantial portions of the Software.
|
14
|
+
#
|
15
|
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
16
|
+
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
17
|
+
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
18
|
+
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
19
|
+
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
20
|
+
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
21
|
+
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
22
|
+
#
|
23
|
+
|
24
|
+
module RightAws
|
25
|
+
|
26
|
+
class S3Interface < RightAwsBase
|
27
|
+
|
28
|
+
# Uploads at or above this size (bytes) send an 'Expect: 100-continue' header.
USE_100_CONTINUE_PUT_SIZE = 1_000_000

include RightAwsBaseInterface

# Default endpoint settings for the Amazon S3 REST API.
DEFAULT_HOST           = 's3.amazonaws.com'
DEFAULT_PORT           = 443
DEFAULT_PROTOCOL       = 'https'
DEFAULT_SERVICE        = '/'
REQUEST_TTL            = 30
DEFAULT_EXPIRES_AFTER  = 1 * 24 * 60 * 60 # One day's worth of seconds
ONE_YEAR_IN_SECONDS    = 365 * 24 * 60 * 60
# Prefixes that mark Amazon-specific and user-metadata HTTP headers.
AMAZON_HEADER_PREFIX   = 'x-amz-'
AMAZON_METADATA_PREFIX = 'x-amz-meta-'
|
41
|
+
|
42
|
+
# Shared benchmarking helper that times XML parsing and S3 service calls.
@@bench = AwsBenchmarkingBlock.new

# Accumulated benchmark data for XML parsing.
def self.bench_xml
  @@bench.xml
end

# Accumulated benchmark data for S3 HTTP interaction.
def self.bench_s3
  @@bench.service
end
|
49
|
+
|
50
|
+
|
51
|
+
# Creates new RightS3 instance.
#
#  s3 = RightAws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX','hgTHt68JY07JKUY08ftHYtERkjgtfERn57XXXXXX', {:multi_thread => true, :logger => Logger.new('/tmp/x.log')}) #=> #<RightAws::S3Interface:0xb7b3c27c>
#
# Params is a hash:
#
#    {:server       => 's3.amazonaws.com'   # Amazon service host: 's3.amazonaws.com'(default)
#     :port         => 443                  # Amazon service port: 80 or 443(default)
#     :protocol     => 'https'              # Amazon service protocol: 'http' or 'https'(default)
#     :multi_thread => true|false           # Multi-threaded (connection per each thread): true or false(default)
#     :logger       => Logger Object}       # Logger instance: logs to STDOUT if omitted }
#
def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
  # Parse the optional S3_URL override once instead of four separate times.
  s3_url = ENV['S3_URL'] ? URI.parse(ENV['S3_URL']) : nil
  init({ :name             => 'S3',
         :default_host     => s3_url ? s3_url.host   : DEFAULT_HOST,
         :default_port     => s3_url ? s3_url.port   : DEFAULT_PORT,
         :default_service  => s3_url ? s3_url.path   : DEFAULT_SERVICE,
         :default_protocol => s3_url ? s3_url.scheme : DEFAULT_PROTOCOL },
       aws_access_key_id     || ENV['AWS_ACCESS_KEY_ID'],
       aws_secret_access_key || ENV['AWS_SECRET_ACCESS_KEY'],
       params)
end
|
73
|
+
|
74
|
+
|
75
|
+
#-----------------------------------------------------------------
|
76
|
+
# Requests
|
77
|
+
#-----------------------------------------------------------------
|
78
|
+
# Produces canonical string for signing.
|
79
|
+
def canonical_string(method, path, headers={}, expires=nil) # :nodoc:
|
80
|
+
s3_headers = {}
|
81
|
+
headers.each do |key, value|
|
82
|
+
key = key.downcase
|
83
|
+
s3_headers[key] = value.to_s.strip if key[/^#{AMAZON_HEADER_PREFIX}|^content-md5$|^content-type$|^date$/o]
|
84
|
+
end
|
85
|
+
s3_headers['content-type'] ||= ''
|
86
|
+
s3_headers['content-md5'] ||= ''
|
87
|
+
s3_headers['date'] = '' if s3_headers.has_key? 'x-amz-date'
|
88
|
+
s3_headers['date'] = expires if expires
|
89
|
+
# prepare output string
|
90
|
+
out_string = "#{method}\n"
|
91
|
+
s3_headers.sort { |a, b| a[0] <=> b[0] }.each do |key, value|
|
92
|
+
out_string << (key[/^#{AMAZON_HEADER_PREFIX}/o] ? "#{key}:#{value}\n" : "#{value}\n")
|
93
|
+
end
|
94
|
+
# ignore everything after the question mark...
|
95
|
+
out_string << path.gsub(/\?.*$/, '')
|
96
|
+
# ...unless there is an acl or torrent parameter
|
97
|
+
out_string << '?acl' if path[/[&?]acl($|&|=)/]
|
98
|
+
out_string << '?torrent' if path[/[&?]torrent($|&|=)/]
|
99
|
+
out_string << '?location' if path[/[&?]location($|&|=)/]
|
100
|
+
out_string << '?logging' if path[/[&?]logging($|&|=)/] # this one is beta, no support for now
|
101
|
+
out_string
|
102
|
+
end
|
103
|
+
|
104
|
+
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
def is_dns_bucket?(bucket_name)
  name = bucket_name.to_s
  # Overall name must be 3..63 characters long.
  return nil unless (3..63).include?(name.size)
  # Each dot-separated label: lowercase alphanumerics and hyphens,
  # starting and ending with an alphanumeric.
  return nil unless name.split('.').all? { |label| label[/^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/] }
  true
end
|
113
|
+
|
114
|
+
def fetch_request_params(headers) #:nodoc:
  # Start from the configured server and service root.
  host    = @params[:server]
  service = @params[:service].to_s
  service.chop! if service[%r{/$}] # remove trailing '/' from service
  # Split the request URL into bucket name, key path and query string.
  headers[:url].to_s[%r{^([a-z0-9._-]*)(/[^?]*)?(\?.+)?}i]
  bucket_name, key_path, params_list = $1, $2, $3
  if is_dns_bucket?(bucket_name)
    # DNS-compatible bucket: address it virtual-host style.
    host = "#{bucket_name}.#{host}"
    key_path ||= '/'
    path = "#{service}#{key_path}#{params_list}"
  else
    # Otherwise fall back to path-style addressing.
    path = "#{service}/#{bucket_name}#{key_path}#{params_list}"
  end
  # The signature always covers the path-style form of the resource.
  path_to_sign = "#{service}/#{bucket_name}#{key_path}#{params_list}"
  # path_to_sign = "/#{bucket_name}#{key_path}#{params_list}"
  [host, path, path_to_sign]
end
|
135
|
+
|
136
|
+
# Generates request hash for REST API.
# Assumes that headers[:url] is URL encoded (use CGI::escape)
def generate_rest_request(method, headers) # :nodoc:
  # Work out host, request path and the path used for signing.
  host, path, path_to_sign = fetch_request_params(headers)
  payload = headers[:data]
  # Drop unset (optional) values and symbolic service keys; what remains
  # are real HTTP headers.
  headers.each { |key, value| headers.delete(key) if value.nil? || key.is_a?(Symbol) }
  headers['content-type'] ||= ''
  headers['date'] = Time.now.httpdate
  # Build the Net::HTTP request object matching the verb.
  request = "Net::HTTP::#{method.capitalize}".constantize.new(path)
  request.body = payload if payload
  # Copy request headers and meta headers onto the request.
  headers.each { |key, value| request[key.to_s] = value }
  # Sign the canonicalized request with the secret key.
  auth_string = canonical_string(request.method, path_to_sign, request.to_hash)
  signature   = AwsUtils::sign(@aws_secret_access_key, auth_string)
  request['Authorization'] = "AWS #{@aws_access_key_id}:#{signature}"
  # Prepare output hash.
  { :request  => request,
    :server   => host,
    :port     => @params[:port],
    :protocol => @params[:protocol] }
end
|
163
|
+
|
164
|
+
# Sends request to Amazon and parses the response.
# Raises AwsError if any banana happened.
def request_info(request, parser, &block) # :nodoc:
  # Reuse one HTTP connection per thread (all share the main thread's
  # connection unless :multi_thread is set).
  thread = @params[:multi_thread] ? Thread.current : Thread.main
  thread[:s3_connection] ||= Rightscale::HttpConnection.new(:exception => RightAws::AwsError, :logger => @logger)
  request_info_impl(thread[:s3_connection], @@bench, request, parser, &block)
end
|
171
|
+
|
172
|
+
|
173
|
+
# Returns an array of customer's buckets. Each item is a +hash+.
#
#  s3.list_all_my_buckets #=>
#    [{:owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
#      :owner_display_name => "root",
#      :name => "bucket_name",
#      :creation_date => "2007-04-19T18:47:43.000Z"}, ..., {...}]
#
def list_all_my_buckets(headers={})
  # An empty :url targets the service root, which lists every bucket.
  req_hash = generate_rest_request('GET', headers.merge(:url => ''))
  request_info(req_hash, S3ListAllMyBucketsParser.new(:logger => @logger))
rescue
  on_exception
end
|
187
|
+
|
188
|
+
# Creates new bucket. Returns +true+ or an exception.
#
#  # create a bucket at American server
#  s3.create_bucket('my-awesome-bucket-us') #=> true
#  # create a bucket at European server
#  s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
#
def create_bucket(bucket, headers={})
  data = nil
  unless headers[:location].blank?
    data = "<CreateBucketConfiguration><LocationConstraint>#{headers[:location].to_s.upcase}</LocationConstraint></CreateBucketConfiguration>"
  end
  req_hash = generate_rest_request('PUT', headers.merge(:url=>bucket, :data => data))
  request_info(req_hash, RightHttp2xxParser.new)
rescue StandardError => e
  # If the bucket exists AWS returns an error for the location constraint
  # interface; treat that as success. (Previously `rescue Exception`, which
  # also swallowed SignalException/SystemExit — narrowed to StandardError.)
  e.is_a?(RightAws::AwsError) && e.message.include?('BucketAlreadyOwnedByYou') ? true : on_exception
end
|
206
|
+
|
207
|
+
# Retrieve bucket location
#
#  s3.create_bucket('my-awesome-bucket-us') #=> true
#  puts s3.bucket_location('my-awesome-bucket-us') #=> '' (Amazon's default value assumed)
#
#  s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
#  puts s3.bucket_location('my-awesome-bucket-eu') #=> 'EU'
#
def bucket_location(bucket, headers={})
  # The '?location' sub-resource returns the bucket's region constraint.
  req_hash = generate_rest_request('GET', headers.merge(:url => "#{bucket}?location"))
  request_info(req_hash, S3BucketLocationParser.new)
rescue
  on_exception
end
|
221
|
+
|
222
|
+
# Retrieves the logging configuration for a bucket.
# Returns a hash of {:enabled, :targetbucket, :targetprefix}
#
#  s3.interface.get_logging_parse(:bucket => "asset_bucket")
#    => {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
#
def get_logging_parse(params)
  AwsUtils.mandatory_arguments([:bucket], params)
  AwsUtils.allow_only([:bucket, :headers], params)
  params[:headers] ||= {}
  # Query the '?logging' sub-resource of the bucket.
  req_hash = generate_rest_request('GET', params[:headers].merge(:url => "#{params[:bucket]}?logging"))
  request_info(req_hash, S3LoggingParser.new)
rescue
  on_exception
end
|
238
|
+
|
239
|
+
# Sets logging configuration for a bucket from the XML configuration document.
# params:
#  :bucket
#  :xmldoc
def put_logging(params)
  AwsUtils.mandatory_arguments([:bucket, :xmldoc], params)
  AwsUtils.allow_only([:bucket, :xmldoc, :headers], params)
  params[:headers] ||= {}
  # PUT the XML document to the bucket's '?logging' sub-resource.
  req_hash = generate_rest_request('PUT', params[:headers].merge(:url => "#{params[:bucket]}?logging", :data => params[:xmldoc]))
  request_info(req_hash, S3TrueParser.new)
rescue
  on_exception
end
|
252
|
+
|
253
|
+
# Deletes new bucket. Bucket must be empty! Returns +true+ or an exception.
#
#  s3.delete_bucket('my_awesome_bucket') #=> true
#
# See also: force_delete_bucket method
#
def delete_bucket(bucket, headers={})
  req_hash = generate_rest_request('DELETE', headers.merge(:url => bucket))
  request_info(req_hash, RightHttp2xxParser.new)
rescue
  on_exception
end
|
265
|
+
|
266
|
+
# Returns an array of bucket's keys. Each array item (key data) is a +hash+.
#
#  s3.list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) #=>
#    [{:key => "test1",
#      :last_modified => "2007-05-18T07:00:59.000Z",
#      :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
#      :owner_display_name => "root",
#      :e_tag => "000000000059075b964b07152d234b70",
#      :storage_class => "STANDARD",
#      :size => 3,
#      :service=> {'is_truncated' => false,
#                  'prefix' => "t",
#                  'marker' => "",
#                  'name' => "my_awesome_bucket",
#                  'max-keys' => "5"}, ..., {...}]
#
def list_bucket(bucket, options={}, headers={})
  # Append listing options ('prefix', 'marker', ...) as a query string.
  bucket += '?' + options.map { |name, value| "#{name.to_s}=#{CGI::escape value.to_s}" }.join('&') unless options.blank?
  req_hash = generate_rest_request('GET', headers.merge(:url => bucket))
  request_info(req_hash, S3ListBucketParser.new(:logger => @logger))
rescue
  on_exception
end
|
289
|
+
|
290
|
+
# Incrementally list the contents of a bucket. Yields the following hash to a block:
#  s3.incrementally_list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) yields
#   {
#     :name => 'bucketname',
#     :prefix => 'subfolder/',
#     :marker => 'fileN.jpg',
#     :max_keys => 234,
#     :delimiter => '/',
#     :is_truncated => true,
#     :next_marker => 'fileX.jpg',
#     :contents => [
#        { :key => "file1",
#          :last_modified => "2007-05-18T07:00:59.000Z",
#          :e_tag => "000000000059075b964b07152d234b70",
#          :size => 3,
#          :storage_class => "STANDARD",
#          :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
#          :owner_display_name => "root"
#        }, { :key, ...}, ... {:key, ...}
#     ]
#     :common_prefixes => [
#        "prefix1",
#        "prefix2",
#        ...,
#        "prefixN"
#     ]
#   }
def incrementally_list_bucket(bucket, options={}, headers={}, &block)
  opts = options.symbolize_keys
  begin
    # Build the request URL with the current listing options.
    url = bucket.dup
    url += '?' + opts.map { |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&') unless opts.blank?
    req_hash = generate_rest_request('GET', headers.merge(:url => url))
    response = request_info(req_hash, S3ImprovedListBucketParser.new(:logger => @logger))
    more_keys = response[:is_truncated]
    if more_keys
      # Resume after the last entry seen and shrink the remaining quota.
      opts[:marker] = decide_marker(response)
      page_total = response[:contents].length + response[:common_prefixes].length
      opts[:'max-keys'] -= page_total if opts[:'max-keys']
    end
    yield response
  end while more_keys && under_max_keys(opts)
  true
rescue
  on_exception
end
|
336
|
+
|
337
|
+
|
338
|
+
private
# Picks the marker for the next incremental listing request: S3's explicit
# :next_marker when present, otherwise whichever sorts last among the final
# key and the final common prefix of this page.
def decide_marker(response)
  return response[:next_marker].dup if response[:next_marker]
  # Guard against an empty :contents list — previously `.last[:key]` raised
  # NoMethodError on nil when a page held only common prefixes.
  last_entry  = response[:contents].last
  last_key    = last_entry && last_entry[:key]
  last_prefix = response[:common_prefixes].last
  if !last_key
    return nil if !last_prefix
    last_prefix.dup
  elsif !last_prefix
    last_key.dup
  else
    last_key > last_prefix ? last_key.dup : last_prefix.dup
  end
end
|
352
|
+
|
353
|
+
# True while the caller's 'max-keys' quota (if any) is not yet exhausted.
def under_max_keys(internal_options)
  remaining = internal_options[:'max-keys']
  remaining ? remaining > 0 : true
end
|
356
|
+
|
357
|
+
public
# Saves object to Amazon. Returns +true+ or an exception.
# Any header starting with AMAZON_METADATA_PREFIX is considered
# user metadata. It will be stored with the object and returned
# when you retrieve the object. The total size of the HTTP
# request, not including the body, must be less than 4 KB.
#
#  s3.put('my_awesome_bucket', 'log/current/1.log', 'Ola-la!', 'x-amz-meta-family'=>'Woho556!') #=> true
#
# Supports 'streaming' uploads: pass an object that responds to 'read'
# (like IO#read) and to either 'lstat' or 'size', and the data is sent
# without first being read entirely into memory — including PUTs > 2 GB.
# Be sure such an object returns nil from read() after 'size' bytes,
# otherwise S3 drops the socket and HttpConnection reports an error.
#
# For Win32 users: Files and IO objects should be opened in binary mode;
# text-mode IO objects are converted to binary mode automatically.
#
def put(bucket, key, data=nil, headers={})
  # On Windows a file opened in text mode must be reset to binary mode
  # for streaming to work properly.
  data.binmode if data.respond_to?(:binmode)
  # Large bodies ask the server for a 100-continue before sending data.
  large = (data.respond_to?(:lstat) && data.lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
          (data.respond_to?(:size)  && data.size       >= USE_100_CONTINUE_PUT_SIZE)
  headers['expect'] = '100-continue' if large
  req_hash = generate_rest_request('PUT', headers.merge(:url => "#{bucket}/#{CGI::escape key}", :data => data))
  request_info(req_hash, RightHttp2xxParser.new)
rescue
  on_exception
end
|
407
|
+
|
408
|
+
|
409
|
+
|
410
|
+
# New experimental API for uploading objects, introduced in RightAws 1.8.1.
# store_object is similar in function to the older function put, but returns
# the full response metadata (including the Amazon request ID and the object
# ETag / MD5 checksum). It also allows optional verification of object md5
# checksums on upload: when a :md5 argument is provided, :verified_md5 in the
# response hash reports whether it matched the ETag returned by S3; without
# :md5, :verified_md5 is false. Arguments are checked for completeness as
# well as for spurious entries. The optional :headers argument lets the
# caller specify arbitrary request header values.
#
#  s3.store_object(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
#    => {"x-amz-id-2"=>"SVsnS2nfDaR+ixyJUlRKM8GndRyEMS16+oZRieamuL61pPxPaTuWrWtlYaEhYrI/",
#        "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
#        "date"=>"Mon, 29 Sep 2008 18:57:46 GMT",
#        :verified_md5=>true,
#        "x-amz-request-id"=>"63916465939995BA",
#        "server"=>"AmazonS3",
#        "content-length"=>"0"}
#
def store_object(params)
  AwsUtils.allow_only([:bucket, :key, :data, :headers, :md5], params)
  AwsUtils.mandatory_arguments([:bucket, :key, :data], params)
  params[:headers] ||= {}

  # On Windows, reset text-mode IO objects to binary mode so streaming works.
  params[:data].binmode if params[:data].respond_to?(:binmode)
  # Large bodies ask the server for a 100-continue before sending data.
  big_upload = (params[:data].respond_to?(:lstat) && params[:data].lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
               (params[:data].respond_to?(:size)  && params[:data].size       >= USE_100_CONTINUE_PUT_SIZE)
  params[:headers]['expect'] = '100-continue' if big_upload

  req_hash = generate_rest_request('PUT', params[:headers].merge(:url => "#{params[:bucket]}/#{CGI::escape params[:key]}", :data => params[:data]))
  resp = request_info(req_hash, S3HttpResponseHeadParser.new)
  # Compare the returned ETag (quotes stripped) against the caller's MD5.
  resp[:verified_md5] = params[:md5] ? (resp['etag'].gsub(/\"/, '') == params[:md5]) : false
  resp
rescue
  on_exception
end
|
460
|
+
|
461
|
+
# Identical in function to store_object, but requires verification that the
# returned ETag is identical to the checksum passed in by the user as the
# 'md5' argument. If the check passes, returns the response metadata with
# the "verified_md5" field set true. Raises an exception if the checksums
# conflict. This call is implemented as a wrapper around store_object and
# the user may gain different semantics by creating a custom wrapper.
#
#  s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
#    => {..., :verified_md5=>true, ...}
#  s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2953", :data => "polemonium" )
#    RightAws::AwsError: Uploaded object failed MD5 checksum verification: {...}
def store_object_and_verify(params)
  AwsUtils.mandatory_arguments([:md5], params)
  r = store_object(params)
  raise AwsError.new("Uploaded object failed MD5 checksum verification: #{r.inspect}") unless r[:verified_md5]
  r
end
|
487
|
+
|
488
|
+
# Retrieves object data from Amazon. Returns a +hash+ or an exception.
#
#  s3.get('my_awesome_bucket', 'log/curent/1.log') #=>
#    {:object  => "Ola-la!",
#     :headers => {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
#                  "content-type" => "",
#                  "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
#                  "date" => "Wed, 23 May 2007 09:08:03 GMT",
#                  "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
#                  "x-amz-meta-family" => "Woho556!",
#                  "x-amz-request-id" => "0000000C246D770C",
#                  "server" => "AmazonS3",
#                  "content-length" => "7"}}
#
# If a block is provided, yields incrementally to the block as the response
# is read, so large responses can be 'streamed':
#
#  foo = File.new('./chunder.txt', File::CREAT|File::RDWR)
#  rhdr = s3.get('aws-test', 'Cent5V1_7_1.img.part.00') do |chunk|
#    foo.write(chunk)
#  end
#  foo.close
#
def get(bucket, key, headers={}, &block)
  req_hash = generate_rest_request('GET', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(req_hash, S3HttpResponseBodyParser.new, &block)
rescue
  on_exception
end
|
521
|
+
|
522
|
+
# New experimental API for retrieving objects, introduced in RightAws 1.8.1.
# retrieve_object is similar in function to the older function get. It allows
# optional verification of object md5 checksums on retrieval: if :md5 is
# given, :verified_md5 in the response reports whether it matched the ETag
# returned by S3; without :md5, :verified_md5 is false. Parameters are
# checked for completeness as well as for spurious arguments.
#
# Mandatory arguments:
#   :bucket - the bucket in which the object is stored
#   :key    - the object address (or path) within the bucket
# Optional arguments:
#   :headers - hash of additional HTTP headers to include with the request
#   :md5     - MD5 checksum against which to verify the retrieved object
#
#  s3.retrieve_object(:bucket => "foobucket", :key => "foo", :md5=>'a507841b1bc8115094b00bbe8c1b2954')
#    => {:verified_md5=>true,
#        :headers=>{..., "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"", ...},
#        :object=>"polemonium"}
#
# If a block is provided, yields incrementally to the block as the response
# is read, so large responses can be 'streamed'. The hash containing header
# fields is still returned.
def retrieve_object(params, &block)
  AwsUtils.mandatory_arguments([:bucket, :key], params)
  AwsUtils.allow_only([:bucket, :key, :headers, :md5], params)
  params[:headers] ||= {}
  req_hash = generate_rest_request('GET', params[:headers].merge(:url => "#{params[:bucket]}/#{CGI::escape params[:key]}"))
  resp = request_info(req_hash, S3HttpResponseBodyParser.new, &block)
  # Compare the returned ETag (quotes stripped) against the caller's MD5.
  resp[:verified_md5] = !!(params[:md5] && resp[:headers]['etag'].gsub(/\"/, '') == params[:md5])
  resp
rescue
  on_exception
end
|
577
|
+
|
578
|
+
# Identical in function to retrieve_object, but requires verification that
# the returned ETag is identical to the checksum passed in by the user as
# the 'md5' argument. If the check passes, returns the response metadata
# with the "verified_md5" field set true. Raises an exception if the
# checksums conflict. Implemented as a wrapper around retrieve_object.
def retrieve_object_and_verify(params, &block)
  AwsUtils.mandatory_arguments([:md5], params)
  resp = retrieve_object(params, &block)
  raise AwsError.new("Retrieved object failed MD5 checksum verification: #{resp.inspect}") unless resp[:verified_md5]
  resp
end
|
587
|
+
|
588
|
+
# Retrieves object metadata. Returns a +hash+ of http_response_headers.
#
#  s3.head('my_awesome_bucket', 'log/curent/1.log') #=>
#    {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
#     "content-type" => "",
#     "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
#     "date" => "Wed, 23 May 2007 09:08:03 GMT",
#     "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
#     "x-amz-meta-family" => "Woho556!",
#     "x-amz-request-id" => "0000000C246D770C",
#     "server" => "AmazonS3",
#     "content-length" => "7"}
#
def head(bucket, key, headers={})
  req_hash = generate_rest_request('HEAD', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(req_hash, S3HttpResponseHeadParser.new)
rescue
  on_exception
end
|
607
|
+
|
608
|
+
# Deletes key. Returns +true+ or an exception.
#
#  s3.delete('my_awesome_bucket', 'log/curent/1.log') #=> true
#
def delete(bucket, key='', headers={})
  req_hash = generate_rest_request('DELETE', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(req_hash, RightHttp2xxParser.new)
rescue
  on_exception
end
|
618
|
+
|
619
|
+
# Copy an object.
#  directive: :copy    - copy meta-headers from source (default value)
#             :replace - replace meta-headers by passed ones
#
#  # copy a key with meta-headers
#  s3.copy('b1', 'key1', 'b1', 'key1_copy') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:25:22.000Z"}
#
#  # copy a key, overwrite meta-headers
#  s3.copy('b1', 'key2', 'b1', 'key2_copy', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:26:22.000Z"}
#
# see: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingCopyingObjects.html
#      http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
#
def copy(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  dest_key ||= src_key
  # Build the S3 copy headers on a merged copy so the caller's +headers+ hash is
  # not mutated (the previous implementation wrote into it in place).
  request_headers = headers.merge(
    'x-amz-metadata-directive' => directive.to_s.upcase,
    'x-amz-copy-source'        => "#{src_bucket}/#{CGI::escape src_key}")
  req_hash = generate_rest_request('PUT', request_headers.merge(:url => "#{dest_bucket}/#{CGI::escape dest_key}"))
  request_info(req_hash, S3CopyParser.new)
rescue
  on_exception
end
|
641
|
+
|
642
|
+
# Move an object.
#  directive: :copy    - copy meta-headers from source (default value)
#             :replace - replace meta-headers by passed ones
#
#  # move bucket1/key1 to bucket1/key2
#  s3.move('bucket1', 'key1', 'bucket1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:27:22.000Z"}
#
#  # move bucket1/key1 to bucket2/key2 with new meta-headers assignment
#  s3.copy('bucket1', 'key1', 'bucket2', 'key2', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:28:22.000Z"}
#
def move(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  # Default dest_key here, mirroring copy's own defaulting. Previously a nil dest_key
  # was only defaulted inside copy, so the same-location check below compared
  # src_key against nil, and moving onto the same bucket/key deleted the object.
  dest_key ||= src_key
  copy_result = copy(src_bucket, src_key, dest_bucket, dest_key, directive, headers)
  # delete an original key if it differs from a destination one
  delete(src_bucket, src_key) unless src_bucket == dest_bucket && src_key == dest_key
  copy_result
end
|
658
|
+
|
659
|
+
# Rename an object, i.e. move it to a new key within the same bucket.
#
#  # rename bucket1/key1 to bucket1/key2
#  s3.rename('bucket1', 'key1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:29:22.000Z"}
#
def rename(src_bucket, src_key, dest_key, headers={})
  # A rename is simply a move inside one bucket, keeping the source meta-headers.
  move(src_bucket, src_key, src_bucket, dest_key, :copy, headers)
end
|
667
|
+
|
668
|
+
# Retrieves the ACL (access control policy) for a bucket or object. Returns a hash of
# headers and xml doc with ACL data.
# See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html.
#
#  s3.get_acl('my_awesome_bucket', 'log/curent/1.log') #=>
#    {:headers => {"x-amz-id-2"=>"B3B...gxX", ...},
#     :object  => "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AccessControlPolicy ...>...</AccessControlPolicy>" }
#
def get_acl(bucket, key='', headers={})
  # An empty key means the ACL of the bucket itself is requested.
  key_path = key.blank? ? '' : "/#{CGI::escape key}"
  request  = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{key_path}?acl"))
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
|
690
|
+
|
691
|
+
# Retrieves the ACL (access control policy) for a bucket or object.
# Returns a hash of {:owner, :grantees}
#
#  s3.get_acl_parse('my_awesome_bucket', 'log/curent/1.log') #=>
#
#  { :grantees=>
#    { "16...2a"=>
#      { :display_name=>"root",
#        :permissions=>["FULL_CONTROL"],
#        :attributes=>
#         { "xsi:type"=>"CanonicalUser",
#           "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}},
#     "http://acs.amazonaws.com/groups/global/AllUsers"=>
#       { :display_name=>"AllUsers",
#         :permissions=>["READ"],
#         :attributes=>
#          { "xsi:type"=>"Group",
#            "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}}},
#   :owner=>
#    { :id=>"16..2a",
#      :display_name=>"root"}}
#
def get_acl_parse(bucket, key='', headers={})
  key_path = key.blank? ? '' : "/#{CGI::escape key}"
  request  = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{key_path}?acl"))
  acl      = request_info(request, S3AclParser.new(:logger => @logger))
  grantees = {}
  acl[:grantees].each do |grantee|
    # canonical users are keyed by :id, groups by their :uri
    grantee_key = grantee[:id] || grantee[:uri]
    if grantees.key?(grantee_key)
      # same grantee appears once per <Grant>; accumulate its permissions
      grantees[grantee_key][:permissions] << grantee[:permissions]
    else
      grantees[grantee_key] =
        { :display_name => grantee[:display_name] || grantee[:uri].to_s[/[^\/]*$/],
          :permissions  => grantee[:permissions].to_a,
          :attributes   => grantee[:attributes] }
    end
  end
  { :owner => acl[:owner], :grantees => grantees }
rescue
  on_exception
end
|
735
|
+
|
736
|
+
# Sets the ACL on a bucket or object. +acl_xml_doc+ is the raw AccessControlPolicy XML.
def put_acl(bucket, key, acl_xml_doc, headers={})
  # An empty key addresses the bucket ACL rather than an object ACL.
  key_path = key.blank? ? '' : "/#{CGI::escape key}"
  request  = generate_rest_request('PUT', headers.merge(:url => "#{bucket}#{key_path}?acl", :data => acl_xml_doc))
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
|
744
|
+
|
745
|
+
# Retrieves the ACL (access control policy) for a bucket. Returns a hash of headers
# and xml doc with ACL data. Convenience wrapper around get_acl with an empty key.
def get_bucket_acl(bucket, headers={})
  get_acl(bucket, '', headers)
rescue
  on_exception
end
|
751
|
+
|
752
|
+
# Sets the ACL on a bucket only. Convenience wrapper around put_acl with an empty key.
def put_bucket_acl(bucket, acl_xml_doc, headers={})
  put_acl(bucket, '', acl_xml_doc, headers)
rescue
  on_exception
end
|
758
|
+
|
759
|
+
|
760
|
+
# Removes all keys from bucket. Returns +true+ or an exception.
#
#  s3.clear_bucket('my_awesome_bucket') #=> true
#
def clear_bucket(bucket)
  # Walk the bucket page by page and delete every listed key.
  incrementally_list_bucket(bucket) do |listing|
    listing[:contents].each { |entry| delete(bucket, entry[:key]) }
  end
  true
rescue
  on_exception
end
|
772
|
+
|
773
|
+
# Deletes all keys in bucket then deletes bucket. Returns +true+ or an exception.
#
#  s3.force_delete_bucket('my_awesome_bucket')
#
def force_delete_bucket(bucket)
  # S3 refuses to delete a non-empty bucket, so empty it first.
  clear_bucket(bucket)
  delete_bucket(bucket)
rescue
  on_exception
end
|
783
|
+
|
784
|
+
# Deletes all keys where the 'folder_key' may be assumed as 'folder' name. Returns an
# array of string keys that have been deleted.
#
#  s3.list_bucket('my_awesome_bucket').map{|key_data| key_data[:key]} #=> ['test','test/2/34','test/3','test1','test1/logs']
#  s3.delete_folder('my_awesome_bucket','test') #=> ['test','test/2/34','test/3']
#
def delete_folder(bucket, folder_key, separator='/')
  # Use non-destructive chomp: the previous chomp! mutated the caller's string in place.
  folder = folder_key.chomp(separator)
  all_keys = []
  incrementally_list_bucket(bucket, { 'prefix' => folder }) do |results|
    # Keep only exact matches of the folder itself or keys nested below it, so a
    # prefix match like 'test1' is not swept up when deleting 'test'.
    # NOTE(review): +folder+ and +separator+ are interpolated into the regexp
    # unescaped - keys containing regexp metacharacters may misbehave; confirm callers.
    keys = results[:contents].map{ |s3_key| s3_key[:key][/^#{folder}($|#{separator}.*)/] ? s3_key[:key] : nil }.compact
    keys.each{ |key| delete(bucket, key) }
    # Concatenate rather than push: '<<' produced a nested array of pages,
    # contradicting the documented flat array of deleted key strings.
    all_keys += keys
  end
  all_keys
rescue
  on_exception
end
|
801
|
+
|
802
|
+
# Retrieves object data only (headers are omitted). Returns +string+ or an exception.
#
#  s3.get('my_awesome_bucket', 'log/curent/1.log') #=> 'Ola-la!'
#
def get_object(bucket, key, headers={})
  # Delegate to get and discard everything but the body.
  get(bucket, key, headers)[:object]
rescue
  on_exception
end
|
811
|
+
|
812
|
+
#-----------------------------------------------------------------
#      Query API: Links
#-----------------------------------------------------------------

# Generates link for QUERY API
def generate_link(method, headers={}, expires=nil) #:nodoc:
  # calculate request data
  server, path, path_to_sign = fetch_request_params(headers)
  # Expiration: an Integer smaller than one year is treated as "seconds from now",
  # anything else (a Time, or a large epoch value) as an absolute expiration.
  # Integer replaces the legacy Fixnum check: Fixnum was removed in Ruby 3.2 and
  # also silently missed Bignum values.
  expires ||= DEFAULT_EXPIRES_AFTER
  expires = Time.now.utc + expires if expires.is_a?(Integer) && (expires < ONE_YEAR_IN_SECONDS)
  expires = expires.to_i
  # remove unset(==optional) and symbolic keys
  headers.each{ |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
  # generate auth strings
  auth_string = canonical_string(method, path_to_sign, headers, expires)
  signature   = CGI::escape(Base64.encode64(OpenSSL::HMAC.digest(OpenSSL::Digest::Digest.new("sha1"), @aws_secret_access_key, auth_string)).strip)
  # path building
  addon = "Signature=#{signature}&Expires=#{expires}&AWSAccessKeyId=#{@aws_access_key_id}"
  path += path[/\?/] ? "&#{addon}" : "?#{addon}"
  "#{@params[:protocol]}://#{server}:#{@params[:port]}#{path}"
rescue
  on_exception
end
|
836
|
+
|
837
|
+
# Generates link for 'ListAllMyBuckets'.
#
#  s3.list_all_my_buckets_link #=> url string
#
def list_all_my_buckets_link(expires=nil, headers={})
  # The service root (empty URL) lists all buckets owned by the signer.
  opts = headers.merge(:url => '')
  generate_link('GET', opts, expires)
rescue
  on_exception
end
|
846
|
+
|
847
|
+
# Generates link for 'CreateBucket'.
#
#  s3.create_bucket_link('my_awesome_bucket') #=> url string
#
def create_bucket_link(bucket, expires=nil, headers={})
  opts = headers.merge(:url => bucket)
  generate_link('PUT', opts, expires)
rescue
  on_exception
end
|
856
|
+
|
857
|
+
# Generates link for 'DeleteBucket'.
#
#  s3.delete_bucket_link('my_awesome_bucket') #=> url string
#
def delete_bucket_link(bucket, expires=nil, headers={})
  opts = headers.merge(:url => bucket)
  generate_link('DELETE', opts, expires)
rescue
  on_exception
end
|
866
|
+
|
867
|
+
# Generates link for 'ListBucket'.
#
#  s3.list_bucket_link('my_awesome_bucket') #=> url string
#
def list_bucket_link(bucket, options=nil, expires=nil, headers={})
  # Append list options (prefix, marker, max-keys, delimiter) as a query string.
  unless options.blank?
    query = options.map{ |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&')
    bucket += "?#{query}"
  end
  generate_link('GET', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
|
877
|
+
|
878
|
+
# Generates link for 'PutObject'.
#
#  s3.put_link('my_awesome_bucket',key, object) #=> url string
#
def put_link(bucket, key, data=nil, expires=nil, headers={})
  opts = headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}", :data => data)
  generate_link('PUT', opts, expires)
rescue
  on_exception
end
|
887
|
+
|
888
|
+
# Generates link for 'GetObject'.
#
# if a bucket comply with virtual hosting naming then retuns a link with the
# bucket as a part of host name:
#
#  s3.get_link('my-awesome-bucket',key) #=> https://my-awesome-bucket.s3.amazonaws.com:443/asia%2Fcustomers?Signature=nh7...
#
# otherwise returns an old style link (the bucket is a part of path):
#
#  s3.get_link('my_awesome_bucket',key) #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?Signature=QAO...
#
# see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html
def get_link(bucket, key, expires=nil, headers={})
  opts = headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}")
  generate_link('GET', opts, expires)
rescue
  on_exception
end
|
905
|
+
|
906
|
+
# Generates link for 'HeadObject'.
#
#  s3.head_link('my_awesome_bucket',key) #=> url string
#
def head_link(bucket, key, expires=nil, headers={})
  opts = headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}")
  generate_link('HEAD', opts, expires)
rescue
  on_exception
end
|
915
|
+
|
916
|
+
# Generates link for 'DeleteObject'.
#
#  s3.delete_link('my_awesome_bucket',key) #=> url string
#
def delete_link(bucket, key, expires=nil, headers={})
  opts = headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}")
  generate_link('DELETE', opts, expires)
rescue
  on_exception
end
|
925
|
+
|
926
|
+
|
927
|
+
# Generates link for 'GetACL'.
#
#  s3.get_acl_link('my_awesome_bucket',key) #=> url string
#
def get_acl_link(bucket, key='', headers={})
  # No expires argument is exposed here; generate_link falls back to its default.
  generate_link('GET', headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}?acl"))
rescue
  on_exception
end
|
936
|
+
|
937
|
+
# Generates link for 'PutACL'.
#
#  s3.put_acl_link('my_awesome_bucket',key) #=> url string
#
def put_acl_link(bucket, key='', headers={})
  # No expires argument is exposed here; generate_link falls back to its default.
  generate_link('PUT', headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}?acl"))
rescue
  on_exception
end
|
946
|
+
|
947
|
+
# Generates link for 'GetBucketACL'.
#
#  s3.get_bucket_acl_link('my_awesome_bucket') #=> url string
#
def get_bucket_acl_link(bucket, headers={})
  get_acl_link(bucket, '', headers)
rescue
  on_exception
end
|
956
|
+
|
957
|
+
# Generates link for 'PutBucketACL'.
#
#  s3.put_bucket_acl_link('my_awesome_bucket', acl_xml) #=> url string
#
def put_bucket_acl_link(bucket, acl_xml_doc, headers={})
  # put_acl_link's signature is (bucket, key='', headers={}); the previous code
  # passed acl_xml_doc as a fourth argument, raising ArgumentError on every call.
  # The XML body is not part of a signed link, so it is intentionally unused here;
  # the parameter is retained for backward compatibility with existing callers.
  put_acl_link(bucket, '', headers)
rescue
  on_exception
end
|
966
|
+
|
967
|
+
#-----------------------------------------------------------------
|
968
|
+
# PARSERS:
|
969
|
+
#-----------------------------------------------------------------
|
970
|
+
|
971
|
+
# Parses the ListAllMyBuckets XML response into an array of bucket hashes,
# each merged with the owner's id and display name.
class S3ListAllMyBucketsParser < RightAWSParser # :nodoc:
  def reset
    @result = []
    @owner  = {}
  end
  def tagstart(name, attributes)
    # start collecting a fresh bucket record
    @current_bucket = {} if name == 'Bucket'
  end
  def tagend(name)
    case name
    when 'ID'           then @owner[:owner_id]              = @text
    when 'DisplayName'  then @owner[:owner_display_name]    = @text
    when 'Name'         then @current_bucket[:name]          = @text
    when 'CreationDate' then @current_bucket[:creation_date] = @text
    when 'Bucket'       then @result << @current_bucket.merge(@owner)
    end
  end
end
|
989
|
+
|
990
|
+
# Parses the ListBucket XML response into an array of key hashes; each key
# carries a :service hash with the listing metadata (name, prefix, marker, ...).
class S3ListBucketParser < RightAWSParser # :nodoc:
  def reset
    @result      = []
    @service     = {}
    @current_key = {}
  end
  def tagstart(name, attributes)
    # start collecting a fresh key record
    @current_key = {} if name == 'Contents'
  end
  def tagend(name)
    case name
    # service info
    when 'Name'        then @service['name']         = @text
    when 'Prefix'      then @service['prefix']       = @text
    when 'Marker'      then @service['marker']       = @text
    when 'MaxKeys'     then @service['max-keys']     = @text
    when 'Delimiter'   then @service['delimiter']    = @text
    when 'IsTruncated' then @service['is_truncated'] = (@text =~ /false/ ? false : true)
    # key data
    when 'Key'          then @current_key[:key]                = @text
    when 'LastModified' then @current_key[:last_modified]      = @text
    when 'ETag'         then @current_key[:e_tag]              = @text
    when 'Size'         then @current_key[:size]               = @text.to_i
    when 'StorageClass' then @current_key[:storage_class]      = @text
    when 'ID'           then @current_key[:owner_id]           = @text
    when 'DisplayName'  then @current_key[:owner_display_name] = @text
    when 'Contents'
      @current_key[:service] = @service
      @result << @current_key
    end
  end
end
|
1020
|
+
|
1021
|
+
# Parses the ListBucket XML response into a single hash with :contents (key
# records), :common_prefixes, and top-level listing metadata.
class S3ImprovedListBucketParser < RightAWSParser # :nodoc:
  def reset
    @result = {}
    @result[:contents]        = []
    @result[:common_prefixes] = []
    @contents           = []
    @current_key        = {}
    @common_prefixes    = []
    @in_common_prefixes = false
  end
  def tagstart(name, attributes)
    @current_key        = {}   if name == 'Contents'
    @in_common_prefixes = true if name == 'CommonPrefixes'
  end
  def tagend(name)
    case name
    # service info
    when 'Name' then @result[:name] = @text
    # Amazon uses the same tag for the search prefix and for the entries
    # in common prefix...so use our simple flag to see which element
    # we are parsing
    when 'Prefix'
      if @in_common_prefixes
        @common_prefixes << @text
      else
        @result[:prefix] = @text
      end
    when 'Marker'      then @result[:marker]       = @text
    when 'MaxKeys'     then @result[:max_keys]     = @text
    when 'Delimiter'   then @result[:delimiter]    = @text
    when 'IsTruncated' then @result[:is_truncated] = (@text =~ /false/ ? false : true)
    when 'NextMarker'  then @result[:next_marker]  = @text
    # key data
    when 'Key'          then @current_key[:key]                = @text
    when 'LastModified' then @current_key[:last_modified]      = @text
    when 'ETag'         then @current_key[:e_tag]              = @text
    when 'Size'         then @current_key[:size]               = @text.to_i
    when 'StorageClass' then @current_key[:storage_class]      = @text
    when 'ID'           then @current_key[:owner_id]           = @text
    when 'DisplayName'  then @current_key[:owner_display_name] = @text
    when 'Contents'     then @result[:contents] << @current_key
    # Common Prefix stuff
    when 'CommonPrefixes'
      @result[:common_prefixes] = @common_prefixes
      @in_common_prefixes       = false
    end
  end
end
|
1062
|
+
|
1063
|
+
# Extracts the bucket's LocationConstraint value; an empty string is returned
# for buckets in the default (US classic) region.
class S3BucketLocationParser < RightAWSParser # :nodoc:
  def reset
    @result = ''
  end
  def tagend(name)
    @result = @text if name == 'LocationConstraint'
  end
end
|
1071
|
+
|
1072
|
+
# Parses an AccessControlPolicy XML document into {:owner, :grantees} where
# :grantees is a flat array with one entry per <Grant>.
class S3AclParser < RightAWSParser # :nodoc:
  def reset
    @result          = { :grantees => [], :owner => {} }
    @current_grantee = {}
  end
  def tagstart(name, attributes)
    # each <Grantee> carries xsi attributes identifying its type (CanonicalUser/Group)
    @current_grantee = { :attributes => attributes } if name == 'Grantee'
  end
  def tagend(name)
    case name
    when 'ID'
      # <ID> occurs both under <Owner> and under each <Grantee>; disambiguate by path
      if @xmlpath == 'AccessControlPolicy/Owner'
        @result[:owner][:id] = @text
      else
        @current_grantee[:id] = @text
      end
    when 'DisplayName'
      if @xmlpath == 'AccessControlPolicy/Owner'
        @result[:owner][:display_name] = @text
      else
        @current_grantee[:display_name] = @text
      end
    when 'URI'        then @current_grantee[:uri]         = @text
    when 'Permission' then @current_grantee[:permissions] = @text
    when 'Grant'      then @result[:grantees] << @current_grantee
    end
  end
end
|
1104
|
+
|
1105
|
+
# Parses a BucketLoggingStatus XML document into {:enabled, :targetbucket,
# :targetprefix}; :enabled becomes true once either logging tag is seen.
class S3LoggingParser < RightAWSParser # :nodoc:
  def reset
    @result          = { :enabled => false, :targetbucket => '', :targetprefix => '' }
    @current_grantee = {}
  end
  def tagend(name)
    # both tags only matter when nested under <LoggingEnabled>
    return unless @xmlpath == 'BucketLoggingStatus/LoggingEnabled'
    case name
    when 'TargetBucket'
      @result[:targetbucket] = @text
      @result[:enabled]      = true
    when 'TargetPrefix'
      @result[:targetprefix] = @text
      @result[:enabled]      = true
    end
  end
end
|
1126
|
+
|
1127
|
+
# Parses a CopyObjectResult XML document into {:last_modified, :e_tag}.
class S3CopyParser < RightAWSParser # :nodoc:
  def reset
    @result = {}
  end
  def tagend(name)
    # 'then' replaces the old "when 'X' : ..." colon form, which is a
    # SyntaxError on Ruby 1.9 and later.
    case name
    when 'LastModified' then @result[:last_modified] = @text
    when 'ETag'         then @result[:e_tag]         = @text
    end
  end
end
|
1138
|
+
|
1139
|
+
#-----------------------------------------------------------------
|
1140
|
+
# PARSERS: Non XML
|
1141
|
+
#-----------------------------------------------------------------
|
1142
|
+
|
1143
|
+
# Base non-XML "parser": stores the raw HTTP response as the result and offers
# a helper that flattens single-element header arrays into plain strings.
class S3HttpResponseParser # :nodoc:
  attr_reader :result
  def parse(response)
    @result = response
  end
  # Net::HTTP#to_hash yields arrays of values; collapse 0/1-element arrays
  # via to_s, leave multi-valued headers untouched.
  def headers_to_string(headers)
    headers.inject({}) do |collapsed, (key, value)|
      value = value.to_s if value.is_a?(Array) && value.size < 2
      collapsed[key] = value
      collapsed
    end
  end
end
|
1157
|
+
|
1158
|
+
# Non-XML parser returning both the response body and its headers:
# {:object => body, :headers => collapsed header hash}.
class S3HttpResponseBodyParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = { :object  => response.body,
                :headers => headers_to_string(response.to_hash) }
  end
end
|
1166
|
+
|
1167
|
+
# Non-XML parser returning only the collapsed response headers (used for HEAD).
class S3HttpResponseHeadParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = headers_to_string(response.to_hash)
  end
end
|
1172
|
+
|
1173
|
+
end
|
1174
|
+
|
1175
|
+
end
|