dmarkov-right_aws 1.10.0

#
# Copyright (c) 2007-2008 RightScale Inc
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

module RightAws

  class S3Interface < RightAwsBase

    USE_100_CONTINUE_PUT_SIZE = 1_000_000

    include RightAwsBaseInterface

    DEFAULT_HOST           = 's3.amazonaws.com'
    DEFAULT_PORT           = 443
    DEFAULT_PROTOCOL       = 'https'
    DEFAULT_SERVICE        = '/'
    REQUEST_TTL            = 30
    DEFAULT_EXPIRES_AFTER  = 1 * 24 * 60 * 60 # One day's worth of seconds
    ONE_YEAR_IN_SECONDS    = 365 * 24 * 60 * 60
    AMAZON_HEADER_PREFIX   = 'x-amz-'
    AMAZON_METADATA_PREFIX = 'x-amz-meta-'

    @@bench = AwsBenchmarkingBlock.new
    def self.bench_xml
      @@bench.xml
    end
    def self.bench_s3
      @@bench.service
    end

    # Creates new RightS3 instance.
    #
    #  s3 = RightAws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX','hgTHt68JY07JKUY08ftHYtERkjgtfERn57XXXXXX', {:multi_thread => true, :logger => Logger.new('/tmp/x.log')}) #=> #<RightAws::S3Interface:0xb7b3c27c>
    #
    # Params is a hash:
    #
    #  {:server       => 's3.amazonaws.com'  # Amazon service host: 's3.amazonaws.com' (default)
    #   :port         => 443                 # Amazon service port: 80 or 443 (default)
    #   :protocol     => 'https'             # Amazon service protocol: 'http' or 'https' (default)
    #   :multi_thread => true|false          # Multi-threaded (connection per each thread): true or false (default)
    #   :logger       => Logger Object}      # Logger instance: logs to STDOUT if omitted
    #
    def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
      init({ :name             => 'S3',
             :default_host     => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).host   : DEFAULT_HOST,
             :default_port     => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).port   : DEFAULT_PORT,
             :default_service  => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).path   : DEFAULT_SERVICE,
             :default_protocol => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).scheme : DEFAULT_PROTOCOL },
           aws_access_key_id     || ENV['AWS_ACCESS_KEY_ID'],
           aws_secret_access_key || ENV['AWS_SECRET_ACCESS_KEY'],
           params)
    end
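
    # Construction sketch (assumes the key pair is exported in the
    # environment; the :multi_thread setting is optional):
    #
    #  require 'right_aws'
    #  s3 = RightAws::S3Interface.new(ENV['AWS_ACCESS_KEY_ID'],
    #                                 ENV['AWS_SECRET_ACCESS_KEY'],
    #                                 :multi_thread => true)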

    #-----------------------------------------------------------------
    #      Requests
    #-----------------------------------------------------------------
    # Produces canonical string for signing.
    def canonical_string(method, path, headers={}, expires=nil) # :nodoc:
      s3_headers = {}
      headers.each do |key, value|
        key = key.downcase
        s3_headers[key] = value.join("").strip if key[/^#{AMAZON_HEADER_PREFIX}|^content-md5$|^content-type$|^date$/o]
      end
      s3_headers['content-type'] ||= ''
      s3_headers['content-md5']  ||= ''
      s3_headers['date']           = ''      if s3_headers.has_key? 'x-amz-date'
      s3_headers['date']           = expires if expires
      # prepare output string
      out_string = "#{method}\n"
      s3_headers.sort { |a, b| a[0] <=> b[0] }.each do |key, value|
        out_string << (key[/^#{AMAZON_HEADER_PREFIX}/o] ? "#{key}:#{value}\n" : "#{value}\n")
      end
      # ignore everything after the question mark...
      out_string << path.gsub(/\?.*$/, '')
      # ...unless there is an acl, torrent, location or logging parameter
      out_string << '?acl'      if path[/[&?]acl($|&|=)/]
      out_string << '?torrent'  if path[/[&?]torrent($|&|=)/]
      out_string << '?location' if path[/[&?]location($|&|=)/]
      out_string << '?logging'  if path[/[&?]logging($|&|=)/] # this one is beta, no support for now
      out_string
    end
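
    # Illustration (made-up date): per the rules above, signing a GET for
    # "/my-bucket/key.txt?acl" with only a "date" header produces:
    #
    #  canonical_string('GET', '/my-bucket/key.txt?acl',
    #                   'date' => ['Wed, 28 Jan 2009 12:00:00 GMT'])
    #  #=> "GET\n\n\nWed, 28 Jan 2009 12:00:00 GMT\n/my-bucket/key.txt?acl"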

    # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
    def is_dns_bucket?(bucket_name)
      bucket_name = bucket_name.to_s
      return nil unless (3..63) === bucket_name.size
      bucket_name.split('.').each do |component|
        return nil unless component[/^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/]
      end
      true
    end
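
    # Quick illustration of the DNS naming check:
    #
    #  is_dns_bucket?('my-awesome-bucket') #=> true
    #  is_dns_bucket?('My_Bucket')         #=> nil (uppercase and underscore)
    #  is_dns_bucket?('ab')                #=> nil (shorter than 3 characters)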

    def fetch_request_params(headers) #:nodoc:
      # default server to use
      server  = @params[:server]
      service = @params[:service].to_s
      service.chop! if service[%r{/$}] # remove trailing '/' from service
      # extract the bucket name and check its DNS compatibility
      headers[:url].to_s[%r{^([a-z0-9._-]*)(/[^?]*)?(\?.+)?}i]
      bucket_name, key_path, params_list = $1, $2, $3
      path         = "#{service}/#{bucket_name}#{key_path}#{params_list}"
      path_to_sign = "#{service}/#{bucket_name}#{key_path}#{params_list}"
      # path_to_sign = "/#{bucket_name}#{key_path}#{params_list}"
      [ server, path, path_to_sign ]
    end

    # Generates request hash for REST API.
    # Assumes that headers[:url] is URL encoded (use CGI::escape)
    def generate_rest_request(method, headers) # :nodoc:
      # calculate request data
      server, path, path_to_sign = fetch_request_params(headers)
      data = headers[:data]
      # remove unset (== optional) and symbolic keys
      headers.each{ |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
      #
      headers['content-type'] ||= ''
      headers['date']           = Time.now.httpdate
      # create request
      request = "Net::HTTP::#{method.capitalize}".constantize.new(path)
      request.body = data if data
      # set request headers and meta headers
      headers.each { |key, value| request[key.to_s] = value }
      # generate auth strings
      auth_string = canonical_string(request.method, path_to_sign, request.to_hash)
      signature   = AwsUtils::sign(@aws_secret_access_key, auth_string)
      # set other headers
      request['Authorization'] = "AWS #{@aws_access_key_id}:#{signature}"
      # prepare output hash
      { :request  => request,
        :server   => server,
        :port     => @params[:port],
        :protocol => @params[:protocol] }
    end

    # Sends request to Amazon and parses the response.
    # Raises AwsError if anything goes wrong.
    def request_info(request, parser, &block) # :nodoc:
      thread = @params[:multi_thread] ? Thread.current : Thread.main
      thread[:s3_connection] ||= Rightscale::HttpConnection.new(:exception => RightAws::AwsError, :logger => @logger)
      request_info_impl(thread[:s3_connection], @@bench, request, parser, &block)
    end


    # Returns an array of customer's buckets. Each item is a +hash+.
    #
    #  s3.list_all_my_buckets #=>
    #    [{:owner_id           => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
    #      :owner_display_name => "root",
    #      :name               => "bucket_name",
    #      :creation_date      => "2007-04-19T18:47:43.000Z"}, ..., {...}]
    #
    def list_all_my_buckets(headers={})
      req_hash = generate_rest_request('GET', headers.merge(:url=>''))
      request_info(req_hash, S3ListAllMyBucketsParser.new(:logger => @logger))
    rescue
      on_exception
    end

    # Creates new bucket. Returns +true+ or an exception.
    #
    #  # create a bucket on the US server
    #  s3.create_bucket('my-awesome-bucket-us') #=> true
    #  # create a bucket on the European server
    #  s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
    #
    def create_bucket(bucket, headers={})
      data = nil
      unless headers[:location].blank?
        data = "<CreateBucketConfiguration><LocationConstraint>#{headers[:location].to_s.upcase}</LocationConstraint></CreateBucketConfiguration>"
      end
      req_hash = generate_rest_request('PUT', headers.merge(:url=>bucket, :data => data))
      request_info(req_hash, RightHttp2xxParser.new)
    rescue Exception => e
      # if the bucket already exists, AWS returns an error for the location constraint interface. Drop it
      e.is_a?(RightAws::AwsError) && e.message.include?('BucketAlreadyOwnedByYou') ? true : on_exception
    end

    # Retrieve bucket location
    #
    #  s3.create_bucket('my-awesome-bucket-us') #=> true
    #  puts s3.bucket_location('my-awesome-bucket-us') #=> '' (Amazon's default value assumed)
    #
    #  s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
    #  puts s3.bucket_location('my-awesome-bucket-eu') #=> 'EU'
    #
    def bucket_location(bucket, headers={})
      req_hash = generate_rest_request('GET', headers.merge(:url=>"#{bucket}?location"))
      request_info(req_hash, S3BucketLocationParser.new)
    rescue
      on_exception
    end

    # Retrieves the logging configuration for a bucket.
    # Returns a hash of {:enabled, :targetbucket, :targetprefix}
    #
    #  s3.get_logging_parse(:bucket => "asset_bucket")
    #    #=> {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
    #
    def get_logging_parse(params)
      AwsUtils.mandatory_arguments([:bucket], params)
      AwsUtils.allow_only([:bucket, :headers], params)
      params[:headers] = {} unless params[:headers]
      req_hash = generate_rest_request('GET', params[:headers].merge(:url=>"#{params[:bucket]}?logging"))
      request_info(req_hash, S3LoggingParser.new)
    rescue
      on_exception
    end

    # Sets logging configuration for a bucket from the XML configuration document.
    #  params:
    #   :bucket
    #   :xmldoc
    def put_logging(params)
      AwsUtils.mandatory_arguments([:bucket, :xmldoc], params)
      AwsUtils.allow_only([:bucket, :xmldoc, :headers], params)
      params[:headers] = {} unless params[:headers]
      req_hash = generate_rest_request('PUT', params[:headers].merge(:url=>"#{params[:bucket]}?logging", :data => params[:xmldoc]))
      request_info(req_hash, S3TrueParser.new)
    rescue
      on_exception
    end
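
    # A sketch of a logging configuration document for put_logging (bucket
    # and prefix names are illustrative):
    #
    #  xmldoc = "<BucketLoggingStatus xmlns='http://doc.s3.amazonaws.com/2006-03-01'>" +
    #           "<LoggingEnabled><TargetBucket>mylogbucket</TargetBucket>" +
    #           "<TargetPrefix>loggylogs/</TargetPrefix></LoggingEnabled>" +
    #           "</BucketLoggingStatus>"
    #  s3.put_logging(:bucket => 'asset_bucket', :xmldoc => xmldoc) #=> true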

    # Deletes a bucket. The bucket must be empty! Returns +true+ or an exception.
    #
    #  s3.delete_bucket('my_awesome_bucket') #=> true
    #
    # See also: force_delete_bucket method
    #
    def delete_bucket(bucket, headers={})
      req_hash = generate_rest_request('DELETE', headers.merge(:url=>bucket))
      request_info(req_hash, RightHttp2xxParser.new)
    rescue
      on_exception
    end

    # Returns an array of bucket's keys. Each array item (key data) is a +hash+.
    #
    #  s3.list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, 'delimiter'=>'' }) #=>
    #    [{:key                => "test1",
    #      :last_modified      => "2007-05-18T07:00:59.000Z",
    #      :owner_id           => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
    #      :owner_display_name => "root",
    #      :e_tag              => "000000000059075b964b07152d234b70",
    #      :storage_class      => "STANDARD",
    #      :size               => 3,
    #      :service            => {'is_truncated' => false,
    #                              'prefix'       => "t",
    #                              'marker'       => "",
    #                              'name'         => "my_awesome_bucket",
    #                              'max-keys'     => "5"}}, ..., {...}]
    #
    def list_bucket(bucket, options={}, headers={})
      bucket += '?' + options.map{ |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&') unless options.blank?
      req_hash = generate_rest_request('GET', headers.merge(:url=>bucket))
      request_info(req_hash, S3ListBucketParser.new(:logger => @logger))
    rescue
      on_exception
    end

    # Incrementally list the contents of a bucket. Yields the following hash to a block:
    #
    #  s3.incrementally_list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, 'delimiter'=>'' }) yields
    #   {
    #     :name => 'bucketname',
    #     :prefix => 'subfolder/',
    #     :marker => 'fileN.jpg',
    #     :max_keys => 234,
    #     :delimiter => '/',
    #     :is_truncated => true,
    #     :next_marker => 'fileX.jpg',
    #     :contents => [
    #       { :key => "file1",
    #         :last_modified => "2007-05-18T07:00:59.000Z",
    #         :e_tag => "000000000059075b964b07152d234b70",
    #         :size => 3,
    #         :storage_class => "STANDARD",
    #         :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
    #         :owner_display_name => "root"
    #       }, { :key, ...}, ... {:key, ...}
    #     ],
    #     :common_prefixes => [
    #       "prefix1",
    #       "prefix2",
    #       ...,
    #       "prefixN"
    #     ]
    #   }
    def incrementally_list_bucket(bucket, options={}, headers={}, &block)
      internal_options = options.symbolize_keys
      begin
        internal_bucket  = bucket.dup
        internal_bucket += '?' + internal_options.map{ |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&') unless internal_options.blank?
        req_hash = generate_rest_request('GET', headers.merge(:url=>internal_bucket))
        response = request_info(req_hash, S3ImprovedListBucketParser.new(:logger => @logger))
        there_are_more_keys = response[:is_truncated]
        if there_are_more_keys
          internal_options[:marker] = decide_marker(response)
          total_results = response[:contents].length + response[:common_prefixes].length
          internal_options[:'max-keys'] ? (internal_options[:'max-keys'] -= total_results) : nil
        end
        yield response
      end while there_are_more_keys && under_max_keys(internal_options)
      true
    rescue
      on_exception
    end
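
    # Paging sketch (bucket and prefix names are illustrative): collect every
    # key under a prefix, one S3 request per page of results:
    #
    #  keys = []
    #  s3.incrementally_list_bucket('my-bucket', 'prefix' => 'logs/') do |page|
    #    keys.concat(page[:contents].map { |item| item[:key] })
    #  end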


    private
    def decide_marker(response)
      return response[:next_marker].dup if response[:next_marker]
      # a page may hold only common prefixes, so guard against an empty contents list
      last_key    = response[:contents].empty? ? nil : response[:contents].last[:key]
      last_prefix = response[:common_prefixes].last
      if !last_key
        return nil if !last_prefix
        last_prefix.dup
      elsif !last_prefix
        last_key.dup
      else
        last_key > last_prefix ? last_key.dup : last_prefix.dup
      end
    end

    def under_max_keys(internal_options)
      internal_options[:'max-keys'] ? internal_options[:'max-keys'] > 0 : true
    end

    public
    # Saves object to Amazon. Returns +true+ or an exception.
    # Any header starting with AMAZON_METADATA_PREFIX is considered
    # user metadata. It will be stored with the object and returned
    # when you retrieve the object. The total size of the HTTP
    # request, not including the body, must be less than 4 KB.
    #
    #  s3.put('my_awesome_bucket', 'log/current/1.log', 'Ola-la!', 'x-amz-meta-family'=>'Woho556!') #=> true
    #
    # This method is capable of 'streaming' uploads; that is, it can upload
    # data from a file or other IO object without first reading all the data
    # into memory. This is most useful for large PUTs - it is difficult to read
    # a 2 GB file entirely into memory before sending it to S3.
    # To stream an upload, pass an object that responds to 'read' (like the read
    # method of IO) and to either 'lstat' or 'size'. For files, this means
    # streaming is enabled by simply making the call:
    #
    #  s3.put(bucket_name, 'S3keyname.forthisfile', File.open('localfilename.dat'))
    #
    # If the IO object you wish to stream from responds to the read method but
    # doesn't implement lstat or size, you can extend the object dynamically
    # to implement these methods, or define your own class which defines these
    # methods. Be sure that your class returns 'nil' from read() after having
    # read 'size' bytes. Otherwise S3 will drop the socket after
    # 'Content-Length' bytes have been uploaded, and HttpConnection will
    # interpret this as an error.
    #
    # This method now supports very large PUTs, where very large
    # is > 2 GB.
    #
    # For Win32 users: Files and IO objects should be opened in binary mode. If
    # a text mode IO object is passed to PUT, it will be converted to binary
    # mode.
    #
    def put(bucket, key, data=nil, headers={})
      # On Windows, if someone opens a file in text mode, we must reset it
      # to binary mode for streaming to work properly
      if data.respond_to?(:binmode)
        data.binmode
      end
      if (data.respond_to?(:lstat) && data.lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
         (data.respond_to?(:size)  && data.size       >= USE_100_CONTINUE_PUT_SIZE)
        headers['expect'] = '100-continue'
      end
      req_hash = generate_rest_request('PUT', headers.merge(:url=>"#{bucket}/#{CGI::escape key}", :data=>data))
      request_info(req_hash, RightHttp2xxParser.new)
    rescue
      on_exception
    end
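
    # Streaming-upload sketch (paths are illustrative): File.open yields an IO
    # that responds to read and lstat, so the body is never fully buffered:
    #
    #  File.open('/tmp/big.dat', 'rb') do |io|
    #    s3.put('my-bucket', 'backups/big.dat', io)
    #  end #=> true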


    # New experimental API for uploading objects, introduced in RightAws 1.8.1.
    # store_object is similar in function to the older function put, but returns the full response metadata. It also allows for optional verification
    # of object md5 checksums on upload. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
    # The hash of the response headers contains useful information like the Amazon request ID and the object ETag (MD5 checksum).
    #
    # If the optional :md5 argument is provided, store_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
    # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
    #
    # The optional argument of :headers allows the caller to specify arbitrary request header values.
    #
    #  s3.store_object(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
    #    => {"x-amz-id-2"=>"SVsnS2nfDaR+ixyJUlRKM8GndRyEMS16+oZRieamuL61pPxPaTuWrWtlYaEhYrI/",
    #        "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
    #        "date"=>"Mon, 29 Sep 2008 18:57:46 GMT",
    #        :verified_md5=>true,
    #        "x-amz-request-id"=>"63916465939995BA",
    #        "server"=>"AmazonS3",
    #        "content-length"=>"0"}
    #
    #  s3.store_object(:bucket => "foobucket", :key => "foo", :data => "polemonium" )
    #    => {"x-amz-id-2"=>"MAt9PLjgLX9UYJ5tV2fI/5dBZdpFjlzRVpWgBDpvZpl+V+gJFcBMW2L+LBstYpbR",
    #        "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
    #        "date"=>"Mon, 29 Sep 2008 18:58:56 GMT",
    #        :verified_md5=>false,
    #        "x-amz-request-id"=>"3B25A996BC2CDD3B",
    #        "server"=>"AmazonS3",
    #        "content-length"=>"0"}
    #
    def store_object(params)
      AwsUtils.allow_only([:bucket, :key, :data, :headers, :md5], params)
      AwsUtils.mandatory_arguments([:bucket, :key, :data], params)
      params[:headers] = {} unless params[:headers]

      # On Windows, if someone opens a file in text mode, we must reset it to
      # binary mode for streaming to work properly
      params[:data].binmode if params[:data].respond_to?(:binmode)
      if (params[:data].respond_to?(:lstat) && params[:data].lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
         (params[:data].respond_to?(:size)  && params[:data].size       >= USE_100_CONTINUE_PUT_SIZE)
        params[:headers]['expect'] = '100-continue'
      end

      req_hash = generate_rest_request('PUT', params[:headers].merge(:url=>"#{params[:bucket]}/#{CGI::escape params[:key]}", :data=>params[:data]))
      resp = request_info(req_hash, S3HttpResponseHeadParser.new)
      if params[:md5]
        resp[:verified_md5] = (resp['etag'].gsub(/\"/, '') == params[:md5]) ? true : false
      else
        resp[:verified_md5] = false
      end
      resp
    rescue
      on_exception
    end
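
    # Verified-upload sketch: hash the payload locally so store_object can
    # compare it against the returned ETag (names are illustrative):
    #
    #  require 'digest/md5'
    #  data = File.read('/tmp/report.txt')
    #  s3.store_object(:bucket => 'my-bucket', :key => 'report.txt',
    #                  :data   => data,
    #                  :md5    => Digest::MD5.hexdigest(data))[:verified_md5] #=> true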

    # Identical in function to store_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
    # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
    # This call is implemented as a wrapper around store_object and the user may gain different semantics by creating a custom wrapper.
    #
    #  s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
    #    => {"x-amz-id-2"=>"IZN3XsH4FlBU0+XYkFTfHwaiF1tNzrm6dIW2EM/cthKvl71nldfVC0oVQyydzWpb",
    #        "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
    #        "date"=>"Mon, 29 Sep 2008 18:38:32 GMT",
    #        :verified_md5=>true,
    #        "x-amz-request-id"=>"E8D7EA4FE00F5DF7",
    #        "server"=>"AmazonS3",
    #        "content-length"=>"0"}
    #
    #  s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2953", :data => "polemonium" )
    #    RightAws::AwsError: Uploaded object failed MD5 checksum verification: {"x-amz-id-2"=>"HTxVtd2bf7UHHDn+WzEH43MkEjFZ26xuYvUzbstkV6nrWvECRWQWFSx91z/bl03n",
    #        "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
    #        "date"=>"Mon, 29 Sep 2008 18:38:41 GMT",
    #        :verified_md5=>false,
    #        "x-amz-request-id"=>"0D7ADE09F42606F2",
    #        "server"=>"AmazonS3",
    #        "content-length"=>"0"}
    def store_object_and_verify(params)
      AwsUtils.mandatory_arguments([:md5], params)
      r = store_object(params)
      r[:verified_md5] ? (return r) : (raise AwsError.new("Uploaded object failed MD5 checksum verification: #{r.inspect}"))
    end

    # Retrieves object data from Amazon. Returns a +hash+ or an exception.
    #
    #  s3.get('my_awesome_bucket', 'log/current/1.log') #=>
    #    {:object  => "Ola-la!",
    #     :headers => {"last-modified"     => "Wed, 23 May 2007 09:08:04 GMT",
    #                  "content-type"      => "",
    #                  "etag"              => "\"000000000096f4ee74bc4596443ef2a4\"",
    #                  "date"              => "Wed, 23 May 2007 09:08:03 GMT",
    #                  "x-amz-id-2"        => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
    #                  "x-amz-meta-family" => "Woho556!",
    #                  "x-amz-request-id"  => "0000000C246D770C",
    #                  "server"            => "AmazonS3",
    #                  "content-length"    => "7"}}
    #
    # If a block is provided, yields incrementally to the block as
    # the response is read. For large responses, this function is ideal as
    # the response can be 'streamed'. The hash containing header fields is
    # still returned.
    # Example:
    #  foo = File.new('./chunder.txt', File::CREAT|File::RDWR)
    #  rhdr = s3.get('aws-test', 'Cent5V1_7_1.img.part.00') do |chunk|
    #    foo.write(chunk)
    #  end
    #  foo.close
    #
    def get(bucket, key, headers={}, &block)
      req_hash = generate_rest_request('GET', headers.merge(:url=>"#{bucket}/#{CGI::escape key}"))
      request_info(req_hash, S3HttpResponseBodyParser.new, &block)
    rescue
      on_exception
    end

    # New experimental API for retrieving objects, introduced in RightAws 1.8.1.
    # retrieve_object is similar in function to the older function get. It allows for optional verification
    # of object md5 checksums on retrieval. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
    #
    # If the optional :md5 argument is provided, retrieve_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
    # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
    #
    # The optional argument of :headers allows the caller to specify arbitrary request header values.
    # Mandatory arguments:
    #   :bucket - the bucket in which the object is stored
    #   :key    - the object address (or path) within the bucket
    # Optional arguments:
    #   :headers - hash of additional HTTP headers to include with the request
    #   :md5     - MD5 checksum against which to verify the retrieved object
    #
    #  s3.retrieve_object(:bucket => "foobucket", :key => "foo")
    #    => {:verified_md5=>false,
    #        :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
    #                   "x-amz-id-2"=>"2Aj3TDz6HP5109qly//18uHZ2a1TNHGLns9hyAtq2ved7wmzEXDOPGRHOYEa3Qnp",
    #                   "content-type"=>"",
    #                   "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
    #                   "date"=>"Tue, 30 Sep 2008 00:52:44 GMT",
    #                   "x-amz-request-id"=>"EE4855DE27A2688C",
    #                   "server"=>"AmazonS3",
    #                   "content-length"=>"10"},
    #        :object=>"polemonium"}
    #
    #  s3.retrieve_object(:bucket => "foobucket", :key => "foo", :md5=>'a507841b1bc8115094b00bbe8c1b2954')
    #    => {:verified_md5=>true,
    #        :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
    #                   "x-amz-id-2"=>"mLWQcI+VuKVIdpTaPXEo84g0cz+vzmRLbj79TS8eFPfw19cGFOPxuLy4uGYVCvdH",
    #                   "content-type"=>"",
    #                   "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
    #                   "date"=>"Tue, 30 Sep 2008 00:53:08 GMT",
    #                   "x-amz-request-id"=>"6E7F317356580599",
    #                   "server"=>"AmazonS3",
    #                   "content-length"=>"10"},
    #        :object=>"polemonium"}
    #
    # If a block is provided, yields incrementally to the block as
    # the response is read. For large responses, this function is ideal as
    # the response can be 'streamed'. The hash containing header fields is
    # still returned.
    def retrieve_object(params, &block)
      AwsUtils.mandatory_arguments([:bucket, :key], params)
      AwsUtils.allow_only([:bucket, :key, :headers, :md5], params)
      params[:headers] = {} unless params[:headers]
      req_hash = generate_rest_request('GET', params[:headers].merge(:url=>"#{params[:bucket]}/#{CGI::escape params[:key]}"))
      resp = request_info(req_hash, S3HttpResponseBodyParser.new, &block)
      resp[:verified_md5] = false
      if params[:md5] && (resp[:headers]['etag'].gsub(/\"/, '') == params[:md5])
        resp[:verified_md5] = true
      end
      resp
    rescue
      on_exception
    end

    # Identical in function to retrieve_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
    # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
    # This call is implemented as a wrapper around retrieve_object and the user may gain different semantics by creating a custom wrapper.
    def retrieve_object_and_verify(params, &block)
      AwsUtils.mandatory_arguments([:md5], params)
      resp = retrieve_object(params, &block)
      return resp if resp[:verified_md5]
      raise AwsError.new("Retrieved object failed MD5 checksum verification: #{resp.inspect}")
    end
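
    # Error-handling sketch: a checksum mismatch raises AwsError, so wrap the
    # call when a retry is wanted (bucket, key and checksum are illustrative):
    #
    #  begin
    #    s3.retrieve_object_and_verify(:bucket => 'my-bucket',
    #                                  :key    => 'report.txt',
    #                                  :md5    => expected_md5)
    #  rescue RightAws::AwsError => e
    #    STDERR.puts "download failed verification: #{e.message}"
    #  end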

    # Retrieves object metadata. Returns a +hash+ of http_response_headers.
    #
    #  s3.head('my_awesome_bucket', 'log/current/1.log') #=>
    #    {"last-modified"     => "Wed, 23 May 2007 09:08:04 GMT",
    #     "content-type"      => "",
    #     "etag"              => "\"000000000096f4ee74bc4596443ef2a4\"",
    #     "date"              => "Wed, 23 May 2007 09:08:03 GMT",
    #     "x-amz-id-2"        => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
    #     "x-amz-meta-family" => "Woho556!",
    #     "x-amz-request-id"  => "0000000C246D770C",
    #     "server"            => "AmazonS3",
    #     "content-length"    => "7"}
    #
    def head(bucket, key, headers={})
      req_hash = generate_rest_request('HEAD', headers.merge(:url=>"#{bucket}/#{CGI::escape key}"))
      request_info(req_hash, S3HttpResponseHeadParser.new)
    rescue
      on_exception
    end

    # Deletes key. Returns +true+ or an exception.
    #
    #  s3.delete('my_awesome_bucket', 'log/current/1.log') #=> true
    #
    def delete(bucket, key='', headers={})
      req_hash = generate_rest_request('DELETE', headers.merge(:url=>"#{bucket}/#{CGI::escape key}"))
      request_info(req_hash, RightHttp2xxParser.new)
    rescue
      on_exception
    end

    # Copy an object.
    #  directive: :copy    - copy meta-headers from source (default value)
    #             :replace - replace meta-headers by passed ones
    #
    #  # copy a key with meta-headers
    #  s3.copy('b1', 'key1', 'b1', 'key1_copy') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:25:22.000Z"}
    #
    #  # copy a key, overwrite meta-headers
    #  s3.copy('b1', 'key2', 'b1', 'key2_copy', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:26:22.000Z"}
    #
    # see: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingCopyingObjects.html
    #      http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
    #
    def copy(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
      dest_key ||= src_key
      headers['x-amz-metadata-directive'] = directive.to_s.upcase
      headers['x-amz-copy-source']        = "#{src_bucket}/#{CGI::escape src_key}"
      req_hash = generate_rest_request('PUT', headers.merge(:url=>"#{dest_bucket}/#{CGI::escape dest_key}"))
      request_info(req_hash, S3CopyParser.new)
    rescue
      on_exception
    end

    # Move an object.
    #  directive: :copy    - copy meta-headers from source (default value)
    #             :replace - replace meta-headers by passed ones
    #
    #  # move bucket1/key1 to bucket1/key2
    #  s3.move('bucket1', 'key1', 'bucket1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:27:22.000Z"}
    #
    #  # move bucket1/key1 to bucket2/key2 with new meta-headers assignment
    #  s3.move('bucket1', 'key1', 'bucket2', 'key2', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:28:22.000Z"}
    #
    def move(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
      copy_result = copy(src_bucket, src_key, dest_bucket, dest_key, directive, headers)
      # delete the original key if it differs from the destination one
      delete(src_bucket, src_key) unless src_bucket == dest_bucket && src_key == dest_key
      copy_result
    end

    # Rename an object.
    #
    #  # rename bucket1/key1 to bucket1/key2
    #  s3.rename('bucket1', 'key1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:29:22.000Z"}
    #
    def rename(src_bucket, src_key, dest_key, headers={})
      move(src_bucket, src_key, src_bucket, dest_key, :copy, headers)
    end

    # Retrieves the ACL (access control policy) for a bucket or object. Returns a hash of headers and an xml doc with ACL data. See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html.
    #
    #  s3.get_acl('my_awesome_bucket', 'log/current/1.log') #=>
    #    {:headers => {"x-amz-id-2"=>"B3BdDMDUz+phFF2mGBH04E46ZD4Qb9HF5PoPHqDRWBv+NVGeA3TOQ3BkVvPBjgxX",
    #                  "content-type"=>"application/xml;charset=ISO-8859-1",
    #                  "date"=>"Wed, 23 May 2007 09:40:16 GMT",
    #                  "x-amz-request-id"=>"B183FA7AB5FBB4DD",
    #                  "server"=>"AmazonS3",
    #                  "transfer-encoding"=>"chunked"},
    #     :object  => "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Owner>
    #       <ID>16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Owner>
    #       <AccessControlList><Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>
    #       16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Grantee>
    #       <Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>" }
    #
    def get_acl(bucket, key='', headers={})
      key = key.blank? ? '' : "/#{CGI::escape key}"
      req_hash = generate_rest_request('GET', headers.merge(:url=>"#{bucket}#{key}?acl"))
      request_info(req_hash, S3HttpResponseBodyParser.new)
    rescue
      on_exception
    end

    # Retrieves the ACL (access control policy) for a bucket or object.
    # Returns a hash of {:owner, :grantees}
    #
    #  s3.get_acl_parse('my_awesome_bucket', 'log/current/1.log') #=>
    #    { :grantees=>
    #      { "16...2a"=>
    #        { :display_name=>"root",
    #          :permissions=>["FULL_CONTROL"],
    #          :attributes=>
    #           { "xsi:type"=>"CanonicalUser",
    #             "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}},
    #        "http://acs.amazonaws.com/groups/global/AllUsers"=>
    #        { :display_name=>"AllUsers",
    #          :permissions=>["READ"],
    #          :attributes=>
    #           { "xsi:type"=>"Group",
    #             "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}}},
    #      :owner=>
    #      { :id=>"16..2a",
    #        :display_name=>"root"}}
    #
    def get_acl_parse(bucket, key='', headers={})
      key = key.blank? ? '' : "/#{CGI::escape key}"
      req_hash = generate_rest_request('GET', headers.merge(:url=>"#{bucket}#{key}?acl"))
      acl = request_info(req_hash, S3AclParser.new(:logger => @logger))
      result = {}
      result[:owner]    = acl[:owner]
      result[:grantees] = {}
      acl[:grantees].each do |grantee|
        key = grantee[:id] || grantee[:uri]
        if result[:grantees].key?(key)
          result[:grantees][key][:permissions] << grantee[:permissions]
        else
          result[:grantees][key] =
            { :display_name => grantee[:display_name] || grantee[:uri].to_s[/[^\/]*$/],
              :permissions  => grantee[:permissions].to_a,
              :attributes   => grantee[:attributes] }
        end
      end
      result
    rescue
      on_exception
    end
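
    # Inspection sketch: check whether a key is world-readable using the
    # parsed grantees (bucket and key names are illustrative):
    #
    #  acl = s3.get_acl_parse('my-bucket', 'report.txt')
    #  all_users = acl[:grantees]['http://acs.amazonaws.com/groups/global/AllUsers']
    #  all_users && all_users[:permissions].include?('READ') #=> true when public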

    # Sets the ACL on a bucket or object.
    def put_acl(bucket, key, acl_xml_doc, headers={})
      key = key.blank? ? '' : "/#{CGI::escape key}"
      req_hash = generate_rest_request('PUT', headers.merge(:url=>"#{bucket}#{key}?acl", :data=>acl_xml_doc))
      request_info(req_hash, S3HttpResponseBodyParser.new)
    rescue
      on_exception
    end

    # Retrieves the ACL (access control policy) for a bucket. Returns a hash of headers and an xml doc with ACL data.
    def get_bucket_acl(bucket, headers={})
      return get_acl(bucket, '', headers)
    rescue
      on_exception
    end

    # Sets the ACL on a bucket only.
    def put_bucket_acl(bucket, acl_xml_doc, headers={})
      return put_acl(bucket, '', acl_xml_doc, headers)
    rescue
      on_exception
    end


    # Removes all keys from bucket. Returns +true+ or an exception.
    #
    #  s3.clear_bucket('my_awesome_bucket') #=> true
    #
    def clear_bucket(bucket)
      incrementally_list_bucket(bucket) do |results|
        results[:contents].each { |key| delete(bucket, key[:key]) }
      end
      true
    rescue
      on_exception
    end

    # Deletes all keys in bucket then deletes bucket. Returns +true+ or an exception.
    #
    #  s3.force_delete_bucket('my_awesome_bucket')
    #
    def force_delete_bucket(bucket)
      clear_bucket(bucket)
      delete_bucket(bucket)
    rescue
      on_exception
    end

    # Deletes all keys for which 'folder_key' may be treated as a 'folder' name. Returns an array of the string keys that have been deleted.
    #
    #  s3.list_bucket('my_awesome_bucket').map{|key_data| key_data[:key]} #=> ['test','test/2/34','test/3','test1','test1/logs']
    #  s3.delete_folder('my_awesome_bucket','test') #=> ['test','test/2/34','test/3']
    #
    def delete_folder(bucket, folder_key, separator='/')
      folder_key.chomp!(separator)
      allkeys = []
      incrementally_list_bucket(bucket, { 'prefix' => folder_key }) do |results|
        keys = results[:contents].map{ |s3_key| s3_key[:key][/^#{folder_key}($|#{separator}.*)/] ? s3_key[:key] : nil }.compact
        keys.each{ |key| delete(bucket, key) }
        # keep the result a flat list of key strings
        allkeys.concat(keys)
      end
      allkeys
    rescue
      on_exception
    end

    # Retrieves object data only (headers are omitted). Returns a +string+ or an exception.
    #
    #  s3.get_object('my_awesome_bucket', 'log/current/1.log') #=> 'Ola-la!'
    #
    def get_object(bucket, key, headers={})
      get(bucket, key, headers)[:object]
    rescue
      on_exception
    end

    #-----------------------------------------------------------------
    #      Query API: Links
    #-----------------------------------------------------------------

    # Generates link for QUERY API
    def generate_link(method, headers={}, expires=nil) #:nodoc:
      # calculate request data
      server, path, path_to_sign = fetch_request_params(headers)
      # expiration time
      expires ||= DEFAULT_EXPIRES_AFTER
      expires   = Time.now.utc + expires if expires.is_a?(Fixnum) && (expires < ONE_YEAR_IN_SECONDS)
      expires   = expires.to_i
      # remove unset (== optional) and symbolic keys
      headers.each{ |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
      # generate auth strings
      auth_string = canonical_string(method, path_to_sign, headers, expires)
      signature   = CGI::escape(Base64.encode64(OpenSSL::HMAC.digest(OpenSSL::Digest::Digest.new("sha1"), @aws_secret_access_key, auth_string)).strip)
      # path building
      addon = "Signature=#{signature}&Expires=#{expires}&AWSAccessKeyId=#{@aws_access_key_id}"
      path += path[/\?/] ? "&#{addon}" : "?#{addon}"
      "#{@params[:protocol]}://#{server}:#{@params[:port]}#{path}"
    rescue
      on_exception
    end
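
    # Expiring-link sketch (names are illustrative): a signed GET link, built
    # with get_link below, that stays valid for one hour and needs no AWS
    # credentials on the client side:
    #
    #  url = s3.get_link('my-awesome-bucket', 'reports/2008.pdf', 60 * 60)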

    # Generates link for 'ListAllMyBuckets'.
    #
    #  s3.list_all_my_buckets_link #=> url string
    #
    def list_all_my_buckets_link(expires=nil, headers={})
      generate_link('GET', headers.merge(:url=>''), expires)
    rescue
      on_exception
    end

    # Generates link for 'CreateBucket'.
    #
    #  s3.create_bucket_link('my_awesome_bucket') #=> url string
    #
    def create_bucket_link(bucket, expires=nil, headers={})
      generate_link('PUT', headers.merge(:url=>bucket), expires)
    rescue
      on_exception
    end

    # Generates link for 'DeleteBucket'.
    #
    #  s3.delete_bucket_link('my_awesome_bucket') #=> url string
    #
    def delete_bucket_link(bucket, expires=nil, headers={})
      generate_link('DELETE', headers.merge(:url=>bucket), expires)
    rescue
      on_exception
    end

    # Generates link for 'ListBucket'.
    #
    #  s3.list_bucket_link('my_awesome_bucket') #=> url string
    #
    def list_bucket_link(bucket, options=nil, expires=nil, headers={})
      bucket += '?' + options.map{ |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&') unless options.blank?
      generate_link('GET', headers.merge(:url=>bucket), expires)
    rescue
      on_exception
    end

    # Generates link for 'PutObject'.
    #
    #  s3.put_link('my_awesome_bucket', key, object) #=> url string
    #
    def put_link(bucket, key, data=nil, expires=nil, headers={})
      generate_link('PUT', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}", :data=>data), expires)
    rescue
      on_exception
    end

    # Generates link for 'GetObject'.
    #
    # If the bucket complies with the virtual hosting naming rules, returns a link
    # with the bucket as a part of the host name:
    #
    #  s3.get_link('my-awesome-bucket',key) #=> https://my-awesome-bucket.s3.amazonaws.com:443/asia%2Fcustomers?Signature=nh7...
    #
    # otherwise returns an old-style link (the bucket is a part of the path):
    #
    #  s3.get_link('my_awesome_bucket',key) #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?Signature=QAO...
    #
    # see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html
    def get_link(bucket, key, expires=nil, headers={})
      generate_link('GET', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}"), expires)
    rescue
      on_exception
    end

    # Generates link for 'HeadObject'.
    #
    #  s3.head_link('my_awesome_bucket',key) #=> url string
    #
    def head_link(bucket, key, expires=nil, headers={})
      generate_link('HEAD', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}"), expires)
    rescue
      on_exception
    end

    # Generates link for 'DeleteObject'.
    #
    #  s3.delete_link('my_awesome_bucket',key) #=> url string
    #
    def delete_link(bucket, key, expires=nil, headers={})
      generate_link('DELETE', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}"), expires)
    rescue
      on_exception
    end


    # Generates link for 'GetACL'.
    #
    #  s3.get_acl_link('my_awesome_bucket',key) #=> url string
    #
    def get_acl_link(bucket, key='', headers={})
      return generate_link('GET', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}?acl"))
    rescue
      on_exception
    end

    # Generates link for 'PutACL'.
    #
    #  s3.put_acl_link('my_awesome_bucket',key) #=> url string
    #
    def put_acl_link(bucket, key='', headers={})
      return generate_link('PUT', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}?acl"))
    rescue
      on_exception
    end

    # Generates link for 'GetBucketACL'.
    #
    #  s3.get_bucket_acl_link('my_awesome_bucket') #=> url string
    #
    def get_bucket_acl_link(bucket, headers={})
      return get_acl_link(bucket, '', headers)
    rescue
      on_exception
    end

    # Generates link for 'PutBucketACL'.
    #
    #  s3.put_bucket_acl_link('my_awesome_bucket', acl_xml_doc) #=> url string
    #
    def put_bucket_acl_link(bucket, acl_xml_doc, headers={})
      # acl_xml_doc is not needed to build the link (put_acl_link takes only
      # bucket, key and headers); the parameter is kept for call compatibility
      return put_acl_link(bucket, '', headers)
    rescue
      on_exception
    end


    #-----------------------------------------------------------------
    #      PARSERS:
    #-----------------------------------------------------------------

    class S3ListAllMyBucketsParser < RightAWSParser # :nodoc:
      def reset
        @result = []
        @owner  = {}
      end
      def tagstart(name, attributes)
        @current_bucket = {} if name == 'Bucket'
      end
      def tagend(name)
        case name
          when 'ID'          ; @owner[:owner_id]               = @text
          when 'DisplayName' ; @owner[:owner_display_name]     = @text
          when 'Name'        ; @current_bucket[:name]          = @text
          when 'CreationDate'; @current_bucket[:creation_date] = @text
          when 'Bucket'      ; @result << @current_bucket.merge(@owner)
        end
      end
    end

    class S3ListBucketParser < RightAWSParser # :nodoc:
      def reset
        @result      = []
        @service     = {}
        @current_key = {}
      end
      def tagstart(name, attributes)
        @current_key = {} if name == 'Contents'
      end
      def tagend(name)
        case name
          # service info
          when 'Name'        ; @service['name']         = @text
          when 'Prefix'      ; @service['prefix']       = @text
          when 'Marker'      ; @service['marker']       = @text
          when 'MaxKeys'     ; @service['max-keys']     = @text
          when 'Delimiter'   ; @service['delimiter']    = @text
          when 'IsTruncated' ; @service['is_truncated'] = (@text =~ /false/ ? false : true)
          # key data
          when 'Key'         ; @current_key[:key]                = @text
          when 'LastModified'; @current_key[:last_modified]      = @text
          when 'ETag'        ; @current_key[:e_tag]              = @text
          when 'Size'        ; @current_key[:size]               = @text.to_i
          when 'StorageClass'; @current_key[:storage_class]      = @text
          when 'ID'          ; @current_key[:owner_id]           = @text
          when 'DisplayName' ; @current_key[:owner_display_name] = @text
          when 'Contents'    ; @current_key[:service] = @service; @result << @current_key
        end
      end
    end

    class S3ImprovedListBucketParser < RightAWSParser # :nodoc:
      def reset
        @result                   = {}
        @result[:contents]        = []
        @result[:common_prefixes] = []
        @contents                 = []
        @current_key              = {}
        @common_prefixes          = []
        @in_common_prefixes       = false
      end
      def tagstart(name, attributes)
        @current_key        = {}   if name == 'Contents'
        @in_common_prefixes = true if name == 'CommonPrefixes'
      end
      def tagend(name)
        case name
          # service info
          when 'Name'        ; @result[:name] = @text
          # Amazon uses the same tag for the search prefix and for the entries
          # in common prefix...so use our simple flag to see which element
          # we are parsing
          when 'Prefix'      ; @in_common_prefixes ? @common_prefixes << @text : @result[:prefix] = @text
          when 'Marker'      ; @result[:marker]       = @text
          when 'MaxKeys'     ; @result[:max_keys]     = @text
          when 'Delimiter'   ; @result[:delimiter]    = @text
          when 'IsTruncated' ; @result[:is_truncated] = (@text =~ /false/ ? false : true)
          when 'NextMarker'  ; @result[:next_marker]  = @text
          # key data
          when 'Key'         ; @current_key[:key]                = @text
          when 'LastModified'; @current_key[:last_modified]      = @text
          when 'ETag'        ; @current_key[:e_tag]              = @text
          when 'Size'        ; @current_key[:size]               = @text.to_i
          when 'StorageClass'; @current_key[:storage_class]      = @text
          when 'ID'          ; @current_key[:owner_id]           = @text
          when 'DisplayName' ; @current_key[:owner_display_name] = @text
          when 'Contents'    ; @result[:contents] << @current_key
          # Common Prefix stuff
          when 'CommonPrefixes' ; @result[:common_prefixes] = @common_prefixes; @in_common_prefixes = false
        end
      end
    end

    class S3BucketLocationParser < RightAWSParser # :nodoc:
      def reset
        @result = ''
      end
      def tagend(name)
        @result = @text if name == 'LocationConstraint'
      end
    end

    class S3AclParser < RightAWSParser # :nodoc:
      def reset
        @result          = {:grantees=>[], :owner=>{}}
        @current_grantee = {}
      end
      def tagstart(name, attributes)
        @current_grantee = { :attributes => attributes } if name == 'Grantee'
      end
      def tagend(name)
        case name
          # service info
          when 'ID'
            if @xmlpath == 'AccessControlPolicy/Owner'
              @result[:owner][:id] = @text
            else
              @current_grantee[:id] = @text
            end
          when 'DisplayName'
            if @xmlpath == 'AccessControlPolicy/Owner'
              @result[:owner][:display_name] = @text
            else
              @current_grantee[:display_name] = @text
            end
          when 'URI'
            @current_grantee[:uri] = @text
          when 'Permission'
            @current_grantee[:permissions] = @text
          when 'Grant'
            @result[:grantees] << @current_grantee
        end
      end
    end

    class S3LoggingParser < RightAWSParser # :nodoc:
      def reset
        @result          = {:enabled => false, :targetbucket => '', :targetprefix => ''}
        @current_grantee = {}
      end
      def tagend(name)
        case name
          # service info
          when 'TargetBucket'
            if @xmlpath == 'BucketLoggingStatus/LoggingEnabled'
              @result[:targetbucket] = @text
              @result[:enabled]      = true
            end
          when 'TargetPrefix'
            if @xmlpath == 'BucketLoggingStatus/LoggingEnabled'
              @result[:targetprefix] = @text
              @result[:enabled]      = true
            end
        end
      end
    end

    class S3CopyParser < RightAWSParser # :nodoc:
      def reset
        @result = {}
      end
      def tagend(name)
        case name
          when 'LastModified' then @result[:last_modified] = @text
          when 'ETag'         then @result[:e_tag]         = @text
        end
      end
    end

    #-----------------------------------------------------------------
    #      PARSERS: Non XML
    #-----------------------------------------------------------------

    class S3HttpResponseParser # :nodoc:
      attr_reader :result
      def parse(response)
        @result = response
      end
      def headers_to_string(headers)
        result = {}
        headers.each do |key, value|
          # Net::HTTP returns header values as arrays; flatten
          # single-element arrays into plain strings
          value = value.to_s if value.is_a?(Array) && value.size < 2
          result[key] = value
        end
        result
      end
    end

    class S3HttpResponseBodyParser < S3HttpResponseParser # :nodoc:
      def parse(response)
        @result = {
          :object  => response.body,
          :headers => headers_to_string(response.to_hash)
        }
      end
    end

    class S3HttpResponseHeadParser < S3HttpResponseParser # :nodoc:
      def parse(response)
        @result = headers_to_string(response.to_hash)
      end
    end

  end

end