aboisvert_aws 3.0.0 → 3.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1231 @@
1
+ #
2
+ # Copyright (c) 2007-2008 RightScale Inc
3
+ #
4
+ # Permission is hereby granted, free of charge, to any person obtaining
5
+ # a copy of this software and associated documentation files (the
6
+ # "Software"), to deal in the Software without restriction, including
7
+ # without limitation the rights to use, copy, modify, merge, publish,
8
+ # distribute, sublicense, and/or sell copies of the Software, and to
9
+ # permit persons to whom the Software is furnished to do so, subject to
10
+ # the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be
13
+ # included in all copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19
+ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20
+ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
+ #
23
+
24
+ module RightAws
25
+
26
+ class S3Interface < RightAwsBase
27
+
28
+ USE_100_CONTINUE_PUT_SIZE = 1_000_000
29
+
30
+ include RightAwsBaseInterface
31
+
32
+ DEFAULT_HOST = 's3.amazonaws.com'
33
+ DEFAULT_PORT = 443
34
+ DEFAULT_PROTOCOL = 'https'
35
+ DEFAULT_SERVICE = '/'
36
+ REQUEST_TTL = 30
37
+ DEFAULT_EXPIRES_AFTER = 1 * 24 * 60 * 60 # One day's worth of seconds
38
+ ONE_YEAR_IN_SECONDS = 365 * 24 * 60 * 60
39
+ AMAZON_HEADER_PREFIX = 'x-amz-'
40
+ AMAZON_METADATA_PREFIX = 'x-amz-meta-'
41
+ S3_REQUEST_PARAMETERS = [ 'acl',
42
+ 'location',
43
+ 'logging', # this one is beta, no support for now
44
+ 'response-content-type',
45
+ 'response-content-language',
46
+ 'response-expires',
47
+ 'response-cache-control',
48
+ 'response-content-disposition',
49
+ 'response-content-encoding',
50
+ 'torrent' ].sort
51
+
52
+
53
+ @@bench = AwsBenchmarkingBlock.new
54
+ def self.bench_xml
55
+ @@bench.xml
56
+ end
57
+ def self.bench_s3
58
+ @@bench.service
59
+ end
60
+
61
+ # Params supported:
62
+ # :no_subdomains => true # do not use bucket as a part of domain name but as a part of path
63
+ @@params = {}
64
+ def self.params
65
+ @@params
66
+ end
67
+
68
+ # get custom option
69
+ def param(name)
70
+ # - check explicitly defined param (@params)
71
+ # - otherwise check implicitly defined one (@@params)
72
+ @params.has_key?(name) ? @params[name] : @@params[name]
73
+ end
74
+
75
+ # Creates new RightS3 instance.
76
+ #
77
+ # s3 = RightAws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX','hgTHt68JY07JKUY08ftHYtERkjgtfERn57XXXXXX', {:logger => Logger.new('/tmp/x.log')}) #=> #<RightAws::S3Interface:0xb7b3c27c>
78
+ #
79
+ # Params is a hash:
80
+ #
81
+ # {:server => 's3.amazonaws.com' # Amazon service host: 's3.amazonaws.com'(default)
82
+ # :port => 443 # Amazon service port: 80 or 443(default)
83
+ # :protocol => 'https' # Amazon service protocol: 'http' or 'https'(default)
84
+ # :logger => Logger Object # Logger instance: logs to STDOUT if omitted
85
+ # :no_subdomains => true} # Force placing bucket name into path instead of domain name
86
+ #
87
+ def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
88
+ init({ :name => 'S3',
89
+ :default_host => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).host : DEFAULT_HOST,
90
+ :default_port => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).port : DEFAULT_PORT,
91
+ :default_service => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).path : DEFAULT_SERVICE,
92
+ :default_protocol => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).scheme : DEFAULT_PROTOCOL },
93
+ aws_access_key_id || ENV['AWS_ACCESS_KEY_ID'],
94
+ aws_secret_access_key || ENV['AWS_SECRET_ACCESS_KEY'],
95
+ params)
96
+ end
97
+
98
+
99
+ #-----------------------------------------------------------------
100
+ # Requests
101
+ #-----------------------------------------------------------------
102
+ # Produces canonical string for signing.
103
+ def canonical_string(method, path, headers={}, expires=nil) # :nodoc:
104
+ s3_headers = {}
105
+ headers.each do |key, value|
106
+ key = key.downcase
107
+ value = case
108
+ when value.is_a?(Array) then value.join('')
109
+ else value.to_s
110
+ end
111
+ s3_headers[key] = value.strip if key[/^#{AMAZON_HEADER_PREFIX}|^content-md5$|^content-type$|^date$/o]
112
+ end
113
+ s3_headers['content-type'] ||= ''
114
+ s3_headers['content-md5'] ||= ''
115
+ s3_headers['date'] = '' if s3_headers.has_key? 'x-amz-date'
116
+ s3_headers['date'] = expires if expires
117
+ # prepare output string
118
+ out_string = "#{method}\n"
119
+ s3_headers.sort { |a, b| a[0] <=> b[0] }.each do |key, value|
120
+ out_string << (key[/^#{AMAZON_HEADER_PREFIX}/o] ? "#{key}:#{value}\n" : "#{value}\n")
121
+ end
122
+ # ignore everything after the question mark by default...
123
+ out_string << path.gsub(/\?.*$/, '')
124
+ # ... unless there is a parameter that we care about.
125
+ S3_REQUEST_PARAMETERS.each do |parameter|
126
+ if path[/[&?]#{parameter}(=[^&]*)?($|&)/]
127
+ if $1
128
+ value = CGI::unescape($1)
129
+ else
130
+ value = ''
131
+ end
132
+ out_string << (out_string[/[?]/] ? "&#{parameter}#{value}" : "?#{parameter}#{value}")
133
+ end
134
+ end
135
+
136
+ out_string
137
+ end
138
+
139
+ # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
140
+ def is_dns_bucket?(bucket_name)
141
+ bucket_name = bucket_name.to_s
142
+ return nil unless (3..63) === bucket_name.size
143
+ bucket_name.split('.').each do |component|
144
+ return nil unless component[/^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/]
145
+ end
146
+ true
147
+ end
148
+
149
+ def fetch_request_params(headers) #:nodoc:
150
+ # default server to use
151
+ server = @params[:server]
152
+ service = @params[:service].to_s
153
+ service.chop! if service[%r{/$}] # remove trailing '/' from service
154
+       # extract bucket name and check its DNS compatibility
155
+ headers[:url].to_s[%r{^([a-z0-9._-]*)(/[^?]*)?(\?.+)?}i]
156
+ bucket_name, key_path, params_list = $1, $2, $3
157
+ key_path = key_path.gsub( '%2F', '/' ) if key_path
158
+ # select request model
159
+ if !param(:no_subdomains) && is_dns_bucket?(bucket_name)
160
+ # fix a path
161
+ server = "#{bucket_name}.#{server}"
162
+ key_path ||= '/'
163
+ path = "#{service}#{key_path}#{params_list}"
164
+ else
165
+ path = "#{service}/#{bucket_name}#{key_path}#{params_list}"
166
+ end
167
+ path_to_sign = "#{service}/#{bucket_name}#{key_path}#{params_list}"
168
+ # path_to_sign = "/#{bucket_name}#{key_path}#{params_list}"
169
+ [ server, path, path_to_sign ]
170
+ end
171
+
172
+ # Generates request hash for REST API.
173
+ # Assumes that headers[:url] is URL encoded (use CGI::escape)
174
+ def generate_rest_request(method, headers) # :nodoc:
175
+ # calculate request data
176
+ server, path, path_to_sign = fetch_request_params(headers)
177
+ data = headers[:data]
178
+       # remove unset(==optional) and symbolic keys
179
+ headers.each{ |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
180
+ #
181
+ headers['content-type'] ||= ''
182
+ headers['date'] = Time.now.httpdate
183
+ # create request
184
+ request = "Net::HTTP::#{method.capitalize}".right_constantize.new(path)
185
+ request.body = data if data
186
+ # set request headers and meta headers
187
+ headers.each { |key, value| request[key.to_s] = value }
188
+ #generate auth strings
189
+ auth_string = canonical_string(request.method, path_to_sign, request.to_hash)
190
+ signature = AwsUtils::sign(@aws_secret_access_key, auth_string)
191
+ # set other headers
192
+ request['Authorization'] = "AWS #{@aws_access_key_id}:#{signature}"
193
+ # prepare output hash
194
+ { :request => request,
195
+ :server => server,
196
+ :port => @params[:port],
197
+ :protocol => @params[:protocol] }
198
+ end
199
+
200
+ # Sends request to Amazon and parses the response.
201
+ # Raises AwsError if any banana happened.
202
+ def request_info(request, parser, &block) # :nodoc:
203
+ request_info_impl(:s3_connection, @@bench, request, parser, &block)
204
+ end
205
+
206
+ # Returns an array of customer's buckets. Each item is a +hash+.
207
+ #
208
+ # s3.list_all_my_buckets #=>
209
+ # [{:owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
210
+ # :owner_display_name => "root",
211
+ # :name => "bucket_name",
212
+ # :creation_date => "2007-04-19T18:47:43.000Z"}, ..., {...}]
213
+ #
214
+ def list_all_my_buckets(headers={})
215
+ req_hash = generate_rest_request('GET', headers.merge(:url=>''))
216
+ request_info(req_hash, S3ListAllMyBucketsParser.new(:logger => @logger))
217
+ rescue
218
+ on_exception
219
+ end
220
+
221
+ # Creates new bucket. Returns +true+ or an exception.
222
+ #
223
+ # # create a bucket at American server
224
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
225
+ # # create a bucket at European server
226
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
227
+ #
228
+ def create_bucket(bucket, headers={})
229
+ data = nil
230
+ location = case headers[:location].to_s
231
+ when 'us','US' then ''
232
+ when 'eu' then 'EU'
233
+ else headers[:location].to_s
234
+ end
235
+
236
+ unless location.right_blank?
237
+ data = "<CreateBucketConfiguration><LocationConstraint>#{location}</LocationConstraint></CreateBucketConfiguration>"
238
+ end
239
+ req_hash = generate_rest_request('PUT', headers.merge(:url=>bucket, :data => data))
240
+ request_info(req_hash, RightHttp2xxParser.new)
241
+ rescue Exception => e
242
+ # if the bucket exists AWS returns an error for the location constraint interface. Drop it
243
+ e.is_a?(RightAws::AwsError) && e.message.include?('BucketAlreadyOwnedByYou') ? true : on_exception
244
+ end
245
+
246
+ # Retrieve bucket location
247
+ #
248
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
249
+ # puts s3.bucket_location('my-awesome-bucket-us') #=> '' (Amazon's default value assumed)
250
+ #
251
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
252
+ # puts s3.bucket_location('my-awesome-bucket-eu') #=> 'EU'
253
+ #
254
+ def bucket_location(bucket, headers={})
255
+ req_hash = generate_rest_request('GET', headers.merge(:url=>"#{bucket}?location"))
256
+ request_info(req_hash, S3BucketLocationParser.new)
257
+ rescue
258
+ on_exception
259
+ end
260
+
261
+ # Retrieves the logging configuration for a bucket.
262
+ # Returns a hash of {:enabled, :targetbucket, :targetprefix}
263
+ #
264
+ # s3.interface.get_logging_parse(:bucket => "asset_bucket")
265
+ # => {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
266
+ #
267
+ #
268
+ def get_logging_parse(params)
269
+ AwsUtils.mandatory_arguments([:bucket], params)
270
+ AwsUtils.allow_only([:bucket, :headers], params)
271
+ params[:headers] = {} unless params[:headers]
272
+ req_hash = generate_rest_request('GET', params[:headers].merge(:url=>"#{params[:bucket]}?logging"))
273
+ request_info(req_hash, S3LoggingParser.new)
274
+ rescue
275
+ on_exception
276
+ end
277
+
278
+ # Sets logging configuration for a bucket from the XML configuration document.
279
+ # params:
280
+ # :bucket
281
+ # :xmldoc
282
+ def put_logging(params)
283
+ AwsUtils.mandatory_arguments([:bucket,:xmldoc], params)
284
+ AwsUtils.allow_only([:bucket,:xmldoc, :headers], params)
285
+ params[:headers] = {} unless params[:headers]
286
+ req_hash = generate_rest_request('PUT', params[:headers].merge(:url=>"#{params[:bucket]}?logging", :data => params[:xmldoc]))
287
+ request_info(req_hash, RightHttp2xxParser.new)
288
+ rescue
289
+ on_exception
290
+ end
291
+
292
+     # Deletes a bucket. The bucket must be empty! Returns +true+ or an exception.
293
+ #
294
+ # s3.delete_bucket('my_awesome_bucket') #=> true
295
+ #
296
+ # See also: force_delete_bucket method
297
+ #
298
+ def delete_bucket(bucket, headers={})
299
+ req_hash = generate_rest_request('DELETE', headers.merge(:url=>bucket))
300
+ request_info(req_hash, RightHttp2xxParser.new)
301
+ rescue
302
+ on_exception
303
+ end
304
+
305
+ # Returns an array of bucket's keys. Each array item (key data) is a +hash+.
306
+ #
307
+     #  s3.list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, 'delimiter'=>'' }) #=>
308
+ # [{:key => "test1",
309
+ # :last_modified => "2007-05-18T07:00:59.000Z",
310
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
311
+ # :owner_display_name => "root",
312
+ # :e_tag => "000000000059075b964b07152d234b70",
313
+ # :storage_class => "STANDARD",
314
+ # :size => 3,
315
+ # :service=> {'is_truncated' => false,
316
+ # 'prefix' => "t",
317
+ # 'marker' => "",
318
+ # 'name' => "my_awesome_bucket",
319
+ # 'max-keys' => "5"}, ..., {...}]
320
+ #
321
+ def list_bucket(bucket, options={}, headers={})
322
+ bucket += '?'+options.map{|k, v| "#{k.to_s}=#{CGI::escape v.to_s}"}.join('&') unless options.right_blank?
323
+ req_hash = generate_rest_request('GET', headers.merge(:url=>bucket))
324
+ request_info(req_hash, S3ListBucketParser.new(:logger => @logger))
325
+ rescue
326
+ on_exception
327
+ end
328
+
329
+ # Incrementally list the contents of a bucket. Yields the following hash to a block:
330
+     #  s3.incrementally_list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, 'delimiter'=>'' }) yields
331
+ # {
332
+ # :name => 'bucketname',
333
+ # :prefix => 'subfolder/',
334
+ # :marker => 'fileN.jpg',
335
+ # :max_keys => 234,
336
+ # :delimiter => '/',
337
+ # :is_truncated => true,
338
+ # :next_marker => 'fileX.jpg',
339
+ # :contents => [
340
+ # { :key => "file1",
341
+ # :last_modified => "2007-05-18T07:00:59.000Z",
342
+ # :e_tag => "000000000059075b964b07152d234b70",
343
+ # :size => 3,
344
+ # :storage_class => "STANDARD",
345
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
346
+ # :owner_display_name => "root"
347
+ # }, { :key, ...}, ... {:key, ...}
348
+ # ]
349
+ # :common_prefixes => [
350
+ # "prefix1",
351
+ # "prefix2",
352
+ # ...,
353
+ # "prefixN"
354
+ # ]
355
+ # }
356
+ def incrementally_list_bucket(bucket, options={}, headers={}, &block)
357
+ internal_options = options.right_symbolize_keys
358
+ begin
359
+ internal_bucket = bucket.dup
360
+ internal_bucket += '?'+internal_options.map{|k, v| "#{k.to_s}=#{CGI::escape v.to_s}"}.join('&') unless internal_options.right_blank?
361
+ req_hash = generate_rest_request('GET', headers.merge(:url=>internal_bucket))
362
+ response = request_info(req_hash, S3ImprovedListBucketParser.new(:logger => @logger))
363
+ there_are_more_keys = response[:is_truncated]
364
+ if(there_are_more_keys)
365
+ internal_options[:marker] = decide_marker(response)
366
+ total_results = response[:contents].length + response[:common_prefixes].length
367
+ internal_options[:'max-keys'] ? (internal_options[:'max-keys'] -= total_results) : nil
368
+ end
369
+ yield response
370
+ end while there_are_more_keys && under_max_keys(internal_options)
371
+ true
372
+ rescue
373
+ on_exception
374
+ end
375
+
376
+
377
+ private
378
+ def decide_marker(response)
379
+ return response[:next_marker].dup if response[:next_marker]
380
+ last_key = response[:contents].last[:key]
381
+ last_prefix = response[:common_prefixes].last
382
+ if(!last_key)
383
+ return nil if(!last_prefix)
384
+ last_prefix.dup
385
+ elsif(!last_prefix)
386
+ last_key.dup
387
+ else
388
+ last_key > last_prefix ? last_key.dup : last_prefix.dup
389
+ end
390
+ end
391
+
392
+ def under_max_keys(internal_options)
393
+ internal_options[:'max-keys'] ? internal_options[:'max-keys'] > 0 : true
394
+ end
395
+
396
+ public
397
+ # Saves object to Amazon. Returns +true+ or an exception.
398
+ # Any header starting with AMAZON_METADATA_PREFIX is considered
399
+ # user metadata. It will be stored with the object and returned
400
+ # when you retrieve the object. The total size of the HTTP
401
+ # request, not including the body, must be less than 4 KB.
402
+ #
403
+ # s3.put('my_awesome_bucket', 'log/current/1.log', 'Ola-la!', 'x-amz-meta-family'=>'Woho556!') #=> true
404
+ #
405
+ # This method is capable of 'streaming' uploads; that is, it can upload
406
+ # data from a file or other IO object without first reading all the data
407
+ # into memory. This is most useful for large PUTs - it is difficult to read
408
+ # a 2 GB file entirely into memory before sending it to S3.
409
+ # To stream an upload, pass an object that responds to 'read' (like the read
410
+ # method of IO) and to either 'lstat' or 'size'. For files, this means
411
+ # streaming is enabled by simply making the call:
412
+ #
413
+ # s3.put(bucket_name, 'S3keyname.forthisfile', File.open('localfilename.dat'))
414
+ #
415
+ # If the IO object you wish to stream from responds to the read method but
416
+ # doesn't implement lstat or size, you can extend the object dynamically
417
+ # to implement these methods, or define your own class which defines these
418
+ # methods. Be sure that your class returns 'nil' from read() after having
419
+ # read 'size' bytes. Otherwise S3 will drop the socket after
420
+ # 'Content-Length' bytes have been uploaded, and HttpConnection will
421
+ # interpret this as an error.
422
+ #
423
+ # This method now supports very large PUTs, where very large
424
+ # is > 2 GB.
425
+ #
426
+ # For Win32 users: Files and IO objects should be opened in binary mode. If
427
+ # a text mode IO object is passed to PUT, it will be converted to binary
428
+ # mode.
429
+ #
430
+
431
+ def put(bucket, key, data=nil, headers={})
432
+ # On Windows, if someone opens a file in text mode, we must reset it so
433
+ # to binary mode for streaming to work properly
434
+ if(data.respond_to?(:binmode))
435
+ data.binmode
436
+ end
437
+ if (data.respond_to?(:lstat) && data.lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
438
+ (data.respond_to?(:size) && data.size >= USE_100_CONTINUE_PUT_SIZE)
439
+ headers['expect'] = '100-continue'
440
+ end
441
+ req_hash = generate_rest_request('PUT', headers.merge(:url=>"#{bucket}/#{CGI::escape key}", :data=>data))
442
+ request_info(req_hash, RightHttp2xxParser.new)
443
+ rescue
444
+ on_exception
445
+ end
446
+
447
+
448
+
449
+ # New experimental API for uploading objects, introduced in RightAws 1.8.1.
450
+ # store_object is similar in function to the older function put, but returns the full response metadata. It also allows for optional verification
451
+ # of object md5 checksums on upload. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
452
+ # The hash of the response headers contains useful information like the Amazon request ID and the object ETag (MD5 checksum).
453
+ #
454
+ # If the optional :md5 argument is provided, store_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
455
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
456
+ #
457
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
458
+ #
459
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
460
+ # => {"x-amz-id-2"=>"SVsnS2nfDaR+ixyJUlRKM8GndRyEMS16+oZRieamuL61pPxPaTuWrWtlYaEhYrI/",
461
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
462
+ # "date"=>"Mon, 29 Sep 2008 18:57:46 GMT",
463
+ # :verified_md5=>true,
464
+ # "x-amz-request-id"=>"63916465939995BA",
465
+ # "server"=>"AmazonS3",
466
+ # "content-length"=>"0"}
467
+ #
468
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :data => "polemonium" )
469
+ # => {"x-amz-id-2"=>"MAt9PLjgLX9UYJ5tV2fI/5dBZdpFjlzRVpWgBDpvZpl+V+gJFcBMW2L+LBstYpbR",
470
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
471
+ # "date"=>"Mon, 29 Sep 2008 18:58:56 GMT",
472
+ # :verified_md5=>false,
473
+ # "x-amz-request-id"=>"3B25A996BC2CDD3B",
474
+ # "server"=>"AmazonS3",
475
+ # "content-length"=>"0"}
476
+
477
+ def store_object(params)
478
+ AwsUtils.allow_only([:bucket, :key, :data, :headers, :md5], params)
479
+ AwsUtils.mandatory_arguments([:bucket, :key, :data], params)
480
+ params[:headers] = {} unless params[:headers]
481
+
482
+ params[:data].binmode if(params[:data].respond_to?(:binmode)) # On Windows, if someone opens a file in text mode, we must reset it to binary mode for streaming to work properly
483
+ if (params[:data].respond_to?(:lstat) && params[:data].lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
484
+ (params[:data].respond_to?(:size) && params[:data].size >= USE_100_CONTINUE_PUT_SIZE)
485
+ params[:headers]['expect'] = '100-continue'
486
+ end
487
+
488
+ req_hash = generate_rest_request('PUT', params[:headers].merge(:url=>"#{params[:bucket]}/#{CGI::escape params[:key]}", :data=>params[:data]))
489
+ resp = request_info(req_hash, S3HttpResponseHeadParser.new)
490
+ if(params[:md5])
491
+ resp[:verified_md5] = (resp['etag'].gsub(/\"/, '') == params[:md5]) ? true : false
492
+ else
493
+ resp[:verified_md5] = false
494
+ end
495
+ resp
496
+ rescue
497
+ on_exception
498
+ end
499
+
500
+ # Identical in function to store_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
501
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
502
+ # This call is implemented as a wrapper around store_object and the user may gain different semantics by creating a custom wrapper.
503
+ #
504
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
505
+ # => {"x-amz-id-2"=>"IZN3XsH4FlBU0+XYkFTfHwaiF1tNzrm6dIW2EM/cthKvl71nldfVC0oVQyydzWpb",
506
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
507
+ # "date"=>"Mon, 29 Sep 2008 18:38:32 GMT",
508
+ # :verified_md5=>true,
509
+ # "x-amz-request-id"=>"E8D7EA4FE00F5DF7",
510
+ # "server"=>"AmazonS3",
511
+ # "content-length"=>"0"}
512
+ #
513
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2953", :data => "polemonium" )
514
+ # RightAws::AwsError: Uploaded object failed MD5 checksum verification: {"x-amz-id-2"=>"HTxVtd2bf7UHHDn+WzEH43MkEjFZ26xuYvUzbstkV6nrWvECRWQWFSx91z/bl03n",
515
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
516
+ # "date"=>"Mon, 29 Sep 2008 18:38:41 GMT",
517
+ # :verified_md5=>false,
518
+ # "x-amz-request-id"=>"0D7ADE09F42606F2",
519
+ # "server"=>"AmazonS3",
520
+ # "content-length"=>"0"}
521
+ def store_object_and_verify(params)
522
+ AwsUtils.mandatory_arguments([:md5], params)
523
+ r = store_object(params)
524
+ r[:verified_md5] ? (return r) : (raise AwsError.new("Uploaded object failed MD5 checksum verification: #{r.inspect}"))
525
+ end
526
+
527
+ # Retrieves object data from Amazon. Returns a +hash+ or an exception.
528
+ #
529
+     #  s3.get('my_awesome_bucket', 'log/current/1.log') #=>
530
+ #
531
+ # {:object => "Ola-la!",
532
+ # :headers => {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
533
+ # "content-type" => "",
534
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
535
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
536
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
537
+ # "x-amz-meta-family" => "Woho556!",
538
+ # "x-amz-request-id" => "0000000C246D770C",
539
+ # "server" => "AmazonS3",
540
+ # "content-length" => "7"}}
541
+ #
542
+ # If a block is provided, yields incrementally to the block as
543
+ # the response is read. For large responses, this function is ideal as
544
+ # the response can be 'streamed'. The hash containing header fields is
545
+ # still returned.
546
+ # Example:
547
+ # foo = File.new('./chunder.txt', File::CREAT|File::RDWR)
548
+ # rhdr = s3.get('aws-test', 'Cent5V1_7_1.img.part.00') do |chunk|
549
+ # foo.write(chunk)
550
+ # end
551
+ # foo.close
552
+ #
553
+
554
+ def get(bucket, key, headers={}, &block)
555
+ req_hash = generate_rest_request('GET', headers.merge(:url=>"#{bucket}/#{CGI::escape key}"))
556
+ request_info(req_hash, S3HttpResponseBodyParser.new, &block)
557
+ rescue
558
+ on_exception
559
+ end
560
+
561
+ # New experimental API for retrieving objects, introduced in RightAws 1.8.1.
562
+ # retrieve_object is similar in function to the older function get. It allows for optional verification
563
+ # of object md5 checksums on retrieval. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
564
+ #
565
+ # If the optional :md5 argument is provided, retrieve_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
566
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
567
+ #
568
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
569
+ # Mandatory arguments:
570
+ # :bucket - the bucket in which the object is stored
571
+ # :key - the object address (or path) within the bucket
572
+ # Optional arguments:
573
+ # :headers - hash of additional HTTP headers to include with the request
574
+ # :md5 - MD5 checksum against which to verify the retrieved object
575
+ #
576
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo")
577
+ # => {:verified_md5=>false,
578
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
579
+ # "x-amz-id-2"=>"2Aj3TDz6HP5109qly//18uHZ2a1TNHGLns9hyAtq2ved7wmzEXDOPGRHOYEa3Qnp",
580
+ # "content-type"=>"",
581
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
582
+ # "date"=>"Tue, 30 Sep 2008 00:52:44 GMT",
583
+ # "x-amz-request-id"=>"EE4855DE27A2688C",
584
+ # "server"=>"AmazonS3",
585
+ # "content-length"=>"10"},
586
+ # :object=>"polemonium"}
587
+ #
588
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo", :md5=>'a507841b1bc8115094b00bbe8c1b2954')
589
+ # => {:verified_md5=>true,
590
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
591
+ # "x-amz-id-2"=>"mLWQcI+VuKVIdpTaPXEo84g0cz+vzmRLbj79TS8eFPfw19cGFOPxuLy4uGYVCvdH",
592
+ # "content-type"=>"", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
593
+ # "date"=>"Tue, 30 Sep 2008 00:53:08 GMT",
594
+ # "x-amz-request-id"=>"6E7F317356580599",
595
+ # "server"=>"AmazonS3",
596
+ # "content-length"=>"10"},
597
+ # :object=>"polemonium"}
598
+ # If a block is provided, yields incrementally to the block as
599
+ # the response is read. For large responses, this function is ideal as
600
+ # the response can be 'streamed'. The hash containing header fields is
601
+ # still returned.
602
+ def retrieve_object(params, &block)
603
+ AwsUtils.mandatory_arguments([:bucket, :key], params)
604
+ AwsUtils.allow_only([:bucket, :key, :headers, :md5], params)
605
+ params[:headers] = {} unless params[:headers]
606
+ req_hash = generate_rest_request('GET', params[:headers].merge(:url=>"#{params[:bucket]}/#{CGI::escape params[:key]}"))
607
+ resp = request_info(req_hash, S3HttpResponseBodyParser.new, &block)
608
+ resp[:verified_md5] = false
609
+ if(params[:md5] && (resp[:headers]['etag'].gsub(/\"/,'') == params[:md5]))
610
+ resp[:verified_md5] = true
611
+ end
612
+ resp
613
+ rescue
614
+ on_exception
615
+ end
616
+
617
+ # Identical in function to retrieve_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
618
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
619
+ # This call is implemented as a wrapper around retrieve_object and the user may gain different semantics by creating a custom wrapper.
620
+ def retrieve_object_and_verify(params, &block)
621
+ AwsUtils.mandatory_arguments([:md5], params)
622
+ resp = retrieve_object(params, &block)
623
+ return resp if resp[:verified_md5]
624
+ raise AwsError.new("Retrieved object failed MD5 checksum verification: #{resp.inspect}")
625
+ end
626
+
627
+ # Retrieves object metadata. Returns a +hash+ of http_response_headers.
628
+ #
629
+     #  s3.head('my_awesome_bucket', 'log/current/1.log') #=>
630
+ # {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
631
+ # "content-type" => "",
632
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
633
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
634
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
635
+ # "x-amz-meta-family" => "Woho556!",
636
+ # "x-amz-request-id" => "0000000C246D770C",
637
+ # "server" => "AmazonS3",
638
+ # "content-length" => "7"}
639
+ #
640
+ def head(bucket, key, headers={})
641
+ req_hash = generate_rest_request('HEAD', headers.merge(:url=>"#{bucket}/#{CGI::escape key}"))
642
+ request_info(req_hash, S3HttpResponseHeadParser.new)
643
+ rescue
644
+ on_exception
645
+ end
646
+
647
+ # Deletes key. Returns +true+ or an exception.
648
+ #
649
+     #  s3.delete('my_awesome_bucket', 'log/current/1.log') #=> true
650
+ #
651
+ def delete(bucket, key='', headers={})
652
+ req_hash = generate_rest_request('DELETE', headers.merge(:url=>"#{bucket}/#{CGI::escape key}"))
653
+ request_info(req_hash, RightHttp2xxParser.new)
654
+ rescue
655
+ on_exception
656
+ end
657
+
658
+ # Copy an object.
659
+ # directive: :copy - copy meta-headers from source (default value)
660
+ # :replace - replace meta-headers by passed ones
661
+ #
662
+ # # copy a key with meta-headers
663
+ # s3.copy('b1', 'key1', 'b1', 'key1_copy') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:25:22.000Z"}
664
+ #
665
+ # # copy a key, overwrite meta-headers
666
+ # s3.copy('b1', 'key2', 'b1', 'key2_copy', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:26:22.000Z"}
667
+ #
668
+ # see: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingCopyingObjects.html
669
+ # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
670
+ #
671
+ def copy(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
672
+ dest_key ||= src_key
673
+ headers['x-amz-metadata-directive'] = directive.to_s.upcase
674
+ headers['x-amz-copy-source'] = "#{src_bucket}/#{CGI::escape src_key}"
675
+ req_hash = generate_rest_request('PUT', headers.merge(:url=>"#{dest_bucket}/#{CGI::escape dest_key}"))
676
+ request_info(req_hash, S3CopyParser.new)
677
+ rescue
678
+ on_exception
679
+ end
680
+
681
+ # Move an object.
682
+ # directive: :copy - copy meta-headers from source (default value)
683
+ # :replace - replace meta-headers by passed ones
684
+ #
685
+ # # move bucket1/key1 to bucket1/key2
686
+ # s3.move('bucket1', 'key1', 'bucket1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:27:22.000Z"}
687
+ #
688
+ # # move bucket1/key1 to bucket2/key2 with new meta-headers assignment
689
+ # s3.copy('bucket1', 'key1', 'bucket2', 'key2', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:28:22.000Z"}
690
+ #
691
def move(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  # Resolve the destination key up front. Previously a nil dest_key never
  # compared equal to src_key below, so moving within the same bucket with
  # the default destination copied the object onto itself and then DELETED
  # it -- destroying the object.
  dest_key ||= src_key
  copy_result = copy(src_bucket, src_key, dest_bucket, dest_key, directive, headers)
  # Delete the original key only when it differs from the destination.
  delete(src_bucket, src_key) unless src_bucket == dest_bucket && src_key == dest_key
  copy_result
end
697
+
698
+ # Rename an object.
699
+ #
700
+ # # rename bucket1/key1 to bucket1/key2
701
+ # s3.rename('bucket1', 'key1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:29:22.000Z"}
702
+ #
703
def rename(src_bucket, src_key, dest_key, headers={})
  # A rename is a same-bucket move: copy with the :copy meta-header
  # directive (source meta-headers preserved), then delete the source.
  move(src_bucket, src_key, src_bucket, dest_key, :copy, headers)
end
706
+
707
+ # Retieves the ACL (access control policy) for a bucket or object. Returns a hash of headers and xml doc with ACL data. See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html.
708
+ #
709
+ # s3.get_acl('my_awesome_bucket', 'log/curent/1.log') #=>
710
+ # {:headers => {"x-amz-id-2"=>"B3BdDMDUz+phFF2mGBH04E46ZD4Qb9HF5PoPHqDRWBv+NVGeA3TOQ3BkVvPBjgxX",
711
+ # "content-type"=>"application/xml;charset=ISO-8859-1",
712
+ # "date"=>"Wed, 23 May 2007 09:40:16 GMT",
713
+ # "x-amz-request-id"=>"B183FA7AB5FBB4DD",
714
+ # "server"=>"AmazonS3",
715
+ # "transfer-encoding"=>"chunked"},
716
+ # :object => "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Owner>
717
+ # <ID>16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Owner>
718
+ # <AccessControlList><Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>
719
+ # 16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Grantee>
720
+ # <Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>" }
721
+ #
722
def get_acl(bucket, key='', headers={})
  # Object ACL when a key is supplied, bucket ACL otherwise.
  resource = key.right_blank? ? "#{bucket}" : "#{bucket}/#{CGI::escape key}"
  req_hash = generate_rest_request('GET', headers.merge(:url => "#{resource}?acl"))
  request_info(req_hash, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
729
+
730
+ # Retieves the ACL (access control policy) for a bucket or object.
731
+ # Returns a hash of {:owner, :grantees}
732
+ #
733
+ # s3.get_acl_parse('my_awesome_bucket', 'log/curent/1.log') #=>
734
+ #
735
+ # { :grantees=>
736
+ # { "16...2a"=>
737
+ # { :display_name=>"root",
738
+ # :permissions=>["FULL_CONTROL"],
739
+ # :attributes=>
740
+ # { "xsi:type"=>"CanonicalUser",
741
+ # "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}},
742
+ # "http://acs.amazonaws.com/groups/global/AllUsers"=>
743
+ # { :display_name=>"AllUsers",
744
+ # :permissions=>["READ"],
745
+ # :attributes=>
746
+ # { "xsi:type"=>"Group",
747
+ # "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}}},
748
+ # :owner=>
749
+ # { :id=>"16..2a",
750
+ # :display_name=>"root"}}
751
+ #
752
def get_acl_parse(bucket, key='', headers={})
  # Object ACL when a key is supplied, bucket ACL otherwise.
  resource = key.right_blank? ? "#{bucket}" : "#{bucket}/#{CGI::escape key}"
  req_hash  = generate_rest_request('GET', headers.merge(:url => "#{resource}?acl"))
  acl       = request_info(req_hash, S3AclParser.new(:logger => @logger))
  grantees  = {}
  acl[:grantees].each do |grantee|
    # A grantee is identified by canonical-user id or by group URI.
    id = grantee[:id] || grantee[:uri]
    if grantees.key?(id)
      # Same grantee granted several permissions: accumulate them.
      grantees[id][:permissions] << grantee[:permissions]
    else
      grantees[id] = {
        # Group grantees have no display name; fall back to the last
        # URI path segment (e.g. 'AllUsers').
        :display_name => grantee[:display_name] || grantee[:uri].to_s[/[^\/]*$/],
        :permissions  => Array(grantee[:permissions]),
        :attributes   => grantee[:attributes] }
    end
  end
  { :owner => acl[:owner], :grantees => grantees }
rescue
  on_exception
end
774
+
775
+ # Sets the ACL on a bucket or object.
776
# Sets the ACL on a bucket or object.
def put_acl(bucket, key, acl_xml_doc, headers={})
  # Object ACL when a key is supplied, bucket ACL otherwise.
  resource = key.right_blank? ? "#{bucket}" : "#{bucket}/#{CGI::escape key}"
  req_hash = generate_rest_request('PUT', headers.merge(:url => "#{resource}?acl", :data => acl_xml_doc))
  request_info(req_hash, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
783
+
784
+ # Retieves the ACL (access control policy) for a bucket. Returns a hash of headers and xml doc with ACL data.
785
# Retrieves the ACL (access control policy) for a bucket.
# Returns a hash of headers and an xml doc with the ACL data.
def get_bucket_acl(bucket, headers={})
  # A bucket ACL is just an object ACL with an empty key.
  get_acl(bucket, '', headers)
rescue
  on_exception
end
790
+
791
+ # Sets the ACL on a bucket only.
792
# Sets the ACL on a bucket only.
def put_bucket_acl(bucket, acl_xml_doc, headers={})
  # A bucket ACL is just an object ACL with an empty key.
  put_acl(bucket, '', acl_xml_doc, headers)
rescue
  on_exception
end
797
+
798
+
799
+ # Removes all keys from bucket. Returns +true+ or an exception.
800
+ #
801
+ # s3.clear_bucket('my_awesome_bucket') #=> true
802
+ #
803
def clear_bucket(bucket)
  # Walk the bucket listing page by page and delete each key.
  incrementally_list_bucket(bucket) do |batch|
    batch[:contents].each do |entry|
      delete(bucket, entry[:key])
    end
  end
  true
rescue
  on_exception
end
811
+
812
+ # Deletes all keys in bucket then deletes bucket. Returns +true+ or an exception.
813
+ #
814
+ # s3.force_delete_bucket('my_awesome_bucket')
815
+ #
816
def force_delete_bucket(bucket)
  # S3 refuses to delete non-empty buckets, so empty it first.
  clear_bucket(bucket)
  delete_bucket(bucket)
rescue
  on_exception
end
822
+
823
+ # Deletes all keys where the 'folder_key' may be assumed as 'folder' name. Returns an array of string keys that have been deleted.
824
+ #
825
+ # s3.list_bucket('my_awesome_bucket').map{|key_data| key_data[:key]} #=> ['test','test/2/34','test/3','test1','test1/logs']
826
+ # s3.delete_folder('my_awesome_bucket','test') #=> ['test','test/2/34','test/3']
827
+ #
828
def delete_folder(bucket, folder_key, separator='/')
  # Non-destructive chomp: do not mutate the caller's string
  # (the old code used chomp!).
  folder = folder_key.chomp(separator)
  # Escape the folder name and separator so regexp metacharacters in key
  # names (e.g. '.', '+') cannot produce false matches.
  folder_re = /^#{Regexp.escape(folder)}($|#{Regexp.escape(separator)}.*)/
  allkeys = []
  incrementally_list_bucket(bucket, { 'prefix' => folder }) do |results|
    keys = results[:contents].map{ |s3_key| s3_key[:key][folder_re] ? s3_key[:key] : nil }.compact
    keys.each{ |key| delete(bucket, key) }
    # Accumulate a flat list of key strings, matching the documented
    # return value (the old '<<' produced a nested array-of-arrays).
    allkeys.concat(keys)
  end
  allkeys
rescue
  on_exception
end
840
+
841
+ # Retrieves object data only (headers are omitted). Returns +string+ or an exception.
842
+ #
843
+ # s3.get('my_awesome_bucket', 'log/curent/1.log') #=> 'Ola-la!'
844
+ #
845
def get_object(bucket, key, headers={})
  # Fetch the full response and return only the body.
  response = get(bucket, key, headers)
  response[:object]
rescue
  on_exception
end
850
+
851
+ #-----------------------------------------------------------------
852
+ # Query API: Links
853
+ #-----------------------------------------------------------------
854
+
855
+ # Generates link for QUERY API
856
# Generates a signed query-string-authentication URL.
def generate_link(method, headers={}, expires=nil) #:nodoc:
  # Resolve host/path for the request; sign the unescaped path.
  server, path, path_to_sign = fetch_request_params(headers)
  path_to_sign = CGI.unescape(path_to_sign)
  # Expiration: a "small" Integer is treated as seconds from now, anything
  # else (absolute Time or epoch seconds) is used as-is. Integer replaces
  # the deprecated Fixnum, which was removed in Ruby 3.2.
  expires ||= DEFAULT_EXPIRES_AFTER
  expires = Time.now.utc + expires if expires.is_a?(Integer) && (expires < ONE_YEAR_IN_SECONDS)
  expires = expires.to_i
  # Drop unset (optional) values and symbolic service keys; build a fresh
  # hash so the caller's +headers+ is not mutated in place.
  headers = headers.reject{ |key, value| value.nil? || key.is_a?(Symbol) }
  # Generate auth strings.
  auth_string = canonical_string(method, path_to_sign, headers, expires)
  signature   = CGI::escape(AwsUtils::sign(@aws_secret_access_key, auth_string))
  # Append the query-string authentication parameters.
  addon = "Signature=#{signature}&Expires=#{expires}&AWSAccessKeyId=#{@aws_access_key_id}"
  path += path[/\?/] ? "&#{addon}" : "?#{addon}"
  "#{@params[:protocol]}://#{server}:#{@params[:port]}#{path}"
rescue
  on_exception
end
876
+
877
+ # Generates link for 'ListAllMyBuckets'.
878
+ #
879
+ # s3.list_all_my_buckets_link #=> url string
880
+ #
881
def list_all_my_buckets_link(expires=nil, headers={})
  # The service root (empty url) lists all buckets owned by the caller.
  generate_link('GET', headers.merge(:url => ''), expires)
rescue
  on_exception
end
886
+
887
+ # Generates link for 'CreateBucket'.
888
+ #
889
+ # s3.create_bucket_link('my_awesome_bucket') #=> url string
890
+ #
891
def create_bucket_link(bucket, expires=nil, headers={})
  request_headers = headers.merge(:url => bucket)
  generate_link('PUT', request_headers, expires)
rescue
  on_exception
end
896
+
897
+ # Generates link for 'DeleteBucket'.
898
+ #
899
+ # s3.delete_bucket_link('my_awesome_bucket') #=> url string
900
+ #
901
def delete_bucket_link(bucket, expires=nil, headers={})
  request_headers = headers.merge(:url => bucket)
  generate_link('DELETE', request_headers, expires)
rescue
  on_exception
end
906
+
907
+ # Generates link for 'ListBucket'.
908
+ #
909
+ # s3.list_bucket_link('my_awesome_bucket') #=> url string
910
+ #
911
def list_bucket_link(bucket, options=nil, expires=nil, headers={})
  # Append listing options (prefix, marker, max-keys, delimiter...)
  # as an escaped query string.
  unless options.right_blank?
    query = options.map{ |name, value| "#{name}=#{CGI::escape value.to_s}" }.join('&')
    bucket += "?#{query}"
  end
  generate_link('GET', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
917
+
918
+ # Generates link for 'PutObject'.
919
+ #
920
+ # s3.put_link('my_awesome_bucket',key, object) #=> url string
921
+ #
922
def put_link(bucket, key, data=nil, expires=nil, headers={})
  target = "#{bucket}/#{CGI::escape key}"
  generate_link('PUT', headers.merge(:url => target, :data => data), expires)
rescue
  on_exception
end
927
+
928
+ # Generates link for 'GetObject'.
929
+ #
930
+ # if a bucket comply with virtual hosting naming then retuns a link with the
931
+ # bucket as a part of host name:
932
+ #
933
+ # s3.get_link('my-awesome-bucket',key) #=> https://my-awesome-bucket.s3.amazonaws.com:443/asia%2Fcustomers?Signature=nh7...
934
+ #
935
+ # otherwise returns an old style link (the bucket is a part of path):
936
+ #
937
+ # s3.get_link('my_awesome_bucket',key) #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?Signature=QAO...
938
+ #
939
+ # see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html
940
+ #
941
+ # To specify +response+-* parameters, define them in the response_params hash:
942
+ #
943
+ # s3.get_link('my_awesome_bucket',key,nil,{},{ "response-content-disposition" => "attachment; filename=caf�.png", "response-content-type" => "image/png"})
944
+ #
945
+ # #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?response-content-disposition=attachment%3B%20filename%3Dcaf%25C3%25A9.png&response-content-type=image%2Fpng&Signature=wio...
946
+ #
947
def get_link(bucket, key, expires=nil, headers={}, response_params={})
  # Build the optional response-* overrides as a query string;
  # '+' must become '%20' for S3 to accept the signature.
  query = ''
  unless response_params.empty?
    pairs = response_params.map do |name, value|
      "#{name}=#{CGI::escape(value).gsub(/[+]/, '%20')}"
    end
    query = '?' + pairs.join('&')
  end
  generate_link('GET', headers.merge(:url => "#{bucket}/#{CGI::escape key}#{query}"), expires)
rescue
  on_exception
end
957
+
958
+ # Generates link for 'HeadObject'.
959
+ #
960
+ # s3.head_link('my_awesome_bucket',key) #=> url string
961
+ #
962
def head_link(bucket, key, expires=nil, headers={})
  target = "#{bucket}/#{CGI::escape key}"
  generate_link('HEAD', headers.merge(:url => target), expires)
rescue
  on_exception
end
967
+
968
+ # Generates link for 'DeleteObject'.
969
+ #
970
+ # s3.delete_link('my_awesome_bucket',key) #=> url string
971
+ #
972
def delete_link(bucket, key, expires=nil, headers={})
  target = "#{bucket}/#{CGI::escape key}"
  generate_link('DELETE', headers.merge(:url => target), expires)
rescue
  on_exception
end
977
+
978
+
979
+ # Generates link for 'GetACL'.
980
+ #
981
+ # s3.get_acl_link('my_awesome_bucket',key) #=> url string
982
+ #
983
def get_acl_link(bucket, key='', headers={})
  generate_link('GET', headers.merge(:url => "#{bucket}/#{CGI::escape key}?acl"))
rescue
  on_exception
end
988
+
989
+ # Generates link for 'PutACL'.
990
+ #
991
+ # s3.put_acl_link('my_awesome_bucket',key) #=> url string
992
+ #
993
def put_acl_link(bucket, key='', headers={})
  generate_link('PUT', headers.merge(:url => "#{bucket}/#{CGI::escape key}?acl"))
rescue
  on_exception
end
998
+
999
+ # Generates link for 'GetBucketACL'.
1000
+ #
1001
+ # s3.get_acl_link('my_awesome_bucket',key) #=> url string
1002
+ #
1003
def get_bucket_acl_link(bucket, headers={})
  # A bucket ACL link is an object ACL link with an empty key.
  get_acl_link(bucket, '', headers)
rescue
  on_exception
end
1008
+
1009
+ # Generates link for 'PutBucketACL'.
1010
+ #
1011
+ # s3.put_acl_link('my_awesome_bucket',key) #=> url string
1012
+ #
1013
def put_bucket_acl_link(bucket, acl_xml_doc, headers={})
  # put_acl_link's signature is (bucket, key='', headers={}) -- the old
  # code passed acl_xml_doc as a fourth argument, which raised
  # ArgumentError on every call. The ACL document is not part of the
  # signed link; acl_xml_doc stays in the signature for compatibility.
  put_acl_link(bucket, '', headers)
rescue
  on_exception
end
1018
+
1019
+ #-----------------------------------------------------------------
1020
+ # PARSERS:
1021
+ #-----------------------------------------------------------------
1022
+
1023
# Parses ListAllMyBuckets responses into an array of
# {:name, :creation_date, :owner_id, :owner_display_name} hashes.
class S3ListAllMyBucketsParser < RightAWSParser # :nodoc:
  def reset
    @result = []
    @owner  = {}
  end
  def tagstart(name, attributes)
    # Start collecting a fresh bucket entry.
    @current_bucket = {} if name == 'Bucket'
  end
  def tagend(name)
    if    name == 'ID'           then @owner[:owner_id]             = @text
    elsif name == 'DisplayName'  then @owner[:owner_display_name]   = @text
    elsif name == 'Name'         then @current_bucket[:name]          = @text
    elsif name == 'CreationDate' then @current_bucket[:creation_date] = @text
    elsif name == 'Bucket'
      # Owner info is shared by every bucket in the listing.
      @result << @current_bucket.merge(@owner)
    end
  end
end
1041
+
1042
# Parses ListBucket responses into a flat array of key hashes; each key
# carries the shared bucket-level info under :service.
class S3ListBucketParser < RightAWSParser # :nodoc:
  def reset
    @result      = []
    @service     = {}
    @current_key = {}
  end
  def tagstart(name, attributes)
    # Start collecting a fresh key entry.
    @current_key = {} if name == 'Contents'
  end
  def tagend(name)
    case name
    # bucket-level (service) info
    when 'Name'         then @service['name']         = @text
    when 'Prefix'       then @service['prefix']       = @text
    when 'Marker'       then @service['marker']       = @text
    when 'MaxKeys'      then @service['max-keys']     = @text
    when 'Delimiter'    then @service['delimiter']    = @text
    when 'IsTruncated'  then @service['is_truncated'] = !(@text =~ /false/)
    # per-key data
    when 'Key'          then @current_key[:key]                = @text
    when 'LastModified' then @current_key[:last_modified]      = @text
    when 'ETag'         then @current_key[:e_tag]              = @text
    when 'Size'         then @current_key[:size]               = @text.to_i
    when 'StorageClass' then @current_key[:storage_class]      = @text
    when 'ID'           then @current_key[:owner_id]           = @text
    when 'DisplayName'  then @current_key[:owner_display_name] = @text
    when 'Contents'
      # Attach the bucket-level info to the finished key entry.
      @current_key[:service] = @service
      @result << @current_key
    end
  end
end
1074
+
1075
# Parses ListBucket responses into a single hash:
# {:contents => [key hashes], :common_prefixes => [...], :name, :prefix,
#  :marker, :max_keys, :delimiter, :is_truncated, :next_marker}.
class S3ImprovedListBucketParser < RightAWSParser # :nodoc:
  def reset
    @result = {}
    @result[:contents] = []
    @result[:common_prefixes] = []
    @contents = []                   # NOTE(review): appears unused below
    @current_key = {}
    @common_prefixes = []
    @in_common_prefixes = false
  end
  def tagstart(name, attributes)
    # Start a fresh key entry; flag when we enter a CommonPrefixes section.
    @current_key = {} if name == 'Contents'
    @in_common_prefixes = true if name == 'CommonPrefixes'
  end
  def tagend(name)
    case name
    # service info
    when 'Name' then @result[:name] = @text
    # Amazon uses the same tag for the search prefix and for the entries
    # in common prefix...so use our simple flag to see which element
    # we are parsing
    when 'Prefix' then @in_common_prefixes ? @common_prefixes << @text : @result[:prefix] = @text
    when 'Marker' then @result[:marker] = @text
    when 'MaxKeys' then @result[:max_keys] = @text
    when 'Delimiter' then @result[:delimiter] = @text
    when 'IsTruncated' then @result[:is_truncated] = (@text =~ /false/ ? false : true)
    when 'NextMarker' then @result[:next_marker] = @text
    # key data
    when 'Key' then @current_key[:key] = @text
    when 'LastModified'then @current_key[:last_modified] = @text
    when 'ETag' then @current_key[:e_tag] = @text
    when 'Size' then @current_key[:size] = @text.to_i
    when 'StorageClass'then @current_key[:storage_class] = @text
    when 'ID' then @current_key[:owner_id] = @text
    when 'DisplayName' then @current_key[:owner_display_name] = @text
    when 'Contents' then @result[:contents] << @current_key
    # Common Prefix stuff
    when 'CommonPrefixes'
      # @common_prefixes accumulates across sections; re-assign each time.
      @result[:common_prefixes] = @common_prefixes
      @in_common_prefixes = false
    end
  end
end
1118
+
1119
# Extracts the LocationConstraint string from a bucket-location response
# (empty string for the classic US region).
class S3BucketLocationParser < RightAWSParser # :nodoc:
  def reset
    @result = ''
  end
  def tagend(name)
    return unless name == 'LocationConstraint'
    @result = @text
  end
end
1127
+
1128
# Parses an AccessControlPolicy document into
# {:owner => {:id, :display_name}, :grantees => [grantee hashes]}.
class S3AclParser < RightAWSParser # :nodoc:
  def reset
    @result = { :grantees => [], :owner => {} }
    @current_grantee = {}
  end
  def tagstart(name, attributes)
    # Keep the raw XML attributes (xsi:type etc.) with each grantee.
    @current_grantee = { :attributes => attributes } if name == 'Grantee'
  end
  def tagend(name)
    case name
    when 'ID'
      # <ID> appears both under <Owner> and under <Grantee>.
      if parsing_owner?
        @result[:owner][:id] = @text
      else
        @current_grantee[:id] = @text
      end
    when 'DisplayName'
      if parsing_owner?
        @result[:owner][:display_name] = @text
      else
        @current_grantee[:display_name] = @text
      end
    when 'URI'        then @current_grantee[:uri] = @text
    when 'Permission' then @current_grantee[:permissions] = @text
    when 'Grant'      then @result[:grantees] << @current_grantee
    end
  end

  private

  # True while inside the <Owner> element of the policy document.
  def parsing_owner?
    @xmlpath == 'AccessControlPolicy/Owner'
  end
end
1160
+
1161
# Parses a BucketLoggingStatus document into
# {:enabled, :targetbucket, :targetprefix}.
class S3LoggingParser < RightAWSParser # :nodoc:
  def reset
    @result = { :enabled => false, :targetbucket => '', :targetprefix => '' }
    @current_grantee = {}
  end
  def tagend(name)
    # Only tags inside <LoggingEnabled> matter; their presence means
    # logging is switched on.
    return unless @xmlpath == 'BucketLoggingStatus/LoggingEnabled'
    case name
    when 'TargetBucket'
      @result[:targetbucket] = @text
      @result[:enabled] = true
    when 'TargetPrefix'
      @result[:targetprefix] = @text
      @result[:enabled] = true
    end
  end
end
1182
+
1183
# Parses a CopyObjectResult into {:last_modified, :e_tag}.
class S3CopyParser < RightAWSParser # :nodoc:
  def reset
    @result = {}
  end
  def tagend(name)
    @result[:last_modified] = @text if name == 'LastModified'
    @result[:e_tag]         = @text if name == 'ETag'
  end
end
1194
+
1195
+ #-----------------------------------------------------------------
1196
+ # PARSERS: Non XML
1197
+ #-----------------------------------------------------------------
1198
+
1199
# Pass-through "parser" for non-XML responses; also offers a helper that
# flattens Net::HTTP's array-valued headers.
class S3HttpResponseParser # :nodoc:
  attr_reader :result
  def parse(response)
    @result = response
  end
  def headers_to_string(headers)
    headers.inject({}) do |flattened, (name, value)|
      # Net::HTTP#to_hash yields each header value as an array; unwrap
      # zero/one-element arrays, keep genuinely multi-valued headers.
      flattened[name] = (value.is_a?(Array) && value.size < 2) ? value.first : value
      flattened
    end
  end
end
1213
+
1214
# Wraps the raw response body and flattened headers into a hash.
class S3HttpResponseBodyParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = { :object  => response.body,
                :headers => headers_to_string(response.to_hash) }
  end
end
1222
+
1223
# For HEAD responses only the flattened headers matter.
class S3HttpResponseHeadParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = headers_to_string(response.to_hash)
  end
end
1228
+
1229
+ end
1230
+
1231
+ end