steamcannon-aws 2.3.26.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1237 @@
1
+ #
2
+ # Copyright (c) 2007-2008 RightScale Inc
3
+ #
4
+ # Permission is hereby granted, free of charge, to any person obtaining
5
+ # a copy of this software and associated documentation files (the
6
+ # "Software"), to deal in the Software without restriction, including
7
+ # without limitation the rights to use, copy, modify, merge, publish,
8
+ # distribute, sublicense, and/or sell copies of the Software, and to
9
+ # permit persons to whom the Software is furnished to do so, subject to
10
+ # the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be
13
+ # included in all copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19
+ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20
+ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
+ #
23
+
24
+ module Aws
25
+
26
+ class S3Interface < AwsBase
27
+
28
# Payloads at or above this size are sent with an 'Expect: 100-continue'
# header so the server can reject a bad request before the body is uploaded.
USE_100_CONTINUE_PUT_SIZE = 1_000_000

include AwsBaseInterface

# Default endpoint settings; overridable via the S3_URL environment
# variable or the params hash passed to #initialize.
DEFAULT_HOST = 's3.amazonaws.com'
DEFAULT_PORT = 443
DEFAULT_PROTOCOL = 'https'
DEFAULT_SERVICE = '/'
REQUEST_TTL = 30                          # seconds a signed request stays valid
DEFAULT_EXPIRES_AFTER = 1 * 24 * 60 * 60  # One day's worth of seconds
ONE_YEAR_IN_SECONDS = 365 * 24 * 60 * 60
AMAZON_HEADER_PREFIX = 'x-amz-'           # prefix of Amazon-specific headers
AMAZON_METADATA_PREFIX = 'x-amz-meta-'    # prefix of user-metadata headers

# Shared benchmarking block collecting service and XML-parsing timings.
@@bench = AwsBenchmarkingBlock.new
43
+
44
# Returns the benchmark accumulator for time spent parsing XML responses.
def self.bench_xml
  @@bench.xml
end
47
+
48
# Returns the benchmark accumulator for time spent talking to S3.
def self.bench_s3
  @@bench.service
end
51
+
52
+
53
+ # Creates new RightS3 instance.
54
+ #
55
+ # s3 = Aws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX','hgTHt68JY07JKUY08ftHYtERkjgtfERn57XXXXXX', {:multi_thread => true, :logger => Logger.new('/tmp/x.log')}) #=> #<Aws::S3Interface:0xb7b3c27c>
56
+ #
57
+ # Params is a hash:
58
+ #
59
+ # {:server => 's3.amazonaws.com' # Amazon service host: 's3.amazonaws.com'(default)
60
+ # :port => 443 # Amazon service port: 80 or 443(default)
61
+ # :protocol => 'https' # Amazon service protocol: 'http' or 'https'(default)
62
+ # :connection_mode => :default # options are
63
+ # :default (will use best known safe (as in won't need explicit close) option, may change in the future)
64
+ # :per_request (opens and closes a connection on every request)
65
+ # :single (one thread across entire app)
66
+ # :per_thread (one connection per thread)
67
+ # :logger => Logger Object} # Logger instance: logs to STDOUT if omitted }
68
+ #
69
# Creates a new S3 interface instance.
#
#  s3 = Aws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX', 'hgTH...XXXXXX',
#                            {:multi_thread => true, :logger => Logger.new('/tmp/x.log')})
#
# Endpoint defaults come from the S3_URL environment variable when set,
# otherwise from the DEFAULT_* constants; credentials fall back to the
# AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables.
def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
  # parse the endpoint override once instead of once per option
  endpoint = ENV['S3_URL'] ? URI.parse(ENV['S3_URL']) : nil
  init({:name             => 'S3',
        :default_host     => endpoint ? endpoint.host   : DEFAULT_HOST,
        :default_port     => endpoint ? endpoint.port   : DEFAULT_PORT,
        :default_service  => endpoint ? endpoint.path   : DEFAULT_SERVICE,
        :default_protocol => endpoint ? endpoint.scheme : DEFAULT_PROTOCOL},
       aws_access_key_id     || ENV['AWS_ACCESS_KEY_ID'],
       aws_secret_access_key || ENV['AWS_SECRET_ACCESS_KEY'],
       params)
end
79
+
80
+
81
# Closes the pooled S3 connection (if any) held by this interface.
def close_connection
  close_conn :s3_connection
end
84
+
85
+ #-----------------------------------------------------------------
86
+ # Requests
87
+ #-----------------------------------------------------------------
88
+ # Produces canonical string for signing.
89
# Builds the canonical string that is signed for S3 request authentication.
# Only date, content-md5, content-type and x-amz-* headers participate;
# the query string is stripped from +path+ unless it addresses one of the
# sub-resources Amazon includes in the signature (acl/torrent/location/logging).
def canonical_string(method, path, headers={}, expires=nil) # :nodoc:
  interesting = {}
  headers.each do |name, value|
    name = name.downcase
    next unless name[/^#{AMAZON_HEADER_PREFIX}|^content-md5$|^content-type$|^date$/o]
    # Net::HTTP#to_hash yields arrays of values; flatten and trim them
    interesting[name] = value.join("").strip
  end
  interesting['content-type'] ||= ''
  interesting['content-md5'] ||= ''
  # an explicit x-amz-date supersedes the date header in the signature
  interesting['date'] = '' if interesting.has_key? 'x-amz-date'
  interesting['date'] = expires if expires
  # HTTP verb first, then the headers sorted by name (amz headers keep their names)
  result = "#{method}\n"
  interesting.sort.each do |name, value|
    result << (name[/^#{AMAZON_HEADER_PREFIX}/o] ? "#{name}:#{value}\n" : "#{value}\n")
  end
  # drop everything after the question mark...
  result << path.gsub(/\?.*$/, '')
  # ...unless it is a signed sub-resource
  result << '?acl'      if path[/[&?]acl($|&|=)/]
  result << '?torrent'  if path[/[&?]torrent($|&|=)/]
  result << '?location' if path[/[&?]location($|&|=)/]
  result << '?logging'  if path[/[&?]logging($|&|=)/] # this one is beta, no support for now
  result
end
113
+
114
+ # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
115
# Checks whether +bucket_name+ can be used in virtual-host style
# addressing (3-63 chars, dot-separated labels of lowercase
# alphanumerics/hyphens that start and end alphanumeric).
# Returns true, or nil when the name is not DNS compatible.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
def is_dns_bucket?(bucket_name)
  name = bucket_name.to_s
  return nil unless (3..63) === name.size
  all_labels_ok = name.split('.').all? do |label|
    label[/^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/]
  end
  return nil unless all_labels_ok
  true
end
123
+
124
# Derives the target server, the request path and the path used for
# signing from headers[:url]. DNS-compatible bucket names are addressed
# as a virtual host ("bucket.s3.amazonaws.com"); others stay in the path.
# Returns [server, path, path_to_sign].
def fetch_request_params(headers) #:nodoc:
  server  = @params[:server]
  service = @params[:service].to_s
  service.chop! if service[%r{/$}] # remove trailing '/' from service
  # split the url into bucket name, key path and query string
  parts = headers[:url].to_s.match(%r{^([a-z0-9._-]*)(/[^?]*)?(\?.+)?}i)
  bucket_name, key_path, params_list = parts[1], parts[2], parts[3]
  if is_dns_bucket?(bucket_name)
    # the bucket moves into the host name
    server   = "#{bucket_name}.#{server}"
    key_path ||= '/'
    path = "#{service}#{key_path}#{params_list}"
  else
    path = "#{service}/#{bucket_name}#{key_path}#{params_list}"
  end
  # the signature is always computed over the path-style form
  path_to_sign = "#{service}/#{bucket_name}#{key_path}#{params_list}"
  [server, path, path_to_sign]
end
145
+
146
+ # Generates request hash for REST API.
147
+ # Assumes that headers[:url] is URL encoded (use CGI::escape)
148
# Builds and signs a Net::HTTP request hash for the REST API.
# Assumes headers[:url] is already URL encoded (use CGI::escape).
# Returns {:request, :server, :port, :protocol}.
def generate_rest_request(method, headers) # :nodoc:
  server, path, path_to_sign = fetch_request_params(headers)
  data = headers[:data]
  # drop unset (optional) values and the symbolic pseudo-headers (:url, :data)
  headers.each { |key, value| headers.delete(key) if value.nil? || key.is_a?(Symbol) }
  headers['content-type'] ||= ''
  headers['date'] = Time.now.httpdate
  # build the Net::HTTP request class matching the verb (Get/Put/...)
  request = "Net::HTTP::#{method.capitalize}".constantize.new(path)
  request.body = data if data
  # copy request headers and meta headers over
  headers.each { |key, value| request[key.to_s] = value }
  # sign the canonical form of the request
  auth_string = canonical_string(request.method, path_to_sign, request.to_hash)
  signature   = AwsUtils::sign(@aws_secret_access_key, auth_string)
  request['Authorization'] = "AWS #{@aws_access_key_id}:#{signature}"
  {:request  => request,
   :server   => server,
   :port     => @params[:port],
   :protocol => @params[:protocol]}
end
173
+
174
+ # Sends request to Amazon and parses the response.
175
+ # Raises AwsError if any banana happened.
176
# Sends the prepared request hash to Amazon and feeds the response
# through +parser+. Raises AwsError if anything goes wrong.
def request_info(request, parser, &block) # :nodoc:
  request_info2(request, parser, @params, :s3_connection, @logger, @@bench, &block)

end
180
+
181
+
182
+ # Returns an array of customer's buckets. Each item is a +hash+.
183
+ #
184
+ # s3.list_all_my_buckets #=>
185
+ # [{:owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
186
+ # :owner_display_name => "root",
187
+ # :name => "bucket_name",
188
+ # :creation_date => "2007-04-19T18:47:43.000Z"}, ..., {...}]
189
+ #
190
# Returns an array of the caller's buckets; each item is a hash with
# :owner_id, :owner_display_name, :name and :creation_date.
def list_all_my_buckets(headers={})
  request = generate_rest_request('GET', headers.merge(:url => ''))
  request_info(request, S3ListAllMyBucketsParser.new(:logger => @logger))
rescue
  on_exception
end
196
+
197
+ # Creates new bucket. Returns +true+ or an exception.
198
+ #
199
+ # # create a bucket at American server
200
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
201
+ # # create a bucket at European server
202
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
203
+ #
204
# Creates a new bucket. Returns +true+ or an exception.
#
#  s3.create_bucket('my-awesome-bucket-us')                  #=> true
#  s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
#
def create_bucket(bucket, headers={})
  data = nil
  unless headers[:location].blank?
    location = "#{headers[:location]}"
    location.upcase! if location == 'eu'
    data = "<CreateBucketConfiguration><LocationConstraint>#{location}</LocationConstraint></CreateBucketConfiguration>"
  end
  req_hash = generate_rest_request('PUT', headers.merge(:url => bucket, :data => data))
  request_info(req_hash, RightHttp2xxParser.new)
rescue => e
  # Rescue StandardError rather than Exception so that SignalException,
  # SystemExit and NoMemoryError are not swallowed (only Aws::AwsError
  # is ever inspected here anyway).
  # If the bucket exists AWS returns an error for the location constraint
  # interface. Drop it.
  e.is_a?(Aws::AwsError) && e.message.include?('BucketAlreadyOwnedByYou') ? true : on_exception
end
217
+
218
+ # Retrieve bucket location
219
+ #
220
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
221
+ # puts s3.bucket_location('my-awesome-bucket-us') #=> '' (Amazon's default value assumed)
222
+ #
223
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
224
+ # puts s3.bucket_location('my-awesome-bucket-eu') #=> 'EU'
225
+ #
226
# Retrieves a bucket's location constraint: '' for the US default
# region, 'EU' for European buckets.
def bucket_location(bucket, headers={})
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}?location"))
  request_info(request, S3BucketLocationParser.new)
rescue
  on_exception
end
232
+
233
+ # Retrieves the logging configuration for a bucket.
234
+ # Returns a hash of {:enabled, :targetbucket, :targetprefix}
235
+ #
236
+ # s3.interface.get_logging_parse(:bucket => "asset_bucket")
237
+ # => {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
238
+ #
239
+ #
240
# Retrieves the logging configuration for a bucket.
# Returns a hash of {:enabled, :targetbucket, :targetprefix}.
#
#  s3.get_logging_parse(:bucket => "asset_bucket")
#    #=> {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
#
def get_logging_parse(params)
  AwsUtils.mandatory_arguments([:bucket], params)
  AwsUtils.allow_only([:bucket, :headers], params)
  params[:headers] ||= {}
  request = generate_rest_request('GET', params[:headers].merge(:url => "#{params[:bucket]}?logging"))
  request_info(request, S3LoggingParser.new)
rescue
  on_exception
end
249
+
250
+ # Sets logging configuration for a bucket from the XML configuration document.
251
+ # params:
252
+ # :bucket
253
+ # :xmldoc
254
# Sets a bucket's logging configuration from an XML configuration document.
# params: :bucket (name), :xmldoc (BucketLoggingStatus XML).
def put_logging(params)
  AwsUtils.mandatory_arguments([:bucket, :xmldoc], params)
  AwsUtils.allow_only([:bucket, :xmldoc, :headers], params)
  params[:headers] ||= {}
  request = generate_rest_request('PUT', params[:headers].merge(:url  => "#{params[:bucket]}?logging",
                                                                :data => params[:xmldoc]))
  request_info(request, S3TrueParser.new)
rescue
  on_exception
end
263
+
264
+ # Deletes a bucket. The bucket must be empty! Returns +true+ or an exception.
265
+ #
266
+ # s3.delete_bucket('my_awesome_bucket') #=> true
267
+ #
268
+ # See also: force_delete_bucket method
269
+ #
270
# Deletes a bucket. The bucket must be empty! Returns +true+ or raises.
# See also: force_delete_bucket method.
def delete_bucket(bucket, headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => bucket))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
276
+
277
+ # Returns an array of bucket's keys. Each array item (key data) is a +hash+.
278
+ #
279
+ # s3.list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) #=>
280
+ # [{:key => "test1",
281
+ # :last_modified => "2007-05-18T07:00:59.000Z",
282
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
283
+ # :owner_display_name => "root",
284
+ # :e_tag => "000000000059075b964b07152d234b70",
285
+ # :storage_class => "STANDARD",
286
+ # :size => 3,
287
+ # :service=> {'is_truncated' => false,
288
+ # 'prefix' => "t",
289
+ # 'marker' => "",
290
+ # 'name' => "my_awesome_bucket",
291
+ # 'max-keys' => "5"}, ..., {...}]
292
+ #
293
# Returns an array of a bucket's keys; each item is a hash with
# :key, :last_modified, :owner_id, :owner_display_name, :e_tag, :size,
# :storage_class and :service. +options+ may carry 'prefix', 'marker',
# 'max-keys' and 'delimiter'.
def list_bucket(bucket, options={}, headers={})
  unless options.blank?
    query  = options.map { |name, value| "#{name}=#{CGI::escape value.to_s}" }.join('&')
    bucket += "?#{query}"
  end
  request = generate_rest_request('GET', headers.merge(:url => bucket))
  request_info(request, S3ListBucketParser.new(:logger => @logger))
rescue
  on_exception
end
300
+
301
+ # Incrementally list the contents of a bucket. Yields the following hash to a block:
302
+ # s3.incrementally_list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) yields
303
+ # {
304
+ # :name => 'bucketname',
305
+ # :prefix => 'subfolder/',
306
+ # :marker => 'fileN.jpg',
307
+ # :max_keys => 234,
308
+ # :delimiter => '/',
309
+ # :is_truncated => true,
310
+ # :next_marker => 'fileX.jpg',
311
+ # :contents => [
312
+ # { :key => "file1",
313
+ # :last_modified => "2007-05-18T07:00:59.000Z",
314
+ # :e_tag => "000000000059075b964b07152d234b70",
315
+ # :size => 3,
316
+ # :storage_class => "STANDARD",
317
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
318
+ # :owner_display_name => "root"
319
+ # }, { :key, ...}, ... {:key, ...}
320
+ # ]
321
+ # :common_prefixes => [
322
+ # "prefix1",
323
+ # "prefix2",
324
+ # ...,
325
+ # "prefixN"
326
+ # ]
327
+ # }
328
# Incrementally lists the contents of a bucket, issuing one request per
# page and yielding each parsed response hash (:name, :prefix, :marker,
# :max_keys, :delimiter, :is_truncated, :next_marker, :contents,
# :common_prefixes) to the block. Stops when the listing is no longer
# truncated or the caller's 'max-keys' budget is exhausted. Returns +true+.
def incrementally_list_bucket(bucket, options={}, headers={}, &block)
  internal_options = options.symbolize_keys
  begin
    # rebuild the query string each pass: the marker advances per page
    internal_bucket = bucket.dup
    internal_bucket += '?'+internal_options.map { |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&') unless internal_options.blank?
    req_hash = generate_rest_request('GET', headers.merge(:url=>internal_bucket))
    response = request_info(req_hash, S3ImprovedListBucketParser.new(:logger => @logger))
    there_are_more_keys = response[:is_truncated]
    if (there_are_more_keys)
      # continue after the last item seen and shrink the remaining budget
      internal_options[:marker] = decide_marker(response)
      total_results = response[:contents].length + response[:common_prefixes].length
      internal_options[:'max-keys'] ? (internal_options[:'max-keys'] -= total_results) : nil
    end
    yield response
  end while there_are_more_keys && under_max_keys(internal_options)
  true
rescue
  on_exception
end
347
+
348
+
349
private
# Chooses the 'marker' for the next incremental listing request:
# the response's explicit :next_marker when present, otherwise the
# greater of the last listed key and the last common prefix (nil when
# neither exists).
def decide_marker(response)
  return response[:next_marker].dup if response[:next_marker]
  # guard: :contents may be empty — the original `last[:key]` would
  # raise NoMethodError on nil here
  last_entry  = response[:contents].last
  last_key    = last_entry && last_entry[:key]
  last_prefix = response[:common_prefixes].last
  if (!last_key)
    return nil if (!last_prefix)
    last_prefix.dup
  elsif (!last_prefix)
    last_key.dup
  else
    last_key > last_prefix ? last_key.dup : last_prefix.dup
  end
end
363
+
364
# True while the caller's remaining 'max-keys' budget (if any) is positive;
# always true when no 'max-keys' option was given.
def under_max_keys(internal_options)
  remaining = internal_options[:'max-keys']
  remaining ? remaining > 0 : true
end
367
+
368
+ public
369
+ # Saves object to Amazon. Returns +true+ or an exception.
370
+ # Any header starting with AMAZON_METADATA_PREFIX is considered
371
+ # user metadata. It will be stored with the object and returned
372
+ # when you retrieve the object. The total size of the HTTP
373
+ # request, not including the body, must be less than 4 KB.
374
+ #
375
+ # s3.put('my_awesome_bucket', 'log/current/1.log', 'Ola-la!', 'x-amz-meta-family'=>'Woho556!') #=> true
376
+ #
377
+ # This method is capable of 'streaming' uploads; that is, it can upload
378
+ # data from a file or other IO object without first reading all the data
379
+ # into memory. This is most useful for large PUTs - it is difficult to read
380
+ # a 2 GB file entirely into memory before sending it to S3.
381
+ # To stream an upload, pass an object that responds to 'read' (like the read
382
+ # method of IO) and to either 'lstat' or 'size'. For files, this means
383
+ # streaming is enabled by simply making the call:
384
+ #
385
+ # s3.put(bucket_name, 'S3keyname.forthisfile', File.open('localfilename.dat'))
386
+ #
387
+ # If the IO object you wish to stream from responds to the read method but
388
+ # doesn't implement lstat or size, you can extend the object dynamically
389
+ # to implement these methods, or define your own class which defines these
390
+ # methods. Be sure that your class returns 'nil' from read() after having
391
+ # read 'size' bytes. Otherwise S3 will drop the socket after
392
+ # 'Content-Length' bytes have been uploaded, and HttpConnection will
393
+ # interpret this as an error.
394
+ #
395
+ # This method now supports very large PUTs, where very large
396
+ # is > 2 GB.
397
+ #
398
+ # For Win32 users: Files and IO objects should be opened in binary mode. If
399
+ # a text mode IO object is passed to PUT, it will be converted to binary
400
+ # mode.
401
+ #
402
+
403
# Saves an object to Amazon. Returns +true+ or an exception.
# Headers starting with AMAZON_METADATA_PREFIX are stored as user
# metadata. Supports streaming uploads: +data+ may be any object that
# responds to 'read' and to 'lstat' or 'size' (e.g. an open File), in
# which case the body is streamed rather than read into memory.
#
#  s3.put('my_awesome_bucket', 'log/current/1.log', 'Ola-la!', 'x-amz-meta-family'=>'Woho556!') #=> true
#
def put(bucket, key, data=nil, headers={})
  # On Windows a file opened in text mode must be reset to binary mode
  # for streaming to work properly.
  data.binmode if data.respond_to?(:binmode)
  data_size = if data.respond_to?(:lstat)
                data.lstat.size
              elsif data.respond_to?(:size)
                data.size
              else
                0
              end
  # large bodies use 100-continue so the server can reject early
  headers['expect'] = '100-continue' if data_size >= USE_100_CONTINUE_PUT_SIZE
  request = generate_rest_request('PUT', headers.merge(:url  => "#{bucket}/#{CGI::escape key}",
                                                       :data => data,
                                                       'Content-Length' => data_size.to_s))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
420
+
421
+
422
+ # New experimental API for uploading objects, introduced in Aws 1.8.1.
423
+ # store_object is similar in function to the older function put, but returns the full response metadata. It also allows for optional verification
424
+ # of object md5 checksums on upload. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
425
+ # The hash of the response headers contains useful information like the Amazon request ID and the object ETag (MD5 checksum).
426
+ #
427
+ # If the optional :md5 argument is provided, store_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
428
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
429
+ #
430
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
431
+ #
432
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
433
+ # => {"x-amz-id-2"=>"SVsnS2nfDaR+ixyJUlRKM8GndRyEMS16+oZRieamuL61pPxPaTuWrWtlYaEhYrI/",
434
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
435
+ # "date"=>"Mon, 29 Sep 2008 18:57:46 GMT",
436
+ # :verified_md5=>true,
437
+ # "x-amz-request-id"=>"63916465939995BA",
438
+ # "server"=>"AmazonS3",
439
+ # "content-length"=>"0"}
440
+ #
441
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :data => "polemonium" )
442
+ # => {"x-amz-id-2"=>"MAt9PLjgLX9UYJ5tV2fI/5dBZdpFjlzRVpWgBDpvZpl+V+gJFcBMW2L+LBstYpbR",
443
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
444
+ # "date"=>"Mon, 29 Sep 2008 18:58:56 GMT",
445
+ # :verified_md5=>false,
446
+ # "x-amz-request-id"=>"3B25A996BC2CDD3B",
447
+ # "server"=>"AmazonS3",
448
+ # "content-length"=>"0"}
449
+
450
# Stores an object and returns the full response metadata hash (request
# ID, ETag, etc.). Mandatory params: :bucket, :key, :data. Optional:
# :headers (extra request headers), :md5. When :md5 is given, the
# response's :verified_md5 entry reflects whether the returned ETag
# matches it; otherwise :verified_md5 is false.
def store_object(params)
  AwsUtils.allow_only([:bucket, :key, :data, :headers, :md5], params)
  AwsUtils.mandatory_arguments([:bucket, :key, :data], params)
  params[:headers] ||= {}

  data = params[:data]
  # On Windows a text-mode IO must be reset to binary mode for streaming
  data.binmode if data.respond_to?(:binmode)
  large_upload = (data.respond_to?(:lstat) && data.lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
                 (data.respond_to?(:size)  && data.size       >= USE_100_CONTINUE_PUT_SIZE)
  params[:headers]['expect'] = '100-continue' if large_upload

  url  = "#{params[:bucket]}/#{CGI::escape params[:key]}"
  resp = request_info(generate_rest_request('PUT', params[:headers].merge(:url => url, :data => data)),
                      S3HttpResponseHeadParser.new)
  resp[:verified_md5] = if params[:md5]
                          resp['etag'].gsub(/\"/, '') == params[:md5]
                        else
                          false
                        end
  resp
rescue
  on_exception
end
472
+
473
+ # Identical in function to store_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
474
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
475
+ # This call is implemented as a wrapper around store_object and the user may gain different semantics by creating a custom wrapper.
476
+ #
477
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
478
+ # => {"x-amz-id-2"=>"IZN3XsH4FlBU0+XYkFTfHwaiF1tNzrm6dIW2EM/cthKvl71nldfVC0oVQyydzWpb",
479
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
480
+ # "date"=>"Mon, 29 Sep 2008 18:38:32 GMT",
481
+ # :verified_md5=>true,
482
+ # "x-amz-request-id"=>"E8D7EA4FE00F5DF7",
483
+ # "server"=>"AmazonS3",
484
+ # "content-length"=>"0"}
485
+ #
486
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2953", :data => "polemonium" )
487
+ # Aws::AwsError: Uploaded object failed MD5 checksum verification: {"x-amz-id-2"=>"HTxVtd2bf7UHHDn+WzEH43MkEjFZ26xuYvUzbstkV6nrWvECRWQWFSx91z/bl03n",
488
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
489
+ # "date"=>"Mon, 29 Sep 2008 18:38:41 GMT",
490
+ # :verified_md5=>false,
491
+ # "x-amz-request-id"=>"0D7ADE09F42606F2",
492
+ # "server"=>"AmazonS3",
493
+ # "content-length"=>"0"}
494
# Like store_object, but requires a :md5 argument and raises AwsError
# unless the returned ETag matches it. On success returns the response
# metadata with :verified_md5 => true.
def store_object_and_verify(params)
  AwsUtils.mandatory_arguments([:md5], params)
  resp = store_object(params)
  raise AwsError.new("Uploaded object failed MD5 checksum verification: #{resp.inspect}") unless resp[:verified_md5]
  resp
end
499
+
500
+ # Retrieves object data from Amazon. Returns a +hash+ or an exception.
501
+ #
502
+ # s3.get('my_awesome_bucket', 'log/curent/1.log') #=>
503
+ #
504
+ # {:object => "Ola-la!",
505
+ # :headers => {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
506
+ # "content-type" => "",
507
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
508
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
509
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
510
+ # "x-amz-meta-family" => "Woho556!",
511
+ # "x-amz-request-id" => "0000000C246D770C",
512
+ # "server" => "AmazonS3",
513
+ # "content-length" => "7"}}
514
+ #
515
+ # If a block is provided, yields incrementally to the block as
516
+ # the response is read. For large responses, this function is ideal as
517
+ # the response can be 'streamed'. The hash containing header fields is
518
+ # still returned.
519
+ # Example:
520
+ # foo = File.new('./chunder.txt', File::CREAT|File::RDWR)
521
+ # rhdr = s3.get('aws-test', 'Cent5V1_7_1.img.part.00') do |chunk|
522
+ # foo.write(chunk)
523
+ # end
524
+ # foo.close
525
+ #
526
+
527
# Retrieves object data from Amazon; returns {:object, :headers}.
# When a block is given, the body is streamed to it in chunks instead of
# being accumulated in memory — ideal for large objects. The header hash
# is still returned.
def get(bucket, key, headers={}, &block)
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, S3HttpResponseBodyParser.new, &block)
rescue
  on_exception
end
533
+
534
+ # New experimental API for retrieving objects, introduced in Aws 1.8.1.
535
+ # retrieve_object is similar in function to the older function get. It allows for optional verification
536
+ # of object md5 checksums on retrieval. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
537
+ #
538
+ # If the optional :md5 argument is provided, retrieve_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
539
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
540
+ #
541
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
542
+ # Mandatory arguments:
543
+ # :bucket - the bucket in which the object is stored
544
+ # :key - the object address (or path) within the bucket
545
+ # Optional arguments:
546
+ # :headers - hash of additional HTTP headers to include with the request
547
+ # :md5 - MD5 checksum against which to verify the retrieved object
548
+ #
549
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo")
550
+ # => {:verified_md5=>false,
551
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
552
+ # "x-amz-id-2"=>"2Aj3TDz6HP5109qly//18uHZ2a1TNHGLns9hyAtq2ved7wmzEXDOPGRHOYEa3Qnp",
553
+ # "content-type"=>"",
554
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
555
+ # "date"=>"Tue, 30 Sep 2008 00:52:44 GMT",
556
+ # "x-amz-request-id"=>"EE4855DE27A2688C",
557
+ # "server"=>"AmazonS3",
558
+ # "content-length"=>"10"},
559
+ # :object=>"polemonium"}
560
+ #
561
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo", :md5=>'a507841b1bc8115094b00bbe8c1b2954')
562
+ # => {:verified_md5=>true,
563
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
564
+ # "x-amz-id-2"=>"mLWQcI+VuKVIdpTaPXEo84g0cz+vzmRLbj79TS8eFPfw19cGFOPxuLy4uGYVCvdH",
565
+ # "content-type"=>"", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
566
+ # "date"=>"Tue, 30 Sep 2008 00:53:08 GMT",
567
+ # "x-amz-request-id"=>"6E7F317356580599",
568
+ # "server"=>"AmazonS3",
569
+ # "content-length"=>"10"},
570
+ # :object=>"polemonium"}
571
+ # If a block is provided, yields incrementally to the block as
572
+ # the response is read. For large responses, this function is ideal as
573
+ # the response can be 'streamed'. The hash containing header fields is
574
+ # still returned.
575
# Retrieves an object with optional MD5 verification.
# Mandatory params: :bucket, :key. Optional: :headers, :md5.
# The returned hash carries :verified_md5, true only when :md5 was given
# and matches the returned ETag. Streams the body to a block when one is
# given, as with #get.
def retrieve_object(params, &block)
  AwsUtils.mandatory_arguments([:bucket, :key], params)
  AwsUtils.allow_only([:bucket, :key, :headers, :md5], params)
  params[:headers] ||= {}
  url  = "#{params[:bucket]}/#{CGI::escape params[:key]}"
  resp = request_info(generate_rest_request('GET', params[:headers].merge(:url => url)),
                      S3HttpResponseBodyParser.new, &block)
  resp[:verified_md5] = !!(params[:md5] && resp[:headers]['etag'].gsub(/\"/, '') == params[:md5])
  resp
rescue
  on_exception
end
589
+
590
+ # Identical in function to retrieve_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
591
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
592
+ # This call is implemented as a wrapper around retrieve_object and the user may gain different semantics by creating a custom wrapper.
593
# Like retrieve_object, but requires a :md5 argument and raises AwsError
# unless the returned ETag matches it. On success returns the response
# with :verified_md5 => true.
def retrieve_object_and_verify(params, &block)
  AwsUtils.mandatory_arguments([:md5], params)
  resp = retrieve_object(params, &block)
  raise AwsError.new("Retrieved object failed MD5 checksum verification: #{resp.inspect}") unless resp[:verified_md5]
  resp
end
599
+
600
+ # Retrieves object metadata. Returns a +hash+ of http_response_headers.
601
+ #
602
+ # s3.head('my_awesome_bucket', 'log/curent/1.log') #=>
603
+ # {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
604
+ # "content-type" => "",
605
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
606
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
607
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
608
+ # "x-amz-meta-family" => "Woho556!",
609
+ # "x-amz-request-id" => "0000000C246D770C",
610
+ # "server" => "AmazonS3",
611
+ # "content-length" => "7"}
612
+ #
613
# Retrieves object metadata as a hash of HTTP response headers
# (last-modified, etag, content-length, x-amz-meta-*, ...).
def head(bucket, key, headers={})
  request = generate_rest_request('HEAD', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, S3HttpResponseHeadParser.new)
rescue
  on_exception
end
619
+
620
+ # Deletes key. Returns +true+ or an exception.
621
+ #
622
+ # s3.delete('my_awesome_bucket', 'log/curent/1.log') #=> true
623
+ #
624
# Deletes a key from a bucket. Returns +true+ or an exception.
def delete(bucket, key='', headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
630
+
631
+ # Copy an object.
632
+ # directive: :copy - copy meta-headers from source (default value)
633
+ # :replace - replace meta-headers by passed ones
634
+ #
635
+ # # copy a key with meta-headers
636
+ # s3.copy('b1', 'key1', 'b1', 'key1_copy') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:25:22.000Z"}
637
+ #
638
+ # # copy a key, overwrite meta-headers
639
+ # s3.copy('b1', 'key2', 'b1', 'key2_copy', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:26:22.000Z"}
640
+ #
641
+ # see: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingCopyingObjects.html
642
+ # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
643
+ #
644
# Copies an object. +directive+ is :copy (carry the source's
# meta-headers over, default) or :replace (use the passed headers).
# Returns {:e_tag, :last_modified}.
# see: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingCopyingObjects.html
#      http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
def copy(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  dest_key ||= src_key
  headers['x-amz-metadata-directive'] = directive.to_s.upcase
  headers['x-amz-copy-source']        = "#{src_bucket}/#{CGI::escape src_key}"
  request = generate_rest_request('PUT', headers.merge(:url => "#{dest_bucket}/#{CGI::escape dest_key}"))
  request_info(request, S3CopyParser.new)
rescue
  on_exception
end
653
+
654
+ # Move an object.
655
+ # directive: :copy - copy meta-headers from source (default value)
656
+ # :replace - replace meta-headers by passed ones
657
+ #
658
+ # # move bucket1/key1 to bucket1/key2
659
+ # s3.move('bucket1', 'key1', 'bucket1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:27:22.000Z"}
660
+ #
661
+ # # move bucket1/key1 to bucket2/key2 with new meta-headers assignment
662
+ # s3.copy('bucket1', 'key1', 'bucket2', 'key2', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:28:22.000Z"}
663
+ #
664
# Moves an object: copies it (see #copy for +directive+), then deletes
# the source unless source and destination are identical.
# Returns the copy result ({:e_tag, :last_modified}).
def move(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  result = copy(src_bucket, src_key, dest_bucket, dest_key, directive, headers)
  # keep the source when it is also the destination
  delete(src_bucket, src_key) unless src_bucket == dest_bucket && src_key == dest_key
  result
end
670
+
671
+ # Rename an object.
672
+ #
673
+ # # rename bucket1/key1 to bucket1/key2
674
+ # s3.rename('bucket1', 'key1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:29:22.000Z"}
675
+ #
676
# Renames an object within its bucket (implemented as copy + delete via #move).
def rename(src_bucket, src_key, dest_key, headers={})
  move(src_bucket, src_key, src_bucket, dest_key, :copy, headers)
end
679
+
680
+ # Retrieves the ACL (access control policy) for a bucket or object. Returns a hash of headers and xml doc with ACL data. See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html.
681
+ #
682
+ # s3.get_acl('my_awesome_bucket', 'log/curent/1.log') #=>
683
+ # {:headers => {"x-amz-id-2"=>"B3BdDMDUz+phFF2mGBH04E46ZD4Qb9HF5PoPHqDRWBv+NVGeA3TOQ3BkVvPBjgxX",
684
+ # "content-type"=>"application/xml;charset=ISO-8859-1",
685
+ # "date"=>"Wed, 23 May 2007 09:40:16 GMT",
686
+ # "x-amz-request-id"=>"B183FA7AB5FBB4DD",
687
+ # "server"=>"AmazonS3",
688
+ # "transfer-encoding"=>"chunked"},
689
+ # :object => "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Owner>
690
+ # <ID>16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Owner>
691
+ # <AccessControlList><Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>
692
+ # 16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Grantee>
693
+ # <Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>" }
694
+ #
695
+ def get_acl(bucket, key='', headers={})
696
+ key = key.blank? ? '' : "/#{CGI::escape key}"
697
+ req_hash = generate_rest_request('GET', headers.merge(:url=>"#{bucket}#{key}?acl"))
698
+ request_info(req_hash, S3HttpResponseBodyParser.new)
699
+ rescue
700
+ on_exception
701
+ end
702
+
703
    # Retrieves the ACL (access control policy) for a bucket or object
    # and parses it into a hash of {:owner, :grantees}.
    #
    #  s3.get_acl_parse('my_awesome_bucket', 'log/curent/1.log') #=>
    #
    #  { :grantees=>
    #    { "16...2a"=>
    #      { :display_name=>"root",
    #        :permissions=>["FULL_CONTROL"],
    #        :attributes=>
    #         { "xsi:type"=>"CanonicalUser",
    #           "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}},
    #     "http://acs.amazonaws.com/groups/global/AllUsers"=>
    #       { :display_name=>"AllUsers",
    #         :permissions=>["READ"],
    #         :attributes=>
    #          { "xsi:type"=>"Group",
    #            "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}}},
    #   :owner=>
    #    { :id=>"16..2a",
    #      :display_name=>"root"}}
    #
    def get_acl_parse(bucket, key='', headers={})
      # A blank key addresses the bucket's own ACL.
      key = key.blank? ? '' : "/#{CGI::escape key}"
      req_hash = generate_rest_request('GET', headers.merge(:url=>"#{bucket}#{key}?acl"))
      acl = request_info(req_hash, S3AclParser.new(:logger => @logger))
      result = {}
      result[:owner] = acl[:owner]
      result[:grantees] = {}
      # Collapse the flat list of <Grant> entries into one hash entry per
      # grantee, keyed by canonical user id (CanonicalUser) or group URI (Group).
      acl[:grantees].each do |grantee|
        key = grantee[:id] || grantee[:uri]
        if result[:grantees].key?(key)
          # Grantee already seen: accumulate this grant's permission string.
          result[:grantees][key][:permissions] << grantee[:permissions]
        else
          # First grant for this grantee. Group grantees have no DisplayName,
          # so fall back to the last path segment of the URI
          # (e.g. '.../global/AllUsers' => 'AllUsers').
          result[:grantees][key] =
              {:display_name => grantee[:display_name] || grantee[:uri].to_s[/[^\/]*$/],
               :permissions => grantee[:permissions].lines.to_a,
               :attributes => grantee[:attributes]}
        end
      end
      result
    rescue
      on_exception
    end
747
+
748
+ # Sets the ACL on a bucket or object.
749
+ def put_acl(bucket, key, acl_xml_doc, headers={})
750
+ key = key.blank? ? '' : "/#{CGI::escape key}"
751
+ req_hash = generate_rest_request('PUT', headers.merge(:url=>"#{bucket}#{key}?acl", :data=>acl_xml_doc))
752
+ request_info(req_hash, S3HttpResponseBodyParser.new)
753
+ rescue
754
+ on_exception
755
+ end
756
+
757
+ # Retieves the ACL (access control policy) for a bucket. Returns a hash of headers and xml doc with ACL data.
758
+ def get_bucket_acl(bucket, headers={})
759
+ return get_acl(bucket, '', headers)
760
+ rescue
761
+ on_exception
762
+ end
763
+
764
+ # Sets the ACL on a bucket only.
765
+ def put_bucket_acl(bucket, acl_xml_doc, headers={})
766
+ return put_acl(bucket, '', acl_xml_doc, headers)
767
+ rescue
768
+ on_exception
769
+ end
770
+
771
+
772
+ # Removes all keys from bucket. Returns +true+ or an exception.
773
+ #
774
+ # s3.clear_bucket('my_awesome_bucket') #=> true
775
+ #
776
+ def clear_bucket(bucket)
777
+ incrementally_list_bucket(bucket) do |results|
778
+ results[:contents].each { |key| delete(bucket, key[:key]) }
779
+ end
780
+ true
781
+ rescue
782
+ on_exception
783
+ end
784
+
785
+ # Deletes all keys in bucket then deletes bucket. Returns +true+ or an exception.
786
+ #
787
+ # s3.force_delete_bucket('my_awesome_bucket')
788
+ #
789
+ def force_delete_bucket(bucket)
790
+ clear_bucket(bucket)
791
+ delete_bucket(bucket)
792
+ rescue
793
+ on_exception
794
+ end
795
+
796
+ # Deletes all keys where the 'folder_key' may be assumed as 'folder' name. Returns an array of string keys that have been deleted.
797
+ #
798
+ # s3.list_bucket('my_awesome_bucket').map{|key_data| key_data[:key]} #=> ['test','test/2/34','test/3','test1','test1/logs']
799
+ # s3.delete_folder('my_awesome_bucket','test') #=> ['test','test/2/34','test/3']
800
+ #
801
+ def delete_folder(bucket, folder_key, separator='/')
802
+ folder_key.chomp!(separator)
803
+ allkeys = []
804
+ incrementally_list_bucket(bucket, {'prefix' => folder_key}) do |results|
805
+ keys = results[:contents].map { |s3_key| s3_key[:key][/^#{folder_key}($|#{separator}.*)/] ? s3_key[:key] : nil }.compact
806
+ keys.each { |key| delete(bucket, key) }
807
+ allkeys << keys
808
+ end
809
+ allkeys
810
+ rescue
811
+ on_exception
812
+ end
813
+
814
+ # Retrieves object data only (headers are omitted). Returns +string+ or an exception.
815
+ #
816
+ # s3.get('my_awesome_bucket', 'log/curent/1.log') #=> 'Ola-la!'
817
+ #
818
+ def get_object(bucket, key, headers={})
819
+ get(bucket, key, headers)[:object]
820
+ rescue
821
+ on_exception
822
+ end
823
+
824
+ #-----------------------------------------------------------------
825
+ # Query API: Links
826
+ #-----------------------------------------------------------------
827
+
828
+ # Generates link for QUERY API
829
+ def generate_link(method, headers={}, expires=nil) #:nodoc:
830
+ # calculate request data
831
+ server, path, path_to_sign = fetch_request_params(headers)
832
+ # expiration time
833
+ expires ||= DEFAULT_EXPIRES_AFTER
834
+ expires = Time.now.utc + expires if expires.is_a?(Fixnum) && (expires < ONE_YEAR_IN_SECONDS)
835
+ expires = expires.to_i
836
+ # remove unset(==optional) and symbolyc keys
837
+ headers.each { |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
838
+ #generate auth strings
839
+ auth_string = canonical_string(method, path_to_sign, headers, expires)
840
+ signature = CGI::escape(Base64.encode64(OpenSSL::HMAC.digest(OpenSSL::Digest::Digest.new("sha1"), @aws_secret_access_key, auth_string)).strip)
841
+ # path building
842
+ addon = "Signature=#{signature}&Expires=#{expires}&AWSAccessKeyId=#{@aws_access_key_id}"
843
+ path += path[/\?/] ? "&#{addon}" : "?#{addon}"
844
+ "#{@params[:protocol]}://#{server}:#{@params[:port]}#{path}"
845
+ rescue
846
+ on_exception
847
+ end
848
+
849
+ # Generates link for 'ListAllMyBuckets'.
850
+ #
851
+ # s3.list_all_my_buckets_link #=> url string
852
+ #
853
+ def list_all_my_buckets_link(expires=nil, headers={})
854
+ generate_link('GET', headers.merge(:url=>''), expires)
855
+ rescue
856
+ on_exception
857
+ end
858
+
859
+ # Generates link for 'CreateBucket'.
860
+ #
861
+ # s3.create_bucket_link('my_awesome_bucket') #=> url string
862
+ #
863
+ def create_bucket_link(bucket, expires=nil, headers={})
864
+ generate_link('PUT', headers.merge(:url=>bucket), expires)
865
+ rescue
866
+ on_exception
867
+ end
868
+
869
+ # Generates link for 'DeleteBucket'.
870
+ #
871
+ # s3.delete_bucket_link('my_awesome_bucket') #=> url string
872
+ #
873
+ def delete_bucket_link(bucket, expires=nil, headers={})
874
+ generate_link('DELETE', headers.merge(:url=>bucket), expires)
875
+ rescue
876
+ on_exception
877
+ end
878
+
879
+ # Generates link for 'ListBucket'.
880
+ #
881
+ # s3.list_bucket_link('my_awesome_bucket') #=> url string
882
+ #
883
+ def list_bucket_link(bucket, options=nil, expires=nil, headers={})
884
+ bucket += '?' + options.map { |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&') unless options.blank?
885
+ generate_link('GET', headers.merge(:url=>bucket), expires)
886
+ rescue
887
+ on_exception
888
+ end
889
+
890
+ # Generates link for 'PutObject'.
891
+ #
892
+ # s3.put_link('my_awesome_bucket',key, object) #=> url string
893
+ #
894
+ def put_link(bucket, key, data=nil, expires=nil, headers={})
895
+ generate_link('PUT', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}", :data=>data), expires)
896
+ rescue
897
+ on_exception
898
+ end
899
+
900
+ # Generates link for 'GetObject'.
901
+ #
902
+ # if a bucket comply with virtual hosting naming then retuns a link with the
903
+ # bucket as a part of host name:
904
+ #
905
+ # s3.get_link('my-awesome-bucket',key) #=> https://my-awesome-bucket.s3.amazonaws.com:443/asia%2Fcustomers?Signature=nh7...
906
+ #
907
+ # otherwise returns an old style link (the bucket is a part of path):
908
+ #
909
+ # s3.get_link('my_awesome_bucket',key) #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?Signature=QAO...
910
+ #
911
+ # see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html
912
+ def get_link(bucket, key, expires=nil, headers={})
913
+ generate_link('GET', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}"), expires)
914
+ rescue
915
+ on_exception
916
+ end
917
+
918
+ # Generates link for 'HeadObject'.
919
+ #
920
+ # s3.head_link('my_awesome_bucket',key) #=> url string
921
+ #
922
+ def head_link(bucket, key, expires=nil, headers={})
923
+ generate_link('HEAD', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}"), expires)
924
+ rescue
925
+ on_exception
926
+ end
927
+
928
+ # Generates link for 'DeleteObject'.
929
+ #
930
+ # s3.delete_link('my_awesome_bucket',key) #=> url string
931
+ #
932
+ def delete_link(bucket, key, expires=nil, headers={})
933
+ generate_link('DELETE', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}"), expires)
934
+ rescue
935
+ on_exception
936
+ end
937
+
938
+
939
+ # Generates link for 'GetACL'.
940
+ #
941
+ # s3.get_acl_link('my_awesome_bucket',key) #=> url string
942
+ #
943
+ def get_acl_link(bucket, key='', headers={})
944
+ return generate_link('GET', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}?acl"))
945
+ rescue
946
+ on_exception
947
+ end
948
+
949
+ # Generates link for 'PutACL'.
950
+ #
951
+ # s3.put_acl_link('my_awesome_bucket',key) #=> url string
952
+ #
953
+ def put_acl_link(bucket, key='', headers={})
954
+ return generate_link('PUT', headers.merge(:url=>"#{bucket}/#{AwsUtils::URLencode key}?acl"))
955
+ rescue
956
+ on_exception
957
+ end
958
+
959
+ # Generates link for 'GetBucketACL'.
960
+ #
961
+ # s3.get_acl_link('my_awesome_bucket',key) #=> url string
962
+ #
963
+ def get_bucket_acl_link(bucket, headers={})
964
+ return get_acl_link(bucket, '', headers)
965
+ rescue
966
+ on_exception
967
+ end
968
+
969
+ # Generates link for 'PutBucketACL'.
970
+ #
971
+ # s3.put_acl_link('my_awesome_bucket',key) #=> url string
972
+ #
973
+ def put_bucket_acl_link(bucket, acl_xml_doc, headers={})
974
+ return put_acl_link(bucket, '', acl_xml_doc, headers)
975
+ rescue
976
+ on_exception
977
+ end
978
+
979
+ #-----------------------------------------------------------------
980
+ # PARSERS:
981
+ #-----------------------------------------------------------------
982
+
983
+ class S3ListAllMyBucketsParser < AwsParser # :nodoc:
984
+ def reset
985
+ @result = []
986
+ @owner = {}
987
+ end
988
+
989
+ def tagstart(name, attributes)
990
+ @current_bucket = {} if name == 'Bucket'
991
+ end
992
+
993
+ def tagend(name)
994
+ case name
995
+ when 'ID';
996
+ @owner[:owner_id] = @text
997
+ when 'DisplayName';
998
+ @owner[:owner_display_name] = @text
999
+ when 'Name';
1000
+ @current_bucket[:name] = @text
1001
+ when 'CreationDate';
1002
+ @current_bucket[:creation_date] = @text
1003
+ when 'Bucket';
1004
+ @result << @current_bucket.merge(@owner)
1005
+ end
1006
+ end
1007
+ end
1008
+
1009
+ class S3ListBucketParser < AwsParser # :nodoc:
1010
+ def reset
1011
+ @result = []
1012
+ @service = {}
1013
+ @current_key = {}
1014
+ end
1015
+
1016
+ def tagstart(name, attributes)
1017
+ @current_key = {} if name == 'Contents'
1018
+ end
1019
+
1020
+ def tagend(name)
1021
+ case name
1022
+ # service info
1023
+ when 'Name';
1024
+ @service['name'] = @text
1025
+ when 'Prefix';
1026
+ @service['prefix'] = @text
1027
+ when 'Marker';
1028
+ @service['marker'] = @text
1029
+ when 'MaxKeys';
1030
+ @service['max-keys'] = @text
1031
+ when 'Delimiter';
1032
+ @service['delimiter'] = @text
1033
+ when 'IsTruncated';
1034
+ @service['is_truncated'] = (@text =~ /false/ ? false : true)
1035
+ # key data
1036
+ when 'Key';
1037
+ @current_key[:key] = @text
1038
+ when 'LastModified';
1039
+ @current_key[:last_modified] = @text
1040
+ when 'ETag';
1041
+ @current_key[:e_tag] = @text
1042
+ when 'Size';
1043
+ @current_key[:size] = @text.to_i
1044
+ when 'StorageClass';
1045
+ @current_key[:storage_class] = @text
1046
+ when 'ID';
1047
+ @current_key[:owner_id] = @text
1048
+ when 'DisplayName';
1049
+ @current_key[:owner_display_name] = @text
1050
+ when 'Contents';
1051
+ @current_key[:service] = @service; @result << @current_key
1052
+ end
1053
+ end
1054
+ end
1055
+
1056
    # Parses a ListBucket response into a single hash:
    #   {:contents => [key hashes], :common_prefixes => [strings],
    #    :name, :prefix, :marker, :max_keys, :delimiter, :is_truncated, :next_marker}
    class S3ImprovedListBucketParser < AwsParser # :nodoc:
      def reset
        @result = {}
        @result[:contents] = []
        @result[:common_prefixes] = []
        @contents = []
        @current_key = {}
        @common_prefixes = []
        # Tracks whether we are inside a <CommonPrefixes> element; needed to
        # disambiguate the <Prefix> tag (see tagend).
        @in_common_prefixes = false
      end

      def tagstart(name, attributes)
        @current_key = {} if name == 'Contents'
        @in_common_prefixes = true if name == 'CommonPrefixes'
      end

      def tagend(name)
        case name
        # service info
        when 'Name';
          @result[:name] = @text
        # Amazon uses the same tag for the search prefix and for the entries
        # in common prefix...so use our simple flag to see which element
        # we are parsing
        when 'Prefix';
          @in_common_prefixes ? @common_prefixes << @text : @result[:prefix] = @text
        when 'Marker';
          @result[:marker] = @text
        when 'MaxKeys';
          @result[:max_keys] = @text
        when 'Delimiter';
          @result[:delimiter] = @text
        when 'IsTruncated';
          # Anything except a literal 'false' is treated as truncated.
          @result[:is_truncated] = (@text =~ /false/ ? false : true)
        when 'NextMarker';
          @result[:next_marker] = @text
        # key data
        when 'Key';
          @current_key[:key] = @text
        when 'LastModified';
          @current_key[:last_modified] = @text
        when 'ETag';
          @current_key[:e_tag] = @text
        when 'Size';
          @current_key[:size] = @text.to_i
        when 'StorageClass';
          @current_key[:storage_class] = @text
        when 'ID';
          @current_key[:owner_id] = @text
        when 'DisplayName';
          @current_key[:owner_display_name] = @text
        when 'Contents';
          # A </Contents> closes one key entry; flush it into the result.
          @result[:contents] << @current_key
        # Common Prefix stuff
        when 'CommonPrefixes';
          @result[:common_prefixes] = @common_prefixes; @in_common_prefixes = false
        end
      end
    end
1115
+
1116
+ class S3BucketLocationParser < AwsParser # :nodoc:
1117
+ def reset
1118
+ @result = ''
1119
+ end
1120
+
1121
+ def tagend(name)
1122
+ @result = @text if name == 'LocationConstraint'
1123
+ end
1124
+ end
1125
+
1126
    # Parses an AccessControlPolicy document into
    #   {:owner => {:id, :display_name},
    #    :grantees => [{:id/:uri, :display_name, :permissions, :attributes}, ...]}
    # One grantee hash is emitted per <Grant> element.
    class S3AclParser < AwsParser # :nodoc:
      def reset
        @result = {:grantees=>[], :owner=>{}}
        @current_grantee = {}
      end

      def tagstart(name, attributes)
        # Keep the Grantee element's XML attributes (xsi:type etc.) so callers
        # can tell CanonicalUser grantees from Group grantees.
        @current_grantee = {:attributes => attributes} if name=='Grantee'
      end

      def tagend(name)
        case name
        # <ID> and <DisplayName> appear both under Owner and under Grantee;
        # the current @xmlpath decides which one we are closing.
        when 'ID'
          if @xmlpath == 'AccessControlPolicy/Owner'
            @result[:owner][:id] = @text
          else
            @current_grantee[:id] = @text
          end
        when 'DisplayName'
          if @xmlpath == 'AccessControlPolicy/Owner'
            @result[:owner][:display_name] = @text
          else
            @current_grantee[:display_name] = @text
          end
        when 'URI'
          # Group grantees are identified by URI instead of canonical id.
          @current_grantee[:uri] = @text
        when 'Permission'
          @current_grantee[:permissions] = @text
        when 'Grant'
          # A </Grant> completes one grantee entry.
          @result[:grantees] << @current_grantee
        end
      end
    end
1160
+
1161
+ class S3LoggingParser < AwsParser # :nodoc:
1162
+ def reset
1163
+ @result = {:enabled => false, :targetbucket => '', :targetprefix => ''}
1164
+ @current_grantee = {}
1165
+ end
1166
+
1167
+ def tagend(name)
1168
+ case name
1169
+ # service info
1170
+ when 'TargetBucket'
1171
+ if @xmlpath == 'BucketLoggingStatus/LoggingEnabled'
1172
+ @result[:targetbucket] = @text
1173
+ @result[:enabled] = true
1174
+ end
1175
+ when 'TargetPrefix'
1176
+ if @xmlpath == 'BucketLoggingStatus/LoggingEnabled'
1177
+ @result[:targetprefix] = @text
1178
+ @result[:enabled] = true
1179
+ end
1180
+ end
1181
+ end
1182
+ end
1183
+
1184
+ class S3CopyParser < AwsParser # :nodoc:
1185
+ def reset
1186
+ @result = {}
1187
+ end
1188
+
1189
+ def tagend(name)
1190
+ case name
1191
+ when 'LastModified' then
1192
+ @result[:last_modified] = @text
1193
+ when 'ETag' then
1194
+ @result[:e_tag] = @text
1195
+ end
1196
+ end
1197
+ end
1198
+
1199
+ #-----------------------------------------------------------------
1200
+ # PARSERS: Non XML
1201
+ #-----------------------------------------------------------------
1202
+
1203
+ class S3HttpResponseParser # :nodoc:
1204
+ attr_reader :result
1205
+
1206
+ def parse(response)
1207
+ @result = response
1208
+ end
1209
+
1210
+ def headers_to_string(headers)
1211
+ result = {}
1212
+ headers.each do |key, value|
1213
+ value = value[0] if value.is_a?(Array) && value.size<2
1214
+ result[key] = value
1215
+ end
1216
+ result
1217
+ end
1218
+ end
1219
+
1220
+ class S3HttpResponseBodyParser < S3HttpResponseParser # :nodoc:
1221
+ def parse(response)
1222
+ @result = {
1223
+ :object => response.body,
1224
+ :headers => headers_to_string(response.to_hash)
1225
+ }
1226
+ end
1227
+ end
1228
+
1229
+ class S3HttpResponseHeadParser < S3HttpResponseParser # :nodoc:
1230
+ def parse(response)
1231
+ @result = headers_to_string(response.to_hash)
1232
+ end
1233
+ end
1234
+
1235
+ end
1236
+
1237
+ end