hackerdude-aws 2.3.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1235 @@
1
+ #
2
+ # Copyright (c) 2007-2008 RightScale Inc
3
+ #
4
+ # Permission is hereby granted, free of charge, to any person obtaining
5
+ # a copy of this software and associated documentation files (the
6
+ # "Software"), to deal in the Software without restriction, including
7
+ # without limitation the rights to use, copy, modify, merge, publish,
8
+ # distribute, sublicense, and/or sell copies of the Software, and to
9
+ # permit persons to whom the Software is furnished to do so, subject to
10
+ # the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be
13
+ # included in all copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19
+ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20
+ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
+ #
23
+
24
+ module Aws
25
+
26
+ class S3Interface < AwsBase
27
+
28
+ USE_100_CONTINUE_PUT_SIZE = 1_000_000
29
+
30
+ include AwsBaseInterface
31
+
32
+ DEFAULT_HOST = 's3.amazonaws.com'
33
+ DEFAULT_PORT = 443
34
+ DEFAULT_PROTOCOL = 'https'
35
+ DEFAULT_SERVICE = '/'
36
+ REQUEST_TTL = 30
37
+ DEFAULT_EXPIRES_AFTER = 1 * 24 * 60 * 60 # One day's worth of seconds
38
+ ONE_YEAR_IN_SECONDS = 365 * 24 * 60 * 60
39
+ AMAZON_HEADER_PREFIX = 'x-amz-'
40
+ AMAZON_METADATA_PREFIX = 'x-amz-meta-'
41
+
42
+ @@bench = AwsBenchmarkingBlock.new
43
+
44
# Returns the benchmarking set that accumulates time spent parsing XML responses.
def self.bench_xml
  @@bench.xml
end
47
+
48
# Returns the benchmarking set that accumulates time spent talking to S3.
def self.bench_s3
  @@bench.service
end
51
+
52
+
53
+ # Creates new RightS3 instance.
54
+ #
55
+ # s3 = Aws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX','hgTHt68JY07JKUY08ftHYtERkjgtfERn57XXXXXX', {:multi_thread => true, :logger => Logger.new('/tmp/x.log')}) #=> #<Aws::S3Interface:0xb7b3c27c>
56
+ #
57
+ # Params is a hash:
58
+ #
59
+ # {:server => 's3.amazonaws.com' # Amazon service host: 's3.amazonaws.com'(default)
60
+ # :port => 443 # Amazon service port: 80 or 443(default)
61
+ # :protocol => 'https' # Amazon service protocol: 'http' or 'https'(default)
62
+ # :connection_mode => :default # options are
63
+ # :default (will use best known safe (as in won't need explicit close) option, may change in the future)
64
+ # :per_request (opens and closes a connection on every request)
65
+ # :single (one thread across entire app)
66
+ # :per_thread (one connection per thread)
67
+ # :logger => Logger Object} # Logger instance: logs to STDOUT if omitted }
68
+ #
69
def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
  # The S3 endpoint may be overridden via the S3_URL environment variable.
  # Parse it once up front instead of re-parsing it for every component.
  s3_url = ENV['S3_URL'] && URI.parse(ENV['S3_URL'])
  init({:name             => 'S3',
        :default_host     => s3_url ? s3_url.host   : DEFAULT_HOST,
        :default_port     => s3_url ? s3_url.port   : DEFAULT_PORT,
        :default_service  => s3_url ? s3_url.path   : DEFAULT_SERVICE,
        :default_protocol => s3_url ? s3_url.scheme : DEFAULT_PROTOCOL},
       aws_access_key_id     || ENV['AWS_ACCESS_KEY_ID'],
       aws_secret_access_key || ENV['AWS_SECRET_ACCESS_KEY'],
       params)
end
79
+
80
+
81
# Closes the cached S3 HTTP connection, if one is open.
def close_connection
  close_conn(:s3_connection)
end
84
+
85
+ #-----------------------------------------------------------------
86
+ # Requests
87
+ #-----------------------------------------------------------------
88
+ # Produces canonical string for signing.
89
# Builds the canonical string that is signed for S3 REST authentication.
# Only 'content-md5', 'content-type', 'date' and 'x-amz-*' headers take part
# in the signature; sub-resources (?acl, ?torrent, ?location, ?logging) are
# appended, any other query string is dropped.
def canonical_string(method, path, headers={}, expires=nil) # :nodoc:
  s3_headers = {}
  headers.each do |key, value|
    key = key.downcase
    # Net::HTTP stores header values as arrays of strings; re-join them here.
    s3_headers[key] = value.join("").strip if key[/^#{AMAZON_HEADER_PREFIX}|^content-md5$|^content-type$|^date$/o]
  end
  s3_headers['content-type'] ||= ''
  s3_headers['content-md5'] ||= ''
  # 'x-amz-date' takes precedence over 'date' when both are present.
  s3_headers['date'] = '' if s3_headers.has_key? 'x-amz-date'
  # For query-string (pre-signed) auth, the expiry timestamp stands in for the date.
  s3_headers['date'] = expires if expires
  # prepare output string: HTTP verb, then the signed headers in sorted order
  out_string = "#{method}\n"
  s3_headers.sort { |a, b| a[0] <=> b[0] }.each do |key, value|
    out_string << (key[/^#{AMAZON_HEADER_PREFIX}/o] ? "#{key}:#{value}\n" : "#{value}\n")
  end
  # ignore everything after the question mark...
  out_string << path.gsub(/\?.*$/, '')
  # ...unless there is an acl or torrent parameter
  out_string << '?acl' if path[/[&?]acl($|&|=)/]
  out_string << '?torrent' if path[/[&?]torrent($|&|=)/]
  out_string << '?location' if path[/[&?]location($|&|=)/]
  out_string << '?logging' if path[/[&?]logging($|&|=)/] # this one is beta, no support for now
  out_string
end
113
+
114
+ # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
115
# True when +bucket_name+ is usable as a virtual-host (DNS) style bucket name:
# 3..63 characters, dot-separated components of lowercase letters, digits and
# hyphens, each component starting and ending alphanumerically.
# Returns nil (falsy) otherwise.
def is_dns_bucket?(bucket_name)
  name = bucket_name.to_s
  return nil unless (3..63) === name.size
  dns_safe = name.split('.').all? { |part| part =~ /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/ }
  dns_safe ? true : nil
end
123
+
124
# Splits headers[:url] into (bucket, key, query) and chooses between
# virtual-host style addressing ("bucket.s3.amazonaws.com/key") and path
# style ("s3.amazonaws.com/bucket/key"). The string to be signed always
# uses the path-style form. Returns [server, path, path_to_sign].
def fetch_request_params(headers) #:nodoc:
  # default server to use
  server = @params[:server]
  service = @params[:service].to_s
  service.chop! if service[%r{/$}] # remove trailing '/' from service
  # extract bucket name and check it's dns compartibility
  # NOTE: the captures $1/$2/$3 below come from this match — keep them adjacent.
  headers[:url].to_s[%r{^([a-z0-9._-]*)(/[^?]*)?(\?.+)?}i]
  bucket_name, key_path, params_list = $1, $2, $3
  # select request model
  if is_dns_bucket?(bucket_name)
    # fix a path
    server = "#{bucket_name}.#{server}"
    key_path ||= '/'
    path = "#{service}#{key_path}#{params_list}"
  else
    path = "#{service}/#{bucket_name}#{key_path}#{params_list}"
  end
  path_to_sign = "#{service}/#{bucket_name}#{key_path}#{params_list}"
  # path_to_sign = "/#{bucket_name}#{key_path}#{params_list}"
  [server, path, path_to_sign]
end
145
+
146
+ # Generates request hash for REST API.
147
+ # Assumes that headers[:url] is URL encoded (use CGI::escape)
148
# Builds a signed Net::HTTP request for the S3 REST API.
# NOTE: mutates the +headers+ hash it is given (deletes nil values and
# Symbol-keyed options such as :url/:data after consuming them).
def generate_rest_request(method, headers) # :nodoc:
  # calculate request data
  server, path, path_to_sign = fetch_request_params(headers)
  data = headers[:data]
  # remove unset(==optional) and symbolyc keys
  headers.each { |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
  # content-type must exist (it participates in the signature); date is always fresh
  headers['content-type'] ||= ''
  headers['date'] = Time.now.httpdate
  # create request — e.g. 'GET' becomes Net::HTTP::Get
  request = "Net::HTTP::#{method.capitalize}".constantize.new(path)
  request.body = data if data
  # set request headers and meta headers
  headers.each { |key, value| request[key.to_s] = value }
  # generate auth strings from the canonical (path-style) form of the request
  auth_string = canonical_string(request.method, path_to_sign, request.to_hash)
  signature = AwsUtils::sign(@aws_secret_access_key, auth_string)
  # set other headers
  request['Authorization'] = "AWS #{@aws_access_key_id}:#{signature}"
  # prepare output hash
  {:request => request,
   :server => server,
   :port => @params[:port],
   :protocol => @params[:protocol]}
end
173
+
174
+ # Sends request to Amazon and parses the response.
175
+ # Raises AwsError if any banana happened.
176
def request_info(request, parser, &block) # :nodoc:
  # Delegate to the shared AwsBaseInterface machinery, tagging the connection
  # and benchmarking set as belonging to S3.
  request_info2(request, parser, @params, :s3_connection, @logger, @@bench, &block)
end
180
+
181
+
182
+ # Returns an array of customer's buckets. Each item is a +hash+.
183
+ #
184
+ # s3.list_all_my_buckets #=>
185
+ # [{:owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
186
+ # :owner_display_name => "root",
187
+ # :name => "bucket_name",
188
+ # :creation_date => "2007-04-19T18:47:43.000Z"}, ..., {...}]
189
+ #
190
def list_all_my_buckets(headers={})
  # An empty :url targets the service root, which lists the account's buckets.
  request = generate_rest_request('GET', headers.merge(:url => ''))
  request_info(request, S3ListAllMyBucketsParser.new(:logger => @logger))
rescue
  on_exception
end
196
+
197
+ # Creates new bucket. Returns +true+ or an exception.
198
+ #
199
+ # # create a bucket at American server
200
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
201
+ # # create a bucket at European server
202
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
203
+ #
204
def create_bucket(bucket, headers={})
  data = nil
  unless headers[:location].blank?
    data = "<CreateBucketConfiguration><LocationConstraint>#{headers[:location].to_s.upcase}</LocationConstraint></CreateBucketConfiguration>"
  end
  req_hash = generate_rest_request('PUT', headers.merge(:url=>bucket, :data => data))
  request_info(req_hash, RightHttp2xxParser.new)
rescue => e
  # If the bucket already exists, AWS returns an error for the location
  # constraint interface — treat "already owned by you" as success.
  # FIX: narrowed from `rescue Exception` so that SystemExit, Interrupt and
  # other non-StandardError exceptions propagate instead of being swallowed.
  e.is_a?(Aws::AwsError) && e.message.include?('BucketAlreadyOwnedByYou') ? true : on_exception
end
215
+
216
+ # Retrieve bucket location
217
+ #
218
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
219
+ # puts s3.bucket_location('my-awesome-bucket-us') #=> '' (Amazon's default value assumed)
220
+ #
221
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
222
+ # puts s3.bucket_location('my-awesome-bucket-eu') #=> 'EU'
223
+ #
224
def bucket_location(bucket, headers={})
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}?location"))
  request_info(request, S3BucketLocationParser.new)
rescue
  on_exception
end
230
+
231
+ # Retrieves the logging configuration for a bucket.
232
+ # Returns a hash of {:enabled, :targetbucket, :targetprefix}
233
+ #
234
+ # s3.interface.get_logging_parse(:bucket => "asset_bucket")
235
+ # => {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
236
+ #
237
+ #
238
def get_logging_parse(params)
  AwsUtils.mandatory_arguments([:bucket], params)
  AwsUtils.allow_only([:bucket, :headers], params)
  params[:headers] ||= {}
  request = generate_rest_request('GET', params[:headers].merge(:url => "#{params[:bucket]}?logging"))
  request_info(request, S3LoggingParser.new)
rescue
  on_exception
end
247
+
248
+ # Sets logging configuration for a bucket from the XML configuration document.
249
+ # params:
250
+ # :bucket
251
+ # :xmldoc
252
def put_logging(params)
  AwsUtils.mandatory_arguments([:bucket, :xmldoc], params)
  AwsUtils.allow_only([:bucket, :xmldoc, :headers], params)
  params[:headers] ||= {}
  request = generate_rest_request('PUT', params[:headers].merge(:url => "#{params[:bucket]}?logging", :data => params[:xmldoc]))
  request_info(request, S3TrueParser.new)
rescue
  on_exception
end
261
+
262
+ # Deletes new bucket. Bucket must be empty! Returns +true+ or an exception.
263
+ #
264
+ # s3.delete_bucket('my_awesome_bucket') #=> true
265
+ #
266
+ # See also: force_delete_bucket method
267
+ #
268
def delete_bucket(bucket, headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => bucket))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
274
+
275
+ # Returns an array of bucket's keys. Each array item (key data) is a +hash+.
276
+ #
277
+ # s3.list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) #=>
278
+ # [{:key => "test1",
279
+ # :last_modified => "2007-05-18T07:00:59.000Z",
280
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
281
+ # :owner_display_name => "root",
282
+ # :e_tag => "000000000059075b964b07152d234b70",
283
+ # :storage_class => "STANDARD",
284
+ # :size => 3,
285
+ # :service=> {'is_truncated' => false,
286
+ # 'prefix' => "t",
287
+ # 'marker' => "",
288
+ # 'name' => "my_awesome_bucket",
289
+ # 'max-keys' => "5"}, ..., {...}]
290
+ #
291
def list_bucket(bucket, options={}, headers={})
  # Fold listing options ('prefix', 'marker', 'max-keys', 'delimiter', ...)
  # into the query string.
  unless options.blank?
    query = options.map { |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&')
    bucket += "?#{query}"
  end
  request = generate_rest_request('GET', headers.merge(:url => bucket))
  request_info(request, S3ListBucketParser.new(:logger => @logger))
rescue
  on_exception
end
298
+
299
+ # Incrementally list the contents of a bucket. Yields the following hash to a block:
300
+ # s3.incrementally_list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) yields
301
+ # {
302
+ # :name => 'bucketname',
303
+ # :prefix => 'subfolder/',
304
+ # :marker => 'fileN.jpg',
305
+ # :max_keys => 234,
306
+ # :delimiter => '/',
307
+ # :is_truncated => true,
308
+ # :next_marker => 'fileX.jpg',
309
+ # :contents => [
310
+ # { :key => "file1",
311
+ # :last_modified => "2007-05-18T07:00:59.000Z",
312
+ # :e_tag => "000000000059075b964b07152d234b70",
313
+ # :size => 3,
314
+ # :storage_class => "STANDARD",
315
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
316
+ # :owner_display_name => "root"
317
+ # }, { :key, ...}, ... {:key, ...}
318
+ # ]
319
+ # :common_prefixes => [
320
+ # "prefix1",
321
+ # "prefix2",
322
+ # ...,
323
+ # "prefixN"
324
+ # ]
325
+ # }
326
def incrementally_list_bucket(bucket, options={}, headers={}, &block)
  internal_options = options.symbolize_keys
  begin
    internal_bucket = bucket.dup
    internal_bucket += '?'+internal_options.map { |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&') unless internal_options.blank?
    req_hash = generate_rest_request('GET', headers.merge(:url=>internal_bucket))
    response = request_info(req_hash, S3ImprovedListBucketParser.new(:logger => @logger))
    there_are_more_keys = response[:is_truncated]
    if (there_are_more_keys)
      # Resume the next page from the last key/prefix seen (or next_marker).
      internal_options[:marker] = decide_marker(response)
      # Decrement the caller's max-keys budget by what this page returned.
      total_results = response[:contents].length + response[:common_prefixes].length
      internal_options[:'max-keys'] ? (internal_options[:'max-keys'] -= total_results) : nil
    end
    yield response
  end while there_are_more_keys && under_max_keys(internal_options)
  # do-while: the block is always yielded at least once, even for an empty bucket
  true
rescue
  on_exception
end
345
+
346
+
347
+ private
348
# Picks the marker for the next page of an incremental bucket listing:
# the explicit next_marker if Amazon returned one, otherwise the
# lexicographically greatest of the last key and the last common prefix.
def decide_marker(response)
  explicit = response[:next_marker]
  return explicit.dup if explicit

  last_key = response[:contents].last[:key]
  last_prefix = response[:common_prefixes].last

  if last_key.nil?
    last_prefix.nil? ? nil : last_prefix.dup
  elsif last_prefix.nil?
    last_key.dup
  else
    [last_key, last_prefix].max.dup
  end
end
361
+
362
# True while the caller's max-keys budget (if any) is not yet exhausted.
def under_max_keys(internal_options)
  limit = internal_options[:'max-keys']
  return true unless limit
  limit > 0
end
365
+
366
+ public
367
+ # Saves object to Amazon. Returns +true+ or an exception.
368
+ # Any header starting with AMAZON_METADATA_PREFIX is considered
369
+ # user metadata. It will be stored with the object and returned
370
+ # when you retrieve the object. The total size of the HTTP
371
+ # request, not including the body, must be less than 4 KB.
372
+ #
373
+ # s3.put('my_awesome_bucket', 'log/current/1.log', 'Ola-la!', 'x-amz-meta-family'=>'Woho556!') #=> true
374
+ #
375
+ # This method is capable of 'streaming' uploads; that is, it can upload
376
+ # data from a file or other IO object without first reading all the data
377
+ # into memory. This is most useful for large PUTs - it is difficult to read
378
+ # a 2 GB file entirely into memory before sending it to S3.
379
+ # To stream an upload, pass an object that responds to 'read' (like the read
380
+ # method of IO) and to either 'lstat' or 'size'. For files, this means
381
+ # streaming is enabled by simply making the call:
382
+ #
383
+ # s3.put(bucket_name, 'S3keyname.forthisfile', File.open('localfilename.dat'))
384
+ #
385
+ # If the IO object you wish to stream from responds to the read method but
386
+ # doesn't implement lstat or size, you can extend the object dynamically
387
+ # to implement these methods, or define your own class which defines these
388
+ # methods. Be sure that your class returns 'nil' from read() after having
389
+ # read 'size' bytes. Otherwise S3 will drop the socket after
390
+ # 'Content-Length' bytes have been uploaded, and HttpConnection will
391
+ # interpret this as an error.
392
+ #
393
+ # This method now supports very large PUTs, where very large
394
+ # is > 2 GB.
395
+ #
396
+ # For Win32 users: Files and IO objects should be opened in binary mode. If
397
+ # a text mode IO object is passed to PUT, it will be converted to binary
398
+ # mode.
399
+ #
400
+
401
def put(bucket, key, data=nil, headers={})
  # On Windows, a file opened in text mode must be reset to binary mode
  # for streaming to work properly.
  data.binmode if data.respond_to?(:binmode)
  data_size =
    if data.respond_to?(:lstat)
      data.lstat.size
    elsif data.respond_to?(:size)
      data.size
    else
      0
    end
  # Large bodies ask the server to acknowledge headers before the upload starts.
  headers['expect'] = '100-continue' if data_size >= USE_100_CONTINUE_PUT_SIZE
  request = generate_rest_request('PUT', headers.merge(:url => "#{bucket}/#{CGI::escape key}", :data => data,
                                                       'Content-Length' => data_size.to_s))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
418
+
419
+
420
+ # New experimental API for uploading objects, introduced in Aws 1.8.1.
421
+ # store_object is similar in function to the older function put, but returns the full response metadata. It also allows for optional verification
422
+ # of object md5 checksums on upload. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
423
+ # The hash of the response headers contains useful information like the Amazon request ID and the object ETag (MD5 checksum).
424
+ #
425
+ # If the optional :md5 argument is provided, store_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
426
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
427
+ #
428
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
429
+ #
430
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
431
+ # => {"x-amz-id-2"=>"SVsnS2nfDaR+ixyJUlRKM8GndRyEMS16+oZRieamuL61pPxPaTuWrWtlYaEhYrI/",
432
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
433
+ # "date"=>"Mon, 29 Sep 2008 18:57:46 GMT",
434
+ # :verified_md5=>true,
435
+ # "x-amz-request-id"=>"63916465939995BA",
436
+ # "server"=>"AmazonS3",
437
+ # "content-length"=>"0"}
438
+ #
439
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :data => "polemonium" )
440
+ # => {"x-amz-id-2"=>"MAt9PLjgLX9UYJ5tV2fI/5dBZdpFjlzRVpWgBDpvZpl+V+gJFcBMW2L+LBstYpbR",
441
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
442
+ # "date"=>"Mon, 29 Sep 2008 18:58:56 GMT",
443
+ # :verified_md5=>false,
444
+ # "x-amz-request-id"=>"3B25A996BC2CDD3B",
445
+ # "server"=>"AmazonS3",
446
+ # "content-length"=>"0"}
447
+
448
def store_object(params)
  AwsUtils.allow_only([:bucket, :key, :data, :headers, :md5], params)
  AwsUtils.mandatory_arguments([:bucket, :key, :data], params)
  params[:headers] ||= {}

  data = params[:data]
  # On Windows, text-mode IO must be reset to binary mode for streaming.
  data.binmode if data.respond_to?(:binmode)
  large_upload = (data.respond_to?(:lstat) && data.lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
                 (data.respond_to?(:size) && data.size >= USE_100_CONTINUE_PUT_SIZE)
  params[:headers]['expect'] = '100-continue' if large_upload

  req_hash = generate_rest_request('PUT', params[:headers].merge(:url=>"#{params[:bucket]}/#{CGI::escape params[:key]}", :data=>data))
  resp = request_info(req_hash, S3HttpResponseHeadParser.new)
  # When the caller supplied an md5, compare it against the returned ETag;
  # otherwise :verified_md5 is reported as false.
  resp[:verified_md5] = params[:md5] ? (resp['etag'].gsub(/\"/, '') == params[:md5]) : false
  resp
rescue
  on_exception
end
470
+
471
+ # Identical in function to store_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
472
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
473
+ # This call is implemented as a wrapper around store_object and the user may gain different semantics by creating a custom wrapper.
474
+ #
475
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
476
+ # => {"x-amz-id-2"=>"IZN3XsH4FlBU0+XYkFTfHwaiF1tNzrm6dIW2EM/cthKvl71nldfVC0oVQyydzWpb",
477
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
478
+ # "date"=>"Mon, 29 Sep 2008 18:38:32 GMT",
479
+ # :verified_md5=>true,
480
+ # "x-amz-request-id"=>"E8D7EA4FE00F5DF7",
481
+ # "server"=>"AmazonS3",
482
+ # "content-length"=>"0"}
483
+ #
484
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2953", :data => "polemonium" )
485
+ # Aws::AwsError: Uploaded object failed MD5 checksum verification: {"x-amz-id-2"=>"HTxVtd2bf7UHHDn+WzEH43MkEjFZ26xuYvUzbstkV6nrWvECRWQWFSx91z/bl03n",
486
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
487
+ # "date"=>"Mon, 29 Sep 2008 18:38:41 GMT",
488
+ # :verified_md5=>false,
489
+ # "x-amz-request-id"=>"0D7ADE09F42606F2",
490
+ # "server"=>"AmazonS3",
491
+ # "content-length"=>"0"}
492
def store_object_and_verify(params)
  AwsUtils.mandatory_arguments([:md5], params)
  resp = store_object(params)
  raise AwsError.new("Uploaded object failed MD5 checksum verification: #{resp.inspect}") unless resp[:verified_md5]
  resp
end
497
+
498
+ # Retrieves object data from Amazon. Returns a +hash+ or an exception.
499
+ #
500
+ # s3.get('my_awesome_bucket', 'log/curent/1.log') #=>
501
+ #
502
+ # {:object => "Ola-la!",
503
+ # :headers => {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
504
+ # "content-type" => "",
505
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
506
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
507
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
508
+ # "x-amz-meta-family" => "Woho556!",
509
+ # "x-amz-request-id" => "0000000C246D770C",
510
+ # "server" => "AmazonS3",
511
+ # "content-length" => "7"}}
512
+ #
513
+ # If a block is provided, yields incrementally to the block as
514
+ # the response is read. For large responses, this function is ideal as
515
+ # the response can be 'streamed'. The hash containing header fields is
516
+ # still returned.
517
+ # Example:
518
+ # foo = File.new('./chunder.txt', File::CREAT|File::RDWR)
519
+ # rhdr = s3.get('aws-test', 'Cent5V1_7_1.img.part.00') do |chunk|
520
+ # foo.write(chunk)
521
+ # end
522
+ # foo.close
523
+ #
524
+
525
def get(bucket, key, headers={}, &block)
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  # The optional block receives the body incrementally ('streamed') as it is read.
  request_info(request, S3HttpResponseBodyParser.new, &block)
rescue
  on_exception
end
531
+
532
+ # New experimental API for retrieving objects, introduced in Aws 1.8.1.
533
+ # retrieve_object is similar in function to the older function get. It allows for optional verification
534
+ # of object md5 checksums on retrieval. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
535
+ #
536
+ # If the optional :md5 argument is provided, retrieve_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
537
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
538
+ #
539
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
540
+ # Mandatory arguments:
541
+ # :bucket - the bucket in which the object is stored
542
+ # :key - the object address (or path) within the bucket
543
+ # Optional arguments:
544
+ # :headers - hash of additional HTTP headers to include with the request
545
+ # :md5 - MD5 checksum against which to verify the retrieved object
546
+ #
547
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo")
548
+ # => {:verified_md5=>false,
549
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
550
+ # "x-amz-id-2"=>"2Aj3TDz6HP5109qly//18uHZ2a1TNHGLns9hyAtq2ved7wmzEXDOPGRHOYEa3Qnp",
551
+ # "content-type"=>"",
552
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
553
+ # "date"=>"Tue, 30 Sep 2008 00:52:44 GMT",
554
+ # "x-amz-request-id"=>"EE4855DE27A2688C",
555
+ # "server"=>"AmazonS3",
556
+ # "content-length"=>"10"},
557
+ # :object=>"polemonium"}
558
+ #
559
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo", :md5=>'a507841b1bc8115094b00bbe8c1b2954')
560
+ # => {:verified_md5=>true,
561
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
562
+ # "x-amz-id-2"=>"mLWQcI+VuKVIdpTaPXEo84g0cz+vzmRLbj79TS8eFPfw19cGFOPxuLy4uGYVCvdH",
563
+ # "content-type"=>"", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
564
+ # "date"=>"Tue, 30 Sep 2008 00:53:08 GMT",
565
+ # "x-amz-request-id"=>"6E7F317356580599",
566
+ # "server"=>"AmazonS3",
567
+ # "content-length"=>"10"},
568
+ # :object=>"polemonium"}
569
+ # If a block is provided, yields incrementally to the block as
570
+ # the response is read. For large responses, this function is ideal as
571
+ # the response can be 'streamed'. The hash containing header fields is
572
+ # still returned.
573
def retrieve_object(params, &block)
  AwsUtils.mandatory_arguments([:bucket, :key], params)
  AwsUtils.allow_only([:bucket, :key, :headers, :md5], params)
  params[:headers] ||= {}
  req_hash = generate_rest_request('GET', params[:headers].merge(:url=>"#{params[:bucket]}/#{CGI::escape params[:key]}"))
  resp = request_info(req_hash, S3HttpResponseBodyParser.new, &block)
  # :verified_md5 is true only when a caller-supplied md5 matches the ETag.
  resp[:verified_md5] = !!(params[:md5] && resp[:headers]['etag'].gsub(/\"/, '') == params[:md5])
  resp
rescue
  on_exception
end
587
+
588
+ # Identical in function to retrieve_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
589
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
590
+ # This call is implemented as a wrapper around retrieve_object and the user may gain different semantics by creating a custom wrapper.
591
def retrieve_object_and_verify(params, &block)
  AwsUtils.mandatory_arguments([:md5], params)
  resp = retrieve_object(params, &block)
  raise AwsError.new("Retrieved object failed MD5 checksum verification: #{resp.inspect}") unless resp[:verified_md5]
  resp
end
597
+
598
+ # Retrieves object metadata. Returns a +hash+ of http_response_headers.
599
+ #
600
+ # s3.head('my_awesome_bucket', 'log/curent/1.log') #=>
601
+ # {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
602
+ # "content-type" => "",
603
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
604
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
605
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
606
+ # "x-amz-meta-family" => "Woho556!",
607
+ # "x-amz-request-id" => "0000000C246D770C",
608
+ # "server" => "AmazonS3",
609
+ # "content-length" => "7"}
610
+ #
611
def head(bucket, key, headers={})
  request = generate_rest_request('HEAD', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, S3HttpResponseHeadParser.new)
rescue
  on_exception
end
617
+
618
+ # Deletes key. Returns +true+ or an exception.
619
+ #
620
+ # s3.delete('my_awesome_bucket', 'log/curent/1.log') #=> true
621
+ #
622
def delete(bucket, key='', headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
628
+
629
+ # Copy an object.
630
+ # directive: :copy - copy meta-headers from source (default value)
631
+ # :replace - replace meta-headers by passed ones
632
+ #
633
+ # # copy a key with meta-headers
634
+ # s3.copy('b1', 'key1', 'b1', 'key1_copy') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:25:22.000Z"}
635
+ #
636
+ # # copy a key, overwrite meta-headers
637
+ # s3.copy('b1', 'key2', 'b1', 'key2_copy', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:26:22.000Z"}
638
+ #
639
+ # see: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingCopyingObjects.html
640
+ # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
641
+ #
642
def copy(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  dest_key ||= src_key
  # COPY vs REPLACE controls whether meta-headers come from the source object
  # or from the headers passed in here.
  headers['x-amz-metadata-directive'] = directive.to_s.upcase
  headers['x-amz-copy-source'] = "#{src_bucket}/#{CGI::escape src_key}"
  request = generate_rest_request('PUT', headers.merge(:url => "#{dest_bucket}/#{CGI::escape dest_key}"))
  request_info(request, S3CopyParser.new)
rescue
  on_exception
end
651
+
652
+ # Move an object.
653
+ # directive: :copy - copy meta-headers from source (default value)
654
+ # :replace - replace meta-headers by passed ones
655
+ #
656
+ # # move bucket1/key1 to bucket1/key2
657
+ # s3.move('bucket1', 'key1', 'bucket1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:27:22.000Z"}
658
+ #
659
+ # # move bucket1/key1 to bucket2/key2 with new meta-headers assignment
660
+ # s3.copy('bucket1', 'key1', 'bucket2', 'key2', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:28:22.000Z"}
661
+ #
662
def move(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  result = copy(src_bucket, src_key, dest_bucket, dest_key, directive, headers)
  # Remove the original only when source and destination actually differ.
  same_object = (src_bucket == dest_bucket) && (src_key == dest_key)
  delete(src_bucket, src_key) unless same_object
  result
end
668
+
669
+ # Rename an object.
670
+ #
671
+ # # rename bucket1/key1 to bucket1/key2
672
+ # s3.rename('bucket1', 'key1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:29:22.000Z"}
673
+ #
674
def rename(src_bucket, src_key, dest_key, headers={})
  # A rename is simply a move within the same bucket.
  move(src_bucket, src_key, src_bucket, dest_key, :copy, headers)
end
677
+
678
+ # Retieves the ACL (access control policy) for a bucket or object. Returns a hash of headers and xml doc with ACL data. See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html.
679
+ #
680
+ # s3.get_acl('my_awesome_bucket', 'log/curent/1.log') #=>
681
+ # {:headers => {"x-amz-id-2"=>"B3BdDMDUz+phFF2mGBH04E46ZD4Qb9HF5PoPHqDRWBv+NVGeA3TOQ3BkVvPBjgxX",
682
+ # "content-type"=>"application/xml;charset=ISO-8859-1",
683
+ # "date"=>"Wed, 23 May 2007 09:40:16 GMT",
684
+ # "x-amz-request-id"=>"B183FA7AB5FBB4DD",
685
+ # "server"=>"AmazonS3",
686
+ # "transfer-encoding"=>"chunked"},
687
+ # :object => "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Owner>
688
+ # <ID>16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Owner>
689
+ # <AccessControlList><Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>
690
+ # 16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Grantee>
691
+ # <Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>" }
692
+ #
693
# Retrieves the ACL (access control policy) for a bucket or object.
# Returns a hash with the response :headers and the raw ACL XML under :object.
# See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html
def get_acl(bucket, key='', headers={})
  # A blank key addresses the bucket's own ACL.
  resource = key.blank? ? bucket : "#{bucket}/#{CGI::escape key}"
  request_hash = generate_rest_request('GET', headers.merge(:url => "#{resource}?acl"))
  request_info(request_hash, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
700
+
701
+ # Retieves the ACL (access control policy) for a bucket or object.
702
+ # Returns a hash of {:owner, :grantees}
703
+ #
704
+ # s3.get_acl_parse('my_awesome_bucket', 'log/curent/1.log') #=>
705
+ #
706
+ # { :grantees=>
707
+ # { "16...2a"=>
708
+ # { :display_name=>"root",
709
+ # :permissions=>["FULL_CONTROL"],
710
+ # :attributes=>
711
+ # { "xsi:type"=>"CanonicalUser",
712
+ # "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}},
713
+ # "http://acs.amazonaws.com/groups/global/AllUsers"=>
714
+ # { :display_name=>"AllUsers",
715
+ # :permissions=>["READ"],
716
+ # :attributes=>
717
+ # { "xsi:type"=>"Group",
718
+ # "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}}},
719
+ # :owner=>
720
+ # { :id=>"16..2a",
721
+ # :display_name=>"root"}}
722
+ #
723
# Retrieves and parses the ACL (access control policy) for a bucket or object.
# Returns a hash of {:owner, :grantees}, where :grantees maps each grantee's
# canonical id (or group URI) to its display name, permission list and
# XML attributes.
def get_acl_parse(bucket, key='', headers={})
  resource = key.blank? ? bucket : "#{bucket}/#{CGI::escape key}"
  request_hash = generate_rest_request('GET', headers.merge(:url => "#{resource}?acl"))
  acl = request_info(request_hash, S3AclParser.new(:logger => @logger))
  grantees = {}
  acl[:grantees].each do |grantee|
    id = grantee[:id] || grantee[:uri]
    if grantees.key?(id)
      # Extra grants for an already-seen grantee extend its permission list.
      grantees[id][:permissions] << grantee[:permissions]
    else
      grantees[id] = {
        # Group grantees carry no display name — derive one from the URI tail.
        :display_name => grantee[:display_name] || grantee[:uri].to_s[/[^\/]*$/],
        :permissions  => grantee[:permissions].lines.to_a,
        :attributes   => grantee[:attributes]
      }
    end
  end
  { :owner => acl[:owner], :grantees => grantees }
rescue
  on_exception
end
745
+
746
+ # Sets the ACL on a bucket or object.
747
# Sets the ACL on a bucket (blank key) or on a single object.
# acl_xml_doc is the full AccessControlPolicy XML document to install.
def put_acl(bucket, key, acl_xml_doc, headers={})
  resource = key.blank? ? bucket : "#{bucket}/#{CGI::escape key}"
  request_hash = generate_rest_request('PUT', headers.merge(:url => "#{resource}?acl", :data => acl_xml_doc))
  request_info(request_hash, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
754
+
755
+ # Retieves the ACL (access control policy) for a bucket. Returns a hash of headers and xml doc with ACL data.
756
# Retrieves the ACL for a bucket itself. Returns a hash of headers plus
# the raw ACL XML document.
def get_bucket_acl(bucket, headers={})
  get_acl(bucket, '', headers)
rescue
  on_exception
end
761
+
762
+ # Sets the ACL on a bucket only.
763
# Sets the ACL on a bucket only (not on an object inside it).
def put_bucket_acl(bucket, acl_xml_doc, headers={})
  put_acl(bucket, '', acl_xml_doc, headers)
rescue
  on_exception
end
768
+
769
+
770
+ # Removes all keys from bucket. Returns +true+ or an exception.
771
+ #
772
+ # s3.clear_bucket('my_awesome_bucket') #=> true
773
+ #
774
# Removes every key from a bucket. Returns +true+ or raises an exception.
#
#  s3.clear_bucket('my_awesome_bucket') #=> true
#
def clear_bucket(bucket)
  # Walk the bucket listing page by page and delete each key as we go.
  incrementally_list_bucket(bucket) do |page|
    page[:contents].each { |entry| delete(bucket, entry[:key]) }
  end
  true
rescue
  on_exception
end
782
+
783
+ # Deletes all keys in bucket then deletes bucket. Returns +true+ or an exception.
784
+ #
785
+ # s3.force_delete_bucket('my_awesome_bucket')
786
+ #
787
# Empties a bucket of all keys and then deletes the bucket itself.
# Returns +true+ or raises an exception.
#
#  s3.force_delete_bucket('my_awesome_bucket')
#
def force_delete_bucket(bucket)
  # S3 refuses to delete a non-empty bucket, so clear it first.
  clear_bucket(bucket)
  delete_bucket(bucket)
rescue
  on_exception
end
793
+
794
+ # Deletes all keys where the 'folder_key' may be assumed as 'folder' name. Returns an array of string keys that have been deleted.
795
+ #
796
+ # s3.list_bucket('my_awesome_bucket').map{|key_data| key_data[:key]} #=> ['test','test/2/34','test/3','test1','test1/logs']
797
+ # s3.delete_folder('my_awesome_bucket','test') #=> ['test','test/2/34','test/3']
798
+ #
799
# Deletes all keys for which 'folder_key' can be treated as a 'folder' name.
# Returns a flat array of the string keys that have been deleted.
#
#  s3.list_bucket('my_awesome_bucket').map{|key_data| key_data[:key]} #=> ['test','test/2/34','test/3','test1','test1/logs']
#  s3.delete_folder('my_awesome_bucket','test') #=> ['test','test/2/34','test/3']
#
def delete_folder(bucket, folder_key, separator='/')
  # Non-mutating chomp: the previous chomp! modified the caller's string in place.
  folder_key = folder_key.chomp(separator)
  # Escape the folder name so keys containing regexp metacharacters
  # ('+', '(', '.', ...) are matched literally; match the folder itself
  # or anything below it.
  folder_re = /\A#{Regexp.escape(folder_key)}($|#{separator}.*)/
  deleted = []
  incrementally_list_bucket(bucket, 'prefix' => folder_key) do |results|
    # The prefix listing also returns siblings like 'test1' for folder 'test';
    # the regexp filters those out.
    keys = results[:contents].map { |s3_key| s3_key[:key] }.select { |k| k =~ folder_re }
    keys.each { |key| delete(bucket, key) }
    # concat (not <<) so the result is a flat list of keys, as documented —
    # '<<' produced a nested array-of-arrays, one sub-array per listing page.
    deleted.concat(keys)
  end
  deleted
rescue
  on_exception
end
811
+
812
+ # Retrieves object data only (headers are omitted). Returns +string+ or an exception.
813
+ #
814
+ # s3.get('my_awesome_bucket', 'log/curent/1.log') #=> 'Ola-la!'
815
+ #
816
# Retrieves only the object data; the response headers are discarded.
# Returns a +String+ or raises an exception.
#
#  s3.get_object('my_awesome_bucket', 'log/curent/1.log') #=> 'Ola-la!'
#
def get_object(bucket, key, headers={})
  get(bucket, key, headers)[:object]
rescue
  on_exception
end
821
+
822
+ #-----------------------------------------------------------------
823
+ # Query API: Links
824
+ #-----------------------------------------------------------------
825
+
826
+ # Generates link for QUERY API
827
# Generates a pre-signed QUERY-API link for the given HTTP method and headers.
# +expires+ may be an absolute Time / integer timestamp, or a relative number
# of seconds (any integer below one year is treated as relative to now).
def generate_link(method, headers={}, expires=nil) #:nodoc:
  # calculate request data
  server, path, path_to_sign = fetch_request_params(headers)
  # expiration time — Fixnum was removed in Ruby 3.2, so test against Integer
  expires ||= DEFAULT_EXPIRES_AFTER
  expires = Time.now.utc + expires if expires.is_a?(Integer) && (expires < ONE_YEAR_IN_SECONDS)
  expires = expires.to_i
  # remove unset(==optional) and symbolic keys
  headers.each { |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
  # generate auth strings
  auth_string = canonical_string(method, path_to_sign, headers, expires)
  # OpenSSL::Digest::Digest was deprecated and later removed; OpenSSL::Digest
  # is the supported spelling.
  signature = CGI::escape(Base64.encode64(OpenSSL::HMAC.digest(OpenSSL::Digest.new("sha1"), @aws_secret_access_key, auth_string)).strip)
  # path building
  addon = "Signature=#{signature}&Expires=#{expires}&AWSAccessKeyId=#{@aws_access_key_id}"
  path += path[/\?/] ? "&#{addon}" : "?#{addon}"
  "#{@params[:protocol]}://#{server}:#{@params[:port]}#{path}"
rescue
  on_exception
end
846
+
847
+ # Generates link for 'ListAllMyBuckets'.
848
+ #
849
+ # s3.list_all_my_buckets_link #=> url string
850
+ #
851
# Generates a pre-signed link for 'ListAllMyBuckets'.
#
#  s3.list_all_my_buckets_link #=> url string
#
def list_all_my_buckets_link(expires=nil, headers={})
  generate_link('GET', headers.merge(:url => ''), expires)
rescue
  on_exception
end
856
+
857
+ # Generates link for 'CreateBucket'.
858
+ #
859
+ # s3.create_bucket_link('my_awesome_bucket') #=> url string
860
+ #
861
# Generates a pre-signed link for 'CreateBucket'.
#
#  s3.create_bucket_link('my_awesome_bucket') #=> url string
#
def create_bucket_link(bucket, expires=nil, headers={})
  generate_link('PUT', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
866
+
867
+ # Generates link for 'DeleteBucket'.
868
+ #
869
+ # s3.delete_bucket_link('my_awesome_bucket') #=> url string
870
+ #
871
# Generates a pre-signed link for 'DeleteBucket'.
#
#  s3.delete_bucket_link('my_awesome_bucket') #=> url string
#
def delete_bucket_link(bucket, expires=nil, headers={})
  generate_link('DELETE', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
876
+
877
+ # Generates link for 'ListBucket'.
878
+ #
879
+ # s3.list_bucket_link('my_awesome_bucket') #=> url string
880
+ #
881
# Generates a pre-signed link for 'ListBucket'. +options+ (prefix, marker,
# max-keys, delimiter) are appended to the URL as a query string.
#
#  s3.list_bucket_link('my_awesome_bucket') #=> url string
#
def list_bucket_link(bucket, options=nil, expires=nil, headers={})
  unless options.blank?
    query = options.map { |name, value| "#{name.to_s}=#{CGI::escape value.to_s}" }.join('&')
    bucket += "?#{query}"
  end
  generate_link('GET', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
887
+
888
+ # Generates link for 'PutObject'.
889
+ #
890
+ # s3.put_link('my_awesome_bucket',key, object) #=> url string
891
+ #
892
# Generates a pre-signed link for 'PutObject'.
#
#  s3.put_link('my_awesome_bucket',key, object) #=> url string
#
def put_link(bucket, key, data=nil, expires=nil, headers={})
  generate_link('PUT', headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}", :data => data), expires)
rescue
  on_exception
end
897
+
898
+ # Generates link for 'GetObject'.
899
+ #
900
+ # if a bucket comply with virtual hosting naming then retuns a link with the
901
+ # bucket as a part of host name:
902
+ #
903
+ # s3.get_link('my-awesome-bucket',key) #=> https://my-awesome-bucket.s3.amazonaws.com:443/asia%2Fcustomers?Signature=nh7...
904
+ #
905
+ # otherwise returns an old style link (the bucket is a part of path):
906
+ #
907
+ # s3.get_link('my_awesome_bucket',key) #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?Signature=QAO...
908
+ #
909
+ # see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html
910
# Generates a pre-signed link for 'GetObject'.
#
# If the bucket name is compatible with virtual hosting, the returned link
# carries the bucket as part of the host name:
#
#  s3.get_link('my-awesome-bucket',key) #=> https://my-awesome-bucket.s3.amazonaws.com:443/asia%2Fcustomers?Signature=nh7...
#
# otherwise an old-style link (the bucket is a part of the path) is returned:
#
#  s3.get_link('my_awesome_bucket',key) #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?Signature=QAO...
#
# see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html
def get_link(bucket, key, expires=nil, headers={})
  generate_link('GET', headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}"), expires)
rescue
  on_exception
end
915
+
916
+ # Generates link for 'HeadObject'.
917
+ #
918
+ # s3.head_link('my_awesome_bucket',key) #=> url string
919
+ #
920
# Generates a pre-signed link for 'HeadObject'.
#
#  s3.head_link('my_awesome_bucket',key) #=> url string
#
def head_link(bucket, key, expires=nil, headers={})
  generate_link('HEAD', headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}"), expires)
rescue
  on_exception
end
925
+
926
+ # Generates link for 'DeleteObject'.
927
+ #
928
+ # s3.delete_link('my_awesome_bucket',key) #=> url string
929
+ #
930
# Generates a pre-signed link for 'DeleteObject'.
#
#  s3.delete_link('my_awesome_bucket',key) #=> url string
#
def delete_link(bucket, key, expires=nil, headers={})
  generate_link('DELETE', headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}"), expires)
rescue
  on_exception
end
935
+
936
+
937
+ # Generates link for 'GetACL'.
938
+ #
939
+ # s3.get_acl_link('my_awesome_bucket',key) #=> url string
940
+ #
941
# Generates a pre-signed link for 'GetACL'.
#
#  s3.get_acl_link('my_awesome_bucket',key) #=> url string
#
def get_acl_link(bucket, key='', headers={})
  generate_link('GET', headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}?acl"))
rescue
  on_exception
end
946
+
947
+ # Generates link for 'PutACL'.
948
+ #
949
+ # s3.put_acl_link('my_awesome_bucket',key) #=> url string
950
+ #
951
# Generates a pre-signed link for 'PutACL'.
#
#  s3.put_acl_link('my_awesome_bucket',key) #=> url string
#
def put_acl_link(bucket, key='', headers={})
  generate_link('PUT', headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}?acl"))
rescue
  on_exception
end
956
+
957
+ # Generates link for 'GetBucketACL'.
958
+ #
959
+ # s3.get_acl_link('my_awesome_bucket',key) #=> url string
960
+ #
961
# Generates a pre-signed link for 'GetBucketACL'.
#
#  s3.get_bucket_acl_link('my_awesome_bucket') #=> url string
#
def get_bucket_acl_link(bucket, headers={})
  get_acl_link(bucket, '', headers)
rescue
  on_exception
end
966
+
967
+ # Generates link for 'PutBucketACL'.
968
+ #
969
+ # s3.put_acl_link('my_awesome_bucket',key) #=> url string
970
+ #
971
# Generates a pre-signed link for 'PutBucketACL'.
#
#  s3.put_bucket_acl_link('my_awesome_bucket', acl_xml_doc) #=> url string
#
def put_bucket_acl_link(bucket, acl_xml_doc, headers={})
  # BUG FIX: put_acl_link accepts (bucket, key='', headers={}) — the previous
  # call passed acl_xml_doc as an extra 4th argument and always raised
  # ArgumentError. The signed PUT link itself does not embed the ACL document,
  # so acl_xml_doc is kept in the signature only for caller compatibility.
  put_acl_link(bucket, '', headers)
rescue
  on_exception
end
976
+
977
+ #-----------------------------------------------------------------
978
+ # PARSERS:
979
+ #-----------------------------------------------------------------
980
+
981
# Parses a 'ListAllMyBuckets' response into an array of bucket hashes,
# each merged with the owner's id and display name.
class S3ListAllMyBucketsParser < AwsParser # :nodoc:
  def reset
    @result = []
    @owner = {}
  end

  def tagstart(name, attributes)
    # A fresh hash for each <Bucket> element.
    @current_bucket = {} if name == 'Bucket'
  end

  def tagend(name)
    case name
      when 'ID'           then @owner[:owner_id] = @text
      when 'DisplayName'  then @owner[:owner_display_name] = @text
      when 'Name'         then @current_bucket[:name] = @text
      when 'CreationDate' then @current_bucket[:creation_date] = @text
      when 'Bucket'       then @result << @current_bucket.merge(@owner)
    end
  end
end
1006
+
1007
# Parses a 'ListBucket' response into a flat array of key hashes; the
# bucket-level listing info is attached to every key under :service.
class S3ListBucketParser < AwsParser # :nodoc:
  def reset
    @result = []
    @service = {}
    @current_key = {}
  end

  def tagstart(name, attributes)
    # A fresh hash for each <Contents> element.
    @current_key = {} if name == 'Contents'
  end

  def tagend(name)
    case name
      # bucket-level (service) info
      when 'Name'        then @service['name'] = @text
      when 'Prefix'      then @service['prefix'] = @text
      when 'Marker'      then @service['marker'] = @text
      when 'MaxKeys'     then @service['max-keys'] = @text
      when 'Delimiter'   then @service['delimiter'] = @text
      when 'IsTruncated' then @service['is_truncated'] = (@text =~ /false/ ? false : true)
      # per-key data
      when 'Key'          then @current_key[:key] = @text
      when 'LastModified' then @current_key[:last_modified] = @text
      when 'ETag'         then @current_key[:e_tag] = @text
      when 'Size'         then @current_key[:size] = @text.to_i
      when 'StorageClass' then @current_key[:storage_class] = @text
      when 'ID'           then @current_key[:owner_id] = @text
      when 'DisplayName'  then @current_key[:owner_display_name] = @text
      when 'Contents'
        @current_key[:service] = @service
        @result << @current_key
    end
  end
end
1053
+
1054
# Parses a 'ListBucket' response into a single hash with :contents (the key
# entries), :common_prefixes, and bucket-level metadata (:name, :prefix,
# :marker, :max_keys, :delimiter, :is_truncated, :next_marker).
class S3ImprovedListBucketParser < AwsParser # :nodoc:
  def reset
    @result = {}
    @result[:contents] = []
    @result[:common_prefixes] = []
    @contents = []
    @current_key = {}
    @common_prefixes = []
    @in_common_prefixes = false
  end

  def tagstart(name, attributes)
    @current_key = {} if name == 'Contents'
    @in_common_prefixes = true if name == 'CommonPrefixes'
  end

  def tagend(name)
    case name
      when 'Name' then @result[:name] = @text
      # Amazon uses the same 'Prefix' tag for the search prefix and for the
      # entries inside <CommonPrefixes>, so a flag tracks which one we are in.
      when 'Prefix'
        if @in_common_prefixes
          @common_prefixes << @text
        else
          @result[:prefix] = @text
        end
      when 'Marker'      then @result[:marker] = @text
      when 'MaxKeys'     then @result[:max_keys] = @text
      when 'Delimiter'   then @result[:delimiter] = @text
      when 'IsTruncated' then @result[:is_truncated] = (@text =~ /false/ ? false : true)
      when 'NextMarker'  then @result[:next_marker] = @text
      # per-key data
      when 'Key'          then @current_key[:key] = @text
      when 'LastModified' then @current_key[:last_modified] = @text
      when 'ETag'         then @current_key[:e_tag] = @text
      when 'Size'         then @current_key[:size] = @text.to_i
      when 'StorageClass' then @current_key[:storage_class] = @text
      when 'ID'           then @current_key[:owner_id] = @text
      when 'DisplayName'  then @current_key[:owner_display_name] = @text
      when 'Contents'     then @result[:contents] << @current_key
      when 'CommonPrefixes'
        @result[:common_prefixes] = @common_prefixes
        @in_common_prefixes = false
    end
  end
end
1113
+
1114
# Extracts the location (region) string from a 'GetBucketLocation' response;
# the result is '' for the classic US region.
class S3BucketLocationParser < AwsParser # :nodoc:
  def reset
    @result = ''
  end

  def tagend(name)
    @result = @text if name == 'LocationConstraint'
  end
end
1123
+
1124
# Parses an AccessControlPolicy document into
# {:owner => {:id, :display_name}, :grantees => [grantee hashes]}.
class S3AclParser < AwsParser # :nodoc:
  def reset
    @result = {:grantees=>[], :owner=>{}}
    @current_grantee = {}
  end

  def tagstart(name, attributes)
    # Keep the Grantee element's XML attributes (xsi:type etc.) alongside its data.
    @current_grantee = {:attributes => attributes} if name == 'Grantee'
  end

  def tagend(name)
    # <ID> and <DisplayName> appear under both <Owner> and <Grantee>;
    # the current XML path tells them apart.
    target = (@xmlpath == 'AccessControlPolicy/Owner') ? @result[:owner] : @current_grantee
    case name
      when 'ID'          then target[:id] = @text
      when 'DisplayName' then target[:display_name] = @text
      when 'URI'         then @current_grantee[:uri] = @text
      when 'Permission'  then @current_grantee[:permissions] = @text
      when 'Grant'       then @result[:grantees] << @current_grantee
    end
  end
end
1158
+
1159
# Parses a BucketLoggingStatus document into
# {:enabled, :targetbucket, :targetprefix}; :enabled becomes true only when
# a <LoggingEnabled> section is present.
class S3LoggingParser < AwsParser # :nodoc:
  def reset
    @result = {:enabled => false, :targetbucket => '', :targetprefix => ''}
    @current_grantee = {}
  end

  def tagend(name)
    # Only tags inside the LoggingEnabled section are meaningful here.
    return unless @xmlpath == 'BucketLoggingStatus/LoggingEnabled'
    case name
      when 'TargetBucket'
        @result[:targetbucket] = @text
        @result[:enabled] = true
      when 'TargetPrefix'
        @result[:targetprefix] = @text
        @result[:enabled] = true
    end
  end
end
1181
+
1182
# Parses a CopyObject response into {:e_tag, :last_modified}.
class S3CopyParser < AwsParser # :nodoc:
  def reset
    @result = {}
  end

  def tagend(name)
    case name
      when 'LastModified' then @result[:last_modified] = @text
      when 'ETag'         then @result[:e_tag] = @text
    end
  end
end
1196
+
1197
+ #-----------------------------------------------------------------
1198
+ # PARSERS: Non XML
1199
+ #-----------------------------------------------------------------
1200
+
1201
# Base class for the non-XML parsers: keeps the raw HTTP response as the result.
class S3HttpResponseParser # :nodoc:
  attr_reader :result

  # Stores the response object verbatim as the parse result.
  def parse(response)
    @result = response
  end

  # Flattens single-element header value arrays ([v] -> v, [] -> nil) while
  # leaving multi-valued headers as arrays. (Despite the name, returns a Hash.)
  def headers_to_string(headers)
    headers.each_with_object({}) do |(name, value), flattened|
      flattened[name] = (value.is_a?(Array) && value.size < 2) ? value[0] : value
    end
  end
end
1217
+
1218
# Packages a raw HTTP response as {:object => body, :headers => flattened headers}.
class S3HttpResponseBodyParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = {
      :object  => response.body,
      :headers => headers_to_string(response.to_hash)
    }
  end
end
1226
+
1227
# Reduces a raw HTTP response to just its flattened headers hash.
class S3HttpResponseHeadParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = headers_to_string(response.to_hash)
  end
end
1232
+
1233
+ end
1234
+
1235
+ end