gotime_aws 2.5.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1279 @@
1
+ #
2
+ # Copyright (c) 2007-2008 RightScale Inc
3
+ #
4
+ # Permission is hereby granted, free of charge, to any person obtaining
5
+ # a copy of this software and associated documentation files (the
6
+ # "Software"), to deal in the Software without restriction, including
7
+ # without limitation the rights to use, copy, modify, merge, publish,
8
+ # distribute, sublicense, and/or sell copies of the Software, and to
9
+ # permit persons to whom the Software is furnished to do so, subject to
10
+ # the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be
13
+ # included in all copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19
+ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20
+ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
+ #
23
+
24
+ module Aws
25
+
26
+ class S3Interface < AwsBase
27
+
28
# PUT bodies at or above this size trigger the HTTP/1.1
# "Expect: 100-continue" handshake (see #put / #store_object).
USE_100_CONTINUE_PUT_SIZE = 1_000_000

include AwsBaseInterface
extend AwsBaseInterface::ClassMethods

# Default endpoint settings; each may be overridden via the S3_URL
# environment variable in #initialize.
DEFAULT_HOST     = 's3.amazonaws.com'
DEFAULT_PORT     = 443
DEFAULT_PROTOCOL = 'https'
DEFAULT_SERVICE  = '/'
REQUEST_TTL      = 30
DEFAULT_EXPIRES_AFTER = 1 * 24 * 60 * 60   # one day's worth of seconds
ONE_YEAR_IN_SECONDS   = 365 * 24 * 60 * 60
# Prefixes Amazon uses for its own and for user-metadata headers.
AMAZON_HEADER_PREFIX   = 'x-amz-'
AMAZON_METADATA_PREFIX = 'x-amz-meta-'
42
+
43
# Symbol under which AwsBaseInterface caches this service's HTTP connection.
def self.connection_name
  :s3_connection
end
46
+
47
# Shared benchmarking block accumulating timing stats for XML parsing
# and raw service calls across all S3Interface instances.
@@bench = AwsBenchmarkingBlock.new

# The whole benchmarking block.
def self.bench
  @@bench
end

# Benchmark data for XML parsing only.
def self.bench_xml
  @@bench.xml
end

# Benchmark data for the S3 service calls themselves.
def self.bench_s3
  @@bench.service
end
60
+
61
+
62
+ # Creates new RightS3 instance.
63
+ #
64
+ # s3 = Aws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX','hgTHt68JY07JKUY08ftHYtERkjgtfERn57XXXXXX', {:multi_thread => true, :logger => Logger.new('/tmp/x.log')}) #=> #<Aws::S3Interface:0xb7b3c27c>
65
+ #
66
+ # Params is a hash:
67
+ #
68
+ # {:server => 's3.amazonaws.com' # Amazon service host: 's3.amazonaws.com'(default)
69
+ # :port => 443 # Amazon service port: 80 or 443(default)
70
+ # :protocol => 'https' # Amazon service protocol: 'http' or 'https'(default)
71
+ # :connection_mode => :default # options are
72
+ # :default (will use best known safe (as in won't need explicit close) option, may change in the future)
73
+ # :per_request (opens and closes a connection on every request)
74
+ # :single (one thread across entire app)
75
+ # :per_thread (one connection per thread)
76
+ # :logger => Logger Object} # Logger instance: logs to STDOUT if omitted }
77
+ #
78
# Creates a new S3Interface.
#
#   s3 = Aws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX', 'hgTHt68JY07JKUY...',
#                             {:multi_thread => true, :logger => Logger.new('/tmp/x.log')})
#
# Credentials fall back to the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
# environment variables; the endpoint falls back to S3_URL, then to the
# DEFAULT_* constants.
#
# Params is a hash:
#   {:server => 's3.amazonaws.com',  # service host
#    :port => 443,                   # 80 or 443 (default)
#    :protocol => 'https',           # 'http' or 'https' (default)
#    :connection_mode => :default,   # :default | :per_request | :single | :per_thread
#    :logger => Logger}              # logs to STDOUT if omitted
def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
  # Parse the optional S3_URL override once (was parsed four times).
  s3_url = ENV['S3_URL'] && URI.parse(ENV['S3_URL'])
  init({:name             => 'S3',
        :default_host     => s3_url ? s3_url.host   : DEFAULT_HOST,
        :default_port     => s3_url ? s3_url.port   : DEFAULT_PORT,
        :default_service  => s3_url ? s3_url.path   : DEFAULT_SERVICE,
        :default_protocol => s3_url ? s3_url.scheme : DEFAULT_PROTOCOL},
       aws_access_key_id || ENV['AWS_ACCESS_KEY_ID'],
       aws_secret_access_key || ENV['AWS_SECRET_ACCESS_KEY'],
       params)
end
88
+
89
+ #-----------------------------------------------------------------
90
+ # Requests
91
+ #-----------------------------------------------------------------
92
+ # Produces canonical string for signing.
93
# Builds the canonical string that is signed for S3 REST authentication.
# Only content-md5, content-type, date and x-amz-* headers participate;
# an +expires+ timestamp (for query-string auth) replaces the date.
def canonical_string(method, path, headers={}, expires=nil) # :nodoc:
  interesting = {}
  headers.each do |name, value|
    name = name.downcase
    if name[/^#{AMAZON_HEADER_PREFIX}|^content-md5$|^content-type$|^date$/o]
      # Values arrive as arrays (Net::HTTP#to_hash); flatten them.
      interesting[name] = value.join("").strip
    end
  end
  interesting['content-type'] ||= ''
  interesting['content-md5']  ||= ''
  # x-amz-date supersedes the date header; an explicit expiry replaces it.
  interesting['date'] = '' if interesting.has_key? 'x-amz-date'
  interesting['date'] = expires if expires
  # Assemble: VERB, then the sorted headers (amz headers keep their names).
  canonical = "#{method}\n"
  interesting.sort { |a, b| a[0] <=> b[0] }.each do |name, value|
    canonical << (name[/^#{AMAZON_HEADER_PREFIX}/o] ? "#{name}:#{value}\n" : "#{value}\n")
  end
  # Drop the query string from the resource...
  canonical << path.gsub(/\?.*$/, '')
  # ...except for sub-resources that are part of the signature.
  canonical << '?acl'      if path[/[&?]acl($|&|=)/]
  canonical << '?policy'   if path[/[&?]policy($|&|=)/]
  canonical << '?torrent'  if path[/[&?]torrent($|&|=)/]
  canonical << '?location' if path[/[&?]location($|&|=)/]
  canonical << '?logging'  if path[/[&?]logging($|&|=)/] # beta, not supported yet
  canonical
end
118
+
119
+ # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
120
# True when +bucket_name+ is usable as a virtual-host (DNS-style) bucket:
# 3..63 characters, dot-separated components of lowercase letters, digits
# and hyphens, each starting and ending with a letter or digit.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
#
# Returns false for non-conforming names (previously nil; both are falsy,
# so truthiness-based callers are unaffected).
def is_dns_bucket?(bucket_name)
  name = bucket_name.to_s
  return false unless (3..63).include?(name.size)
  name.split('.').all? { |part| part =~ /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/ }
end
128
+
129
# Works out which host and path to hit for a request, plus the path that
# must be signed (signing always uses the path-style form).
# Returns [server, path, path_to_sign].
def fetch_request_params(headers) #:nodoc:
  server  = @params[:server]
  service = @params[:service].to_s
  # Strip a trailing '/' without mutating the string stored in @params
  # (the old `service.chop!` edited @params[:service] in place, because
  # String#to_s returns self).
  service = service.chop if service[%r{/$}]
  # Split "bucket/key?query" into its three pieces.
  headers[:url].to_s[%r{^([a-z0-9._-]*)(/[^?]*)?(\?.+)?}i]
  bucket_name, key_path, params_list = $1, $2, $3
  if is_dns_bucket?(bucket_name) and !@params[:virtual_hosting]
    # Virtual-host style: the bucket moves into the hostname.
    server = "#{bucket_name}.#{server}"
    key_path ||= '/'
    path = "#{service}#{key_path}#{params_list}"
  else
    # Path style: the bucket stays in the path.
    path = "#{service}/#{bucket_name}#{key_path}#{params_list}"
  end
  path_to_sign = "#{service}/#{bucket_name}#{key_path}#{params_list}"
  [server, path, path_to_sign]
end
149
+
150
+ # Generates request hash for REST API.
151
+ # Assumes that headers[:url] is URL encoded (use CGI::escape)
152
# Builds a signed Net::HTTP request hash for the REST API.
# Assumes headers[:url] is already URL encoded (use CGI::escape).
# Returns {:request, :server, :port, :protocol}.
def generate_rest_request(method, headers) # :nodoc:
  server, path, path_to_sign = fetch_request_params(headers)
  body = headers[:data]
  # Symbol keys are request options, not HTTP headers, and nil values
  # mean "unset" -- drop both before building the real request.
  headers.each { |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
  headers['content-type'] ||= ''
  headers['date'] = Time.now.httpdate
  # Instantiate Net::HTTP::Get / ::Put / ... from the verb name.
  http_request = Net::HTTP.const_get(method.capitalize).new(path)
  http_request.body = body if body
  headers.each { |key, value| http_request[key.to_s] = value }
  # Sign the canonical form of the request.
  auth_string = canonical_string(http_request.method, path_to_sign, http_request.to_hash)
  signature   = Utils::sign(@aws_secret_access_key, auth_string)
  http_request['Authorization'] = "AWS #{@aws_access_key_id}:#{signature}"
  {:request  => http_request,
   :server   => server,
   :port     => @params[:port],
   :protocol => @params[:protocol]}
end
177
+
178
+ # Sends request to Amazon and parses the response.
179
+ # Raises AwsError if any banana happened.
180
# Sends a prepared request hash to Amazon and parses the response.
# Raises AwsError if anything goes wrong.
def request_info(request, parser, options={}, &block) # :nodoc:
  request_info3(self, request, parser, options, &block)
end
184
+
185
+
186
+ # Returns an array of customer's buckets. Each item is a +hash+.
187
+ #
188
+ # s3.list_all_my_buckets #=>
189
+ # [{:owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
190
+ # :owner_display_name => "root",
191
+ # :name => "bucket_name",
192
+ # :creation_date => "2007-04-19T18:47:43.000Z"}, ..., {...}]
193
+ #
194
# Returns an array of the owner's buckets, each as a hash:
#   {:owner_id => "...", :owner_display_name => "root",
#    :name => "bucket_name", :creation_date => "2007-04-19T18:47:43.000Z"}
def list_all_my_buckets(headers={})
  request = generate_rest_request('GET', headers.merge(:url => ''))
  request_info(request, S3ListAllMyBucketsParser.new(:logger => @logger))
rescue
  on_exception
end
200
+
201
+ # Creates new bucket. Returns +true+ or an exception.
202
+ #
203
+ # # create a bucket at American server
204
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
205
+ # # create a bucket at European server
206
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
207
+ #
208
# Creates a new bucket. Returns true or raises.
#
#   s3.create_bucket('my-awesome-bucket-us')                    #=> true
#   s3.create_bucket('my-awesome-bucket-eu', :location => :eu)  #=> true
def create_bucket(bucket, headers={})
  data = nil
  unless Aws::Utils.blank?(headers[:location])
    location = headers[:location].to_s
    location.upcase! if location == 'eu'
    data = "<CreateBucketConfiguration><LocationConstraint>#{location}</LocationConstraint></CreateBucketConfiguration>"
  end
  req_hash = generate_rest_request('PUT', headers.merge(:url=>bucket, :data => data))
  request_info(req_hash, RightHttp2xxParser.new)
rescue => e
  # Re-creating a bucket you already own errors on the location-constraint
  # interface; treat that as success.  (Was `rescue Exception`, which also
  # swallowed SystemExit/SignalException -- narrowed to StandardError.)
  e.is_a?(Aws::AwsError) && e.message.include?('BucketAlreadyOwnedByYou') ? true : on_exception
end
222
+
223
+ # Retrieve bucket location
224
+ #
225
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
226
+ # puts s3.bucket_location('my-awesome-bucket-us') #=> '' (Amazon's default value assumed)
227
+ #
228
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
229
+ # puts s3.bucket_location('my-awesome-bucket-eu') #=> 'EU'
230
+ #
231
# Returns a bucket's location constraint ('' means Amazon's default,
# 'EU' for buckets created with :location => :eu).
def bucket_location(bucket, headers={})
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}?location"))
  request_info(request, S3BucketLocationParser.new)
rescue
  on_exception
end
237
+
238
+ # Retrieves the logging configuration for a bucket.
239
+ # Returns a hash of {:enabled, :targetbucket, :targetprefix}
240
+ #
241
+ # s3.interface.get_logging_parse(:bucket => "asset_bucket")
242
+ # => {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
243
+ #
244
+ #
245
# Retrieves a bucket's logging configuration.
# Mandatory params: :bucket.  Optional: :headers.
#
#   s3.get_logging_parse(:bucket => "asset_bucket")
#   #=> {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
def get_logging_parse(params)
  Utils.mandatory_arguments([:bucket], params)
  Utils.allow_only([:bucket, :headers], params)
  params[:headers] = {} unless params[:headers]
  request = generate_rest_request('GET', params[:headers].merge(:url => "#{params[:bucket]}?logging"))
  request_info(request, S3LoggingParser.new)
rescue
  on_exception
end
254
+
255
+ # Sets logging configuration for a bucket from the XML configuration document.
256
+ # params:
257
+ # :bucket
258
+ # :xmldoc
259
# Sets a bucket's logging configuration from an XML document.
# Mandatory params: :bucket, :xmldoc.  Optional: :headers.
def put_logging(params)
  Utils.mandatory_arguments([:bucket, :xmldoc], params)
  Utils.allow_only([:bucket, :xmldoc, :headers], params)
  params[:headers] = {} unless params[:headers]
  request = generate_rest_request('PUT', params[:headers].merge(:url => "#{params[:bucket]}?logging", :data => params[:xmldoc]))
  request_info(request, S3TrueParser.new)
rescue
  on_exception
end
268
+
269
+ # Deletes new bucket. Bucket must be empty! Returns +true+ or an exception.
270
+ #
271
+ # s3.delete_bucket('my_awesome_bucket') #=> true
272
+ #
273
+ # See also: force_delete_bucket method
274
+ #
275
# Deletes a bucket, which must be empty. Returns true or raises.
# See also: force_delete_bucket.
def delete_bucket(bucket, headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => bucket))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
281
+
282
+ # Returns an array of bucket's keys. Each array item (key data) is a +hash+.
283
+ #
284
+ # s3.list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) #=>
285
+ # [{:key => "test1",
286
+ # :last_modified => "2007-05-18T07:00:59.000Z",
287
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
288
+ # :owner_display_name => "root",
289
+ # :e_tag => "000000000059075b964b07152d234b70",
290
+ # :storage_class => "STANDARD",
291
+ # :size => 3,
292
+ # :service=> {'is_truncated' => false,
293
+ # 'prefix' => "t",
294
+ # 'marker' => "",
295
+ # 'name' => "my_awesome_bucket",
296
+ # 'max-keys' => "5"}, ..., {...}]
297
+ #
298
# Returns an array of the bucket's keys. Each entry is a hash like
#   {:key => "test1", :last_modified => "...", :owner_id => "...",
#    :owner_display_name => "root", :e_tag => "...", :storage_class => "STANDARD",
#    :size => 3, :service => {'is_truncated' => false, ...}}
# Supported options: 'prefix', 'marker', 'max-keys', 'delimiter'.
def list_bucket(bucket, options={}, headers={})
  # Build the URL in a local string; previously the query string was
  # appended to the caller's +bucket+ argument in place (String#<<),
  # corrupting it for subsequent calls.
  url = bucket.to_s.dup
  unless options.nil? || options.empty?
    url << '?'
    url << options.map { |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&')
  end
  req_hash = generate_rest_request('GET', headers.merge(:url=>url))
  request_info(req_hash, S3ListBucketParser.new(:logger => @logger))
rescue
  on_exception
end
308
+
309
+ # Incrementally list the contents of a bucket. Yields the following hash to a block:
310
+ # s3.incrementally_list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) yields
311
+ # {
312
+ # :name => 'bucketname',
313
+ # :prefix => 'subfolder/',
314
+ # :marker => 'fileN.jpg',
315
+ # :max_keys => 234,
316
+ # :delimiter => '/',
317
+ # :is_truncated => true,
318
+ # :next_marker => 'fileX.jpg',
319
+ # :contents => [
320
+ # { :key => "file1",
321
+ # :last_modified => "2007-05-18T07:00:59.000Z",
322
+ # :e_tag => "000000000059075b964b07152d234b70",
323
+ # :size => 3,
324
+ # :storage_class => "STANDARD",
325
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
326
+ # :owner_display_name => "root"
327
+ # }, { :key, ...}, ... {:key, ...}
328
+ # ]
329
+ # :common_prefixes => [
330
+ # "prefix1",
331
+ # "prefix2",
332
+ # ...,
333
+ # "prefixN"
334
+ # ]
335
+ # }
336
# Incrementally lists a bucket's contents, yielding one page of results
# per request (see the hash layout in the original API docs: :name,
# :prefix, :marker, :max_keys, :delimiter, :is_truncated, :next_marker,
# :contents, :common_prefixes) until the listing is exhausted or the
# caller's max-keys budget is spent. Returns true.
def incrementally_list_bucket(bucket, options={}, headers={}, &block)
  # Symbolize the option keys without touching the caller's hash.
  opts = options.inject({}) { |acc, (k, v)| acc[k.to_sym] = v; acc }
  loop do
    page_url = bucket.dup
    unless opts.nil? || opts.empty?
      page_url << '?'
      page_url << opts.map { |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&')
    end
    req_hash = generate_rest_request('GET', headers.merge(:url => page_url))
    response = request_info(req_hash, S3ImprovedListBucketParser.new(:logger => @logger))
    more_pages = response[:is_truncated]
    if more_pages
      # Advance the marker and charge this page against the budget.
      opts[:marker] = decide_marker(response)
      page_total = response[:contents].length + response[:common_prefixes].length
      opts[:'max-keys'] -= page_total if opts[:'max-keys']
    end
    yield response
    break unless more_pages && under_max_keys(opts)
  end
  true
rescue
  on_exception
end
358
+
359
+
360
+ private
361
# Picks the marker for the next page of an incremental bucket listing:
# the explicit :next_marker when present, otherwise whichever sorts
# later of the last key and the last common prefix (nil if neither).
def decide_marker(response)
  return response[:next_marker].dup if response[:next_marker]
  last_key    = response[:contents].last[:key]
  last_prefix = response[:common_prefixes].last
  if !last_key
    last_prefix && last_prefix.dup
  elsif !last_prefix
    last_key.dup
  else
    [last_prefix, last_key].max.dup
  end
end
374
+
375
# True while the caller-supplied max-keys budget (if any) is not yet spent;
# always true when no budget was given.
def under_max_keys(internal_options)
  budget = internal_options[:'max-keys']
  budget ? budget > 0 : true
end
378
+
379
+ public
380
+ # Saves object to Amazon. Returns +true+ or an exception.
381
+ # Any header starting with AMAZON_METADATA_PREFIX is considered
382
+ # user metadata. It will be stored with the object and returned
383
+ # when you retrieve the object. The total size of the HTTP
384
+ # request, not including the body, must be less than 4 KB.
385
+ #
386
+ # s3.put('my_awesome_bucket', 'log/current/1.log', 'Ola-la!', 'x-amz-meta-family'=>'Woho556!') #=> true
387
+ #
388
+ # This method is capable of 'streaming' uploads; that is, it can upload
389
+ # data from a file or other IO object without first reading all the data
390
+ # into memory. This is most useful for large PUTs - it is difficult to read
391
+ # a 2 GB file entirely into memory before sending it to S3.
392
+ # To stream an upload, pass an object that responds to 'read' (like the read
393
+ # method of IO) and to either 'lstat' or 'size'. For files, this means
394
+ # streaming is enabled by simply making the call:
395
+ #
396
+ # s3.put(bucket_name, 'S3keyname.forthisfile', File.open('localfilename.dat'))
397
+ #
398
+ # If the IO object you wish to stream from responds to the read method but
399
+ # doesn't implement lstat or size, you can extend the object dynamically
400
+ # to implement these methods, or define your own class which defines these
401
+ # methods. Be sure that your class returns 'nil' from read() after having
402
+ # read 'size' bytes. Otherwise S3 will drop the socket after
403
+ # 'Content-Length' bytes have been uploaded, and HttpConnection will
404
+ # interpret this as an error.
405
+ #
406
+ # This method now supports very large PUTs, where very large
407
+ # is > 2 GB.
408
+ #
409
+ # For Win32 users: Files and IO objects should be opened in binary mode. If
410
+ # a text mode IO object is passed to PUT, it will be converted to binary
411
+ # mode.
412
+ #
413
+
414
# Stores an object. Returns true or raises. Headers starting with
# AMAZON_METADATA_PREFIX are stored as user metadata. +data+ may be a
# String or any IO-like object responding to #read plus #lstat or #size,
# in which case the upload is streamed instead of buffered in memory.
def put(bucket, key, data=nil, headers={})
  # On Windows a text-mode IO must be switched to binary for streaming.
  data.binmode if data.respond_to?(:binmode)
  # Wrap plain strings so the upload path only has to deal with IO.
  data = StringIO.new(data) if data.is_a?(String)

  # Prefer lstat for file-backed IO, fall back to #size, else 0.
  data_size = if data.respond_to?(:lstat)
                data.lstat.size
              elsif data.respond_to?(:size)
                data.size
              else
                0
              end
  # Large bodies use the 100-continue handshake so S3 can reject the
  # request before the payload is streamed.
  headers['expect'] = '100-continue' if data_size >= USE_100_CONTINUE_PUT_SIZE
  req_hash = generate_rest_request('PUT', headers.merge(:url => "#{bucket}/#{CGI::escape key}",
                                                        :data => data,
                                                        'Content-Length' => data_size.to_s))
  request_info(req_hash, RightHttp2xxParser.new)
rescue
  on_exception
end
441
+
442
+
443
+ # New experimental API for uploading objects, introduced in Aws 1.8.1.
444
+ # store_object is similar in function to the older function put, but returns the full response metadata. It also allows for optional verification
445
+ # of object md5 checksums on upload. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
446
+ # The hash of the response headers contains useful information like the Amazon request ID and the object ETag (MD5 checksum).
447
+ #
448
+ # If the optional :md5 argument is provided, store_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
449
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
450
+ #
451
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
452
+ #
453
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
454
+ # => {"x-amz-id-2"=>"SVsnS2nfDaR+ixyJUlRKM8GndRyEMS16+oZRieamuL61pPxPaTuWrWtlYaEhYrI/",
455
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
456
+ # "date"=>"Mon, 29 Sep 2008 18:57:46 GMT",
457
+ # :verified_md5=>true,
458
+ # "x-amz-request-id"=>"63916465939995BA",
459
+ # "server"=>"AmazonS3",
460
+ # "content-length"=>"0"}
461
+ #
462
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :data => "polemonium" )
463
+ # => {"x-amz-id-2"=>"MAt9PLjgLX9UYJ5tV2fI/5dBZdpFjlzRVpWgBDpvZpl+V+gJFcBMW2L+LBstYpbR",
464
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
465
+ # "date"=>"Mon, 29 Sep 2008 18:58:56 GMT",
466
+ # :verified_md5=>false,
467
+ # "x-amz-request-id"=>"3B25A996BC2CDD3B",
468
+ # "server"=>"AmazonS3",
469
+ # "content-length"=>"0"}
470
+
471
# Like #put, but returns the full response-header hash (including the
# Amazon request id and the object ETag). If :md5 is supplied, the
# returned :verified_md5 reports whether S3's ETag matched it; without
# :md5 it is always false.
# Mandatory params: :bucket, :key, :data.  Optional: :headers, :md5.
def store_object(params)
  Utils.allow_only([:bucket, :key, :data, :headers, :md5], params)
  Utils.mandatory_arguments([:bucket, :key, :data], params)
  params[:headers] = {} unless params[:headers]

  body = params[:data]
  # On Windows a text-mode IO must be reset to binary for streaming.
  body.binmode if body.respond_to?(:binmode)
  # Large bodies use the 100-continue handshake.
  large_body = (body.respond_to?(:lstat) && body.lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
               (body.respond_to?(:size) && body.size >= USE_100_CONTINUE_PUT_SIZE)
  params[:headers]['expect'] = '100-continue' if large_body

  req_hash = generate_rest_request('PUT', params[:headers].merge(:url => "#{params[:bucket]}/#{CGI::escape params[:key]}", :data => body))
  resp = request_info(req_hash, S3HttpResponseHeadParser.new)
  resp[:verified_md5] = if params[:md5]
                          resp['etag'].gsub(/\"/, '') == params[:md5]
                        else
                          false
                        end
  resp
rescue
  on_exception
end
493
+
494
+ # Identical in function to store_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
495
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
496
+ # This call is implemented as a wrapper around store_object and the user may gain different semantics by creating a custom wrapper.
497
+ #
498
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
499
+ # => {"x-amz-id-2"=>"IZN3XsH4FlBU0+XYkFTfHwaiF1tNzrm6dIW2EM/cthKvl71nldfVC0oVQyydzWpb",
500
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
501
+ # "date"=>"Mon, 29 Sep 2008 18:38:32 GMT",
502
+ # :verified_md5=>true,
503
+ # "x-amz-request-id"=>"E8D7EA4FE00F5DF7",
504
+ # "server"=>"AmazonS3",
505
+ # "content-length"=>"0"}
506
+ #
507
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2953", :data => "polemonium" )
508
+ # Aws::AwsError: Uploaded object failed MD5 checksum verification: {"x-amz-id-2"=>"HTxVtd2bf7UHHDn+WzEH43MkEjFZ26xuYvUzbstkV6nrWvECRWQWFSx91z/bl03n",
509
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
510
+ # "date"=>"Mon, 29 Sep 2008 18:38:41 GMT",
511
+ # :verified_md5=>false,
512
+ # "x-amz-request-id"=>"0D7ADE09F42606F2",
513
+ # "server"=>"AmazonS3",
514
+ # "content-length"=>"0"}
515
# Same as #store_object, but the :md5 argument is mandatory and an
# AwsError is raised unless the returned ETag matches it. On success
# returns the response metadata with :verified_md5 => true.
def store_object_and_verify(params)
  Utils.mandatory_arguments([:md5], params)
  resp = store_object(params)
  unless resp[:verified_md5]
    raise AwsError.new("Uploaded object failed MD5 checksum verification: #{resp.inspect}")
  end
  resp
end
520
+
521
+ # Retrieves object data from Amazon. Returns a +hash+ or an exception.
522
+ #
523
+ # s3.get('my_awesome_bucket', 'log/curent/1.log') #=>
524
+ #
525
+ # {:object => "Ola-la!",
526
+ # :headers => {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
527
+ # "content-type" => "",
528
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
529
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
530
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
531
+ # "x-amz-meta-family" => "Woho556!",
532
+ # "x-amz-request-id" => "0000000C246D770C",
533
+ # "server" => "AmazonS3",
534
+ # "content-length" => "7"}}
535
+ #
536
+ # If a block is provided, yields incrementally to the block as
537
+ # the response is read. For large responses, this function is ideal as
538
+ # the response can be 'streamed'. The hash containing header fields is
539
+ # still returned.
540
+ # Example:
541
+ # foo = File.new('./chunder.txt', File::CREAT|File::RDWR)
542
+ # rhdr = s3.get('aws-test', 'Cent5V1_7_1.img.part.00') do |chunk|
543
+ # foo.write(chunk)
544
+ # end
545
+ # foo.close
546
+ #
547
+
548
# Retrieves an object as {:object => ..., :headers => {...}}.
# With a block, yields response chunks incrementally (streaming large
# downloads) and still returns the headers hash.
def get(bucket, key, headers={}, &block)
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, S3HttpResponseBodyParser.new, &block)
rescue
  on_exception
end
554
+
555
+ # New experimental API for retrieving objects, introduced in Aws 1.8.1.
556
+ # retrieve_object is similar in function to the older function get. It allows for optional verification
557
+ # of object md5 checksums on retrieval. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
558
+ #
559
+ # If the optional :md5 argument is provided, retrieve_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
560
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
561
+ #
562
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
563
+ # Mandatory arguments:
564
+ # :bucket - the bucket in which the object is stored
565
+ # :key - the object address (or path) within the bucket
566
+ # Optional arguments:
567
+ # :headers - hash of additional HTTP headers to include with the request
568
+ # :md5 - MD5 checksum against which to verify the retrieved object
569
+ #
570
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo")
571
+ # => {:verified_md5=>false,
572
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
573
+ # "x-amz-id-2"=>"2Aj3TDz6HP5109qly//18uHZ2a1TNHGLns9hyAtq2ved7wmzEXDOPGRHOYEa3Qnp",
574
+ # "content-type"=>"",
575
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
576
+ # "date"=>"Tue, 30 Sep 2008 00:52:44 GMT",
577
+ # "x-amz-request-id"=>"EE4855DE27A2688C",
578
+ # "server"=>"AmazonS3",
579
+ # "content-length"=>"10"},
580
+ # :object=>"polemonium"}
581
+ #
582
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo", :md5=>'a507841b1bc8115094b00bbe8c1b2954')
583
+ # => {:verified_md5=>true,
584
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
585
+ # "x-amz-id-2"=>"mLWQcI+VuKVIdpTaPXEo84g0cz+vzmRLbj79TS8eFPfw19cGFOPxuLy4uGYVCvdH",
586
+ # "content-type"=>"", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
587
+ # "date"=>"Tue, 30 Sep 2008 00:53:08 GMT",
588
+ # "x-amz-request-id"=>"6E7F317356580599",
589
+ # "server"=>"AmazonS3",
590
+ # "content-length"=>"10"},
591
+ # :object=>"polemonium"}
592
+ # If a block is provided, yields incrementally to the block as
593
+ # the response is read. For large responses, this function is ideal as
594
+ # the response can be 'streamed'. The hash containing header fields is
595
+ # still returned.
596
# Like #get, with argument checking and optional MD5 verification.
# Mandatory params: :bucket, :key.  Optional: :headers, :md5.
# The returned hash's :verified_md5 is true only when :md5 was given and
# matched the response ETag. With a block, yields chunks incrementally.
def retrieve_object(params, &block)
  Utils.mandatory_arguments([:bucket, :key], params)
  Utils.allow_only([:bucket, :key, :headers, :md5], params)
  params[:headers] = {} unless params[:headers]
  request = generate_rest_request('GET', params[:headers].merge(:url => "#{params[:bucket]}/#{CGI::escape params[:key]}"))
  resp = request_info(request, S3HttpResponseBodyParser.new, &block)
  resp[:verified_md5] = !!(params[:md5] && resp[:headers]['etag'].gsub(/\"/, '') == params[:md5])
  resp
rescue
  on_exception
end
610
+
611
+ # Identical in function to retrieve_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
612
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
613
+ # This call is implemented as a wrapper around retrieve_object and the user may gain different semantics by creating a custom wrapper.
614
# Same as #retrieve_object, but the :md5 argument is mandatory and an
# AwsError is raised unless the response ETag matches it.
def retrieve_object_and_verify(params, &block)
  Utils.mandatory_arguments([:md5], params)
  resp = retrieve_object(params, &block)
  unless resp[:verified_md5]
    raise AwsError.new("Retrieved object failed MD5 checksum verification: #{resp.inspect}")
  end
  resp
end
620
+
621
+ # Retrieves object metadata. Returns a +hash+ of http_response_headers.
622
+ #
623
+ # s3.head('my_awesome_bucket', 'log/curent/1.log') #=>
624
+ # {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
625
+ # "content-type" => "",
626
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
627
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
628
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
629
+ # "x-amz-meta-family" => "Woho556!",
630
+ # "x-amz-request-id" => "0000000C246D770C",
631
+ # "server" => "AmazonS3",
632
+ # "content-length" => "7"}
633
+ #
634
# Retrieves an object's metadata (the raw HTTP response headers, e.g.
# "last-modified", "etag", "content-length", x-amz-meta-*) without
# fetching the body.
def head(bucket, key, headers={})
  request = generate_rest_request('HEAD', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, S3HttpResponseHeadParser.new)
rescue
  on_exception
end
640
+
641
+ # Deletes key. Returns +true+ or an exception.
642
+ #
643
+ # s3.delete('my_awesome_bucket', 'log/curent/1.log') #=> true
644
+ #
645
# Deletes a key from a bucket. Returns true or raises.
def delete(bucket, key='', headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
651
+
652
+ # Copy an object.
653
+ # directive: :copy - copy meta-headers from source (default value)
654
+ # :replace - replace meta-headers by passed ones
655
+ #
656
+ # # copy a key with meta-headers
657
+ # s3.copy('b1', 'key1', 'b1', 'key1_copy') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:25:22.000Z"}
658
+ #
659
+ # # copy a key, overwrite meta-headers
660
+ # s3.copy('b1', 'key2', 'b1', 'key2_copy', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:26:22.000Z"}
661
+ #
662
+ # see: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingCopyingObjects.html
663
+ # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
664
+ #
665
# Copies an object. +directive+ :copy keeps the source's meta-headers
# (default); :replace substitutes the meta-headers passed here.
# Returns {:e_tag => "...", :last_modified => "..."}.
# See http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
def copy(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  dest_key ||= src_key
  # Merge the copy headers instead of writing them into the caller's
  # +headers+ hash in place (the old code mutated the argument).
  request_headers = headers.merge(
    'x-amz-metadata-directive' => directive.to_s.upcase,
    'x-amz-copy-source'        => "#{src_bucket}/#{CGI::escape src_key}",
    :url                       => "#{dest_bucket}/#{CGI::escape dest_key}")
  req_hash = generate_rest_request('PUT', request_headers)
  request_info(req_hash, S3CopyParser.new)
rescue
  on_exception
end
674
+
675
+ # Move an object.
676
+ # directive: :copy - copy meta-headers from source (default value)
677
+ # :replace - replace meta-headers by passed ones
678
+ #
679
+ # # move bucket1/key1 to bucket1/key2
680
+ # s3.move('bucket1', 'key1', 'bucket1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:27:22.000Z"}
681
+ #
682
+ # # move bucket1/key1 to bucket2/key2 with new meta-headers assignment
683
+ # s3.copy('bucket1', 'key1', 'bucket2', 'key2', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:28:22.000Z"}
684
+ #
685
# Moves an object: copies it, then deletes the source unless source and
# destination are the same bucket/key pair. Returns the copy result hash.
# NOTE(review): when +dest_key+ is nil and the buckets match, the
# comparison below uses the nil argument (not the defaulted key), so the
# freshly-copied object gets deleted -- preserved from the original;
# confirm before relying on nil dest_key with the same bucket.
def move(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  result = copy(src_bucket, src_key, dest_bucket, dest_key, directive, headers)
  same_object = src_bucket == dest_bucket && src_key == dest_key
  delete(src_bucket, src_key) unless same_object
  result
end
691
+
692
+ # Rename an object.
693
+ #
694
+ # # rename bucket1/key1 to bucket1/key2
695
+ # s3.rename('bucket1', 'key1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:29:22.000Z"}
696
+ #
697
# Renames an object within its bucket (move with meta-headers copied).
def rename(src_bucket, src_key, dest_key, headers={})
  move(src_bucket, src_key, src_bucket, dest_key, :copy, headers)
end
700
+
701
+ # Retrieves the ACL (access control policy) for a bucket or object. Returns a hash of headers and xml doc with ACL data. See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html.
702
+ #
703
+ # s3.get_acl('my_awesome_bucket', 'log/curent/1.log') #=>
704
+ # {:headers => {"x-amz-id-2"=>"B3BdDMDUz+phFF2mGBH04E46ZD4Qb9HF5PoPHqDRWBv+NVGeA3TOQ3BkVvPBjgxX",
705
+ # "content-type"=>"application/xml;charset=ISO-8859-1",
706
+ # "date"=>"Wed, 23 May 2007 09:40:16 GMT",
707
+ # "x-amz-request-id"=>"B183FA7AB5FBB4DD",
708
+ # "server"=>"AmazonS3",
709
+ # "transfer-encoding"=>"chunked"},
710
+ # :object => "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Owner>
711
+ # <ID>16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Owner>
712
+ # <AccessControlList><Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>
713
+ # 16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Grantee>
714
+ # <Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>" }
715
+ #
716
# Retrieves the ACL for a bucket (key == '') or a single object.
# Returns {:headers => ..., :object => <raw ACL XML string>}.
def get_acl(bucket, key='', headers={})
  resource = Aws::Utils.blank?(key) ? '' : "/#{CGI::escape key}"
  request  = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{resource}?acl"))
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
723
+
724
+ # Retrieves the ACL (access control policy) for a bucket or object.
725
+ # Returns a hash of {:owner, :grantees}
726
+ #
727
+ # s3.get_acl_parse('my_awesome_bucket', 'log/curent/1.log') #=>
728
+ #
729
+ # { :grantees=>
730
+ # { "16...2a"=>
731
+ # { :display_name=>"root",
732
+ # :permissions=>["FULL_CONTROL"],
733
+ # :attributes=>
734
+ # { "xsi:type"=>"CanonicalUser",
735
+ # "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}},
736
+ # "http://acs.amazonaws.com/groups/global/AllUsers"=>
737
+ # { :display_name=>"AllUsers",
738
+ # :permissions=>["READ"],
739
+ # :attributes=>
740
+ # { "xsi:type"=>"Group",
741
+ # "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}}},
742
+ # :owner=>
743
+ # { :id=>"16..2a",
744
+ # :display_name=>"root"}}
745
+ #
746
# Retrieves and parses the ACL for a bucket (key == '') or object.
# Returns {:owner => {:id, :display_name},
#          :grantees => {grantee_id_or_uri => {:display_name, :permissions, :attributes}}}.
# Multiple grants for the same grantee are folded into one entry whose
# :permissions array accumulates each permission.
def get_acl_parse(bucket, key='', headers={})
  resource = Aws::Utils.blank?(key) ? '' : "/#{CGI::escape key}"
  req_hash = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{resource}?acl"))
  acl      = request_info(req_hash, S3AclParser.new(:logger => @logger))
  grantees = {}
  acl[:grantees].each do |grantee|
    id = grantee[:id] || grantee[:uri]
    if grantees.key?(id)
      grantees[id][:permissions] << grantee[:permissions]
    else
      grantees[id] = {
        # For group grantees (URI only) the display name is the URI's last path segment.
        :display_name => grantee[:display_name] || grantee[:uri].to_s[/[^\/]*$/],
        :permissions  => grantee[:permissions].lines.to_a,
        :attributes   => grantee[:attributes]
      }
    end
  end
  { :owner => acl[:owner], :grantees => grantees }
rescue
  on_exception
end
768
+
769
+ # Sets the ACL on a bucket or object.
770
# Sets the ACL on a bucket (key == '') or object from an ACL XML document.
def put_acl(bucket, key, acl_xml_doc, headers={})
  resource = Aws::Utils.blank?(key) ? '' : "/#{CGI::escape key}"
  request  = generate_rest_request('PUT', headers.merge(:url => "#{bucket}#{resource}?acl", :data => acl_xml_doc))
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
777
+
778
+ # Retrieves the ACL (access control policy) for a bucket. Returns a hash of headers and xml doc with ACL data.
779
# Retrieves the ACL for a bucket. Thin wrapper over get_acl with an empty key.
def get_bucket_acl(bucket, headers={})
  get_acl(bucket, '', headers)
rescue
  on_exception
end
784
+
785
+ # Sets the ACL on a bucket only.
786
# Sets the ACL on a bucket only. Thin wrapper over put_acl with an empty key.
def put_bucket_acl(bucket, acl_xml_doc, headers={})
  put_acl(bucket, '', acl_xml_doc, headers)
rescue
  on_exception
end
791
+
792
# Fetches the bucket policy document.
# Returns {:headers => ..., :object => <policy JSON string>}.
def get_bucket_policy(bucket)
  request = generate_rest_request('GET', :url => "#{bucket}?policy")
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
798
+
799
# Attaches a policy document to a bucket.
# (A stray `key = Aws::Utils.blank?(key) ...` line was removed: `key` is not
# a parameter of this method, so it only created an unused nil-seeded local.)
def put_bucket_policy(bucket, policy)
  req_hash = generate_rest_request('PUT', :url => "#{bucket}?policy", :data => policy)
  request_info(req_hash, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
806
+
807
+ # Removes all keys from bucket. Returns +true+ or an exception.
808
+ #
809
+ # s3.clear_bucket('my_awesome_bucket') #=> true
810
+ #
811
# Removes all keys from a bucket. Returns +true+ or raises.
# (Removed leftover `p results` / `p key` debug prints that dumped every
# listing page and key to stdout on each call.)
#
#  s3.clear_bucket('my_awesome_bucket') #=> true
#
def clear_bucket(bucket)
  incrementally_list_bucket(bucket) do |results|
    results[:contents].each { |key| delete(bucket, key[:key]) }
  end
  true
rescue
  on_exception
end
820
+
821
+ # Deletes all keys in bucket then deletes bucket. Returns +true+ or an exception.
822
+ #
823
+ # s3.force_delete_bucket('my_awesome_bucket')
824
+ #
825
# Empties a bucket, then deletes it. Returns +true+ or raises.
def force_delete_bucket(bucket)
  clear_bucket(bucket)
  delete_bucket(bucket)
rescue
  on_exception
end
831
+
832
+ # Deletes all keys where the 'folder_key' may be assumed as 'folder' name. Returns an array of string keys that have been deleted.
833
+ #
834
+ # s3.list_bucket('my_awesome_bucket').map{|key_data| key_data[:key]} #=> ['test','test/2/34','test/3','test1','test1/logs']
835
+ # s3.delete_folder('my_awesome_bucket','test') #=> ['test','test/2/34','test/3']
836
+ #
837
# Deletes all keys under the pseudo-folder 'folder_key'.
# Returns the flat array of deleted key strings (previously returned a
# nested array-of-arrays, contradicting the documented contract).
# Fixes: no longer mutates the caller's folder_key string (chomp, not chomp!),
# and escapes folder_key/separator before interpolating into the regex so
# keys containing regex metacharacters match literally.
#
#  s3.delete_folder('my_awesome_bucket','test') #=> ['test','test/2/34','test/3']
#
def delete_folder(bucket, folder_key, separator='/')
  folder    = folder_key.chomp(separator)
  prefix_re = /\A#{Regexp.escape(folder)}($|#{Regexp.escape(separator)})/
  deleted   = []
  incrementally_list_bucket(bucket, 'prefix' => folder) do |results|
    keys = results[:contents].map { |s3_key| s3_key[:key] }.select { |k| k =~ prefix_re }
    keys.each { |key| delete(bucket, key) }
    deleted.concat(keys)
  end
  deleted
rescue
  on_exception
end
849
+
850
+ # Retrieves object data only (headers are omitted). Returns +string+ or an exception.
851
+ #
852
+ # s3.get('my_awesome_bucket', 'log/curent/1.log') #=> 'Ola-la!'
853
+ #
854
# Retrieves the object body only (headers discarded). Returns a String.
#
#  s3.get_object('my_awesome_bucket', 'log/current/1.log') #=> 'Ola-la!'
#
def get_object(bucket, key, headers={})
  get(bucket, key, headers)[:object]
rescue
  on_exception
end
859
+
860
+ #-----------------------------------------------------------------
861
+ # Query API: Links
862
+ #-----------------------------------------------------------------
863
+
864
+ # Generates link for QUERY API
865
# Builds a pre-signed (query-string authenticated) URL for the Query API.
# expires: either an absolute Time / epoch seconds, or a relative number of
# seconds (anything below ONE_YEAR_IN_SECONDS is treated as relative).
# Modernized: Integer replaces Fixnum (removed in Ruby 3.2; Fixnum was a
# subclass of Integer, so this is backward compatible), and
# OpenSSL::Digest.new replaces the deprecated OpenSSL::Digest::Digest
# (removed in openssl 3.x).
def generate_link(method, headers={}, expires=nil) #:nodoc:
  # calculate request data
  server, path, path_to_sign = fetch_request_params(headers)
  # expiration time: small integers are "seconds from now"
  expires ||= DEFAULT_EXPIRES_AFTER
  expires = Time.now.utc + expires if expires.is_a?(Integer) && (expires < ONE_YEAR_IN_SECONDS)
  expires = expires.to_i
  # remove unset (== optional) and symbolic keys before signing
  headers.each { |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
  # generate auth strings
  auth_string = canonical_string(method, path_to_sign, headers, expires)
  digest      = OpenSSL::Digest.new('sha1')
  signature   = CGI::escape(Base64.encode64(OpenSSL::HMAC.digest(digest, @aws_secret_access_key, auth_string)).strip)
  # path building: append to an existing query string if present
  addon = "Signature=#{signature}&Expires=#{expires}&AWSAccessKeyId=#{@aws_access_key_id}"
  path += path[/\?/] ? "&#{addon}" : "?#{addon}"
  "#{@params[:protocol]}://#{server}:#{@params[:port]}#{path}"
rescue
  on_exception
end
884
+
885
+ # Generates link for 'ListAllMyBuckets'.
886
+ #
887
+ # s3.list_all_my_buckets_link #=> url string
888
+ #
889
# Signed link for 'ListAllMyBuckets'.
#
#  s3.list_all_my_buckets_link #=> url string
#
def list_all_my_buckets_link(expires=nil, headers={})
  generate_link('GET', headers.merge(:url => ''), expires)
rescue
  on_exception
end
894
+
895
+ # Generates link for 'CreateBucket'.
896
+ #
897
+ # s3.create_bucket_link('my_awesome_bucket') #=> url string
898
+ #
899
# Signed link for 'CreateBucket'.
#
#  s3.create_bucket_link('my_awesome_bucket') #=> url string
#
def create_bucket_link(bucket, expires=nil, headers={})
  generate_link('PUT', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
904
+
905
+ # Generates link for 'DeleteBucket'.
906
+ #
907
+ # s3.delete_bucket_link('my_awesome_bucket') #=> url string
908
+ #
909
# Signed link for 'DeleteBucket'.
#
#  s3.delete_bucket_link('my_awesome_bucket') #=> url string
#
def delete_bucket_link(bucket, expires=nil, headers={})
  generate_link('DELETE', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
914
+
915
+ # Generates link for 'ListBucket'.
916
+ #
917
+ # s3.list_bucket_link('my_awesome_bucket') #=> url string
918
+ #
919
# Signed link for 'ListBucket'. options (prefix, marker, max-keys, delimiter)
# are appended as a query string.
# Fix: build the URL in a local copy instead of appending to the caller's
# `bucket` string with `<<`, which permanently mutated the argument.
#
#  s3.list_bucket_link('my_awesome_bucket') #=> url string
#
def list_bucket_link(bucket, options=nil, expires=nil, headers={})
  resource = bucket.dup
  unless options.nil? || options.empty?
    resource << '?'
    resource << options.map { |k, v| "#{k}=#{CGI::escape v.to_s}" }.join('&')
  end
  generate_link('GET', headers.merge(:url => resource), expires)
rescue
  on_exception
end
928
+
929
+ # Generates link for 'PutObject'.
930
+ #
931
+ # s3.put_link('my_awesome_bucket',key, object) #=> url string
932
+ #
933
# Signed link for 'PutObject'.
#
#  s3.put_link('my_awesome_bucket', key, object) #=> url string
#
def put_link(bucket, key, data=nil, expires=nil, headers={})
  generate_link('PUT', headers.merge(:url => "#{bucket}/#{Utils::URLencode key}", :data => data), expires)
rescue
  on_exception
end
938
+
939
+ # Generates link for 'GetObject'.
940
+ #
941
+ # if a bucket comply with virtual hosting naming then retuns a link with the
942
+ # bucket as a part of host name:
943
+ #
944
+ # s3.get_link('my-awesome-bucket',key) #=> https://my-awesome-bucket.s3.amazonaws.com:443/asia%2Fcustomers?Signature=nh7...
945
+ #
946
+ # otherwise returns an old style link (the bucket is a part of path):
947
+ #
948
+ # s3.get_link('my_awesome_bucket',key) #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?Signature=QAO...
949
+ #
950
+ # see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html
951
# Signed link for 'GetObject'. If the bucket name is compatible with
# virtual hosting the bucket becomes part of the host name, otherwise it
# stays in the path (see fetch_request_params/generate_link).
#
#  s3.get_link('my-awesome-bucket', key) #=> url string
#
def get_link(bucket, key, expires=nil, headers={})
  generate_link('GET', headers.merge(:url => "#{bucket}/#{Utils::URLencode key.to_s}"), expires)
rescue
  on_exception
end
956
+
957
+ # Generates link for 'HeadObject'.
958
+ #
959
+ # s3.head_link('my_awesome_bucket',key) #=> url string
960
+ #
961
# Signed link for 'HeadObject'.
#
#  s3.head_link('my_awesome_bucket', key) #=> url string
#
def head_link(bucket, key, expires=nil, headers={})
  generate_link('HEAD', headers.merge(:url => "#{bucket}/#{Utils::URLencode key}"), expires)
rescue
  on_exception
end
966
+
967
+ # Generates link for 'DeleteObject'.
968
+ #
969
+ # s3.delete_link('my_awesome_bucket',key) #=> url string
970
+ #
971
# Signed link for 'DeleteObject'.
#
#  s3.delete_link('my_awesome_bucket', key) #=> url string
#
def delete_link(bucket, key, expires=nil, headers={})
  generate_link('DELETE', headers.merge(:url => "#{bucket}/#{Utils::URLencode key}"), expires)
rescue
  on_exception
end
976
+
977
+
978
+ # Generates link for 'GetACL'.
979
+ #
980
+ # s3.get_acl_link('my_awesome_bucket',key) #=> url string
981
+ #
982
# Signed link for 'GetACL' on a bucket (key == '') or object.
#
#  s3.get_acl_link('my_awesome_bucket', key) #=> url string
#
def get_acl_link(bucket, key='', headers={})
  generate_link('GET', headers.merge(:url => "#{bucket}/#{Utils::URLencode key}?acl"))
rescue
  on_exception
end
987
+
988
+ # Generates link for 'PutACL'.
989
+ #
990
+ # s3.put_acl_link('my_awesome_bucket',key) #=> url string
991
+ #
992
# Signed link for 'PutACL' on a bucket (key == '') or object.
#
#  s3.put_acl_link('my_awesome_bucket', key) #=> url string
#
def put_acl_link(bucket, key='', headers={})
  generate_link('PUT', headers.merge(:url => "#{bucket}/#{Utils::URLencode key}?acl"))
rescue
  on_exception
end
997
+
998
+ # Generates link for 'GetBucketACL'.
999
+ #
1000
+ # s3.get_bucket_acl_link('my_awesome_bucket') #=> url string
1001
+ #
1002
# Signed link for 'GetBucketACL' (get_acl_link with an empty key).
#
#  s3.get_bucket_acl_link('my_awesome_bucket') #=> url string
#
def get_bucket_acl_link(bucket, headers={})
  get_acl_link(bucket, '', headers)
rescue
  on_exception
end
1007
+
1008
+ # Generates link for 'PutBucketACL'.
1009
+ #
1010
+ # s3.put_bucket_acl_link('my_awesome_bucket', acl_xml_doc) #=> url string
1011
+ #
1012
# Signed link for 'PutBucketACL'.
# Fix: the previous body called put_acl_link(bucket, '', acl_xml_doc, headers),
# but put_acl_link only accepts (bucket, key, headers) — every call raised
# ArgumentError. The acl_xml_doc parameter is kept (now defaulted) for
# interface compatibility; a pre-signed PUT link does not embed a body anyway.
#
#  s3.put_bucket_acl_link('my_awesome_bucket', acl_xml_doc) #=> url string
#
def put_bucket_acl_link(bucket, acl_xml_doc=nil, headers={})
  put_acl_link(bucket, '', headers)
rescue
  on_exception
end
1017
+
1018
+ #-----------------------------------------------------------------
1019
+ # PARSERS:
1020
+ #-----------------------------------------------------------------
1021
+
1022
# Parses the ListAllMyBuckets XML into an array of hashes, one per bucket,
# each merged with the owner's id and display name.
class S3ListAllMyBucketsParser < AwsParser # :nodoc:
  def reset
    @result = []
    @owner  = {}
  end

  def tagstart(name, attributes)
    @current_bucket = {} if name == 'Bucket'
  end

  def tagend(name)
    case name
    when 'ID'           then @owner[:owner_id]            = @text
    when 'DisplayName'  then @owner[:owner_display_name]  = @text
    when 'Name'         then @current_bucket[:name]          = @text
    when 'CreationDate' then @current_bucket[:creation_date] = @text
    when 'Bucket'       then @result << @current_bucket.merge(@owner)
    end
  end
end
1047
+
1048
# Parses a ListBucket result into a flat array of key hashes; the bucket-level
# fields (name, prefix, marker, ...) are collected into a string-keyed
# @service hash that is attached to every key under :service.
class S3ListBucketParser < AwsParser # :nodoc:
  def reset
    @result      = []
    @service     = {}
    @current_key = {}
  end

  def tagstart(name, attributes)
    @current_key = {} if name == 'Contents'
  end

  def tagend(name)
    case name
    # bucket/service level fields
    when 'Name'        then @service['name']         = @text
    when 'Prefix'      then @service['prefix']       = @text
    when 'Marker'      then @service['marker']       = @text
    when 'MaxKeys'     then @service['max-keys']     = @text
    when 'Delimiter'   then @service['delimiter']    = @text
    when 'IsTruncated' then @service['is_truncated'] = (@text =~ /false/ ? false : true)
    # per-key fields
    when 'Key'          then @current_key[:key]                = @text
    when 'LastModified' then @current_key[:last_modified]      = @text
    when 'ETag'         then @current_key[:e_tag]              = @text
    when 'Size'         then @current_key[:size]               = @text.to_i
    when 'StorageClass' then @current_key[:storage_class]      = @text
    when 'ID'           then @current_key[:owner_id]           = @text
    when 'DisplayName'  then @current_key[:owner_display_name] = @text
    when 'Contents'
      @current_key[:service] = @service
      @result << @current_key
    end
  end
end
1094
+
1095
# Parses a ListBucket result into one hash with :contents (array of key
# hashes), :common_prefixes, and the bucket-level listing metadata.
class S3ImprovedListBucketParser < AwsParser # :nodoc:
  def reset
    @result                     = {}
    @result[:contents]          = []
    @result[:common_prefixes]   = []
    @contents                   = []
    @current_key                = {}
    @common_prefixes            = []
    @in_common_prefixes         = false
  end

  def tagstart(name, attributes)
    @current_key        = {}   if name == 'Contents'
    @in_common_prefixes = true if name == 'CommonPrefixes'
  end

  def tagend(name)
    case name
    when 'Name' then @result[:name] = @text
    when 'Prefix'
      # Amazon uses 'Prefix' both for the search prefix and for each entry
      # inside CommonPrefixes; the flag tells us which one we are in.
      if @in_common_prefixes
        @common_prefixes << @text
      else
        @result[:prefix] = @text
      end
    when 'Marker'      then @result[:marker]       = @text
    when 'MaxKeys'     then @result[:max_keys]     = @text
    when 'Delimiter'   then @result[:delimiter]    = @text
    when 'IsTruncated' then @result[:is_truncated] = (@text =~ /false/ ? false : true)
    when 'NextMarker'  then @result[:next_marker]  = @text
    # per-key fields
    when 'Key'          then @current_key[:key]                = @text
    when 'LastModified' then @current_key[:last_modified]      = @text
    when 'ETag'         then @current_key[:e_tag]              = @text
    when 'Size'         then @current_key[:size]               = @text.to_i
    when 'StorageClass' then @current_key[:storage_class]      = @text
    when 'ID'           then @current_key[:owner_id]           = @text
    when 'DisplayName'  then @current_key[:owner_display_name] = @text
    when 'Contents'     then @result[:contents] << @current_key
    when 'CommonPrefixes'
      @result[:common_prefixes] = @common_prefixes
      @in_common_prefixes       = false
    end
  end
end
1154
+
1155
# Extracts the bucket's LocationConstraint as a plain string
# (stays '' when the document carries no constraint text).
class S3BucketLocationParser < AwsParser # :nodoc:
  def reset
    @result = ''
  end

  def tagend(name)
    @result = @text if name == 'LocationConstraint'
  end
end
1164
+
1165
# Parses an AccessControlPolicy document into
# {:owner => {:id, :display_name}, :grantees => [ {...}, ... ]}.
# ID/DisplayName appear both under Owner and under each Grantee; @xmlpath
# disambiguates which one is being closed.
class S3AclParser < AwsParser # :nodoc:
  def reset
    @result          = { :grantees => [], :owner => {} }
    @current_grantee = {}
  end

  def tagstart(name, attributes)
    @current_grantee = { :attributes => attributes } if name == 'Grantee'
  end

  def tagend(name)
    owner_scope = (@xmlpath == 'AccessControlPolicy/Owner')
    case name
    when 'ID'
      (owner_scope ? @result[:owner] : @current_grantee)[:id] = @text
    when 'DisplayName'
      (owner_scope ? @result[:owner] : @current_grantee)[:display_name] = @text
    when 'URI'        then @current_grantee[:uri]         = @text
    when 'Permission' then @current_grantee[:permissions] = @text
    when 'Grant'      then @result[:grantees] << @current_grantee
    end
  end
end
1199
+
1200
# Parses a BucketLoggingStatus document. :enabled flips to true only when
# TargetBucket/TargetPrefix appear inside a LoggingEnabled section.
class S3LoggingParser < AwsParser # :nodoc:
  def reset
    @result          = { :enabled => false, :targetbucket => '', :targetprefix => '' }
    @current_grantee = {}
  end

  def tagend(name)
    # Only tags nested under LoggingEnabled are meaningful here.
    return unless @xmlpath == 'BucketLoggingStatus/LoggingEnabled'
    case name
    when 'TargetBucket'
      @result[:targetbucket] = @text
      @result[:enabled]      = true
    when 'TargetPrefix'
      @result[:targetprefix] = @text
      @result[:enabled]      = true
    end
  end
end
1222
+
1223
# Extracts :last_modified and :e_tag from a CopyObjectResult document.
class S3CopyParser < AwsParser # :nodoc:
  def reset
    @result = {}
  end

  def tagend(name)
    case name
    when 'LastModified' then @result[:last_modified] = @text
    when 'ETag'         then @result[:e_tag]         = @text
    end
  end
end
1237
+
1238
+ #-----------------------------------------------------------------
1239
+ # PARSERS: Non XML
1240
+ #-----------------------------------------------------------------
1241
+
1242
# Pass-through "parser": stores the raw HTTP response object as the result.
# Also provides the shared header-flattening helper for its subclasses.
class S3HttpResponseParser # :nodoc:
  attr_reader :result

  def parse(response)
    @result = response
  end

  # Net::HTTP yields each header value as an array; collapse arrays with
  # fewer than two elements to the bare (possibly nil) value.
  def headers_to_string(headers)
    headers.each_with_object({}) do |(name, value), flattened|
      flattened[name] = (value.is_a?(Array) && value.size < 2) ? value[0] : value
    end
  end
end
1258
+
1259
# Wraps the response body (forced to UTF-8 where the Ruby supports it)
# together with a flattened copy of the response headers.
class S3HttpResponseBodyParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    body = response.body
    body = body.force_encoding("UTF-8") if body.respond_to?(:force_encoding)
    @result = {
      :object  => body,
      :headers => headers_to_string(response.to_hash)
    }
  end
end
1270
+
1271
# For HEAD responses only the (flattened) headers are of interest.
class S3HttpResponseHeadParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = headers_to_string(response.to_hash)
  end
end
1276
+
1277
+ end
1278
+
1279
+ end