talaris-right_aws 2.1.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (66) hide show
  1. data/History.txt +305 -0
  2. data/Manifest.txt +60 -0
  3. data/README.txt +163 -0
  4. data/Rakefile +104 -0
  5. data/lib/acf/right_acf_interface.rb +549 -0
  6. data/lib/acf/right_acf_invalidations.rb +144 -0
  7. data/lib/acf/right_acf_origin_access_identities.rb +230 -0
  8. data/lib/acf/right_acf_streaming_interface.rb +229 -0
  9. data/lib/acw/right_acw_interface.rb +248 -0
  10. data/lib/as/right_as_interface.rb +698 -0
  11. data/lib/awsbase/benchmark_fix.rb +39 -0
  12. data/lib/awsbase/right_awsbase.rb +1174 -0
  13. data/lib/awsbase/support.rb +35 -0
  14. data/lib/awsbase/version.rb +9 -0
  15. data/lib/ec2/right_ec2.rb +458 -0
  16. data/lib/ec2/right_ec2_ebs.rb +465 -0
  17. data/lib/ec2/right_ec2_images.rb +413 -0
  18. data/lib/ec2/right_ec2_instances.rb +785 -0
  19. data/lib/ec2/right_ec2_monitoring.rb +70 -0
  20. data/lib/ec2/right_ec2_placement_groups.rb +108 -0
  21. data/lib/ec2/right_ec2_reserved_instances.rb +174 -0
  22. data/lib/ec2/right_ec2_security_groups.rb +396 -0
  23. data/lib/ec2/right_ec2_spot_instances.rb +425 -0
  24. data/lib/ec2/right_ec2_tags.rb +139 -0
  25. data/lib/ec2/right_ec2_vpc.rb +583 -0
  26. data/lib/ec2/right_ec2_windows_mobility.rb +84 -0
  27. data/lib/elb/right_elb_interface.rb +571 -0
  28. data/lib/iam/right_iam_access_keys.rb +71 -0
  29. data/lib/iam/right_iam_groups.rb +195 -0
  30. data/lib/iam/right_iam_interface.rb +341 -0
  31. data/lib/iam/right_iam_mfa_devices.rb +67 -0
  32. data/lib/iam/right_iam_users.rb +251 -0
  33. data/lib/rds/right_rds_interface.rb +1309 -0
  34. data/lib/right_aws.rb +83 -0
  35. data/lib/route_53/right_route_53_interface.rb +630 -0
  36. data/lib/s3/right_s3.rb +1123 -0
  37. data/lib/s3/right_s3_interface.rb +1198 -0
  38. data/lib/sdb/active_sdb.rb +1107 -0
  39. data/lib/sdb/right_sdb_interface.rb +753 -0
  40. data/lib/sqs/right_sqs.rb +387 -0
  41. data/lib/sqs/right_sqs_gen2.rb +342 -0
  42. data/lib/sqs/right_sqs_gen2_interface.rb +523 -0
  43. data/lib/sqs/right_sqs_interface.rb +593 -0
  44. data/right_aws.gemspec +91 -0
  45. data/test/acf/test_helper.rb +2 -0
  46. data/test/acf/test_right_acf.rb +138 -0
  47. data/test/awsbase/test_helper.rb +2 -0
  48. data/test/awsbase/test_right_awsbase.rb +12 -0
  49. data/test/ec2/test_helper.rb +2 -0
  50. data/test/ec2/test_right_ec2.rb +108 -0
  51. data/test/http_connection.rb +87 -0
  52. data/test/rds/test_helper.rb +2 -0
  53. data/test/rds/test_right_rds.rb +120 -0
  54. data/test/s3/test_helper.rb +2 -0
  55. data/test/s3/test_right_s3.rb +421 -0
  56. data/test/s3/test_right_s3_stubbed.rb +97 -0
  57. data/test/sdb/test_active_sdb.rb +357 -0
  58. data/test/sdb/test_batch_put_attributes.rb +54 -0
  59. data/test/sdb/test_helper.rb +3 -0
  60. data/test/sdb/test_right_sdb.rb +253 -0
  61. data/test/sqs/test_helper.rb +2 -0
  62. data/test/sqs/test_right_sqs.rb +285 -0
  63. data/test/sqs/test_right_sqs_gen2.rb +264 -0
  64. data/test/test_credentials.rb +37 -0
  65. data/test/ts_right_aws.rb +14 -0
  66. metadata +214 -0
@@ -0,0 +1,1198 @@
1
+ #
2
+ # Copyright (c) 2007-2008 RightScale Inc
3
+ #
4
+ # Permission is hereby granted, free of charge, to any person obtaining
5
+ # a copy of this software and associated documentation files (the
6
+ # "Software"), to deal in the Software without restriction, including
7
+ # without limitation the rights to use, copy, modify, merge, publish,
8
+ # distribute, sublicense, and/or sell copies of the Software, and to
9
+ # permit persons to whom the Software is furnished to do so, subject to
10
+ # the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be
13
+ # included in all copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19
+ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20
+ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
+ #
23
+
24
+ module RightAws
25
+
26
+ class S3Interface < RightAwsBase
27
+
28
+ USE_100_CONTINUE_PUT_SIZE = 1_000_000
29
+
30
+ include RightAwsBaseInterface
31
+
32
+ DEFAULT_HOST = 's3.amazonaws.com'
33
+ DEFAULT_PORT = 443
34
+ DEFAULT_PROTOCOL = 'https'
35
+ DEFAULT_SERVICE = '/'
36
+ REQUEST_TTL = 30
37
+ DEFAULT_EXPIRES_AFTER = 1 * 24 * 60 * 60 # One day's worth of seconds
38
+ ONE_YEAR_IN_SECONDS = 365 * 24 * 60 * 60
39
+ AMAZON_HEADER_PREFIX = 'x-amz-'
40
+ AMAZON_METADATA_PREFIX = 'x-amz-meta-'
41
+
42
+ @@bench = AwsBenchmarkingBlock.new
43
+ def self.bench_xml
44
+ @@bench.xml
45
+ end
46
+ def self.bench_s3
47
+ @@bench.service
48
+ end
49
+
50
+ # Params supported:
51
+ # :no_subdomains => true # do not use bucket as a part of domain name but as a part of path
52
+ @@params = {}
53
+ def self.params
54
+ @@params
55
+ end
56
+
57
+ # get custom option
58
+ def param(name)
59
+ # - check explicitly defined param (@params)
60
+ # - otherwise check implicitly defined one (@@params)
61
+ @params.has_key?(name) ? @params[name] : @@params[name]
62
+ end
63
+
64
+ # Creates new RightS3 instance.
65
+ #
66
+ # s3 = RightAws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX','hgTHt68JY07JKUY08ftHYtERkjgtfERn57XXXXXX', {:logger => Logger.new('/tmp/x.log')}) #=> #<RightAws::S3Interface:0xb7b3c27c>
67
+ #
68
+ # Params is a hash:
69
+ #
70
+ # {:server => 's3.amazonaws.com' # Amazon service host: 's3.amazonaws.com'(default)
71
+ # :port => 443 # Amazon service port: 80 or 443(default)
72
+ # :protocol => 'https' # Amazon service protocol: 'http' or 'https'(default)
73
+ # :logger => Logger Object} # Logger instance: logs to STDOUT if omitted }
74
+ #
75
+ def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
76
+ init({ :name => 'S3',
77
+ :default_host => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).host : DEFAULT_HOST,
78
+ :default_port => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).port : DEFAULT_PORT,
79
+ :default_service => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).path : DEFAULT_SERVICE,
80
+ :default_protocol => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).scheme : DEFAULT_PROTOCOL },
81
+ aws_access_key_id || ENV['AWS_ACCESS_KEY_ID'],
82
+ aws_secret_access_key || ENV['AWS_SECRET_ACCESS_KEY'],
83
+ params)
84
+ end
85
+
86
+
87
+ #-----------------------------------------------------------------
88
+ # Requests
89
+ #-----------------------------------------------------------------
90
+ # Produces canonical string for signing.
91
+ def canonical_string(method, path, headers={}, expires=nil) # :nodoc:
92
+ s3_headers = {}
93
+ headers.each do |key, value|
94
+ key = key.downcase
95
+ value = case
96
+ when value.is_a?(Array) then value.join('')
97
+ else value.to_s
98
+ end
99
+ s3_headers[key] = value.strip if key[/^#{AMAZON_HEADER_PREFIX}|^content-md5$|^content-type$|^date$/o]
100
+ end
101
+ s3_headers['content-type'] ||= ''
102
+ s3_headers['content-md5'] ||= ''
103
+ s3_headers['date'] = '' if s3_headers.has_key? 'x-amz-date'
104
+ s3_headers['date'] = expires if expires
105
+ # prepare output string
106
+ out_string = "#{method}\n"
107
+ s3_headers.sort { |a, b| a[0] <=> b[0] }.each do |key, value|
108
+ out_string << (key[/^#{AMAZON_HEADER_PREFIX}/o] ? "#{key}:#{value}\n" : "#{value}\n")
109
+ end
110
+ # ignore everything after the question mark...
111
+ out_string << path.gsub(/\?.*$/, '')
112
+ # ...unless there is an acl or torrent parameter
113
+ out_string << '?acl' if path[/[&?]acl($|&|=)/]
114
+ out_string << '?torrent' if path[/[&?]torrent($|&|=)/]
115
+ out_string << '?location' if path[/[&?]location($|&|=)/]
116
+ out_string << '?logging' if path[/[&?]logging($|&|=)/] # this one is beta, no support for now
117
+ out_string
118
+ end
119
+
120
+ # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
121
+ def is_dns_bucket?(bucket_name)
122
+ bucket_name = bucket_name.to_s
123
+ return nil unless (3..63) === bucket_name.size
124
+ bucket_name.split('.').each do |component|
125
+ return nil unless component[/^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/]
126
+ end
127
+ true
128
+ end
129
+
130
+ def fetch_request_params(headers) #:nodoc:
131
+ # default server to use
132
+ server = @params[:server]
133
+ service = @params[:service].to_s
134
+ service.chop! if service[%r{/$}] # remove trailing '/' from service
135
+ # extract bucket name and check it's dns compartibility
136
+ headers[:url].to_s[%r{^([a-z0-9._-]*)(/[^?]*)?(\?.+)?}i]
137
+ bucket_name, key_path, params_list = $1, $2, $3
138
+ # select request model
139
+ if !param(:no_subdomains) && is_dns_bucket?(bucket_name)
140
+ # fix a path
141
+ server = "#{bucket_name}.#{server}"
142
+ key_path ||= '/'
143
+ path = "#{service}#{key_path}#{params_list}"
144
+ else
145
+ path = "#{service}/#{bucket_name}#{key_path}#{params_list}"
146
+ end
147
+ path_to_sign = "#{service}/#{bucket_name}#{key_path}#{params_list}"
148
+ # path_to_sign = "/#{bucket_name}#{key_path}#{params_list}"
149
+ [ server, path, path_to_sign ]
150
+ end
151
+
152
+ # Generates request hash for REST API.
153
+ # Assumes that headers[:url] is URL encoded (use CGI::escape)
154
+ def generate_rest_request(method, headers) # :nodoc:
155
+ # calculate request data
156
+ server, path, path_to_sign = fetch_request_params(headers)
157
+ data = headers[:data]
158
+ # remove unset(==optional) and symbolyc keys
159
+ headers.each{ |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
160
+ #
161
+ headers['content-type'] ||= ''
162
+ headers['date'] = Time.now.httpdate
163
+ # create request
164
+ request = "Net::HTTP::#{method.capitalize}".right_constantize.new(path)
165
+ request.body = data if data
166
+ # set request headers and meta headers
167
+ headers.each { |key, value| request[key.to_s] = value }
168
+ #generate auth strings
169
+ auth_string = canonical_string(request.method, path_to_sign, request.to_hash)
170
+ signature = AwsUtils::sign(@aws_secret_access_key, auth_string)
171
+ # set other headers
172
+ request['Authorization'] = "AWS #{@aws_access_key_id}:#{signature}"
173
+ # prepare output hash
174
+ { :request => request,
175
+ :server => server,
176
+ :port => @params[:port],
177
+ :protocol => @params[:protocol] }
178
+ end
179
+
180
+ # Sends request to Amazon and parses the response.
181
+ # Raises AwsError if any banana happened.
182
+ def request_info(request, parser, &block) # :nodoc:
183
+ request_info_impl(:s3_connection, @@bench, request, parser, &block)
184
+ end
185
+
186
+ # Returns an array of customer's buckets. Each item is a +hash+.
187
+ #
188
+ # s3.list_all_my_buckets #=>
189
+ # [{:owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
190
+ # :owner_display_name => "root",
191
+ # :name => "bucket_name",
192
+ # :creation_date => "2007-04-19T18:47:43.000Z"}, ..., {...}]
193
+ #
194
+ def list_all_my_buckets(headers={})
195
+ req_hash = generate_rest_request('GET', headers.merge(:url=>''))
196
+ request_info(req_hash, S3ListAllMyBucketsParser.new(:logger => @logger))
197
+ rescue
198
+ on_exception
199
+ end
200
+
201
+ # Creates new bucket. Returns +true+ or an exception.
202
+ #
203
+ # # create a bucket at American server
204
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
205
+ # # create a bucket at European server
206
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
207
+ #
208
+ def create_bucket(bucket, headers={})
209
+ data = nil
210
+ location = case headers[:location].to_s
211
+ when 'us','US' then ''
212
+ when 'eu' then 'EU'
213
+ else headers[:location].to_s
214
+ end
215
+
216
+ unless location.right_blank?
217
+ data = "<CreateBucketConfiguration><LocationConstraint>#{location}</LocationConstraint></CreateBucketConfiguration>"
218
+ end
219
+ req_hash = generate_rest_request('PUT', headers.merge(:url=>bucket, :data => data))
220
+ request_info(req_hash, RightHttp2xxParser.new)
221
+ rescue Exception => e
222
+ # if the bucket exists AWS returns an error for the location constraint interface. Drop it
223
+ e.is_a?(RightAws::AwsError) && e.message.include?('BucketAlreadyOwnedByYou') ? true : on_exception
224
+ end
225
+
226
+ # Retrieve bucket location
227
+ #
228
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
229
+ # puts s3.bucket_location('my-awesome-bucket-us') #=> '' (Amazon's default value assumed)
230
+ #
231
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
232
+ # puts s3.bucket_location('my-awesome-bucket-eu') #=> 'EU'
233
+ #
234
+ def bucket_location(bucket, headers={})
235
+ req_hash = generate_rest_request('GET', headers.merge(:url=>"#{bucket}?location"))
236
+ request_info(req_hash, S3BucketLocationParser.new)
237
+ rescue
238
+ on_exception
239
+ end
240
+
241
+ # Retrieves the logging configuration for a bucket.
242
+ # Returns a hash of {:enabled, :targetbucket, :targetprefix}
243
+ #
244
+ # s3.interface.get_logging_parse(:bucket => "asset_bucket")
245
+ # => {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
246
+ #
247
+ #
248
+ def get_logging_parse(params)
249
+ AwsUtils.mandatory_arguments([:bucket], params)
250
+ AwsUtils.allow_only([:bucket, :headers], params)
251
+ params[:headers] = {} unless params[:headers]
252
+ req_hash = generate_rest_request('GET', params[:headers].merge(:url=>"#{params[:bucket]}?logging"))
253
+ request_info(req_hash, S3LoggingParser.new)
254
+ rescue
255
+ on_exception
256
+ end
257
+
258
+ # Sets logging configuration for a bucket from the XML configuration document.
259
+ # params:
260
+ # :bucket
261
+ # :xmldoc
262
+ def put_logging(params)
263
+ AwsUtils.mandatory_arguments([:bucket,:xmldoc], params)
264
+ AwsUtils.allow_only([:bucket,:xmldoc, :headers], params)
265
+ params[:headers] = {} unless params[:headers]
266
+ req_hash = generate_rest_request('PUT', params[:headers].merge(:url=>"#{params[:bucket]}?logging", :data => params[:xmldoc]))
267
+ request_info(req_hash, RightHttp2xxParser.new)
268
+ rescue
269
+ on_exception
270
+ end
271
+
272
+ # Deletes new bucket. Bucket must be empty! Returns +true+ or an exception.
273
+ #
274
+ # s3.delete_bucket('my_awesome_bucket') #=> true
275
+ #
276
+ # See also: force_delete_bucket method
277
+ #
278
+ def delete_bucket(bucket, headers={})
279
+ req_hash = generate_rest_request('DELETE', headers.merge(:url=>bucket))
280
+ request_info(req_hash, RightHttp2xxParser.new)
281
+ rescue
282
+ on_exception
283
+ end
284
+
285
+ # Returns an array of bucket's keys. Each array item (key data) is a +hash+.
286
+ #
287
+ # s3.list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) #=>
288
+ # [{:key => "test1",
289
+ # :last_modified => "2007-05-18T07:00:59.000Z",
290
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
291
+ # :owner_display_name => "root",
292
+ # :e_tag => "000000000059075b964b07152d234b70",
293
+ # :storage_class => "STANDARD",
294
+ # :size => 3,
295
+ # :service=> {'is_truncated' => false,
296
+ # 'prefix' => "t",
297
+ # 'marker' => "",
298
+ # 'name' => "my_awesome_bucket",
299
+ # 'max-keys' => "5"}, ..., {...}]
300
+ #
301
+ def list_bucket(bucket, options={}, headers={})
302
+ bucket += '?'+options.map{|k, v| "#{k.to_s}=#{CGI::escape v.to_s}"}.join('&') unless options.right_blank?
303
+ req_hash = generate_rest_request('GET', headers.merge(:url=>bucket))
304
+ request_info(req_hash, S3ListBucketParser.new(:logger => @logger))
305
+ rescue
306
+ on_exception
307
+ end
308
+
309
+ # Incrementally list the contents of a bucket. Yields the following hash to a block:
310
+ # s3.incrementally_list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) yields
311
+ # {
312
+ # :name => 'bucketname',
313
+ # :prefix => 'subfolder/',
314
+ # :marker => 'fileN.jpg',
315
+ # :max_keys => 234,
316
+ # :delimiter => '/',
317
+ # :is_truncated => true,
318
+ # :next_marker => 'fileX.jpg',
319
+ # :contents => [
320
+ # { :key => "file1",
321
+ # :last_modified => "2007-05-18T07:00:59.000Z",
322
+ # :e_tag => "000000000059075b964b07152d234b70",
323
+ # :size => 3,
324
+ # :storage_class => "STANDARD",
325
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
326
+ # :owner_display_name => "root"
327
+ # }, { :key, ...}, ... {:key, ...}
328
+ # ]
329
+ # :common_prefixes => [
330
+ # "prefix1",
331
+ # "prefix2",
332
+ # ...,
333
+ # "prefixN"
334
+ # ]
335
+ # }
336
+ def incrementally_list_bucket(bucket, options={}, headers={}, &block)
337
+ internal_options = options.right_symbolize_keys
338
+ begin
339
+ internal_bucket = bucket.dup
340
+ internal_bucket += '?'+internal_options.map{|k, v| "#{k.to_s}=#{CGI::escape v.to_s}"}.join('&') unless internal_options.right_blank?
341
+ req_hash = generate_rest_request('GET', headers.merge(:url=>internal_bucket))
342
+ response = request_info(req_hash, S3ImprovedListBucketParser.new(:logger => @logger))
343
+ there_are_more_keys = response[:is_truncated]
344
+ if(there_are_more_keys)
345
+ internal_options[:marker] = decide_marker(response)
346
+ total_results = response[:contents].length + response[:common_prefixes].length
347
+ internal_options[:'max-keys'] ? (internal_options[:'max-keys'] -= total_results) : nil
348
+ end
349
+ yield response
350
+ end while there_are_more_keys && under_max_keys(internal_options)
351
+ true
352
+ rescue
353
+ on_exception
354
+ end
355
+
356
+
357
+ private
358
+ def decide_marker(response)
359
+ return response[:next_marker].dup if response[:next_marker]
360
+ last_key = response[:contents].last[:key]
361
+ last_prefix = response[:common_prefixes].last
362
+ if(!last_key)
363
+ return nil if(!last_prefix)
364
+ last_prefix.dup
365
+ elsif(!last_prefix)
366
+ last_key.dup
367
+ else
368
+ last_key > last_prefix ? last_key.dup : last_prefix.dup
369
+ end
370
+ end
371
+
372
+ def under_max_keys(internal_options)
373
+ internal_options[:'max-keys'] ? internal_options[:'max-keys'] > 0 : true
374
+ end
375
+
376
+ public
377
+ # Saves object to Amazon. Returns +true+ or an exception.
378
+ # Any header starting with AMAZON_METADATA_PREFIX is considered
379
+ # user metadata. It will be stored with the object and returned
380
+ # when you retrieve the object. The total size of the HTTP
381
+ # request, not including the body, must be less than 4 KB.
382
+ #
383
+ # s3.put('my_awesome_bucket', 'log/current/1.log', 'Ola-la!', 'x-amz-meta-family'=>'Woho556!') #=> true
384
+ #
385
+ # This method is capable of 'streaming' uploads; that is, it can upload
386
+ # data from a file or other IO object without first reading all the data
387
+ # into memory. This is most useful for large PUTs - it is difficult to read
388
+ # a 2 GB file entirely into memory before sending it to S3.
389
+ # To stream an upload, pass an object that responds to 'read' (like the read
390
+ # method of IO) and to either 'lstat' or 'size'. For files, this means
391
+ # streaming is enabled by simply making the call:
392
+ #
393
+ # s3.put(bucket_name, 'S3keyname.forthisfile', File.open('localfilename.dat'))
394
+ #
395
+ # If the IO object you wish to stream from responds to the read method but
396
+ # doesn't implement lstat or size, you can extend the object dynamically
397
+ # to implement these methods, or define your own class which defines these
398
+ # methods. Be sure that your class returns 'nil' from read() after having
399
+ # read 'size' bytes. Otherwise S3 will drop the socket after
400
+ # 'Content-Length' bytes have been uploaded, and HttpConnection will
401
+ # interpret this as an error.
402
+ #
403
+ # This method now supports very large PUTs, where very large
404
+ # is > 2 GB.
405
+ #
406
+ # For Win32 users: Files and IO objects should be opened in binary mode. If
407
+ # a text mode IO object is passed to PUT, it will be converted to binary
408
+ # mode.
409
+ #
410
+
411
+ def put(bucket, key, data=nil, headers={})
412
+ # On Windows, if someone opens a file in text mode, we must reset it so
413
+ # to binary mode for streaming to work properly
414
+ if(data.respond_to?(:binmode))
415
+ data.binmode
416
+ end
417
+ if (data.respond_to?(:lstat) && data.lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
418
+ (data.respond_to?(:size) && data.size >= USE_100_CONTINUE_PUT_SIZE)
419
+ headers['expect'] = '100-continue'
420
+ end
421
+ req_hash = generate_rest_request('PUT', headers.merge(:url=>"#{bucket}/#{CGI::escape key}", :data=>data))
422
+ request_info(req_hash, RightHttp2xxParser.new)
423
+ rescue
424
+ on_exception
425
+ end
426
+
427
+
428
+
429
+ # New experimental API for uploading objects, introduced in RightAws 1.8.1.
430
+ # store_object is similar in function to the older function put, but returns the full response metadata. It also allows for optional verification
431
+ # of object md5 checksums on upload. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
432
+ # The hash of the response headers contains useful information like the Amazon request ID and the object ETag (MD5 checksum).
433
+ #
434
+ # If the optional :md5 argument is provided, store_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
435
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
436
+ #
437
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
438
+ #
439
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
440
+ # => {"x-amz-id-2"=>"SVsnS2nfDaR+ixyJUlRKM8GndRyEMS16+oZRieamuL61pPxPaTuWrWtlYaEhYrI/",
441
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
442
+ # "date"=>"Mon, 29 Sep 2008 18:57:46 GMT",
443
+ # :verified_md5=>true,
444
+ # "x-amz-request-id"=>"63916465939995BA",
445
+ # "server"=>"AmazonS3",
446
+ # "content-length"=>"0"}
447
+ #
448
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :data => "polemonium" )
449
+ # => {"x-amz-id-2"=>"MAt9PLjgLX9UYJ5tV2fI/5dBZdpFjlzRVpWgBDpvZpl+V+gJFcBMW2L+LBstYpbR",
450
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
451
+ # "date"=>"Mon, 29 Sep 2008 18:58:56 GMT",
452
+ # :verified_md5=>false,
453
+ # "x-amz-request-id"=>"3B25A996BC2CDD3B",
454
+ # "server"=>"AmazonS3",
455
+ # "content-length"=>"0"}
456
+
457
+ def store_object(params)
458
+ AwsUtils.allow_only([:bucket, :key, :data, :headers, :md5], params)
459
+ AwsUtils.mandatory_arguments([:bucket, :key, :data], params)
460
+ params[:headers] = {} unless params[:headers]
461
+
462
+ params[:data].binmode if(params[:data].respond_to?(:binmode)) # On Windows, if someone opens a file in text mode, we must reset it to binary mode for streaming to work properly
463
+ if (params[:data].respond_to?(:lstat) && params[:data].lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
464
+ (params[:data].respond_to?(:size) && params[:data].size >= USE_100_CONTINUE_PUT_SIZE)
465
+ params[:headers]['expect'] = '100-continue'
466
+ end
467
+
468
+ req_hash = generate_rest_request('PUT', params[:headers].merge(:url=>"#{params[:bucket]}/#{CGI::escape params[:key]}", :data=>params[:data]))
469
+ resp = request_info(req_hash, S3HttpResponseHeadParser.new)
470
+ if(params[:md5])
471
+ resp[:verified_md5] = (resp['etag'].gsub(/\"/, '') == params[:md5]) ? true : false
472
+ else
473
+ resp[:verified_md5] = false
474
+ end
475
+ resp
476
+ rescue
477
+ on_exception
478
+ end
479
+
480
+ # Identical in function to store_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
481
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
482
+ # This call is implemented as a wrapper around store_object and the user may gain different semantics by creating a custom wrapper.
483
+ #
484
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
485
+ # => {"x-amz-id-2"=>"IZN3XsH4FlBU0+XYkFTfHwaiF1tNzrm6dIW2EM/cthKvl71nldfVC0oVQyydzWpb",
486
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
487
+ # "date"=>"Mon, 29 Sep 2008 18:38:32 GMT",
488
+ # :verified_md5=>true,
489
+ # "x-amz-request-id"=>"E8D7EA4FE00F5DF7",
490
+ # "server"=>"AmazonS3",
491
+ # "content-length"=>"0"}
492
+ #
493
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2953", :data => "polemonium" )
494
+ # RightAws::AwsError: Uploaded object failed MD5 checksum verification: {"x-amz-id-2"=>"HTxVtd2bf7UHHDn+WzEH43MkEjFZ26xuYvUzbstkV6nrWvECRWQWFSx91z/bl03n",
495
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
496
+ # "date"=>"Mon, 29 Sep 2008 18:38:41 GMT",
497
+ # :verified_md5=>false,
498
+ # "x-amz-request-id"=>"0D7ADE09F42606F2",
499
+ # "server"=>"AmazonS3",
500
+ # "content-length"=>"0"}
501
+ def store_object_and_verify(params)
502
+ AwsUtils.mandatory_arguments([:md5], params)
503
+ r = store_object(params)
504
+ r[:verified_md5] ? (return r) : (raise AwsError.new("Uploaded object failed MD5 checksum verification: #{r.inspect}"))
505
+ end
506
+
507
+ # Retrieves object data from Amazon. Returns a +hash+ or an exception.
508
+ #
509
+ # s3.get('my_awesome_bucket', 'log/curent/1.log') #=>
510
+ #
511
+ # {:object => "Ola-la!",
512
+ # :headers => {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
513
+ # "content-type" => "",
514
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
515
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
516
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
517
+ # "x-amz-meta-family" => "Woho556!",
518
+ # "x-amz-request-id" => "0000000C246D770C",
519
+ # "server" => "AmazonS3",
520
+ # "content-length" => "7"}}
521
+ #
522
+ # If a block is provided, yields incrementally to the block as
523
+ # the response is read. For large responses, this function is ideal as
524
+ # the response can be 'streamed'. The hash containing header fields is
525
+ # still returned.
526
+ # Example:
527
+ # foo = File.new('./chunder.txt', File::CREAT|File::RDWR)
528
+ # rhdr = s3.get('aws-test', 'Cent5V1_7_1.img.part.00') do |chunk|
529
+ # foo.write(chunk)
530
+ # end
531
+ # foo.close
532
+ #
533
+
534
+ def get(bucket, key, headers={}, &block)
535
+ req_hash = generate_rest_request('GET', headers.merge(:url=>"#{bucket}/#{CGI::escape key}"))
536
+ request_info(req_hash, S3HttpResponseBodyParser.new, &block)
537
+ rescue
538
+ on_exception
539
+ end
540
+
541
+ # New experimental API for retrieving objects, introduced in RightAws 1.8.1.
542
+ # retrieve_object is similar in function to the older function get. It allows for optional verification
543
+ # of object md5 checksums on retrieval. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
544
+ #
545
+ # If the optional :md5 argument is provided, retrieve_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
546
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
547
+ #
548
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
549
+ # Mandatory arguments:
550
+ # :bucket - the bucket in which the object is stored
551
+ # :key - the object address (or path) within the bucket
552
+ # Optional arguments:
553
+ # :headers - hash of additional HTTP headers to include with the request
554
+ # :md5 - MD5 checksum against which to verify the retrieved object
555
+ #
556
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo")
557
+ # => {:verified_md5=>false,
558
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
559
+ # "x-amz-id-2"=>"2Aj3TDz6HP5109qly//18uHZ2a1TNHGLns9hyAtq2ved7wmzEXDOPGRHOYEa3Qnp",
560
+ # "content-type"=>"",
561
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
562
+ # "date"=>"Tue, 30 Sep 2008 00:52:44 GMT",
563
+ # "x-amz-request-id"=>"EE4855DE27A2688C",
564
+ # "server"=>"AmazonS3",
565
+ # "content-length"=>"10"},
566
+ # :object=>"polemonium"}
567
+ #
568
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo", :md5=>'a507841b1bc8115094b00bbe8c1b2954')
569
+ # => {:verified_md5=>true,
570
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
571
+ # "x-amz-id-2"=>"mLWQcI+VuKVIdpTaPXEo84g0cz+vzmRLbj79TS8eFPfw19cGFOPxuLy4uGYVCvdH",
572
+ # "content-type"=>"", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
573
+ # "date"=>"Tue, 30 Sep 2008 00:53:08 GMT",
574
+ # "x-amz-request-id"=>"6E7F317356580599",
575
+ # "server"=>"AmazonS3",
576
+ # "content-length"=>"10"},
577
+ # :object=>"polemonium"}
578
+ # If a block is provided, yields incrementally to the block as
579
+ # the response is read. For large responses, this function is ideal as
580
+ # the response can be 'streamed'. The hash containing header fields is
581
+ # still returned.
582
+ def retrieve_object(params, &block)
583
+ AwsUtils.mandatory_arguments([:bucket, :key], params)
584
+ AwsUtils.allow_only([:bucket, :key, :headers, :md5], params)
585
+ params[:headers] = {} unless params[:headers]
586
+ req_hash = generate_rest_request('GET', params[:headers].merge(:url=>"#{params[:bucket]}/#{CGI::escape params[:key]}"))
587
+ resp = request_info(req_hash, S3HttpResponseBodyParser.new, &block)
588
+ resp[:verified_md5] = false
589
+ if(params[:md5] && (resp[:headers]['etag'].gsub(/\"/,'') == params[:md5]))
590
+ resp[:verified_md5] = true
591
+ end
592
+ resp
593
+ rescue
594
+ on_exception
595
+ end
596
+
597
+ # Identical in function to retrieve_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
598
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
599
+ # This call is implemented as a wrapper around retrieve_object and the user may gain different semantics by creating a custom wrapper.
600
+ def retrieve_object_and_verify(params, &block)
601
+ AwsUtils.mandatory_arguments([:md5], params)
602
+ resp = retrieve_object(params, &block)
603
+ return resp if resp[:verified_md5]
604
+ raise AwsError.new("Retrieved object failed MD5 checksum verification: #{resp.inspect}")
605
+ end
606
+
607
+ # Retrieves object metadata. Returns a +hash+ of http_response_headers.
608
+ #
609
+ # s3.head('my_awesome_bucket', 'log/curent/1.log') #=>
610
+ # {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
611
+ # "content-type" => "",
612
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
613
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
614
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
615
+ # "x-amz-meta-family" => "Woho556!",
616
+ # "x-amz-request-id" => "0000000C246D770C",
617
+ # "server" => "AmazonS3",
618
+ # "content-length" => "7"}
619
+ #
620
+ def head(bucket, key, headers={})
621
+ req_hash = generate_rest_request('HEAD', headers.merge(:url=>"#{bucket}/#{CGI::escape key}"))
622
+ request_info(req_hash, S3HttpResponseHeadParser.new)
623
+ rescue
624
+ on_exception
625
+ end
626
+
627
# Deletes a key from a bucket. Returns +true+ or raises.
#
#  s3.delete('my_awesome_bucket', 'log/curent/1.log') #=> true
#
def delete(bucket, key='', headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
637
+
638
# Copy an object.
#  directive: :copy    - copy meta-headers from the source object (default)
#             :replace - replace meta-headers with the ones passed in +headers+
#
#  # copy a key with meta-headers
#  s3.copy('b1', 'key1', 'b1', 'key1_copy') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:25:22.000Z"}
#
#  # copy a key, overwrite meta-headers
#  s3.copy('b1', 'key2', 'b1', 'key2_copy', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:26:22.000Z"}
#
# see: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingCopyingObjects.html
#      http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
#
def copy(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  dest_key ||= src_key
  # Fix: build the copy headers with merge instead of in-place []= so the
  # caller's headers hash is not mutated as a side effect.
  headers = headers.merge('x-amz-metadata-directive' => directive.to_s.upcase,
                          'x-amz-copy-source'        => "#{src_bucket}/#{CGI::escape src_key}")
  req_hash = generate_rest_request('PUT', headers.merge(:url => "#{dest_bucket}/#{CGI::escape dest_key}"))
  request_info(req_hash, S3CopyParser.new)
rescue
  on_exception
end
660
+
661
# Move an object (copy, then delete the source unless source == destination).
#  directive: :copy    - copy meta-headers from source (default value)
#             :replace - replace meta-headers by passed ones
#
#  # move bucket1/key1 to bucket1/key2
#  s3.move('bucket1', 'key1', 'bucket1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:27:22.000Z"}
#
#  # move bucket1/key1 to bucket2/key2 with new meta-headers assignment
#  s3.move('bucket1', 'key1', 'bucket2', 'key2', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:28:22.000Z"}
#
def move(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  copy_result = copy(src_bucket, src_key, dest_bucket, dest_key, directive, headers)
  # delete the original key only when it differs from the destination one
  delete(src_bucket, src_key) unless src_bucket == dest_bucket && src_key == dest_key
  copy_result
end
677
+
678
# Rename an object within a bucket (a move with identical source and
# destination buckets).
#
#  # rename bucket1/key1 to bucket1/key2
#  s3.rename('bucket1', 'key1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:29:22.000Z"}
#
def rename(src_bucket, src_key, dest_key, headers={})
  move(src_bucket, src_key, src_bucket, dest_key, :copy, headers)
end
686
+
687
# Retrieves the ACL (access control policy) for a bucket or object.
# Returns a hash with the response :headers and the raw ACL XML under :object.
# See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html
#
#  s3.get_acl('my_awesome_bucket', 'log/curent/1.log') #=>
#    {:headers => {"content-type"=>"application/xml;charset=ISO-8859-1", ...},
#     :object  => "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AccessControlPolicy ...>...</AccessControlPolicy>"}
#
def get_acl(bucket, key='', headers={})
  resource = key.right_blank? ? '' : "/#{CGI::escape key}"
  request  = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{resource}?acl"))
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
709
+
710
# Retrieves and parses the ACL (access control policy) for a bucket or object.
# Returns a hash of {:owner, :grantees}; grantees are keyed by canonical user
# id (or group URI) and multiple grants for the same grantee are folded into a
# single :permissions array.
#
#  s3.get_acl_parse('my_awesome_bucket', 'log/curent/1.log') #=>
#    { :grantees =>
#        { "16...2a" => { :display_name => "root",
#                         :permissions  => ["FULL_CONTROL"],
#                         :attributes   => { "xsi:type" => "CanonicalUser", ... } },
#          "http://acs.amazonaws.com/groups/global/AllUsers" =>
#                       { :display_name => "AllUsers",
#                         :permissions  => ["READ"],
#                         :attributes   => { "xsi:type" => "Group", ... } } },
#      :owner => { :id => "16..2a", :display_name => "root" } }
#
def get_acl_parse(bucket, key='', headers={})
  resource = key.right_blank? ? '' : "/#{CGI::escape key}"
  request  = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{resource}?acl"))
  acl      = request_info(request, S3AclParser.new(:logger => @logger))
  grantees = {}
  acl[:grantees].each do |grantee|
    id = grantee[:id] || grantee[:uri]
    if grantees.key?(id)
      # same grantee granted again: accumulate the permission
      grantees[id][:permissions] << grantee[:permissions]
    else
      # group grantees have no display name: derive one from the URI's last segment
      grantees[id] = { :display_name => grantee[:display_name] || grantee[:uri].to_s[/[^\/]*$/],
                       :permissions  => Array(grantee[:permissions]),
                       :attributes   => grantee[:attributes] }
    end
  end
  { :owner => acl[:owner], :grantees => grantees }
rescue
  on_exception
end
754
+
755
# Sets the ACL on a bucket or object from an ACL XML document.
def put_acl(bucket, key, acl_xml_doc, headers={})
  resource = key.right_blank? ? '' : "/#{CGI::escape key}"
  request  = generate_rest_request('PUT', headers.merge(:url => "#{bucket}#{resource}?acl", :data => acl_xml_doc))
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
763
+
764
# Retrieves the ACL (access control policy) for a bucket.
# Returns a hash of headers and the ACL XML document (see get_acl).
def get_bucket_acl(bucket, headers={})
  get_acl(bucket, '', headers)
rescue
  on_exception
end
770
+
771
# Sets the ACL on a bucket only (see put_acl).
def put_bucket_acl(bucket, acl_xml_doc, headers={})
  put_acl(bucket, '', acl_xml_doc, headers)
rescue
  on_exception
end
777
+
778
+
779
# Removes every key from a bucket. Returns +true+ or raises.
#
#  s3.clear_bucket('my_awesome_bucket') #=> true
#
def clear_bucket(bucket)
  incrementally_list_bucket(bucket) do |chunk|
    chunk[:contents].each { |entry| delete(bucket, entry[:key]) }
  end
  true
rescue
  on_exception
end
791
+
792
# Empties a bucket and then deletes it. Returns +true+ or raises.
#
#  s3.force_delete_bucket('my_awesome_bucket')
#
def force_delete_bucket(bucket)
  clear_bucket(bucket)
  delete_bucket(bucket)
rescue
  on_exception
end
802
+
803
# Deletes all keys for which 'folder_key' can be treated as a 'folder' name.
# Returns a flat array of the string keys that have been deleted.
#
#  s3.list_bucket('my_awesome_bucket').map{|key_data| key_data[:key]} #=> ['test','test/2/34','test/3','test1','test1/logs']
#  s3.delete_folder('my_awesome_bucket','test') #=> ['test','test/2/34','test/3']
#
def delete_folder(bucket, folder_key, separator='/')
  # Fix: use non-destructive chomp so the caller's string is not mutated.
  folder = folder_key.chomp(separator)
  # Fix: escape the folder name and separator so regex metacharacters in key
  # names cannot break or widen the match.
  folder_re = /^#{Regexp.escape(folder)}($|#{Regexp.escape(separator)}.*)/
  deleted = []
  incrementally_list_bucket(bucket, { 'prefix' => folder }) do |results|
    keys = results[:contents].map { |s3_key| s3_key[:key] }.select { |k| k[folder_re] }
    keys.each { |k| delete(bucket, k) }
    # Fix: concat (not <<) so the result is the documented flat array of
    # string keys rather than an array of per-chunk arrays.
    deleted.concat(keys)
  end
  deleted
rescue
  on_exception
end
820
+
821
# Retrieves object data only (headers are omitted). Returns a +string+ or raises.
#
#  s3.get('my_awesome_bucket', 'log/curent/1.log') #=> 'Ola-la!'
#
def get_object(bucket, key, headers={})
  response = get(bucket, key, headers)
  response[:object]
rescue
  on_exception
end
830
+
831
+ #-----------------------------------------------------------------
832
+ # Query API: Links
833
+ #-----------------------------------------------------------------
834
+
835
# Generates a signed, expiring link for the QUERY API.
# +expires+ may be an absolute time or a number of seconds from now
# (values below one year are treated as relative offsets).
def generate_link(method, headers={}, expires=nil) #:nodoc:
  # calculate request data
  server, path, path_to_sign = fetch_request_params(headers)
  # expiration time: default, then normalize relative offsets to epoch seconds
  expires ||= DEFAULT_EXPIRES_AFTER
  expires = Time.now.utc + expires if expires.is_a?(Fixnum) && (expires < ONE_YEAR_IN_SECONDS)
  expires = expires.to_i
  # drop unset (optional) values and symbolic keys before signing
  headers.each { |hdr, value| headers.delete(hdr) if value.nil? || hdr.is_a?(Symbol) }
  # generate the auth strings
  string_to_sign = canonical_string(method, path_to_sign, headers, expires)
  hmac = OpenSSL::HMAC.digest(OpenSSL::Digest::Digest.new("sha1"), @aws_secret_access_key, string_to_sign)
  signature = CGI::escape(Base64.encode64(hmac).strip)
  # append the signing query parameters to the path
  query = "Signature=#{signature}&Expires=#{expires}&AWSAccessKeyId=#{@aws_access_key_id}"
  path += path[/\?/] ? "&#{query}" : "?#{query}"
  "#{@params[:protocol]}://#{server}:#{@params[:port]}#{path}"
rescue
  on_exception
end
855
+
856
# Generates a signed QUERY API link for 'ListAllMyBuckets'.
#
#  s3.list_all_my_buckets_link #=> url string
#
def list_all_my_buckets_link(expires=nil, headers={})
  request_headers = headers.merge(:url => '')
  generate_link('GET', request_headers, expires)
rescue
  on_exception
end
865
+
866
# Generates a signed QUERY API link for 'CreateBucket'.
#
#  s3.create_bucket_link('my_awesome_bucket') #=> url string
#
def create_bucket_link(bucket, expires=nil, headers={})
  request_headers = headers.merge(:url => bucket)
  generate_link('PUT', request_headers, expires)
rescue
  on_exception
end
875
+
876
# Generates a signed QUERY API link for 'DeleteBucket'.
#
#  s3.delete_bucket_link('my_awesome_bucket') #=> url string
#
def delete_bucket_link(bucket, expires=nil, headers={})
  request_headers = headers.merge(:url => bucket)
  generate_link('DELETE', request_headers, expires)
rescue
  on_exception
end
885
+
886
# Generates a signed QUERY API link for 'ListBucket'.
# +options+ (e.g. 'prefix', 'marker', 'max-keys', 'delimiter') are appended
# to the URL as query parameters.
#
#  s3.list_bucket_link('my_awesome_bucket') #=> url string
#
def list_bucket_link(bucket, options=nil, expires=nil, headers={})
  unless options.right_blank?
    query = options.map { |name, value| "#{name.to_s}=#{CGI::escape value.to_s}" }.join('&')
    bucket += "?#{query}"
  end
  generate_link('GET', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
896
+
897
# Generates a signed QUERY API link for 'PutObject'.
#
#  s3.put_link('my_awesome_bucket', key, object) #=> url string
#
def put_link(bucket, key, data=nil, expires=nil, headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{CGI::escape key}", :data => data)
  generate_link('PUT', request_headers, expires)
rescue
  on_exception
end
906
+
907
# Generates a signed QUERY API link for 'GetObject'.
#
# If the bucket name complies with virtual-hosting naming rules, the returned
# link embeds the bucket in the host name:
#
#  s3.get_link('my-awesome-bucket',key) #=> https://my-awesome-bucket.s3.amazonaws.com:443/asia%2Fcustomers?Signature=nh7...
#
# otherwise an old-style link is returned (the bucket is part of the path):
#
#  s3.get_link('my_awesome_bucket',key) #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?Signature=QAO...
#
# see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html
def get_link(bucket, key, expires=nil, headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{CGI::escape key}")
  generate_link('GET', request_headers, expires)
rescue
  on_exception
end
924
+
925
# Generates a signed QUERY API link for 'HeadObject'.
#
#  s3.head_link('my_awesome_bucket',key) #=> url string
#
def head_link(bucket, key, expires=nil, headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{CGI::escape key}")
  generate_link('HEAD', request_headers, expires)
rescue
  on_exception
end
934
+
935
# Generates a signed QUERY API link for 'DeleteObject'.
#
#  s3.delete_link('my_awesome_bucket',key) #=> url string
#
def delete_link(bucket, key, expires=nil, headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{CGI::escape key}")
  generate_link('DELETE', request_headers, expires)
rescue
  on_exception
end
944
+
945
+
946
# Generates a signed QUERY API link for 'GetACL'.
#
#  s3.get_acl_link('my_awesome_bucket',key) #=> url string
#
def get_acl_link(bucket, key='', headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{CGI::escape key}?acl")
  generate_link('GET', request_headers)
rescue
  on_exception
end
955
+
956
# Generates a signed QUERY API link for 'PutACL'.
#
#  s3.put_acl_link('my_awesome_bucket',key) #=> url string
#
def put_acl_link(bucket, key='', headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{CGI::escape key}?acl")
  generate_link('PUT', request_headers)
rescue
  on_exception
end
965
+
966
# Generates a signed QUERY API link for 'GetBucketACL'.
#
#  s3.get_bucket_acl_link('my_awesome_bucket') #=> url string
#
def get_bucket_acl_link(bucket, headers={})
  get_acl_link(bucket, '', headers)
rescue
  on_exception
end
975
+
976
# Generates a signed QUERY API link for 'PutBucketACL'.
#
#  s3.put_bucket_acl_link('my_awesome_bucket', acl_xml_doc) #=> url string
#
def put_bucket_acl_link(bucket, acl_xml_doc, headers={})
  # Fix: this used to call put_acl_link(bucket, '', acl_xml_doc, headers),
  # passing 4 arguments to a 3-argument method and raising ArgumentError.
  # Build the link directly, including the ACL document as the request body
  # (mirroring how put_link handles :data).
  generate_link('PUT', headers.merge(:url => "#{bucket}/?acl", :data => acl_xml_doc))
rescue
  on_exception
end
985
+
986
+ #-----------------------------------------------------------------
987
+ # PARSERS:
988
+ #-----------------------------------------------------------------
989
+
990
# Parses a ListAllMyBuckets response into an array of bucket hashes; the
# owner's id/display name are merged into every bucket entry.
class S3ListAllMyBucketsParser < RightAWSParser # :nodoc:
  def reset
    @result     = []
    @owner_info = {}
  end
  def tagstart(name, attributes)
    @bucket = {} if name == 'Bucket'
  end
  def tagend(name)
    case name
    when 'ID'           then @owner_info[:owner_id]           = @text
    when 'DisplayName'  then @owner_info[:owner_display_name] = @text
    when 'Name'         then @bucket[:name]                   = @text
    when 'CreationDate' then @bucket[:creation_date]          = @text
    when 'Bucket'       then @result << @bucket.merge(@owner_info)
    end
  end
end
1008
+
1009
# Parses a ListBucket response into a flat array of key hashes; every key
# hash carries the bucket-level listing info under :service (note: all keys
# share the same service hash reference).
class S3ListBucketParser < RightAWSParser # :nodoc:
  def reset
    @result       = []
    @service_info = {}
    @key_entry    = {}
  end
  def tagstart(name, attributes)
    @key_entry = {} if name == 'Contents'
  end
  def tagend(name)
    case name
    # bucket-level listing info
    when 'Name'         then @service_info['name']         = @text
    when 'Prefix'       then @service_info['prefix']       = @text
    when 'Marker'       then @service_info['marker']       = @text
    when 'MaxKeys'      then @service_info['max-keys']     = @text
    when 'Delimiter'    then @service_info['delimiter']    = @text
    when 'IsTruncated'  then @service_info['is_truncated'] = (@text =~ /false/ ? false : true)
    # per-key data
    when 'Key'          then @key_entry[:key]                = @text
    when 'LastModified' then @key_entry[:last_modified]      = @text
    when 'ETag'         then @key_entry[:e_tag]              = @text
    when 'Size'         then @key_entry[:size]               = @text.to_i
    when 'StorageClass' then @key_entry[:storage_class]      = @text
    when 'ID'           then @key_entry[:owner_id]           = @text
    when 'DisplayName'  then @key_entry[:owner_display_name] = @text
    when 'Contents'
      @key_entry[:service] = @service_info
      @result << @key_entry
    end
  end
end
1041
+
1042
# Parses a ListBucket response into a single hash: listing info at the top
# level, keys under :contents, and CommonPrefixes entries under
# :common_prefixes.
class S3ImprovedListBucketParser < RightAWSParser # :nodoc:
  def reset
    @result                     = {}
    @result[:contents]          = []
    @result[:common_prefixes]   = []
    @contents                   = []
    @key_entry                  = {}
    @prefixes                   = []
    @inside_common_prefixes     = false
  end
  def tagstart(name, attributes)
    @key_entry              = {}   if name == 'Contents'
    @inside_common_prefixes = true if name == 'CommonPrefixes'
  end
  def tagend(name)
    case name
    # listing info
    when 'Name'         then @result[:name] = @text
    # Amazon uses the same 'Prefix' tag both for the search prefix and for the
    # entries inside CommonPrefixes; the flag tells us which one we are in.
    when 'Prefix'       then @inside_common_prefixes ? @prefixes << @text : @result[:prefix] = @text
    when 'Marker'       then @result[:marker]       = @text
    when 'MaxKeys'      then @result[:max_keys]     = @text
    when 'Delimiter'    then @result[:delimiter]    = @text
    when 'IsTruncated'  then @result[:is_truncated] = (@text =~ /false/ ? false : true)
    when 'NextMarker'   then @result[:next_marker]  = @text
    # per-key data
    when 'Key'          then @key_entry[:key]                = @text
    when 'LastModified' then @key_entry[:last_modified]      = @text
    when 'ETag'         then @key_entry[:e_tag]              = @text
    when 'Size'         then @key_entry[:size]               = @text.to_i
    when 'StorageClass' then @key_entry[:storage_class]      = @text
    when 'ID'           then @key_entry[:owner_id]           = @text
    when 'DisplayName'  then @key_entry[:owner_display_name] = @text
    when 'Contents'     then @result[:contents] << @key_entry
    # common-prefix handling
    when 'CommonPrefixes'
      @result[:common_prefixes] = @prefixes
      @inside_common_prefixes   = false
    end
  end
end
1085
+
1086
# Extracts the bucket's LocationConstraint value; the result is '' when the
# bucket has no explicit location.
class S3BucketLocationParser < RightAWSParser # :nodoc:
  def reset
    @result = ''
  end
  def tagend(name)
    case name
    when 'LocationConstraint' then @result = @text
    end
  end
end
1094
+
1095
# Parses an AccessControlPolicy document into
# {:owner => {:id, :display_name}, :grantees => [grantee hashes]}.
# ID/DisplayName tags appear both under Owner and under each Grantee; the
# current @xmlpath distinguishes which scope is being filled.
class S3AclParser < RightAWSParser # :nodoc:
  def reset
    @result  = { :grantees => [], :owner => {} }
    @grantee = {}
  end
  def tagstart(name, attributes)
    # each Grantee element carries its xsi:type etc. as XML attributes
    @grantee = { :attributes => attributes } if name == 'Grantee'
  end
  def tagend(name)
    in_owner = (@xmlpath == 'AccessControlPolicy/Owner')
    case name
    when 'ID'
      (in_owner ? @result[:owner] : @grantee)[:id] = @text
    when 'DisplayName'
      (in_owner ? @result[:owner] : @grantee)[:display_name] = @text
    when 'URI'        then @grantee[:uri]         = @text
    when 'Permission' then @grantee[:permissions] = @text
    when 'Grant'      then @result[:grantees] << @grantee
    end
  end
end
1127
+
1128
# Parses a BucketLoggingStatus document into
# {:enabled, :targetbucket, :targetprefix}; :enabled becomes true only when a
# LoggingEnabled section is present.
class S3LoggingParser < RightAWSParser # :nodoc:
  def reset
    @result = { :enabled => false, :targetbucket => '', :targetprefix => '' }
  end
  def tagend(name)
    # only tags inside the LoggingEnabled section are of interest
    return unless @xmlpath == 'BucketLoggingStatus/LoggingEnabled'
    case name
    when 'TargetBucket'
      @result[:targetbucket] = @text
      @result[:enabled]      = true
    when 'TargetPrefix'
      @result[:targetprefix] = @text
      @result[:enabled]      = true
    end
  end
end
1149
+
1150
# Extracts :last_modified and :e_tag from a CopyObjectResult document.
class S3CopyParser < RightAWSParser # :nodoc:
  def reset
    @result = {}
  end
  def tagend(name)
    case name
    when 'LastModified' then @result[:last_modified] = @text
    when 'ETag'         then @result[:e_tag]         = @text
    end
  end
end
1161
+
1162
+ #-----------------------------------------------------------------
1163
+ # PARSERS: Non XML
1164
+ #-----------------------------------------------------------------
1165
+
1166
# Base "parser" for plain (non-XML) S3 HTTP responses.
class S3HttpResponseParser # :nodoc:
  attr_reader :result
  # Stores the raw response object as the parse result.
  def parse(response)
    @result = response
  end
  # Flattens a Net::HTTP-style headers hash (values are arrays): zero- or
  # one-element arrays collapse to a plain string, longer arrays are kept.
  def headers_to_string(headers)
    result = {}
    headers.each do |key, value|
      # Fix: the original used value.to_s, which on Ruby >= 1.9 renders an
      # array as '["v"]' instead of joining it (the Ruby 1.8 behavior this
      # code relied on). Collapse via the first element instead; an empty
      # array yields '' as before.
      value = value.first.to_s if value.is_a?(Array) && value.size < 2
      result[key] = value
    end
    result
  end
end
1180
+
1181
# Parses a body-carrying response: result is
# {:object => body string, :headers => flattened header hash}.
class S3HttpResponseBodyParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = { :object  => response.body,
                :headers => headers_to_string(response.to_hash) }
  end
end
1189
+
1190
# Parses a HEAD response: result is just the flattened header hash.
class S3HttpResponseHeadParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = headers_to_string(response.to_hash)
  end
end
1195
+
1196
+ end
1197
+
1198
+ end