aboisvert_aws 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78) hide show
  1. data/History.txt +329 -0
  2. data/Manifest.txt +61 -0
  3. data/README.txt +163 -0
  4. data/Rakefile +130 -0
  5. data/lib/acf/right_acf_interface.rb +549 -0
  6. data/lib/acf/right_acf_invalidations.rb +144 -0
  7. data/lib/acf/right_acf_origin_access_identities.rb +230 -0
  8. data/lib/acf/right_acf_streaming_interface.rb +229 -0
  9. data/lib/acw/right_acw_interface.rb +248 -0
  10. data/lib/as/right_as_interface.rb +698 -0
  11. data/lib/awsbase/benchmark_fix.rb +39 -0
  12. data/lib/awsbase/right_awsbase.rb +1343 -0
  13. data/lib/awsbase/support.rb +35 -0
  14. data/lib/awsbase/version.rb +9 -0
  15. data/lib/ec2/right_ec2.rb +541 -0
  16. data/lib/ec2/right_ec2_ebs.rb +481 -0
  17. data/lib/ec2/right_ec2_images.rb +444 -0
  18. data/lib/ec2/right_ec2_instances.rb +788 -0
  19. data/lib/ec2/right_ec2_monitoring.rb +70 -0
  20. data/lib/ec2/right_ec2_placement_groups.rb +108 -0
  21. data/lib/ec2/right_ec2_reserved_instances.rb +184 -0
  22. data/lib/ec2/right_ec2_security_groups.rb +491 -0
  23. data/lib/ec2/right_ec2_spot_instances.rb +422 -0
  24. data/lib/ec2/right_ec2_tags.rb +139 -0
  25. data/lib/ec2/right_ec2_vpc.rb +590 -0
  26. data/lib/ec2/right_ec2_vpc2.rb +381 -0
  27. data/lib/ec2/right_ec2_windows_mobility.rb +84 -0
  28. data/lib/elb/right_elb_interface.rb +573 -0
  29. data/lib/emr/right_emr_interface.rb +727 -0
  30. data/lib/iam/right_iam_access_keys.rb +71 -0
  31. data/lib/iam/right_iam_groups.rb +195 -0
  32. data/lib/iam/right_iam_interface.rb +341 -0
  33. data/lib/iam/right_iam_mfa_devices.rb +67 -0
  34. data/lib/iam/right_iam_users.rb +251 -0
  35. data/lib/rds/right_rds_interface.rb +1384 -0
  36. data/lib/right_aws.rb +86 -0
  37. data/lib/route_53/right_route_53_interface.rb +640 -0
  38. data/lib/s3/right_s3.rb +1138 -0
  39. data/lib/s3/right_s3_interface.rb +1278 -0
  40. data/lib/sdb/active_sdb.rb +1107 -0
  41. data/lib/sdb/right_sdb_interface.rb +762 -0
  42. data/lib/sns/right_sns_interface.rb +286 -0
  43. data/lib/sqs/right_sqs.rb +387 -0
  44. data/lib/sqs/right_sqs_gen2.rb +342 -0
  45. data/lib/sqs/right_sqs_gen2_interface.rb +523 -0
  46. data/lib/sqs/right_sqs_interface.rb +593 -0
  47. data/right_aws.gemspec +90 -0
  48. data/test/README.mdown +39 -0
  49. data/test/acf/test_helper.rb +2 -0
  50. data/test/acf/test_right_acf.rb +138 -0
  51. data/test/awsbase/test_helper.rb +2 -0
  52. data/test/awsbase/test_right_awsbase.rb +11 -0
  53. data/test/ec2/test_helper.rb +2 -0
  54. data/test/ec2/test_right_ec2.rb +107 -0
  55. data/test/elb/test_helper.rb +2 -0
  56. data/test/elb/test_right_elb.rb +43 -0
  57. data/test/http_connection.rb +87 -0
  58. data/test/rds/test_helper.rb +2 -0
  59. data/test/rds/test_right_rds.rb +120 -0
  60. data/test/route_53/fixtures/a_record.xml +18 -0
  61. data/test/route_53/fixtures/alias_record.xml +18 -0
  62. data/test/route_53/test_helper.rb +2 -0
  63. data/test/route_53/test_right_route_53.rb +141 -0
  64. data/test/s3/test_helper.rb +2 -0
  65. data/test/s3/test_right_s3.rb +528 -0
  66. data/test/s3/test_right_s3_stubbed.rb +97 -0
  67. data/test/sdb/test_active_sdb.rb +357 -0
  68. data/test/sdb/test_batch_put_attributes.rb +54 -0
  69. data/test/sdb/test_helper.rb +3 -0
  70. data/test/sdb/test_right_sdb.rb +253 -0
  71. data/test/sns/test_helper.rb +2 -0
  72. data/test/sns/test_right_sns.rb +153 -0
  73. data/test/sqs/test_helper.rb +2 -0
  74. data/test/sqs/test_right_sqs.rb +285 -0
  75. data/test/sqs/test_right_sqs_gen2.rb +264 -0
  76. data/test/test_credentials.rb +37 -0
  77. data/test/ts_right_aws.rb +15 -0
  78. metadata +257 -0
@@ -0,0 +1,1278 @@
1
+ #
2
+ # Copyright (c) 2007-2008 RightScale Inc
3
+ #
4
+ # Permission is hereby granted, free of charge, to any person obtaining
5
+ # a copy of this software and associated documentation files (the
6
+ # "Software"), to deal in the Software without restriction, including
7
+ # without limitation the rights to use, copy, modify, merge, publish,
8
+ # distribute, sublicense, and/or sell copies of the Software, and to
9
+ # permit persons to whom the Software is furnished to do so, subject to
10
+ # the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be
13
+ # included in all copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19
+ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20
+ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
+ #
23
+
24
+ module RightAws
25
+
26
+ class S3Interface < RightAwsBase
27
+
28
+ USE_100_CONTINUE_PUT_SIZE = 1_000_000
29
+
30
+ include RightAwsBaseInterface
31
+
32
+ DEFAULT_HOST = 's3.amazonaws.com'
33
+ DEFAULT_PORT = 443
34
+ DEFAULT_PROTOCOL = 'https'
35
+ DEFAULT_SERVICE = '/'
36
+ REQUEST_TTL = 30
37
+ DEFAULT_EXPIRES_AFTER = 1 * 24 * 60 * 60 # One day's worth of seconds
38
+ ONE_YEAR_IN_SECONDS = 365 * 24 * 60 * 60
39
+ AMAZON_HEADER_PREFIX = 'x-amz-'
40
+ AMAZON_METADATA_PREFIX = 'x-amz-meta-'
41
+ S3_REQUEST_PARAMETERS = [ 'acl',
42
+ 'delete',
43
+ 'location',
44
+ 'logging', # this one is beta, no support for now
45
+ 'response-content-type',
46
+ 'response-content-language',
47
+ 'response-expires',
48
+ 'response-cache-control',
49
+ 'response-content-disposition',
50
+ 'response-content-encoding',
51
+ 'torrent' ].sort
52
+ MULTI_OBJECT_DELETE_MAX_KEYS = 1000
53
+
54
+
55
# Shared benchmarking accumulator for this class (presumably collects
# timing data for XML parsing and for the S3 service calls — confirm
# against AwsBenchmarkingBlock in right_awsbase).
@@bench = AwsBenchmarkingBlock.new
# Benchmark data for XML parsing.
def self.bench_xml
  @@bench.xml
end
# Benchmark data for S3 service requests.
def self.bench_s3
  @@bench.service
end
62
+
63
# Class-wide custom options, consulted by #param when an instance has no
# explicit value of its own (see #param below).
# Params supported:
#  :no_subdomains => true # do not use bucket as a part of domain name but as a part of path
@@params = {}
def self.params
  @@params
end
69
+
70
# Fetch a custom option by name.
# An explicitly defined per-instance option (@params) takes precedence
# over the class-wide default (@@params).
def param(name)
  if @params.key?(name)
    @params[name]
  else
    @@params[name]
  end
end
76
+
77
# Creates new RightS3 instance.
#
#  s3 = RightAws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX','hgTHt68JY07JKUY08ftHYtERkjgtfERn57XXXXXX', {:logger => Logger.new('/tmp/x.log')}) #=> #<RightAws::S3Interface:0xb7b3c27c>
#
# Params is a hash:
#
#  {:server        => 's3.amazonaws.com'  # Amazon service host: 's3.amazonaws.com'(default)
#   :port          => 443                 # Amazon service port: 80 or 443(default)
#   :protocol      => 'https'             # Amazon service protocol: 'http' or 'https'(default)
#   :logger        => Logger Object       # Logger instance: logs to STDOUT if omitted
#   :no_subdomains => true}               # Force placing bucket name into path instead of domain name
#
# Credentials fall back to the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
# environment variables when not given; the S3_URL environment variable,
# when set, overrides the default endpoint.
def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
  # Parse the S3_URL override once instead of once per component.
  s3_url = ENV['S3_URL'] && URI.parse(ENV['S3_URL'])
  init({ :name             => 'S3',
         :default_host     => s3_url ? s3_url.host   : DEFAULT_HOST,
         :default_port     => s3_url ? s3_url.port   : DEFAULT_PORT,
         :default_service  => s3_url ? s3_url.path   : DEFAULT_SERVICE,
         :default_protocol => s3_url ? s3_url.scheme : DEFAULT_PROTOCOL },
       aws_access_key_id     || ENV['AWS_ACCESS_KEY_ID'],
       aws_secret_access_key || ENV['AWS_SECRET_ACCESS_KEY'],
       params)
end
99
+
100
+
101
+ #-----------------------------------------------------------------
102
+ # Requests
103
+ #-----------------------------------------------------------------
104
# Produces the canonical string for signing (AWS S3 REST authentication).
# Only content-md5, content-type, date and x-amz-* headers participate:
# the x-amz-* headers are emitted sorted as "key:value" lines, the other
# three contribute only their value. Query-string parameters listed in
# S3_REQUEST_PARAMETERS (acl, torrent, ...) are appended back to the
# resource path because S3 includes them in the signature.
def canonical_string(method, path, headers={}, expires=nil) # :nodoc:
  s3_headers = {}
  headers.each do |key, value|
    key = key.downcase
    # multi-valued headers are concatenated before signing
    value = case
            when value.is_a?(Array) then value.join('')
            else value.to_s
            end
    s3_headers[key] = value.strip if key[/^#{AMAZON_HEADER_PREFIX}|^content-md5$|^content-type$|^date$/o]
  end
  s3_headers['content-type'] ||= ''
  s3_headers['content-md5'] ||= ''
  # when x-amz-date is supplied it replaces the date line; for
  # query-string auth the expiry timestamp takes the date slot
  s3_headers['date'] = '' if s3_headers.has_key? 'x-amz-date'
  s3_headers['date'] = expires if expires
  # prepare output string
  out_string = "#{method}\n"
  s3_headers.sort { |a, b| a[0] <=> b[0] }.each do |key, value|
    out_string << (key[/^#{AMAZON_HEADER_PREFIX}/o] ? "#{key}:#{value}\n" : "#{value}\n")
  end
  # ignore everything after the question mark by default...
  out_string << path.gsub(/\?.*$/, '')
  # ... unless there is a parameter that we care about.
  S3_REQUEST_PARAMETERS.each do |parameter|
    if path[/[&?]#{parameter}(=[^&]*)?($|&)/]
      if $1
        value = CGI::unescape($1)
      else
        value = ''
      end
      out_string << (out_string[/[?]/] ? "&#{parameter}#{value}" : "?#{parameter}#{value}")
    end
  end

  out_string
end
140
+
141
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
# Returns +true+ when the bucket name can be used as a DNS host label
# chain (3..63 chars; dot-separated labels of lowercase letters, digits
# and dashes that neither start nor end with a dash); +nil+ otherwise.
def is_dns_bucket?(bucket_name)
  name = bucket_name.to_s
  return nil unless (3..63).include?(name.size)
  all_labels_valid = name.split('.').all? do |label|
    label =~ /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/
  end
  all_labels_valid || nil
end
150
+
151
# Splits headers[:url] into [server, request path, path-to-sign],
# choosing between virtual-host style (bucket.s3.amazonaws.com/key) and
# path style (s3.amazonaws.com/bucket/key). The signature is always
# computed over the path-style form.
def fetch_request_params(headers) #:nodoc:
  # default server to use
  server = @params[:server]
  service = @params[:service].to_s
  service.chop! if service[%r{/$}] # remove trailing '/' from service
  # extract bucket name and check its dns compatibility
  headers[:url].to_s[%r{^([a-z0-9._-]*)(/[^?]*)?(\?.+)?}i]
  bucket_name, key_path, params_list = $1, $2, $3
  # NOTE(review): only '%2F' is unescaped back to '/' in the key path
  key_path = key_path.gsub( '%2F', '/' ) if key_path
  # select request model
  if !param(:no_subdomains) && is_dns_bucket?(bucket_name)
    # bucket goes into the host name; the path holds only the key
    server = "#{bucket_name}.#{server}"
    key_path ||= '/'
    path = "#{service}#{key_path}#{params_list}"
  else
    # path style: bucket is the first path segment
    path = "#{service}/#{bucket_name}#{key_path}#{params_list}"
  end
  path_to_sign = "#{service}/#{bucket_name}#{key_path}#{params_list}"
  # path_to_sign = "/#{bucket_name}#{key_path}#{params_list}"
  [ server, path, path_to_sign ]
end
173
+
174
# Generates request hash for REST API.
# Assumes that headers[:url] is URL encoded (use CGI::escape).
# Returns { :request, :server, :port, :protocol } ready to be passed to
# request_info. NOTE: mutates +headers+ in place (drops nil values and
# symbolic keys before turning it into HTTP headers).
def generate_rest_request(method, headers) # :nodoc:
  # calculate request data
  server, path, path_to_sign = fetch_request_params(headers)
  data = headers[:data]
  # remove unset (== optional) and symbolic keys
  headers.each{ |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
  #
  headers['content-type'] ||= ''
  headers['date'] = Time.now.httpdate
  # create request
  request = "Net::HTTP::#{method.capitalize}".right_constantize.new(path)
  request.body = data if data
  # set request headers and meta headers
  headers.each { |key, value| request[key.to_s] = value }
  # generate auth strings
  auth_string = canonical_string(request.method, path_to_sign, request.to_hash)
  signature = AwsUtils::sign(@aws_secret_access_key, auth_string)
  # set other headers
  request['Authorization'] = "AWS #{@aws_access_key_id}:#{signature}"
  # prepare output hash
  { :request => request,
    :server => server,
    :port => @params[:port],
    :protocol => @params[:protocol] }
end
201
+
202
# Sends the prepared request hash to Amazon and parses the response via
# +parser+. Raises AwsError if anything goes wrong. Thin delegation to
# request_info_impl using the per-thread :s3_connection and the shared
# benchmarking block.
def request_info(request, parser, &block) # :nodoc:
  request_info_impl(:s3_connection, @@bench, request, parser, &block)
end
207
+
208
# Returns an array of the account owner's buckets. Each item is a +hash+.
#
#  s3.list_all_my_buckets #=>
#    [{:owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
#      :owner_display_name => "root",
#      :name => "bucket_name",
#      :creation_date => "2007-04-19T18:47:43.000Z"}, ..., {...}]
#
def list_all_my_buckets(headers={})
  request = generate_rest_request('GET', headers.merge(:url => ''))
  request_info(request, S3ListAllMyBucketsParser.new(:logger => @logger))
rescue
  on_exception
end
222
+
223
# Creates new bucket. Returns +true+ or an exception.
#
#  # create a bucket at American server
#  s3.create_bucket('my-awesome-bucket-us') #=> true
#  # create a bucket at European server
#  s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
#
def create_bucket(bucket, headers={})
  data = nil
  # Map the :location option to S3's LocationConstraint value:
  # 'us'/'US' mean the classic (default) region, i.e. no constraint.
  location = case headers[:location].to_s
             when 'us','US' then ''
             when 'eu' then 'EU'
             else headers[:location].to_s
             end

  unless location.right_blank?
    data = "<CreateBucketConfiguration><LocationConstraint>#{location}</LocationConstraint></CreateBucketConfiguration>"
  end
  req_hash = generate_rest_request('PUT', headers.merge(:url=>bucket, :data => data))
  request_info(req_hash, RightHttp2xxParser.new)
rescue Exception => e
  # NOTE(review): rescuing Exception (not StandardError) also traps
  # SystemExit/SignalException; kept as-is to preserve behavior.
  # If the bucket exists AWS returns an error for the location constraint
  # interface; treat 'BucketAlreadyOwnedByYou' as success, re-dispatch
  # everything else.
  e.is_a?(RightAws::AwsError) && e.message.include?('BucketAlreadyOwnedByYou') ? true : on_exception
end
247
+
248
# Retrieve bucket location constraint.
#
#  s3.create_bucket('my-awesome-bucket-us') #=> true
#  puts s3.bucket_location('my-awesome-bucket-us') #=> '' (Amazon's default value assumed)
#
#  s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
#  puts s3.bucket_location('my-awesome-bucket-eu') #=> 'EU'
#
def bucket_location(bucket, headers={})
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}?location"))
  request_info(request, S3BucketLocationParser.new)
rescue
  on_exception
end
262
+
263
# Retrieves the logging configuration for a bucket.
# Returns a hash of {:enabled, :targetbucket, :targetprefix}.
# params: :bucket (mandatory), :headers (optional extra request headers)
#
#  s3.interface.get_logging_parse(:bucket => "asset_bucket")
#    => {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
#
def get_logging_parse(params)
  AwsUtils.mandatory_arguments([:bucket], params)
  AwsUtils.allow_only([:bucket, :headers], params)
  params[:headers] ||= {}
  url     = "#{params[:bucket]}?logging"
  request = generate_rest_request('GET', params[:headers].merge(:url => url))
  request_info(request, S3LoggingParser.new)
rescue
  on_exception
end
279
+
280
# Sets the logging configuration for a bucket from an XML configuration
# document. Returns +true+ or an exception.
# params: :bucket (mandatory), :xmldoc (mandatory), :headers (optional)
def put_logging(params)
  AwsUtils.mandatory_arguments([:bucket, :xmldoc], params)
  AwsUtils.allow_only([:bucket, :xmldoc, :headers], params)
  params[:headers] ||= {}
  url     = "#{params[:bucket]}?logging"
  request = generate_rest_request('PUT', params[:headers].merge(:url => url, :data => params[:xmldoc]))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
293
+
294
# Deletes a bucket. The bucket must be empty! Returns +true+ or an
# exception.
#
#  s3.delete_bucket('my_awesome_bucket') #=> true
#
# See also: force_delete_bucket method
#
def delete_bucket(bucket, headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => bucket))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
306
+
307
# Returns an array of bucket's keys. Each array item (key data) is a +hash+.
#
#  s3.list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, 'delimiter'=>'' }) #=>
#    [{:key => "test1",
#      :last_modified => "2007-05-18T07:00:59.000Z",
#      :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
#      :owner_display_name => "root",
#      :e_tag => "000000000059075b964b07152d234b70",
#      :storage_class => "STANDARD",
#      :size => 3,
#      :service=> {'is_truncated' => false,
#                  'prefix' => "t",
#                  'marker' => "",
#                  'name' => "my_awesome_bucket",
#                  'max-keys' => "5"}, ..., {...}]
#
def list_bucket(bucket, options={}, headers={})
  url = bucket
  unless options.right_blank?
    query = options.map { |name, value| "#{name}=#{CGI::escape value.to_s}" }.join('&')
    url = "#{url}?#{query}"
  end
  request = generate_rest_request('GET', headers.merge(:url => url))
  request_info(request, S3ListBucketParser.new(:logger => @logger))
rescue
  on_exception
end
330
+
331
# Incrementally list the contents of a bucket. Yields one page of
# results to the block per request until the listing is exhausted or
# the caller-supplied 'max-keys' budget is used up. Returns +true+.
#
#  s3.incrementally_list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, 'delimiter'=>'' }) yields
#   {
#     :name => 'bucketname',
#     :prefix => 'subfolder/',
#     :marker => 'fileN.jpg',
#     :max_keys => 234,
#     :delimiter => '/',
#     :is_truncated => true,
#     :next_marker => 'fileX.jpg',
#     :contents => [
#       { :key => "file1",
#         :last_modified => "2007-05-18T07:00:59.000Z",
#         :e_tag => "000000000059075b964b07152d234b70",
#         :size => 3,
#         :storage_class => "STANDARD",
#         :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
#         :owner_display_name => "root"
#       }, { :key, ...}, ... {:key, ...}
#     ]
#     :common_prefixes => [
#       "prefix1",
#       "prefix2",
#       ...,
#       "prefixN"
#     ]
#   }
def incrementally_list_bucket(bucket, options={}, headers={}, &block)
  internal_options = options.right_symbolize_keys
  begin
    internal_bucket = bucket.dup
    internal_bucket += '?'+internal_options.map{|k, v| "#{k.to_s}=#{CGI::escape v.to_s}"}.join('&') unless internal_options.right_blank?
    req_hash = generate_rest_request('GET', headers.merge(:url=>internal_bucket))
    response = request_info(req_hash, S3ImprovedListBucketParser.new(:logger => @logger))
    there_are_more_keys = response[:is_truncated]
    if(there_are_more_keys)
      # continue the next request after the last key/prefix seen, and
      # shrink the remaining 'max-keys' budget by this page's results
      internal_options[:marker] = decide_marker(response)
      total_results = response[:contents].length + response[:common_prefixes].length
      internal_options[:'max-keys'] ? (internal_options[:'max-keys'] -= total_results) : nil
    end
    yield response
  end while there_are_more_keys && under_max_keys(internal_options)
  true
rescue
  on_exception
end
377
+
378
+
379
+ private
380
# Picks the 'marker' for the next incremental list request: the explicit
# :next_marker when S3 returned one, otherwise whichever sorts later of
# the last listed key and the last common prefix.
def decide_marker(response)
  next_marker = response[:next_marker]
  return next_marker.dup if next_marker
  last_key    = response[:contents].last[:key]
  last_prefix = response[:common_prefixes].last
  if last_key.nil?
    last_prefix.nil? ? nil : last_prefix.dup
  elsif last_prefix.nil?
    last_key.dup
  else
    [last_key, last_prefix].max.dup
  end
end
393
+
394
# True while the caller's 'max-keys' budget (if any) is not exhausted.
def under_max_keys(internal_options)
  remaining = internal_options[:'max-keys']
  return true unless remaining
  remaining > 0
end
397
+
398
+ public
399
# Saves object to Amazon. Returns +true+ or an exception.
# Any header starting with AMAZON_METADATA_PREFIX is considered user
# metadata: it is stored with the object and returned on retrieval.
# The total size of the HTTP request, not including the body, must be
# less than 4 KB.
#
#  s3.put('my_awesome_bucket', 'log/current/1.log', 'Ola-la!', 'x-amz-meta-family'=>'Woho556!') #=> true
#
# Supports 'streaming' uploads: pass an object that responds to :read
# (like the read method of IO) and to either :lstat or :size, so large
# bodies need not be read into memory first:
#
#  s3.put(bucket_name, 'S3keyname.forthisfile', File.open('localfilename.dat'))
#
# If your IO-like object lacks lstat/size, extend it (or wrap it in a
# class) that defines them. Be sure read() returns nil after 'size'
# bytes, otherwise S3 drops the socket after 'Content-Length' bytes and
# HttpConnection interprets this as an error. Very large (> 2 GB) PUTs
# are supported.
#
# For Win32 users: files and IO objects should be opened in binary mode;
# a text-mode IO object is converted to binary mode here.
#
def put(bucket, key, data=nil, headers={})
  # On Windows, if someone opens a file in text mode, we must reset it so
  # to binary mode for streaming to work properly
  if(data.respond_to?(:binmode))
    data.binmode
  end
  # Ask for a '100-continue' handshake on large bodies so the server can
  # reject the request before the whole payload is streamed.
  if (data.respond_to?(:lstat) && data.lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
     (data.respond_to?(:size) && data.size >= USE_100_CONTINUE_PUT_SIZE)
    headers['expect'] = '100-continue'
  end
  req_hash = generate_rest_request('PUT', headers.merge(:url=>"#{bucket}/#{CGI::escape key}", :data=>data))
  request_info(req_hash, RightHttp2xxParser.new)
rescue
  on_exception
end
448
+
449
+
450
+
451
# New experimental API for uploading objects, introduced in RightAws 1.8.1.
# store_object is similar in function to the older #put, but returns the
# full response metadata (useful fields like the Amazon request ID and
# the object ETag / MD5 checksum). It also allows for optional
# verification of object md5 checksums on upload. Parameters are passed
# as hash entries and are checked for completeness as well as for
# spurious arguments.
#
# Mandatory: :bucket, :key, :data. Optional: :headers (arbitrary request
# header values), :md5. If :md5 is provided, store_object verifies that
# it matches the md5 returned by S3; the :verified_md5 field in the
# response hash reflects the outcome. Without :md5, :verified_md5 is
# false.
#
#  s3.store_object(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
#  => {"x-amz-id-2"=>"SVsnS2nfDaR+ixyJUlRKM8GndRyEMS16+oZRieamuL61pPxPaTuWrWtlYaEhYrI/",
#      "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
#      "date"=>"Mon, 29 Sep 2008 18:57:46 GMT",
#      :verified_md5=>true,
#      "x-amz-request-id"=>"63916465939995BA",
#      "server"=>"AmazonS3",
#      "content-length"=>"0"}
#
#  s3.store_object(:bucket => "foobucket", :key => "foo", :data => "polemonium" )
#  => {..., :verified_md5=>false, ...}

def store_object(params)
  AwsUtils.allow_only([:bucket, :key, :data, :headers, :md5], params)
  AwsUtils.mandatory_arguments([:bucket, :key, :data], params)
  params[:headers] = {} unless params[:headers]

  params[:data].binmode if(params[:data].respond_to?(:binmode)) # On Windows, if someone opens a file in text mode, we must reset it to binary mode for streaming to work properly
  # Large uploads negotiate '100-continue' before sending the body.
  if (params[:data].respond_to?(:lstat) && params[:data].lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
     (params[:data].respond_to?(:size) && params[:data].size >= USE_100_CONTINUE_PUT_SIZE)
    params[:headers]['expect'] = '100-continue'
  end

  req_hash = generate_rest_request('PUT', params[:headers].merge(:url=>"#{params[:bucket]}/#{CGI::escape params[:key]}", :data=>params[:data]))
  resp = request_info(req_hash, S3HttpResponseHeadParser.new)
  if(params[:md5])
    # the ETag arrives quoted; strip the quotes before comparing
    resp[:verified_md5] = (resp['etag'].gsub(/\"/, '') == params[:md5]) ? true : false
  else
    resp[:verified_md5] = false
  end
  resp
rescue
  on_exception
end
501
+
502
# Identical in function to store_object, but requires verification that
# the returned ETag is identical to the checksum passed in by the user
# as the 'md5' argument. If the check passes, returns the response
# metadata with the :verified_md5 field set true. Raises an exception if
# the checksums conflict. This call is implemented as a wrapper around
# store_object and the user may gain different semantics by creating a
# custom wrapper.
#
#  s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
#  => {..., :verified_md5=>true, ...}
#
#  s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2953", :data => "polemonium" )
#  RightAws::AwsError: Uploaded object failed MD5 checksum verification: {..., :verified_md5=>false, ...}
def store_object_and_verify(params)
  AwsUtils.mandatory_arguments([:md5], params)
  response = store_object(params)
  return response if response[:verified_md5]
  raise AwsError.new("Uploaded object failed MD5 checksum verification: #{response.inspect}")
end
528
+
529
# Retrieves object data from Amazon. Returns a +hash+ or an exception.
#
#  s3.get('my_awesome_bucket', 'log/curent/1.log') #=>
#
#  {:object => "Ola-la!",
#   :headers => {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
#                "content-type" => "",
#                "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
#                "date" => "Wed, 23 May 2007 09:08:03 GMT",
#                "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
#                "x-amz-meta-family" => "Woho556!",
#                "x-amz-request-id" => "0000000C246D770C",
#                "server" => "AmazonS3",
#                "content-length" => "7"}}
#
# If a block is provided, yields incrementally to the block as the
# response is read, so large responses can be 'streamed'. The hash
# containing header fields is still returned. Example:
#
#  foo = File.new('./chunder.txt', File::CREAT|File::RDWR)
#  rhdr = s3.get('aws-test', 'Cent5V1_7_1.img.part.00') do |chunk|
#    foo.write(chunk)
#  end
#  foo.close
#
def get(bucket, key, headers={}, &block)
  url     = "#{bucket}/#{CGI::escape key}"
  request = generate_rest_request('GET', headers.merge(:url => url))
  request_info(request, S3HttpResponseBodyParser.new, &block)
rescue
  on_exception
end
562
+
563
# New experimental API for retrieving objects, introduced in RightAws 1.8.1.
# retrieve_object is similar in function to the older #get. It allows
# for optional verification of object md5 checksums on retrieval.
# Parameters are passed as hash entries and are checked for completeness
# as well as for spurious arguments.
#
# Mandatory arguments:
#   :bucket - the bucket in which the object is stored
#   :key    - the object address (or path) within the bucket
# Optional arguments:
#   :headers - hash of additional HTTP headers to include with the request
#   :md5     - MD5 checksum against which to verify the retrieved object
#
# When :md5 is given, the :verified_md5 field in the response hash
# reports whether it matched the ETag S3 returned; without :md5,
# :verified_md5 is false.
#
#  s3.retrieve_object(:bucket => "foobucket", :key => "foo")
#  => {:verified_md5=>false,
#      :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
#                 "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
#                 "content-length"=>"10", ...},
#      :object=>"polemonium"}
#
#  s3.retrieve_object(:bucket => "foobucket", :key => "foo", :md5=>'a507841b1bc8115094b00bbe8c1b2954')
#  => {:verified_md5=>true, :headers=>{...}, :object=>"polemonium"}
#
# If a block is provided, yields incrementally to the block as the
# response is read, so large responses can be 'streamed'. The hash
# containing header fields is still returned.
def retrieve_object(params, &block)
  AwsUtils.mandatory_arguments([:bucket, :key], params)
  AwsUtils.allow_only([:bucket, :key, :headers, :md5], params)
  params[:headers] = {} unless params[:headers]
  req_hash = generate_rest_request('GET', params[:headers].merge(:url=>"#{params[:bucket]}/#{CGI::escape params[:key]}"))
  resp = request_info(req_hash, S3HttpResponseBodyParser.new, &block)
  resp[:verified_md5] = false
  # the ETag arrives quoted; strip the quotes before comparing
  if(params[:md5] && (resp[:headers]['etag'].gsub(/\"/,'') == params[:md5]))
    resp[:verified_md5] = true
  end
  resp
rescue
  on_exception
end
618
+
619
# Identical in function to retrieve_object, but requires verification
# that the returned ETag is identical to the checksum passed in by the
# user as the 'md5' argument. If the check passes, returns the response
# metadata with the :verified_md5 field set true. Raises an exception if
# the checksums conflict. This call is implemented as a wrapper around
# retrieve_object and the user may gain different semantics by creating
# a custom wrapper.
def retrieve_object_and_verify(params, &block)
  AwsUtils.mandatory_arguments([:md5], params)
  response = retrieve_object(params, &block)
  unless response[:verified_md5]
    raise AwsError.new("Retrieved object failed MD5 checksum verification: #{response.inspect}")
  end
  response
end
628
+
629
# Retrieves object metadata via an HTTP HEAD request (no body is transferred).
# Returns a hash of http response headers, e.g.:
#
#  s3.head('my_awesome_bucket', 'log/curent/1.log') #=>
#    {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
#     "etag"          => "\"000000000096f4ee74bc4596443ef2a4\"",
#     "content-length"=> "7", ...}
#
def head(bucket, key, headers={})
  request = generate_rest_request('HEAD', headers.merge(:url => "#{bucket}/#{CGI.escape(key)}"))
  request_info(request, S3HttpResponseHeadParser.new)
rescue
  on_exception
end
648
+
649
# Deletes a single key. Returns +true+ or raises an exception.
#
#  s3.delete('my_awesome_bucket', 'log/curent/1.log') #=> true
#
def delete(bucket, key='', headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => "#{bucket}/#{CGI.escape(key)}"))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
659
+
660
# Deletes multiple keys via the S3 Multi-Object Delete API, batching
# MULTI_OBJECT_DELETE_MAX_KEYS keys per POST request.
# Returns an array of per-key error hashes (empty when everything succeeded):
#
#  s3.delete_multiple('my_awesome_bucket', ['key1', 'key2'])
#    #=> [ { :key => 'key2', :code => 'AccessDenied', :message => "Access Denied" } ]
#
def delete_multiple(bucket, keys=[], headers={})
  errors = []
  # Bug fix: the original called Array.new(keys), which raises TypeError when
  # handed an Array (Array.new expects an Integer size), so every call ended
  # up in on_exception. Array() passes arrays through and wraps scalars.
  remaining = Array(keys)
  until remaining.empty?
    # Build the quiet-mode Delete document for the current batch.
    data = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
    data << "<Delete>\n<Quiet>true</Quiet>\n"
    remaining.take(MULTI_OBJECT_DELETE_MAX_KEYS).each do |key|
      data << "<Object><Key>#{AwsUtils::xml_escape(key)}</Key></Object>\n"
    end
    data << "</Delete>"
    req_hash = generate_rest_request('POST', headers.merge(
      :url          => "#{bucket}?delete",
      :data         => data,
      'content-md5' => AwsUtils::content_md5(data)  # required by the API
    ))
    errors += request_info(req_hash, S3DeleteMultipleParser.new)
    remaining = remaining.drop(MULTI_OBJECT_DELETE_MAX_KEYS)
  end
  errors
rescue
  on_exception
end
687
+
688
# Copies an object, optionally across buckets.
# directive: :copy    - copy meta-headers from the source (default)
#            :replace - replace meta-headers with the ones passed in
#
#  # copy a key together with its meta-headers
#  s3.copy('b1', 'key1', 'b1', 'key1_copy') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:25:22.000Z"}
#
#  # copy a key, overwriting meta-headers
#  s3.copy('b1', 'key2', 'b1', 'key2_copy', :replace, 'x-amz-meta-family'=>'Woho555!')
#
# see: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingCopyingObjects.html
#      http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
#
def copy(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  dest_key ||= src_key
  # Bug fix: the original assigned the x-amz-* keys directly into the caller's
  # headers hash, mutating the argument; merge non-destructively instead.
  headers = headers.merge('x-amz-metadata-directive' => directive.to_s.upcase,
                          'x-amz-copy-source'        => "#{src_bucket}/#{CGI.escape(src_key)}")
  req_hash = generate_rest_request('PUT', headers.merge(:url => "#{dest_bucket}/#{CGI.escape(dest_key)}"))
  request_info(req_hash, S3CopyParser.new)
rescue
  on_exception
end
710
+
711
# Moves an object (a copy followed by deletion of the source).
# directive: :copy    - copy meta-headers from the source (default)
#            :replace - replace meta-headers with the passed ones
#
#  # move bucket1/key1 to bucket1/key2
#  s3.move('bucket1', 'key1', 'bucket1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"..."}
#
#  # move bucket1/key1 to bucket2/key2 assigning new meta-headers
#  s3.move('bucket1', 'key1', 'bucket2', 'key2', :replace, 'x-amz-meta-family'=>'Woho555!')
#
def move(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  result = copy(src_bucket, src_key, dest_bucket, dest_key, directive, headers)
  # remove the source unless it is the very same object as the destination
  same_object = (src_bucket == dest_bucket) && (src_key == dest_key)
  delete(src_bucket, src_key) unless same_object
  result
end
727
+
728
# Renames an object within its bucket (a move with identical buckets).
#
#  s3.rename('bucket1', 'key1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"..."}
#
def rename(src_bucket, src_key, dest_key, headers={})
  move(src_bucket, src_key, src_bucket, dest_key, :copy, headers)
end
736
+
737
# Retrieves the ACL (access control policy) for a bucket (key == '') or an
# object. Returns a hash with :headers and the raw ACL XML under :object.
# See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html
def get_acl(bucket, key='', headers={})
  path = key.right_blank? ? '' : "/#{CGI.escape(key)}"
  req_hash = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{path}?acl"))
  request_info(req_hash, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
759
+
760
# Retrieves and parses the ACL for a bucket (key == '') or an object.
# Returns {:owner => {...}, :grantees => {...}} where grantees are keyed by
# canonical-user ID or group URI, e.g.:
#
#  { :owner    => { :id => "16..2a", :display_name => "root" },
#    :grantees => {
#      "16...2a" => { :display_name => "root",
#                     :permissions  => ["FULL_CONTROL"],
#                     :attributes   => {"xsi:type" => "CanonicalUser", ...} },
#      "http://acs.amazonaws.com/groups/global/AllUsers" =>
#                   { :display_name => "AllUsers", :permissions => ["READ"], ... } } }
#
def get_acl_parse(bucket, key='', headers={})
  path = key.right_blank? ? '' : "/#{CGI.escape(key)}"
  req_hash = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{path}?acl"))
  acl = request_info(req_hash, S3AclParser.new(:logger => @logger))
  grantees = {}
  acl[:grantees].each do |grantee|
    # grantees may appear once per permission; merge them under one id/uri
    id = grantee[:id] || grantee[:uri]
    if (existing = grantees[id])
      existing[:permissions] << grantee[:permissions]
    else
      grantees[id] = {
        # groups have no display name; fall back to the last URI segment
        :display_name => grantee[:display_name] || grantee[:uri].to_s[/[^\/]*$/],
        :permissions  => Array(grantee[:permissions]),
        :attributes   => grantee[:attributes]
      }
    end
  end
  { :owner => acl[:owner], :grantees => grantees }
rescue
  on_exception
end
804
+
805
# Sets the ACL on a bucket (key == '') or on an individual object.
# acl_xml_doc is a full AccessControlPolicy XML document.
def put_acl(bucket, key, acl_xml_doc, headers={})
  path = key.right_blank? ? '' : "/#{CGI.escape(key)}"
  req_hash = generate_rest_request('PUT', headers.merge(:url => "#{bucket}#{path}?acl", :data => acl_xml_doc))
  request_info(req_hash, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
813
+
814
# Retrieves the ACL for a bucket. Returns a hash of headers and the ACL XML.
def get_bucket_acl(bucket, headers={})
  get_acl(bucket, '', headers)
rescue
  on_exception
end
820
+
821
# Sets the ACL on a bucket only (delegates to put_acl with an empty key).
def put_bucket_acl(bucket, acl_xml_doc, headers={})
  put_acl(bucket, '', acl_xml_doc, headers)
rescue
  on_exception
end
827
+
828
+
829
# Removes every key from the bucket (the bucket itself is kept).
# Returns +true+ or raises an exception.
#
#  s3.clear_bucket('my_awesome_bucket') #=> true
#
def clear_bucket(bucket)
  incrementally_list_bucket(bucket) do |batch|
    batch[:contents].each { |entry| delete(bucket, entry[:key]) }
  end
  true
rescue
  on_exception
end
841
+
842
# Deletes every key in the bucket, then deletes the bucket itself.
# Returns +true+ or raises an exception.
#
#  s3.force_delete_bucket('my_awesome_bucket')
#
def force_delete_bucket(bucket)
  clear_bucket(bucket)
  delete_bucket(bucket)
rescue
  on_exception
end
852
+
853
# Deletes every key for which 'folder_key' can be treated as the 'folder'
# name (the key itself plus everything below it). Returns the deleted keys
# grouped per listing batch (an array of arrays of strings).
#
#  s3.list_bucket('my_awesome_bucket').map{|k| k[:key]} #=> ['test','test/2/34','test/3','test1','test1/logs']
#  s3.delete_folder('my_awesome_bucket','test')         #=> [['test','test/2/34','test/3']]
#
def delete_folder(bucket, folder_key, separator='/')
  folder_key.chomp!(separator)
  # Bug fix: the original interpolated folder_key into the regexp unescaped,
  # so metacharacters in key names ('+', '(', '.', ...) corrupted the match.
  prefix_re = /^#{Regexp.escape(folder_key)}($|#{separator}.*)/
  all_keys = []
  incrementally_list_bucket(bucket, { 'prefix' => folder_key }) do |results|
    # the prefix listing may include siblings like 'test1'; keep only exact
    # matches of the folder itself or keys beneath it
    keys = results[:contents].map { |s3_key| s3_key[:key] }.select { |k| k =~ prefix_re }
    keys.each { |key| delete(bucket, key) }
    all_keys << keys
  end
  all_keys
rescue
  on_exception
end
870
+
871
# Retrieves the object body only (response headers are discarded).
# Returns a +String+ or raises an exception.
#
#  s3.get_object('my_awesome_bucket', 'log/curent/1.log') #=> 'Ola-la!'
#
def get_object(bucket, key, headers={})
  get(bucket, key, headers)[:object]
rescue
  on_exception
end
880
+
881
+ #-----------------------------------------------------------------
882
+ # Query API: Links
883
+ #-----------------------------------------------------------------
884
+
885
# Builds a pre-signed Query-API URL for the given HTTP method and headers.
# 'expires' may be an absolute Time / epoch value or a relative number of
# seconds (any integer below one year is treated as relative to now).
def generate_link(method, headers={}, expires=nil) #:nodoc:
  # calculate request data
  server, path, path_to_sign = fetch_request_params(headers)
  path_to_sign = CGI.unescape(path_to_sign)
  # expiration time
  expires ||= DEFAULT_EXPIRES_AFTER
  # Bug fix: Integer instead of Fixnum — Fixnum was removed in Ruby 3.2
  # (NameError), and Fixnum < Integer on older versions, so this check is
  # strictly backward-compatible.
  expires = Time.now.utc + expires if expires.is_a?(Integer) && (expires < ONE_YEAR_IN_SECONDS)
  expires = expires.to_i
  # drop unset (optional) and symbolic keys before signing
  headers.each { |key, value| headers.delete(key) if value.nil? || key.is_a?(Symbol) }
  # generate auth strings
  auth_string = canonical_string(method, path_to_sign, headers, expires)
  signature   = CGI::escape(AwsUtils::sign(@aws_secret_access_key, auth_string))
  # append the signature parameters to the path
  addon = "Signature=#{signature}&Expires=#{expires}&AWSAccessKeyId=#{@aws_access_key_id}"
  path += path[/\?/] ? "&#{addon}" : "?#{addon}"
  "#{@params[:protocol]}://#{server}:#{@params[:port]}#{path}"
rescue
  on_exception
end
906
+
907
# Generates a pre-signed link for 'ListAllMyBuckets'.
#
#  s3.list_all_my_buckets_link #=> url string
#
def list_all_my_buckets_link(expires=nil, headers={})
  generate_link('GET', headers.merge(:url => ''), expires)
rescue
  on_exception
end
916
+
917
# Generates a pre-signed link for 'CreateBucket'.
#
#  s3.create_bucket_link('my_awesome_bucket') #=> url string
#
def create_bucket_link(bucket, expires=nil, headers={})
  generate_link('PUT', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
926
+
927
# Generates a pre-signed link for 'DeleteBucket'.
#
#  s3.delete_bucket_link('my_awesome_bucket') #=> url string
#
def delete_bucket_link(bucket, expires=nil, headers={})
  generate_link('DELETE', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
936
+
937
# Generates a pre-signed link for 'ListBucket'.
#
#  s3.list_bucket_link('my_awesome_bucket') #=> url string
#
def list_bucket_link(bucket, options=nil, expires=nil, headers={})
  unless options.right_blank?
    query = options.map { |k, v| "#{k}=#{CGI.escape(v.to_s)}" }.join('&')
    bucket += "?#{query}"
  end
  generate_link('GET', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
947
+
948
# Generates a pre-signed link for 'PutObject'.
#
#  s3.put_link('my_awesome_bucket', key, object) #=> url string
#
def put_link(bucket, key, data=nil, expires=nil, headers={})
  url = "#{bucket}/#{CGI.escape(key)}"
  generate_link('PUT', headers.merge(:url => url, :data => data), expires)
rescue
  on_exception
end
957
+
958
# Generates a pre-signed link for 'GetObject'.
#
# If the bucket name is compatible with virtual hosting, the returned link
# carries the bucket in the host name:
#
#  s3.get_link('my-awesome-bucket',key) #=> https://my-awesome-bucket.s3.amazonaws.com:443/asia%2Fcustomers?Signature=nh7...
#
# otherwise an old-style link is produced (bucket as part of the path):
#
#  s3.get_link('my_awesome_bucket',key) #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?Signature=QAO...
#
# see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html
#
# Optional response-* overrides go into the response_params hash:
#
#  s3.get_link('my_awesome_bucket', key, nil, {},
#              { "response-content-disposition" => "attachment; filename=pic.png",
#                "response-content-type"        => "image/png" })
#
def get_link(bucket, key, expires=nil, headers={}, response_params={})
  query =
    if response_params.empty?
      ''
    else
      '?' + response_params.map { |k, v| "#{k}=#{CGI.escape(v).gsub(/[+]/, '%20')}" }.join('&')
    end
  generate_link('GET', headers.merge(:url => "#{bucket}/#{CGI.escape(key)}#{query}"), expires)
rescue
  on_exception
end
987
+
988
# Generates a pre-signed link for 'HeadObject'.
#
#  s3.head_link('my_awesome_bucket',key) #=> url string
#
def head_link(bucket, key, expires=nil, headers={})
  generate_link('HEAD', headers.merge(:url => "#{bucket}/#{CGI.escape(key)}"), expires)
rescue
  on_exception
end
997
+
998
# Generates a pre-signed link for 'DeleteObject'.
#
#  s3.delete_link('my_awesome_bucket',key) #=> url string
#
def delete_link(bucket, key, expires=nil, headers={})
  generate_link('DELETE', headers.merge(:url => "#{bucket}/#{CGI.escape(key)}"), expires)
rescue
  on_exception
end
1007
+
1008
+
1009
# Generates a pre-signed link for 'GetACL'.
#
#  s3.get_acl_link('my_awesome_bucket',key) #=> url string
#
def get_acl_link(bucket, key='', headers={})
  generate_link('GET', headers.merge(:url => "#{bucket}/#{CGI.escape(key)}?acl"))
rescue
  on_exception
end
1018
+
1019
# Generates a pre-signed link for 'PutACL'.
#
#  s3.put_acl_link('my_awesome_bucket',key) #=> url string
#
def put_acl_link(bucket, key='', headers={})
  generate_link('PUT', headers.merge(:url => "#{bucket}/#{CGI.escape(key)}?acl"))
rescue
  on_exception
end
1028
+
1029
# Generates a pre-signed link for 'GetBucketACL'.
#
#  s3.get_bucket_acl_link('my_awesome_bucket') #=> url string
#
def get_bucket_acl_link(bucket, headers={})
  get_acl_link(bucket, '', headers)
rescue
  on_exception
end
1038
+
1039
# Generates a pre-signed link for 'PutBucketACL'.
# The acl_xml_doc parameter is retained for interface compatibility but is
# not needed to build the link itself.
#
#  s3.put_bucket_acl_link('my_awesome_bucket', acl_xml) #=> url string
#
def put_bucket_acl_link(bucket, acl_xml_doc, headers={})
  # Bug fix: the original forwarded acl_xml_doc as an extra argument —
  # put_acl_link(bucket, '', acl_xml_doc, headers) — but put_acl_link takes
  # at most three arguments, so every call raised ArgumentError (silently
  # swallowed by on_exception). Delegate with the correct arity.
  put_acl_link(bucket, '', headers)
rescue
  on_exception
end
1048
+
1049
+ #-----------------------------------------------------------------
1050
+ # PARSERS:
1051
+ #-----------------------------------------------------------------
1052
+
1053
# Parses 'ListAllMyBuckets' XML into an array of bucket hashes, each merged
# with the owner's id and display name.
class S3ListAllMyBucketsParser < RightAWSParser # :nodoc:
  def reset
    @result = []
    @owner  = {}
  end

  def tagstart(name, attributes)
    @bucket = {} if name == 'Bucket'
  end

  def tagend(name)
    case name
    when 'ID'           then @owner[:owner_id]           = @text
    when 'DisplayName'  then @owner[:owner_display_name] = @text
    when 'Name'         then @bucket[:name]              = @text
    when 'CreationDate' then @bucket[:creation_date]     = @text
    when 'Bucket'       then @result << @bucket.merge(@owner)
    end
  end
end
1071
+
1072
# Parses 'GET Bucket' (list objects) XML into an array of key hashes.
# Each entry carries the listing metadata under its :service key.
class S3ListBucketParser < RightAWSParser # :nodoc:
  def reset
    @result   = []
    @service  = {}
    @key_info = {}
  end

  def tagstart(name, attributes)
    @key_info = {} if name == 'Contents'
  end

  def tagend(name)
    case name
    # listing/service metadata
    when 'Name'         then @service['name']         = @text
    when 'Prefix'       then @service['prefix']       = @text
    when 'Marker'       then @service['marker']       = @text
    when 'MaxKeys'      then @service['max-keys']     = @text
    when 'Delimiter'    then @service['delimiter']    = @text
    when 'IsTruncated'  then @service['is_truncated'] = !(@text =~ /false/)
    # per-key data
    when 'Key'          then @key_info[:key]                = @text
    when 'LastModified' then @key_info[:last_modified]      = @text
    when 'ETag'         then @key_info[:e_tag]              = @text
    when 'Size'         then @key_info[:size]               = @text.to_i
    when 'StorageClass' then @key_info[:storage_class]      = @text
    when 'ID'           then @key_info[:owner_id]           = @text
    when 'DisplayName'  then @key_info[:owner_display_name] = @text
    when 'Contents'
      @key_info[:service] = @service
      @result << @key_info
    end
  end
end
1104
+
1105
# Parses 'GET Bucket' XML into a single hash:
#   {:contents => [...], :common_prefixes => [...], :name, :prefix, :marker,
#    :max_keys, :delimiter, :is_truncated, :next_marker}
class S3ImprovedListBucketParser < RightAWSParser # :nodoc:
  def reset
    @result   = { :contents => [], :common_prefixes => [] }
    @key_info = {}
    @prefixes = []
    @in_common_prefixes = false
  end

  def tagstart(name, attributes)
    case name
    when 'Contents'       then @key_info = {}
    when 'CommonPrefixes' then @in_common_prefixes = true
    end
  end

  def tagend(name)
    case name
    when 'Name'         then @result[:name] = @text
    when 'Prefix'
      # Amazon uses <Prefix> both for the request's search prefix and for
      # entries inside <CommonPrefixes>; the flag tells the contexts apart.
      if @in_common_prefixes
        @prefixes << @text
      else
        @result[:prefix] = @text
      end
    when 'Marker'       then @result[:marker]       = @text
    when 'MaxKeys'      then @result[:max_keys]     = @text
    when 'Delimiter'    then @result[:delimiter]    = @text
    when 'IsTruncated'  then @result[:is_truncated] = !(@text =~ /false/)
    when 'NextMarker'   then @result[:next_marker]  = @text
    # per-key data
    when 'Key'          then @key_info[:key]                = @text
    when 'LastModified' then @key_info[:last_modified]      = @text
    when 'ETag'         then @key_info[:e_tag]              = @text
    when 'Size'         then @key_info[:size]               = @text.to_i
    when 'StorageClass' then @key_info[:storage_class]      = @text
    when 'ID'           then @key_info[:owner_id]           = @text
    when 'DisplayName'  then @key_info[:owner_display_name] = @text
    when 'Contents'     then @result[:contents] << @key_info
    when 'CommonPrefixes'
      @result[:common_prefixes] = @prefixes
      @in_common_prefixes = false
    end
  end
end
1148
+
1149
# Extracts the region string from a 'GET Bucket location' response
# (an empty string means the US classic region).
class S3BucketLocationParser < RightAWSParser # :nodoc:
  def reset
    @result = ''
  end

  def tagend(name)
    return unless name == 'LocationConstraint'
    @result = @text
  end
end
1157
+
1158
# Parses an AccessControlPolicy document into
# {:owner => {:id, :display_name}, :grantees => [{...}, ...]} — one grantee
# entry per <Grant> element.
class S3AclParser < RightAWSParser # :nodoc:
  def reset
    @result  = { :grantees => [], :owner => {} }
    @grantee = {}
  end

  def tagstart(name, attributes)
    @grantee = { :attributes => attributes } if name == 'Grantee'
  end

  def tagend(name)
    # <ID> and <DisplayName> appear both under <Owner> and inside <Grantee>;
    # the xml path tells us which hash receives the value.
    target = (@xmlpath == 'AccessControlPolicy/Owner') ? @result[:owner] : @grantee
    case name
    when 'ID'          then target[:id]            = @text
    when 'DisplayName' then target[:display_name]  = @text
    when 'URI'         then @grantee[:uri]         = @text
    when 'Permission'  then @grantee[:permissions] = @text
    when 'Grant'       then @result[:grantees] << @grantee
    end
  end
end
1190
+
1191
# Parses a BucketLoggingStatus document into
# {:enabled, :targetbucket, :targetprefix}; :enabled flips to true only when
# a <LoggingEnabled> section is present.
class S3LoggingParser < RightAWSParser # :nodoc:
  def reset
    @result = { :enabled => false, :targetbucket => '', :targetprefix => '' }
  end

  def tagend(name)
    # only tags inside <LoggingEnabled> are meaningful here
    return unless @xmlpath == 'BucketLoggingStatus/LoggingEnabled'
    case name
    when 'TargetBucket'
      @result[:targetbucket] = @text
      @result[:enabled] = true
    when 'TargetPrefix'
      @result[:targetprefix] = @text
      @result[:enabled] = true
    end
  end
end
1212
+
1213
# Picks :last_modified and :e_tag out of a CopyObjectResult document.
class S3CopyParser < RightAWSParser # :nodoc:
  def reset
    @result = {}
  end

  def tagend(name)
    case name
    when 'LastModified' then @result[:last_modified] = @text
    when 'ETag'         then @result[:e_tag]         = @text
    end
  end
end
1224
+
1225
# Parses a Multi-Object Delete response (quiet mode): collects one
# {:key, :code, :message} hash per <Error> element into an array.
class S3DeleteMultipleParser < RightAWSParser # :nodoc:
  def reset
    @result = []
  end

  def tagstart(name, attributes)
    # Bug fix: only start a fresh error record on an <Error> element; the
    # original reset @error on *every* opening tag, wiping fields mid-record.
    @error = {} if name == 'Error'
  end

  def tagend(name)
    # Bug fix: the original referenced an undefined method 'current' here,
    # raising NameError on every parsed field; the record lives in @error.
    case name
    when 'Key'     then @error[:key]     = @text
    when 'Code'    then @error[:code]    = @text
    when 'Message' then @error[:message] = @text
    when 'Error'   then @result << @error
    end
  end
end
1241
+
1242
+ #-----------------------------------------------------------------
1243
+ # PARSERS: Non XML
1244
+ #-----------------------------------------------------------------
1245
+
1246
# Base "parser" for non-XML S3 responses: stores the raw response object and
# offers a helper that flattens single-element header value arrays.
class S3HttpResponseParser # :nodoc:
  attr_reader :result

  # Keeps the whole HTTP response object as the result.
  def parse(response)
    @result = response
  end

  # Net::HTTP yields each header value as an Array; collapse arrays with
  # fewer than two elements down to their sole value (or nil).
  def headers_to_string(headers)
    headers.inject({}) do |flat, (key, value)|
      flat[key] = (value.is_a?(Array) && value.size < 2) ? value.first : value
      flat
    end
  end
end
1260
+
1261
# Wraps a raw HTTP response as {:object => body, :headers => flattened hash}.
class S3HttpResponseBodyParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = {
      :object  => response.body,
      :headers => headers_to_string(response.to_hash)
    }
  end
end
1269
+
1270
# Reduces a raw HTTP response to its flattened header hash (used for HEAD).
class S3HttpResponseHeadParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = headers_to_string(response.to_hash)
  end
end
1275
+
1276
+ end
1277
+
1278
+ end