aws 2.3.34 → 2.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,346 @@
1
+ #
2
+ # Copyright (c) 2007-2008 RightScale Inc
3
+ #
4
+ # Permission is hereby granted, free of charge, to any person obtaining
5
+ # a copy of this software and associated documentation files (the
6
+ # "Software"), to deal in the Software without restriction, including
7
+ # without limitation the rights to use, copy, modify, merge, publish,
8
+ # distribute, sublicense, and/or sell copies of the Software, and to
9
+ # permit persons to whom the Software is furnished to do so, subject to
10
+ # the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be
13
+ # included in all copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19
+ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20
+ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
+ #
23
+ module Aws
24
+
25
+ # = Aws::S3 -- RightScale's Amazon S3 interface
26
+ # The Aws::S3 class provides a complete interface to Amazon's Simple
27
+ # Storage Service.
28
+ # For explanations of the semantics
29
+ # of each call, please refer to Amazon's documentation at
30
+ # http://developer.amazonwebservices.com/connect/kbcategory.jspa?categoryID=48
31
+ #
32
+ # See examples below for the bucket and buckets methods.
33
+ #
34
+ # Error handling: all operations raise an Aws::AwsError in case
35
+ # of problems. Note that transient errors are automatically retried.
36
+ #
37
+ # It is a good way to use domain naming style getting a name for the buckets.
38
+ # See http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingBucket.html
39
+ # about the naming convention for the buckets. In that case they can be accessed using virtual domains.
40
+ #
41
+ # Let's assume you have 3 buckets: 'awesome-bucket', 'awesome_bucket' and 'AWEsomE-bucket'.
42
+ # The first one's objects can be accessed as: http:// awesome-bucket.s3.amazonaws.com/key/object
43
+ #
44
+ # But the rest have to be accessed as:
45
+ # http:// s3.amazonaws.com/awesome_bucket/key/object and http:// s3.amazonaws.com/AWEsomE-bucket/key/object
46
+ #
47
+ # See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html for better explanation.
48
+ #
49
+ class S3
50
+
51
+
52
+ class Owner
53
+ attr_reader :id, :name
54
+
55
+ def initialize(id, name)
56
+ @id = id
57
+ @name = name
58
+ end
59
+
60
+ # Return Owner name as a +String+.
61
+ def to_s
62
+ @name
63
+ end
64
+ end
65
+
66
+ require_relative 'bucket'
67
+ require_relative 'key'
68
+ require_relative 'grantee'
69
+
70
+
71
+ attr_reader :interface
72
+
73
+
74
+ # Create a new handle to an S3 account. All handles share the same per process or per thread
75
+ # HTTP connection to Amazon S3. Each handle is for a specific account.
76
+ # The +params+ are passed through as-is to Aws::S3Interface.new
77
+ #
78
+ # Params is a hash:
79
+ #
80
+ # {:server => 's3.amazonaws.com' # Amazon service host: 's3.amazonaws.com'(default)
81
+ # :port => 443 # Amazon service port: 80 or 443(default)
82
+ # :protocol => 'https' # Amazon service protocol: 'http' or 'https'(default)
83
+ # :connection_mode => :default # options are
84
+ # :default (will use best known safe (as in won't need explicit close) option, may change in the future)
85
+ # :per_request (opens and closes a connection on every request)
86
+ # :single (one thread across entire app)
87
+ # :per_thread (one connection per thread)
88
+ # :logger => Logger Object} # Logger instance: logs to STDOUT if omitted }
89
+ def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
90
+ @interface = S3Interface.new(aws_access_key_id, aws_secret_access_key, params)
91
+ end
92
+
93
+ def close_connection
94
+ @interface.close_connection
95
+ end
96
+
97
+ # Retrieve a list of buckets.
98
+ # Returns an array of Aws::S3::Bucket instances.
99
+ # # Create handle to S3 account
100
+ # s3 = Aws::S3.new(aws_access_key_id, aws_secret_access_key)
101
+ # my_bucket_names = s3.buckets.map{|b| b.name}
102
+ # puts "Buckets on S3: #{my_bucket_names.join(', ')}"
103
+ def buckets
104
+ @interface.list_all_my_buckets.map! do |entry|
105
+ owner = Owner.new(entry[:owner_id], entry[:owner_display_name])
106
+ Bucket.new(self, entry[:name], entry[:creation_date], owner)
107
+ end
108
+ end
109
+
110
+ # Retrieve an individual bucket.
111
+ # If the bucket does not exist and +create+ is set, a new bucket
112
+ # is created on S3. Launching this method with +create+=+true+ may
113
+ # affect the bucket's ACL if the bucket already exists.
114
+ # Returns a Aws::S3::Bucket instance or +nil+ if the bucket does not exist
115
+ # and +create+ is not set.
116
+ #
117
+ # s3 = Aws::S3.new(aws_access_key_id, aws_secret_access_key)
118
+ # bucket1 = s3.bucket('my_awesome_bucket_1')
119
+ # bucket1.keys #=> exception here if the bucket does not exist
120
+ # ...
121
+ # bucket2 = s3.bucket('my_awesome_bucket_2', true)
122
+ # bucket2.keys #=> list of keys
123
+ # # create a bucket at the European location with public read access
124
+ # bucket3 = s3.bucket('my-awesome-bucket-3', true, 'public-read', :location => :eu)
125
+ #
126
+ # see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html
127
+ # (section: Canned Access Policies)
128
+ #
129
+ def bucket(name, create=false, perms=nil, headers={})
130
+ headers['x-amz-acl'] = perms if perms
131
+ @interface.create_bucket(name, headers) if create
132
+ return Bucket.new(self, name)
133
+ # The old way below was too slow and unnecessary because it retrieved all the buckets every time.
134
+ # owner = Owner.new(entry[:owner_id], entry[:owner_display_name])
135
+ # buckets.each { |bucket| return bucket if bucket.name == name }
136
+ # nil
137
+ end
138
+
139
+
140
+ end
141
+
142
+ # Aws::S3Generator and Aws::S3Generator::Bucket methods:
143
+ #
144
+ # s3g = Aws::S3Generator.new('1...2', 'nx...Y6') #=> #<Aws::S3Generator:0xb7b5cc94>
145
+ #
146
+ # # List all buckets(method 'GET'):
147
+ # buckets_list = s3g.buckets #=> 'https://s3.amazonaws.com:443/?Signature=Y...D&Expires=1180941864&AWSAccessKeyId=1...2'
148
+ # # Create bucket link (method 'PUT'):
149
+ # bucket = s3g.bucket('my_awesome_bucket') #=> #<Aws::S3Generator::Bucket:0xb7bcbda8>
150
+ # link_to_create = bucket.create_link(1.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket?Signature=4...D&Expires=1180942132&AWSAccessKeyId=1...2
151
+ # # ... or:
152
+ # bucket = Aws::S3Generator::Bucket.create(s3g, 'my_awesome_bucket') #=> #<Aws::S3Generator::Bucket:0xb7bcbda8>
153
+ # link_to_create = bucket.create_link(1.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket?Signature=4...D&Expires=1180942132&AWSAccessKeyId=1...2
154
+ # # ... or:
155
+ # bucket = Aws::S3Generator::Bucket.new(s3g, 'my_awesome_bucket') #=> #<Aws::S3Generator::Bucket:0xb7bcbda8>
156
+ # link_to_create = bucket.create_link(1.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket?Signature=4...D&Expires=1180942132&AWSAccessKeyId=1...2
157
+ # # List bucket(method 'GET'):
158
+ # bucket.keys(1.day) #=> https://s3.amazonaws.com:443/my_awesome_bucket?Signature=i...D&Expires=1180942620&AWSAccessKeyId=1...2
159
+ # # Create/put key (method 'PUT'):
160
+ # bucket.put('my_cool_key') #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=q...D&Expires=1180943094&AWSAccessKeyId=1...2
161
+ # # Get key data (method 'GET'):
162
+ # bucket.get('logs/today/1.log', 1.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=h...M%3D&Expires=1180820032&AWSAccessKeyId=1...2
163
+ # # Delete bucket (method 'DELETE'):
164
+ # bucket.delete(2.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/logs%2Ftoday%2F1.log?Signature=4...D&Expires=1180820032&AWSAccessKeyId=1...2
165
+ #
166
+ # Aws::S3Generator::Key methods:
167
+ #
168
+ # # Create Key instance:
169
+ # key = Aws::S3Generator::Key.new(bucket, 'my_cool_key') #=> #<Aws::S3Generator::Key:0xb7b7394c>
170
+ # # Put key data (method 'PUT'):
171
+ # key.put #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=2...D&Expires=1180943302&AWSAccessKeyId=1...2
172
+ # # Get key data (method 'GET'):
173
+ # key.get #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=a...D&Expires=1180820032&AWSAccessKeyId=1...2
174
+ # # Head key (method 'HEAD'):
175
+ # key.head #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=b...D&Expires=1180820032&AWSAccessKeyId=1...2
176
+ # # Delete key (method 'DELETE'):
177
+ # key.delete #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=x...D&Expires=1180820032&AWSAccessKeyId=1...2
178
+ #
179
+ class S3Generator
180
+ attr_reader :interface
181
+
182
+ def initialize(aws_access_key_id, aws_secret_access_key, params={})
183
+ @interface = S3Interface.new(aws_access_key_id, aws_secret_access_key, params)
184
+ end
185
+
186
+ # Generate link to list all buckets
187
+ #
188
+ # s3.buckets(1.hour)
189
+ #
190
+ def buckets(expires=nil, headers={})
191
+ @interface.list_all_my_buckets_link(expires, headers)
192
+ end
193
+
194
+ # Create new S3LinkBucket instance and generate link to create it at S3.
195
+ #
196
+ # bucket = s3.bucket('my_awesome_bucket')
197
+ #
198
+ def bucket(name, expires=nil, headers={})
199
+ Bucket.create(self, name.to_s)
200
+ end
201
+
202
+ class Bucket
203
+ attr_reader :s3, :name
204
+
205
+ def to_s
206
+ @name
207
+ end
208
+
209
+ alias_method :full_name, :to_s
210
+
211
+ # Return a public link to bucket.
212
+ #
213
+ # bucket.public_link #=> 'https://s3.amazonaws.com:443/my_awesome_bucket'
214
+ #
215
+ def public_link
216
+ params = @s3.interface.params
217
+ "#{params[:protocol]}://#{params[:server]}:#{params[:port]}/#{full_name}"
218
+ end
219
+
220
+ # Create new S3LinkBucket instance and generate creation link for it.
221
+ def self.create(s3, name, expires=nil, headers={})
222
+ new(s3, name.to_s)
223
+ end
224
+
225
+ # Create new S3LinkBucket instance.
226
+ def initialize(s3, name)
227
+ @s3, @name = s3, name.to_s
228
+ end
229
+
230
+ # Return a link to create this bucket.
231
+ #
232
+ def create_link(expires=nil, headers={})
233
+ @s3.interface.create_bucket_link(@name, expires, headers)
234
+ end
235
+
236
+ # Generate link to list keys.
237
+ #
238
+ # bucket.keys
239
+ # bucket.keys('prefix'=>'logs')
240
+ #
241
+ def keys(options=nil, expires=nil, headers={})
242
+ @s3.interface.list_bucket_link(@name, options, expires, headers)
243
+ end
244
+
245
+ # Return a S3Generator::Key instance.
246
+ #
247
+ # bucket.key('my_cool_key').get #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=B...D&Expires=1180820032&AWSAccessKeyId=1...2
248
+ # bucket.key('my_cool_key').delete #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=B...D&Expires=1180820098&AWSAccessKeyId=1...2
249
+ #
250
+ def key(name)
251
+ Key.new(self, name)
252
+ end
253
+
254
+ # Generates link to PUT key data.
255
+ #
256
+ # puts bucket.put('logs/today/1.log', 2.hour)
257
+ #
258
+ def put(key, meta_headers={}, expires=nil, headers={})
259
+ meta = Aws::S3::Key.add_meta_prefix(meta_headers)
260
+ @s3.interface.put_link(@name, key.to_s, nil, expires, meta.merge(headers))
261
+ end
262
+
263
+ # Generate link to GET key data.
264
+ #
265
+ # bucket.get('logs/today/1.log', 1.hour)
266
+ #
267
+ def get(key, expires=nil, headers={})
268
+ @s3.interface.get_link(@name, key.to_s, expires, headers)
269
+ end
270
+
271
+ # Generate link to delete bucket.
272
+ #
273
+ # bucket.delete(2.hour)
274
+ #
275
+ def delete(expires=nil, headers={})
276
+ @s3.interface.delete_bucket_link(@name, expires, headers)
277
+ end
278
+ end
279
+
280
+
281
+ class Key
282
+ attr_reader :bucket, :name
283
+
284
+ def to_s
285
+ @name
286
+ end
287
+
288
+ # Return a full S3 name (bucket/key).
289
+ #
290
+ # key.full_name #=> 'my_awesome_bucket/cool_key'
291
+ #
292
+ def full_name(separator='/')
293
+ "#{@bucket.to_s}#{separator}#{@name}"
294
+ end
295
+
296
+ # Return a public link to key.
297
+ #
298
+ # key.public_link #=> 'https://s3.amazonaws.com:443/my_awesome_bucket/cool_key'
299
+ #
300
+ def public_link
301
+ params = @bucket.s3.interface.params
302
+ "#{params[:protocol]}://#{params[:server]}:#{params[:port]}/#{full_name('/')}"
303
+ end
304
+
305
+ def initialize(bucket, name, meta_headers={})
306
+ @bucket = bucket
307
+ @name = name.to_s
308
+ @meta_headers = meta_headers
309
+ raise 'Key name can not be empty.' if @name.blank?
310
+ end
311
+
312
+ # Generate link to PUT key data.
313
+ #
314
+ # puts bucket.put('logs/today/1.log', '123', 2.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/logs%2Ftoday%2F1.log?Signature=B...D&Expires=1180820032&AWSAccessKeyId=1...2
315
+ #
316
+ def put(expires=nil, headers={})
317
+ @bucket.put(@name.to_s, @meta_headers, expires, headers)
318
+ end
319
+
320
+ # Generate link to GET key data.
321
+ #
322
+ # bucket.get('logs/today/1.log', 1.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/logs%2Ftoday%2F1.log?Signature=h...M%3D&Expires=1180820032&AWSAccessKeyId=1...2
323
+ #
324
+ def get(expires=nil, headers={})
325
+ @bucket.s3.interface.get_link(@bucket.to_s, @name, expires, headers)
326
+ end
327
+
328
+ # Generate link to delete key.
329
+ #
330
+ # bucket.delete(2.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/logs%2Ftoday%2F1.log?Signature=4...D&Expires=1180820032&AWSAccessKeyId=1...2
331
+ #
332
+ def delete(expires=nil, headers={})
333
+ @bucket.s3.interface.delete_link(@bucket.to_s, @name, expires, headers)
334
+ end
335
+
336
+ # Generate link to head key.
337
+ #
338
+ # bucket.head(2.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/logs%2Ftoday%2F1.log?Signature=4...D&Expires=1180820032&AWSAccessKeyId=1...2
339
+ #
340
+ def head(expires=nil, headers={})
341
+ @bucket.s3.interface.head_link(@bucket.to_s, @name, expires, headers)
342
+ end
343
+ end
344
+ end
345
+
346
+ end
@@ -0,0 +1,1275 @@
1
+ #
2
+ # Copyright (c) 2007-2008 RightScale Inc
3
+ #
4
+ # Permission is hereby granted, free of charge, to any person obtaining
5
+ # a copy of this software and associated documentation files (the
6
+ # "Software"), to deal in the Software without restriction, including
7
+ # without limitation the rights to use, copy, modify, merge, publish,
8
+ # distribute, sublicense, and/or sell copies of the Software, and to
9
+ # permit persons to whom the Software is furnished to do so, subject to
10
+ # the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be
13
+ # included in all copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19
+ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20
+ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
+ #
23
+
24
+ module Aws
25
+
26
+ class S3Interface < AwsBase
27
+
28
+ USE_100_CONTINUE_PUT_SIZE = 1_000_000
29
+
30
+ include AwsBaseInterface
31
+ extend AwsBaseInterface::ClassMethods
32
+
33
+ DEFAULT_HOST = 's3.amazonaws.com'
34
+ DEFAULT_PORT = 443
35
+ DEFAULT_PROTOCOL = 'https'
36
+ DEFAULT_SERVICE = '/'
37
+ REQUEST_TTL = 30
38
+ DEFAULT_EXPIRES_AFTER = 1 * 24 * 60 * 60 # One day's worth of seconds
39
+ ONE_YEAR_IN_SECONDS = 365 * 24 * 60 * 60
40
+ AMAZON_HEADER_PREFIX = 'x-amz-'
41
+ AMAZON_METADATA_PREFIX = 'x-amz-meta-'
42
+
43
+ def self.connection_name
44
+ :s3_connection
45
+ end
46
+
47
+ @@bench = AwsBenchmarkingBlock.new
48
+
49
+ def self.bench
50
+ @@bench
51
+ end
52
+
53
+ def self.bench
54
+ @@bench
55
+ end
56
+
57
+ def self.bench_xml
58
+ @@bench.xml
59
+ end
60
+
61
+ def self.bench_s3
62
+ @@bench.service
63
+ end
64
+
65
+
66
+ # Creates new RightS3 instance.
67
+ #
68
+ # s3 = Aws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX','hgTHt68JY07JKUY08ftHYtERkjgtfERn57XXXXXX', {:multi_thread => true, :logger => Logger.new('/tmp/x.log')}) #=> #<Aws::S3Interface:0xb7b3c27c>
69
+ #
70
+ # Params is a hash:
71
+ #
72
+ # {:server => 's3.amazonaws.com' # Amazon service host: 's3.amazonaws.com'(default)
73
+ # :port => 443 # Amazon service port: 80 or 443(default)
74
+ # :protocol => 'https' # Amazon service protocol: 'http' or 'https'(default)
75
+ # :connection_mode => :default # options are
76
+ # :default (will use best known safe (as in won't need explicit close) option, may change in the future)
77
+ # :per_request (opens and closes a connection on every request)
78
+ # :single (one thread across entire app)
79
+ # :per_thread (one connection per thread)
80
+ # :logger => Logger Object} # Logger instance: logs to STDOUT if omitted }
81
+ #
82
+ def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
83
+ init({:name => 'S3',
84
+ :default_host => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).host : DEFAULT_HOST,
85
+ :default_port => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).port : DEFAULT_PORT,
86
+ :default_service => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).path : DEFAULT_SERVICE,
87
+ :default_protocol => ENV['S3_URL'] ? URI.parse(ENV['S3_URL']).scheme : DEFAULT_PROTOCOL},
88
+ aws_access_key_id || ENV['AWS_ACCESS_KEY_ID'],
89
+ aws_secret_access_key || ENV['AWS_SECRET_ACCESS_KEY'],
90
+ params)
91
+ end
92
+
93
+ #-----------------------------------------------------------------
94
+ # Requests
95
+ #-----------------------------------------------------------------
96
+ # Produces canonical string for signing.
97
+ def canonical_string(method, path, headers={}, expires=nil) # :nodoc:
98
+ s3_headers = {}
99
+ headers.each do |key, value|
100
+ key = key.downcase
101
+ s3_headers[key] = value.join("").strip if key[/^#{AMAZON_HEADER_PREFIX}|^content-md5$|^content-type$|^date$/o]
102
+ end
103
+ s3_headers['content-type'] ||= ''
104
+ s3_headers['content-md5'] ||= ''
105
+ s3_headers['date'] = '' if s3_headers.has_key? 'x-amz-date'
106
+ s3_headers['date'] = expires if expires
107
+ # prepare output string
108
+ out_string = "#{method}\n"
109
+ s3_headers.sort { |a, b| a[0] <=> b[0] }.each do |key, value|
110
+ out_string << (key[/^#{AMAZON_HEADER_PREFIX}/o] ? "#{key}:#{value}\n" : "#{value}\n")
111
+ end
112
+ # ignore everything after the question mark...
113
+ out_string << path.gsub(/\?.*$/, '')
114
+ # ...unless there is an acl or torrent parameter
115
+ out_string << '?acl' if path[/[&?]acl($|&|=)/]
116
+ out_string << '?policy' if path[/[&?]policy($|&|=)/]
117
+ out_string << '?torrent' if path[/[&?]torrent($|&|=)/]
118
+ out_string << '?location' if path[/[&?]location($|&|=)/]
119
+ out_string << '?logging' if path[/[&?]logging($|&|=)/] # this one is beta, no support for now
120
+ out_string
121
+ end
122
+
123
+ # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
124
+ def is_dns_bucket?(bucket_name)
125
+ bucket_name = bucket_name.to_s
126
+ return nil unless (3..63) === bucket_name.size
127
+ bucket_name.split('.').each do |component|
128
+ return nil unless component[/^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/]
129
+ end
130
+ true
131
+ end
132
+
133
+ def fetch_request_params(headers) #:nodoc:
134
+ # default server to use
135
+ server = @params[:server]
136
+ service = @params[:service].to_s
137
+ service.chop! if service[%r{/$}] # remove trailing '/' from service
138
+ # extract bucket name and check its DNS compatibility
139
+ headers[:url].to_s[%r{^([a-z0-9._-]*)(/[^?]*)?(\?.+)?}i]
140
+ bucket_name, key_path, params_list = $1, $2, $3
141
+ # select request model
142
+ if is_dns_bucket?(bucket_name)
143
+ # fix a path
144
+ server = "#{bucket_name}.#{server}"
145
+ key_path ||= '/'
146
+ path = "#{service}#{key_path}#{params_list}"
147
+ else
148
+ path = "#{service}/#{bucket_name}#{key_path}#{params_list}"
149
+ end
150
+ path_to_sign = "#{service}/#{bucket_name}#{key_path}#{params_list}"
151
+ # path_to_sign = "/#{bucket_name}#{key_path}#{params_list}"
152
+ [server, path, path_to_sign]
153
+ end
154
+
155
+ # Generates request hash for REST API.
156
+ # Assumes that headers[:url] is URL encoded (use CGI::escape)
157
+ def generate_rest_request(method, headers) # :nodoc:
158
+ # calculate request data
159
+ server, path, path_to_sign = fetch_request_params(headers)
160
+ data = headers[:data]
161
+ # remove unset (== optional) and symbolic keys
162
+ headers.each { |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
163
+ #
164
+ headers['content-type'] ||= ''
165
+ headers['date'] = Time.now.httpdate
166
+ # create request
167
+ request = "Net::HTTP::#{method.capitalize}".constantize.new(path)
168
+ request.body = data if data
169
+ # set request headers and meta headers
170
+ headers.each { |key, value| request[key.to_s] = value }
171
+ #generate auth strings
172
+ auth_string = canonical_string(request.method, path_to_sign, request.to_hash)
173
+ signature = AwsUtils::sign(@aws_secret_access_key, auth_string)
174
+ # set other headers
175
+ request['Authorization'] = "AWS #{@aws_access_key_id}:#{signature}"
176
+ # prepare output hash
177
+ {:request => request,
178
+ :server => server,
179
+ :port => @params[:port],
180
+ :protocol => @params[:protocol]}
181
+ end
182
+
183
+ # Sends request to Amazon and parses the response.
184
+ # Raises AwsError if any banana happened.
185
+ def request_info(request, parser, options={}, &block) # :nodoc:
186
+ # request_info2(request, parser, @params, :s3_connection, @logger, @@bench, options, &block)
187
+ request_info3(self, request, parser, options, &block)
188
+ end
189
+
190
+
191
+ # Returns an array of customer's buckets. Each item is a +hash+.
192
+ #
193
+ # s3.list_all_my_buckets #=>
194
+ # [{:owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
195
+ # :owner_display_name => "root",
196
+ # :name => "bucket_name",
197
+ # :creation_date => "2007-04-19T18:47:43.000Z"}, ..., {...}]
198
+ #
199
+ def list_all_my_buckets(headers={})
200
+ req_hash = generate_rest_request('GET', headers.merge(:url=>''))
201
+ request_info(req_hash, S3ListAllMyBucketsParser.new(:logger => @logger))
202
+ rescue
203
+ on_exception
204
+ end
205
+
206
+ # Creates new bucket. Returns +true+ or an exception.
207
+ #
208
+ # # create a bucket at American server
209
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
210
+ # # create a bucket at European server
211
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
212
+ #
213
+ def create_bucket(bucket, headers={})
214
+ data = nil
215
+ unless headers[:location].blank?
216
+ # data = "<CreateBucketConfiguration><LocationConstraint>#{headers[:location].to_s.upcase}</LocationConstraint></CreateBucketConfiguration>"
217
+ location = headers[:location].to_s
218
+ location.upcase! if location == 'eu'
219
+ data = "<CreateBucketConfiguration><LocationConstraint>#{location}</LocationConstraint></CreateBucketConfiguration>"
220
+ end
221
+ req_hash = generate_rest_request('PUT', headers.merge(:url=>bucket, :data => data))
222
+ request_info(req_hash, RightHttp2xxParser.new)
223
+ rescue Exception => e
224
+ # if the bucket exists AWS returns an error for the location constraint interface. Drop it
225
+ e.is_a?(Aws::AwsError) && e.message.include?('BucketAlreadyOwnedByYou') ? true : on_exception
226
+ end
227
+
228
+ # Retrieve bucket location
229
+ #
230
+ # s3.create_bucket('my-awesome-bucket-us') #=> true
231
+ # puts s3.bucket_location('my-awesome-bucket-us') #=> '' (Amazon's default value assumed)
232
+ #
233
+ # s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
234
+ # puts s3.bucket_location('my-awesome-bucket-eu') #=> 'EU'
235
+ #
236
+ def bucket_location(bucket, headers={})
237
+ req_hash = generate_rest_request('GET', headers.merge(:url=>"#{bucket}?location"))
238
+ request_info(req_hash, S3BucketLocationParser.new)
239
+ rescue
240
+ on_exception
241
+ end
242
+
243
+ # Retrieves the logging configuration for a bucket.
244
+ # Returns a hash of {:enabled, :targetbucket, :targetprefix}
245
+ #
246
+ # s3.interface.get_logging_parse(:bucket => "asset_bucket")
247
+ # => {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
248
+ #
249
+ #
250
+ def get_logging_parse(params)
251
+ AwsUtils.mandatory_arguments([:bucket], params)
252
+ AwsUtils.allow_only([:bucket, :headers], params)
253
+ params[:headers] = {} unless params[:headers]
254
+ req_hash = generate_rest_request('GET', params[:headers].merge(:url=>"#{params[:bucket]}?logging"))
255
+ request_info(req_hash, S3LoggingParser.new)
256
+ rescue
257
+ on_exception
258
+ end
259
+
260
+ # Sets logging configuration for a bucket from the XML configuration document.
261
+ # params:
262
+ # :bucket
263
+ # :xmldoc
264
+ def put_logging(params)
265
+ AwsUtils.mandatory_arguments([:bucket, :xmldoc], params)
266
+ AwsUtils.allow_only([:bucket, :xmldoc, :headers], params)
267
+ params[:headers] = {} unless params[:headers]
268
+ req_hash = generate_rest_request('PUT', params[:headers].merge(:url=>"#{params[:bucket]}?logging", :data => params[:xmldoc]))
269
+ request_info(req_hash, S3TrueParser.new)
270
+ rescue
271
+ on_exception
272
+ end
273
+
274
+ # Deletes a bucket. The bucket must be empty! Returns +true+ or an exception.
275
+ #
276
+ # s3.delete_bucket('my_awesome_bucket') #=> true
277
+ #
278
+ # See also: force_delete_bucket method
279
+ #
280
+ def delete_bucket(bucket, headers={})
281
+ req_hash = generate_rest_request('DELETE', headers.merge(:url=>bucket))
282
+ request_info(req_hash, RightHttp2xxParser.new)
283
+ rescue
284
+ on_exception
285
+ end
286
+
287
+ # Returns an array of bucket's keys. Each array item (key data) is a +hash+.
288
+ #
289
+ # s3.list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) #=>
290
+ # [{:key => "test1",
291
+ # :last_modified => "2007-05-18T07:00:59.000Z",
292
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
293
+ # :owner_display_name => "root",
294
+ # :e_tag => "000000000059075b964b07152d234b70",
295
+ # :storage_class => "STANDARD",
296
+ # :size => 3,
297
+ # :service=> {'is_truncated' => false,
298
+ # 'prefix' => "t",
299
+ # 'marker' => "",
300
+ # 'name' => "my_awesome_bucket",
301
+ # 'max-keys' => "5"}, ..., {...}]
302
+ #
303
+ def list_bucket(bucket, options={}, headers={})
304
+ bucket += '?'+options.map { |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&') unless options.blank?
305
+ req_hash = generate_rest_request('GET', headers.merge(:url=>bucket))
306
+ request_info(req_hash, S3ListBucketParser.new(:logger => @logger))
307
+ rescue
308
+ on_exception
309
+ end
310
+
311
+ # Incrementally list the contents of a bucket. Yields the following hash to a block:
312
+ # s3.incrementally_list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) yields
313
+ # {
314
+ # :name => 'bucketname',
315
+ # :prefix => 'subfolder/',
316
+ # :marker => 'fileN.jpg',
317
+ # :max_keys => 234,
318
+ # :delimiter => '/',
319
+ # :is_truncated => true,
320
+ # :next_marker => 'fileX.jpg',
321
+ # :contents => [
322
+ # { :key => "file1",
323
+ # :last_modified => "2007-05-18T07:00:59.000Z",
324
+ # :e_tag => "000000000059075b964b07152d234b70",
325
+ # :size => 3,
326
+ # :storage_class => "STANDARD",
327
+ # :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a",
328
+ # :owner_display_name => "root"
329
+ # }, { :key, ...}, ... {:key, ...}
330
+ # ]
331
+ # :common_prefixes => [
332
+ # "prefix1",
333
+ # "prefix2",
334
+ # ...,
335
+ # "prefixN"
336
+ # ]
337
+ # }
338
+ def incrementally_list_bucket(bucket, options={}, headers={}, &block)
339
+ internal_options = options.symbolize_keys
340
+ begin
341
+ internal_bucket = bucket.dup
342
+ internal_bucket += '?'+internal_options.map { |k, v| "#{k.to_s}=#{CGI::escape v.to_s}" }.join('&') unless internal_options.blank?
343
+ req_hash = generate_rest_request('GET', headers.merge(:url=>internal_bucket))
344
+ response = request_info(req_hash, S3ImprovedListBucketParser.new(:logger => @logger))
345
+ there_are_more_keys = response[:is_truncated]
346
+ if (there_are_more_keys)
347
+ internal_options[:marker] = decide_marker(response)
348
+ total_results = response[:contents].length + response[:common_prefixes].length
349
+ internal_options[:'max-keys'] ? (internal_options[:'max-keys'] -= total_results) : nil
350
+ end
351
+ yield response
352
+ end while there_are_more_keys && under_max_keys(internal_options)
353
+ true
354
+ rescue
355
+ on_exception
356
+ end
357
+
358
+
359
+ private
360
+ def decide_marker(response)
361
+ return response[:next_marker].dup if response[:next_marker]
362
+ last_key = response[:contents].last[:key]
363
+ last_prefix = response[:common_prefixes].last
364
+ if (!last_key)
365
+ return nil if (!last_prefix)
366
+ last_prefix.dup
367
+ elsif (!last_prefix)
368
+ last_key.dup
369
+ else
370
+ last_key > last_prefix ? last_key.dup : last_prefix.dup
371
+ end
372
+ end
373
+
374
+ def under_max_keys(internal_options)
375
+ internal_options[:'max-keys'] ? internal_options[:'max-keys'] > 0 : true
376
+ end
377
+
378
+ public
379
+ # Saves object to Amazon. Returns +true+ or an exception.
380
+ # Any header starting with AMAZON_METADATA_PREFIX is considered
381
+ # user metadata. It will be stored with the object and returned
382
+ # when you retrieve the object. The total size of the HTTP
383
+ # request, not including the body, must be less than 4 KB.
384
+ #
385
+ # s3.put('my_awesome_bucket', 'log/current/1.log', 'Ola-la!', 'x-amz-meta-family'=>'Woho556!') #=> true
386
+ #
387
+ # This method is capable of 'streaming' uploads; that is, it can upload
388
+ # data from a file or other IO object without first reading all the data
389
+ # into memory. This is most useful for large PUTs - it is difficult to read
390
+ # a 2 GB file entirely into memory before sending it to S3.
391
+ # To stream an upload, pass an object that responds to 'read' (like the read
392
+ # method of IO) and to either 'lstat' or 'size'. For files, this means
393
+ # streaming is enabled by simply making the call:
394
+ #
395
+ # s3.put(bucket_name, 'S3keyname.forthisfile', File.open('localfilename.dat'))
396
+ #
397
+ # If the IO object you wish to stream from responds to the read method but
398
+ # doesn't implement lstat or size, you can extend the object dynamically
399
+ # to implement these methods, or define your own class which defines these
400
+ # methods. Be sure that your class returns 'nil' from read() after having
401
+ # read 'size' bytes. Otherwise S3 will drop the socket after
402
+ # 'Content-Length' bytes have been uploaded, and HttpConnection will
403
+ # interpret this as an error.
404
+ #
405
+ # This method now supports very large PUTs, where very large
406
+ # is > 2 GB.
407
+ #
408
+ # For Win32 users: Files and IO objects should be opened in binary mode. If
409
+ # a text mode IO object is passed to PUT, it will be converted to binary
410
+ # mode.
411
+ #
412
+
413
# Uploads +data+ (a String or any IO-like object responding to #read and
# #lstat/#size) to bucket/key. Returns +true+ or raises Aws::AwsError.
def put(bucket, key, data=nil, headers={})
  # Windows: a text-mode IO must be switched to binary mode for streaming.
  data.binmode if data.respond_to?(:binmode)
  # Wrap plain strings so the transport always streams from an IO object.
  data = StringIO.new(data) if data.is_a?(String)

  # Byte count for the Content-Length header (0 when it cannot be determined).
  data_size = if data.respond_to?(:lstat)
                data.lstat.size
              elsif data.respond_to?(:size)
                data.size
              else
                0
              end
  # Large uploads ask the server's permission before sending the body.
  headers['expect'] = '100-continue' if data_size >= USE_100_CONTINUE_PUT_SIZE

  req_hash = generate_rest_request('PUT',
                                   headers.merge(:url  => "#{bucket}/#{CGI::escape(key)}",
                                                 :data => data,
                                                 'Content-Length' => data_size.to_s))
  request_info(req_hash, RightHttp2xxParser.new)
rescue
  on_exception
end
440
+
441
+
442
+ # New experimental API for uploading objects, introduced in Aws 1.8.1.
443
+ # store_object is similar in function to the older function put, but returns the full response metadata. It also allows for optional verification
444
+ # of object md5 checksums on upload. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
445
+ # The hash of the response headers contains useful information like the Amazon request ID and the object ETag (MD5 checksum).
446
+ #
447
+ # If the optional :md5 argument is provided, store_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
448
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
449
+ #
450
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
451
+ #
452
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
453
+ # => {"x-amz-id-2"=>"SVsnS2nfDaR+ixyJUlRKM8GndRyEMS16+oZRieamuL61pPxPaTuWrWtlYaEhYrI/",
454
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
455
+ # "date"=>"Mon, 29 Sep 2008 18:57:46 GMT",
456
+ # :verified_md5=>true,
457
+ # "x-amz-request-id"=>"63916465939995BA",
458
+ # "server"=>"AmazonS3",
459
+ # "content-length"=>"0"}
460
+ #
461
+ # s3.store_object(:bucket => "foobucket", :key => "foo", :data => "polemonium" )
462
+ # => {"x-amz-id-2"=>"MAt9PLjgLX9UYJ5tV2fI/5dBZdpFjlzRVpWgBDpvZpl+V+gJFcBMW2L+LBstYpbR",
463
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
464
+ # "date"=>"Mon, 29 Sep 2008 18:58:56 GMT",
465
+ # :verified_md5=>false,
466
+ # "x-amz-request-id"=>"3B25A996BC2CDD3B",
467
+ # "server"=>"AmazonS3",
468
+ # "content-length"=>"0"}
469
+
470
# Stores an object and returns the full response header hash. When :md5 is
# supplied, :verified_md5 in the result reflects whether S3's ETag matched;
# without :md5 it is always false.
def store_object(params)
  AwsUtils.allow_only([:bucket, :key, :data, :headers, :md5], params)
  AwsUtils.mandatory_arguments([:bucket, :key, :data], params)
  params[:headers] ||= {}

  data = params[:data]
  # Windows: text-mode IO must be reset to binary mode for streaming.
  data.binmode if data.respond_to?(:binmode)
  large_upload = (data.respond_to?(:lstat) && data.lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
                 (data.respond_to?(:size)  && data.size       >= USE_100_CONTINUE_PUT_SIZE)
  params[:headers]['expect'] = '100-continue' if large_upload

  req_hash = generate_rest_request('PUT', params[:headers].merge(:url  => "#{params[:bucket]}/#{CGI::escape(params[:key])}",
                                                                 :data => data))
  resp = request_info(req_hash, S3HttpResponseHeadParser.new)
  # Compare the caller's checksum against the (quoted) ETag returned by S3.
  resp[:verified_md5] = params[:md5] ? (resp['etag'].gsub(/\"/, '') == params[:md5]) : false
  resp
rescue
  on_exception
end
492
+
493
+ # Identical in function to store_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
494
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
495
+ # This call is implemented as a wrapper around store_object and the user may gain different semantics by creating a custom wrapper.
496
+ #
497
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
498
+ # => {"x-amz-id-2"=>"IZN3XsH4FlBU0+XYkFTfHwaiF1tNzrm6dIW2EM/cthKvl71nldfVC0oVQyydzWpb",
499
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
500
+ # "date"=>"Mon, 29 Sep 2008 18:38:32 GMT",
501
+ # :verified_md5=>true,
502
+ # "x-amz-request-id"=>"E8D7EA4FE00F5DF7",
503
+ # "server"=>"AmazonS3",
504
+ # "content-length"=>"0"}
505
+ #
506
+ # s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2953", :data => "polemonium" )
507
+ # Aws::AwsError: Uploaded object failed MD5 checksum verification: {"x-amz-id-2"=>"HTxVtd2bf7UHHDn+WzEH43MkEjFZ26xuYvUzbstkV6nrWvECRWQWFSx91z/bl03n",
508
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
509
+ # "date"=>"Mon, 29 Sep 2008 18:38:41 GMT",
510
+ # :verified_md5=>false,
511
+ # "x-amz-request-id"=>"0D7ADE09F42606F2",
512
+ # "server"=>"AmazonS3",
513
+ # "content-length"=>"0"}
514
# Like store_object, but :md5 is mandatory and a checksum mismatch raises
# an AwsError instead of merely flagging :verified_md5 => false.
def store_object_and_verify(params)
  AwsUtils.mandatory_arguments([:md5], params)
  resp = store_object(params)
  unless resp[:verified_md5]
    raise AwsError.new("Uploaded object failed MD5 checksum verification: #{resp.inspect}")
  end
  resp
end
519
+
520
+ # Retrieves object data from Amazon. Returns a +hash+ or an exception.
521
+ #
522
+ # s3.get('my_awesome_bucket', 'log/curent/1.log') #=>
523
+ #
524
+ # {:object => "Ola-la!",
525
+ # :headers => {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
526
+ # "content-type" => "",
527
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
528
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
529
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
530
+ # "x-amz-meta-family" => "Woho556!",
531
+ # "x-amz-request-id" => "0000000C246D770C",
532
+ # "server" => "AmazonS3",
533
+ # "content-length" => "7"}}
534
+ #
535
+ # If a block is provided, yields incrementally to the block as
536
+ # the response is read. For large responses, this function is ideal as
537
+ # the response can be 'streamed'. The hash containing header fields is
538
+ # still returned.
539
+ # Example:
540
+ # foo = File.new('./chunder.txt', File::CREAT|File::RDWR)
541
+ # rhdr = s3.get('aws-test', 'Cent5V1_7_1.img.part.00') do |chunk|
542
+ # foo.write(chunk)
543
+ # end
544
+ # foo.close
545
+ #
546
+
547
# Fetches bucket/key, returning {:object => body, :headers => {...}}.
# A given block receives response chunks incrementally (streaming download).
def get(bucket, key, headers={}, &block)
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}/#{CGI::escape(key)}"))
  request_info(request, S3HttpResponseBodyParser.new, &block)
rescue
  on_exception
end
553
+
554
+ # New experimental API for retrieving objects, introduced in Aws 1.8.1.
555
+ # retrieve_object is similar in function to the older function get. It allows for optional verification
556
+ # of object md5 checksums on retrieval. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
557
+ #
558
+ # If the optional :md5 argument is provided, retrieve_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is
559
+ # set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
560
+ #
561
+ # The optional argument of :headers allows the caller to specify arbitrary request header values.
562
+ # Mandatory arguments:
563
+ # :bucket - the bucket in which the object is stored
564
+ # :key - the object address (or path) within the bucket
565
+ # Optional arguments:
566
+ # :headers - hash of additional HTTP headers to include with the request
567
+ # :md5 - MD5 checksum against which to verify the retrieved object
568
+ #
569
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo")
570
+ # => {:verified_md5=>false,
571
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
572
+ # "x-amz-id-2"=>"2Aj3TDz6HP5109qly//18uHZ2a1TNHGLns9hyAtq2ved7wmzEXDOPGRHOYEa3Qnp",
573
+ # "content-type"=>"",
574
+ # "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
575
+ # "date"=>"Tue, 30 Sep 2008 00:52:44 GMT",
576
+ # "x-amz-request-id"=>"EE4855DE27A2688C",
577
+ # "server"=>"AmazonS3",
578
+ # "content-length"=>"10"},
579
+ # :object=>"polemonium"}
580
+ #
581
+ # s3.retrieve_object(:bucket => "foobucket", :key => "foo", :md5=>'a507841b1bc8115094b00bbe8c1b2954')
582
+ # => {:verified_md5=>true,
583
+ # :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT",
584
+ # "x-amz-id-2"=>"mLWQcI+VuKVIdpTaPXEo84g0cz+vzmRLbj79TS8eFPfw19cGFOPxuLy4uGYVCvdH",
585
+ # "content-type"=>"", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"",
586
+ # "date"=>"Tue, 30 Sep 2008 00:53:08 GMT",
587
+ # "x-amz-request-id"=>"6E7F317356580599",
588
+ # "server"=>"AmazonS3",
589
+ # "content-length"=>"10"},
590
+ # :object=>"polemonium"}
591
+ # If a block is provided, yields incrementally to the block as
592
+ # the response is read. For large responses, this function is ideal as
593
+ # the response can be 'streamed'. The hash containing header fields is
594
+ # still returned.
595
# Retrieves an object, optionally streaming it to a block. When :md5 is
# supplied, :verified_md5 in the result reflects whether it matched the ETag;
# otherwise :verified_md5 is false.
def retrieve_object(params, &block)
  AwsUtils.mandatory_arguments([:bucket, :key], params)
  AwsUtils.allow_only([:bucket, :key, :headers, :md5], params)
  params[:headers] ||= {}
  request = generate_rest_request('GET', params[:headers].merge(:url => "#{params[:bucket]}/#{CGI::escape(params[:key])}"))
  resp = request_info(request, S3HttpResponseBodyParser.new, &block)
  # Verification is opt-in: true only when a checksum was given and matches.
  resp[:verified_md5] =
    !!(params[:md5] && resp[:headers]['etag'].gsub(/\"/, '') == params[:md5])
  resp
rescue
  on_exception
end
609
+
610
+ # Identical in function to retrieve_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument.
611
+ # If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict.
612
+ # This call is implemented as a wrapper around retrieve_object and the user may gain different semantics by creating a custom wrapper.
613
# Like retrieve_object, but :md5 is mandatory and a checksum mismatch raises
# an AwsError instead of merely flagging :verified_md5 => false.
def retrieve_object_and_verify(params, &block)
  AwsUtils.mandatory_arguments([:md5], params)
  result = retrieve_object(params, &block)
  unless result[:verified_md5]
    raise AwsError.new("Retrieved object failed MD5 checksum verification: #{result.inspect}")
  end
  result
end
619
+
620
+ # Retrieves object metadata. Returns a +hash+ of http_response_headers.
621
+ #
622
+ # s3.head('my_awesome_bucket', 'log/curent/1.log') #=>
623
+ # {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT",
624
+ # "content-type" => "",
625
+ # "etag" => "\"000000000096f4ee74bc4596443ef2a4\"",
626
+ # "date" => "Wed, 23 May 2007 09:08:03 GMT",
627
+ # "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu",
628
+ # "x-amz-meta-family" => "Woho556!",
629
+ # "x-amz-request-id" => "0000000C246D770C",
630
+ # "server" => "AmazonS3",
631
+ # "content-length" => "7"}
632
+ #
633
# Fetches only the object's metadata (HTTP HEAD); returns the header hash.
def head(bucket, key, headers={})
  request = generate_rest_request('HEAD', headers.merge(:url => "#{bucket}/#{CGI::escape(key)}"))
  request_info(request, S3HttpResponseHeadParser.new)
rescue
  on_exception
end
639
+
640
+ # Deletes key. Returns +true+ or an exception.
641
+ #
642
+ # s3.delete('my_awesome_bucket', 'log/curent/1.log') #=> true
643
+ #
644
# Deletes bucket/key. Returns +true+ or raises Aws::AwsError.
def delete(bucket, key='', headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => "#{bucket}/#{CGI::escape(key)}"))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
650
+
651
+ # Copy an object.
652
+ # directive: :copy - copy meta-headers from source (default value)
653
+ # :replace - replace meta-headers by passed ones
654
+ #
655
+ # # copy a key with meta-headers
656
+ # s3.copy('b1', 'key1', 'b1', 'key1_copy') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:25:22.000Z"}
657
+ #
658
+ # # copy a key, overwrite meta-headers
659
+ # s3.copy('b1', 'key2', 'b1', 'key2_copy', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:26:22.000Z"}
660
+ #
661
+ # see: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingCopyingObjects.html
662
+ # http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
663
+ #
664
# Server-side object copy. +directive+ is :copy (keep source meta-headers,
# default) or :replace (use the passed headers). Returns {:e_tag, :last_modified}.
def copy(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  dest_key ||= src_key
  # NOTE: these two headers are written into the caller-supplied +headers+
  # hash, matching this method's long-standing behavior.
  headers['x-amz-metadata-directive'] = directive.to_s.upcase
  headers['x-amz-copy-source']        = "#{src_bucket}/#{CGI::escape(src_key)}"
  request = generate_rest_request('PUT', headers.merge(:url => "#{dest_bucket}/#{CGI::escape(dest_key)}"))
  request_info(request, S3CopyParser.new)
rescue
  on_exception
end
673
+
674
+ # Move an object.
675
+ # directive: :copy - copy meta-headers from source (default value)
676
+ # :replace - replace meta-headers by passed ones
677
+ #
678
+ # # move bucket1/key1 to bucket1/key2
679
+ # s3.move('bucket1', 'key1', 'bucket1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:27:22.000Z"}
680
+ #
681
+ # # move bucket1/key1 to bucket2/key2 with new meta-headers assignment
682
+ # s3.copy('bucket1', 'key1', 'bucket2', 'key2', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:28:22.000Z"}
683
+ #
684
# Copies the object, then deletes the source unless source and destination
# are the same location. Returns the copy result (:e_tag, :last_modified).
def move(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  result = copy(src_bucket, src_key, dest_bucket, dest_key, directive, headers)
  same_location = (src_bucket == dest_bucket) && (src_key == dest_key)
  delete(src_bucket, src_key) unless same_location
  result
end
690
+
691
+ # Rename an object.
692
+ #
693
+ # # rename bucket1/key1 to bucket1/key2
694
+ # s3.rename('bucket1', 'key1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:29:22.000Z"}
695
+ #
696
# Renames an object within a single bucket: copies src_key to dest_key
# (keeping meta-headers via the :copy directive), then deletes the original.
# Returns the result hash from +move+ (:e_tag, :last_modified).
def rename(src_bucket, src_key, dest_key, headers={})
  move(src_bucket, src_key, src_bucket, dest_key, :copy, headers)
end
699
+
700
+ # Retrieves the ACL (access control policy) for a bucket or object. Returns a hash of headers and xml doc with ACL data. See: http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html.
701
+ #
702
+ # s3.get_acl('my_awesome_bucket', 'log/curent/1.log') #=>
703
+ # {:headers => {"x-amz-id-2"=>"B3BdDMDUz+phFF2mGBH04E46ZD4Qb9HF5PoPHqDRWBv+NVGeA3TOQ3BkVvPBjgxX",
704
+ # "content-type"=>"application/xml;charset=ISO-8859-1",
705
+ # "date"=>"Wed, 23 May 2007 09:40:16 GMT",
706
+ # "x-amz-request-id"=>"B183FA7AB5FBB4DD",
707
+ # "server"=>"AmazonS3",
708
+ # "transfer-encoding"=>"chunked"},
709
+ # :object => "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Owner>
710
+ # <ID>16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Owner>
711
+ # <AccessControlList><Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>
712
+ # 16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Grantee>
713
+ # <Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>" }
714
+ #
715
# Fetches the raw ACL XML for a bucket (empty key) or for an object.
# Returns {:object => xml_string, :headers => {...}}.
def get_acl(bucket, key='', headers={})
  path = key.blank? ? '' : "/#{CGI::escape(key)}"
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{path}?acl"))
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
722
+
723
+ # Retrieves the ACL (access control policy) for a bucket or object.
724
+ # Returns a hash of {:owner, :grantees}
725
+ #
726
+ # s3.get_acl_parse('my_awesome_bucket', 'log/curent/1.log') #=>
727
+ #
728
+ # { :grantees=>
729
+ # { "16...2a"=>
730
+ # { :display_name=>"root",
731
+ # :permissions=>["FULL_CONTROL"],
732
+ # :attributes=>
733
+ # { "xsi:type"=>"CanonicalUser",
734
+ # "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}},
735
+ # "http://acs.amazonaws.com/groups/global/AllUsers"=>
736
+ # { :display_name=>"AllUsers",
737
+ # :permissions=>["READ"],
738
+ # :attributes=>
739
+ # { "xsi:type"=>"Group",
740
+ # "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}}},
741
+ # :owner=>
742
+ # { :id=>"16..2a",
743
+ # :display_name=>"root"}}
744
+ #
745
# Fetches and parses the ACL for a bucket (empty key) or object.
# Returns {:owner => {...}, :grantees => {grantee_id_or_uri => {...}}},
# merging multiple Grant entries for the same grantee into one
# :permissions list.
def get_acl_parse(bucket, key='', headers={})
  key = key.blank? ? '' : "/#{CGI::escape(key)}"
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{key}?acl"))
  acl = request_info(request, S3AclParser.new(:logger => @logger))
  grantees = {}
  acl[:grantees].each do |grantee|
    id = grantee[:id] || grantee[:uri]
    if grantees.key?(id)
      # Same grantee appearing in another <Grant>: accumulate its permission.
      grantees[id][:permissions] << grantee[:permissions]
    else
      grantees[id] = {
        # Group grantees have no DisplayName; fall back to the URI's last segment.
        :display_name => grantee[:display_name] || grantee[:uri].to_s[/[^\/]*$/],
        :permissions  => grantee[:permissions].lines.to_a,
        :attributes   => grantee[:attributes]
      }
    end
  end
  { :owner => acl[:owner], :grantees => grantees }
rescue
  on_exception
end
767
+
768
+ # Sets the ACL on a bucket or object.
769
# Applies the given ACL XML document to a bucket (blank key) or object.
def put_acl(bucket, key, acl_xml_doc, headers={})
  path = key.blank? ? '' : "/#{CGI::escape(key)}"
  request = generate_rest_request('PUT', headers.merge(:url => "#{bucket}#{path}?acl", :data => acl_xml_doc))
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
776
+
777
+ # Retrieves the ACL (access control policy) for a bucket. Returns a hash of headers and xml doc with ACL data.
778
# Fetches the ACL of the bucket itself (an empty key targets the bucket).
def get_bucket_acl(bucket, headers={})
  get_acl(bucket, '', headers)
rescue
  on_exception
end
783
+
784
+ # Sets the ACL on a bucket only.
785
# Applies the ACL XML document to the bucket itself (empty key).
def put_bucket_acl(bucket, acl_xml_doc, headers={})
  put_acl(bucket, '', acl_xml_doc, headers)
rescue
  on_exception
end
790
+
791
# Fetches the bucket's policy document (the ?policy subresource).
def get_bucket_policy(bucket)
  request = generate_rest_request('GET', {:url => "#{bucket}?policy"})
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
797
+
798
# Attaches a policy document to the bucket (the ?policy subresource).
#
# Fix: removed a stray copy-pasted line (`key = key.blank? ? '' : ...`)
# that referenced an undefined local `key`; its result was never used.
def put_bucket_policy(bucket, policy)
  req_hash = generate_rest_request('PUT', {:url => "#{bucket}?policy", :data => policy})
  request_info(req_hash, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
805
+
806
+ # Removes all keys from bucket. Returns +true+ or an exception.
807
+ #
808
+ # s3.clear_bucket('my_awesome_bucket') #=> true
809
+ #
810
# Removes every key from the bucket, page by page. Returns +true+ or raises.
#
# Fix: removed leftover debug statements (`p results` / `p key`) that dumped
# every listing page and every key to stdout on each call.
def clear_bucket(bucket)
  incrementally_list_bucket(bucket) do |results|
    results[:contents].each { |key| delete(bucket, key[:key]) }
  end
  true
rescue
  on_exception
end
819
+
820
+ # Deletes all keys in bucket then deletes bucket. Returns +true+ or an exception.
821
+ #
822
+ # s3.force_delete_bucket('my_awesome_bucket')
823
+ #
824
# Empties the bucket (clear_bucket) and then deletes the bucket itself.
# Returns the result of delete_bucket, or raises Aws::AwsError.
def force_delete_bucket(bucket)
  clear_bucket(bucket)
  delete_bucket(bucket)
rescue
  on_exception
end
830
+
831
+ # Deletes all keys where the 'folder_key' may be assumed as 'folder' name. Returns an array of string keys that have been deleted.
832
+ #
833
+ # s3.list_bucket('my_awesome_bucket').map{|key_data| key_data[:key]} #=> ['test','test/2/34','test/3','test1','test1/logs']
834
+ # s3.delete_folder('my_awesome_bucket','test') #=> ['test','test/2/34','test/3']
835
+ #
836
# Deletes every key that lives under the pseudo-'folder' +folder_key+
# (the key itself and anything below it, per +separator+). Returns the
# flat array of deleted key strings, as documented.
#
# Fixes: (1) the old `chomp!` mutated the caller's +folder_key+ string in
# place — we now work on a copy; (2) `allkeys << keys` accumulated an array
# of arrays, contradicting the documented flat-array return — now concat.
def delete_folder(bucket, folder_key, separator='/')
  folder_key = folder_key.chomp(separator)
  allkeys = []
  incrementally_list_bucket(bucket, {'prefix' => folder_key}) do |results|
    # Keep only exact matches or keys nested below the folder.
    keys = results[:contents].map { |s3_key| s3_key[:key][/^#{folder_key}($|#{separator}.*)/] ? s3_key[:key] : nil }.compact
    keys.each { |key| delete(bucket, key) }
    allkeys.concat(keys)
  end
  allkeys
rescue
  on_exception
end
848
+
849
+ # Retrieves object data only (headers are omitted). Returns +string+ or an exception.
850
+ #
851
+ # s3.get('my_awesome_bucket', 'log/curent/1.log') #=> 'Ola-la!'
852
+ #
853
# Convenience wrapper around +get+: returns only the object body string,
# discarding the response headers. Raises Aws::AwsError on failure.
def get_object(bucket, key, headers={})
  get(bucket, key, headers)[:object]
rescue
  on_exception
end
858
+
859
+ #-----------------------------------------------------------------
860
+ # Query API: Links
861
+ #-----------------------------------------------------------------
862
+
863
+ # Generates link for QUERY API
864
# Builds a pre-signed Query String Authentication URL for the given HTTP
# +method+ and request +headers+ (must include :url). +expires+ may be a
# number of seconds (values under one year are treated as an offset from
# now) or an absolute Time/epoch value.
def generate_link(method, headers={}, expires=nil) #:nodoc:
  # calculate request data
  server, path, path_to_sign = fetch_request_params(headers)
  # default, then normalize the expiration to an epoch integer
  expires ||= DEFAULT_EXPIRES_AFTER
  # NOTE(review): Fixnum is deprecated on modern Rubies (unified into Integer);
  # this predates that change — confirm target Ruby version before touching.
  expires = Time.now.utc + expires if expires.is_a?(Fixnum) && (expires < ONE_YEAR_IN_SECONDS)
  expires = expires.to_i
  # remove unset (== optional) and symbolic keys so only real HTTP headers are signed
  headers.each { |key, value| headers.delete(key) if (value.nil? || key.is_a?(Symbol)) }
  # generate the string-to-sign and its HMAC-SHA1 signature
  auth_string = canonical_string(method, path_to_sign, headers, expires)
  signature = CGI::escape(Base64.encode64(OpenSSL::HMAC.digest(OpenSSL::Digest::Digest.new("sha1"), @aws_secret_access_key, auth_string)).strip)
  # append the signature parameters to the path (after '?' or '&')
  addon = "Signature=#{signature}&Expires=#{expires}&AWSAccessKeyId=#{@aws_access_key_id}"
  path += path[/\?/] ? "&#{addon}" : "?#{addon}"
  "#{@params[:protocol]}://#{server}:#{@params[:port]}#{path}"
rescue
  on_exception
end
883
+
884
+ # Generates link for 'ListAllMyBuckets'.
885
+ #
886
+ # s3.list_all_my_buckets_link #=> url string
887
+ #
888
# Signed query-string URL for 'ListAllMyBuckets' (GET on the service root).
def list_all_my_buckets_link(expires=nil, headers={})
  request_headers = headers.merge(:url => '')
  generate_link('GET', request_headers, expires)
rescue
  on_exception
end
893
+
894
+ # Generates link for 'CreateBucket'.
895
+ #
896
+ # s3.create_bucket_link('my_awesome_bucket') #=> url string
897
+ #
898
# Signed query-string URL that creates +bucket+ when PUT.
def create_bucket_link(bucket, expires=nil, headers={})
  request_headers = headers.merge(:url => bucket)
  generate_link('PUT', request_headers, expires)
rescue
  on_exception
end
903
+
904
+ # Generates link for 'DeleteBucket'.
905
+ #
906
+ # s3.delete_bucket_link('my_awesome_bucket') #=> url string
907
+ #
908
# Signed query-string URL that deletes +bucket+ when DELETE is issued.
def delete_bucket_link(bucket, expires=nil, headers={})
  request_headers = headers.merge(:url => bucket)
  generate_link('DELETE', request_headers, expires)
rescue
  on_exception
end
913
+
914
+ # Generates link for 'ListBucket'.
915
+ #
916
+ # s3.list_bucket_link('my_awesome_bucket') #=> url string
917
+ #
918
# Signed query-string URL for 'ListBucket'; +options+ (prefix, marker,
# max-keys, delimiter) become URL query parameters.
def list_bucket_link(bucket, options=nil, expires=nil, headers={})
  unless options.blank?
    query = options.map { |name, value| "#{name.to_s}=#{CGI::escape(value.to_s)}" }.join('&')
    bucket += "?#{query}"
  end
  generate_link('GET', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
924
+
925
+ # Generates link for 'PutObject'.
926
+ #
927
+ # s3.put_link('my_awesome_bucket',key, object) #=> url string
928
+ #
929
# Signed query-string URL for 'PutObject' on bucket/key.
def put_link(bucket, key, data=nil, expires=nil, headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}", :data => data)
  generate_link('PUT', request_headers, expires)
rescue
  on_exception
end
934
+
935
+ # Generates link for 'GetObject'.
936
+ #
937
+ # if a bucket comply with virtual hosting naming then retuns a link with the
938
+ # bucket as a part of host name:
939
+ #
940
+ # s3.get_link('my-awesome-bucket',key) #=> https://my-awesome-bucket.s3.amazonaws.com:443/asia%2Fcustomers?Signature=nh7...
941
+ #
942
+ # otherwise returns an old style link (the bucket is a part of path):
943
+ #
944
+ # s3.get_link('my_awesome_bucket',key) #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?Signature=QAO...
945
+ #
946
+ # see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html
947
# Signed query-string URL for 'GetObject' on bucket/key.
def get_link(bucket, key, expires=nil, headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}")
  generate_link('GET', request_headers, expires)
rescue
  on_exception
end
952
+
953
+ # Generates link for 'HeadObject'.
954
+ #
955
+ # s3.head_link('my_awesome_bucket',key) #=> url string
956
+ #
957
# Signed query-string URL for 'HeadObject' on bucket/key.
def head_link(bucket, key, expires=nil, headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}")
  generate_link('HEAD', request_headers, expires)
rescue
  on_exception
end
962
+
963
+ # Generates link for 'DeleteObject'.
964
+ #
965
+ # s3.delete_link('my_awesome_bucket',key) #=> url string
966
+ #
967
# Signed query-string URL for 'DeleteObject' on bucket/key.
def delete_link(bucket, key, expires=nil, headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}")
  generate_link('DELETE', request_headers, expires)
rescue
  on_exception
end
972
+
973
+
974
+ # Generates link for 'GetACL'.
975
+ #
976
+ # s3.get_acl_link('my_awesome_bucket',key) #=> url string
977
+ #
978
# Signed query-string URL for 'GetACL' on bucket/key (?acl subresource).
def get_acl_link(bucket, key='', headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}?acl")
  generate_link('GET', request_headers)
rescue
  on_exception
end
983
+
984
+ # Generates link for 'PutACL'.
985
+ #
986
+ # s3.put_acl_link('my_awesome_bucket',key) #=> url string
987
+ #
988
# Signed query-string URL for 'PutACL' on bucket/key (?acl subresource).
def put_acl_link(bucket, key='', headers={})
  request_headers = headers.merge(:url => "#{bucket}/#{AwsUtils::URLencode key}?acl")
  generate_link('PUT', request_headers)
rescue
  on_exception
end
993
+
994
+ # Generates link for 'GetBucketACL'.
995
+ #
996
+ # s3.get_acl_link('my_awesome_bucket',key) #=> url string
997
+ #
998
# Signed query-string URL for reading the bucket's own ACL (empty key).
def get_bucket_acl_link(bucket, headers={})
  get_acl_link(bucket, '', headers)
rescue
  on_exception
end
1003
+
1004
+ # Generates link for 'PutBucketACL'.
1005
+ #
1006
+ # s3.put_acl_link('my_awesome_bucket',key) #=> url string
1007
+ #
1008
# Signed query-string URL for writing the bucket's own ACL (empty key).
#
# Fix: the old body forwarded +acl_xml_doc+ as a fourth argument to
# put_acl_link, which accepts at most (bucket, key, headers) — every call
# raised ArgumentError. The ACL document is not part of the signed URL, so
# it is accepted for interface compatibility but not forwarded.
def put_bucket_acl_link(bucket, acl_xml_doc, headers={})
  put_acl_link(bucket, '', headers)
rescue
  on_exception
end
1013
+
1014
+ #-----------------------------------------------------------------
1015
+ # PARSERS:
1016
+ #-----------------------------------------------------------------
1017
+
1018
# Parses the ListAllMyBuckets XML into an array of hashes, one per bucket,
# each merged with the (single) owner's id and display name.
class S3ListAllMyBucketsParser < AwsParser # :nodoc:
  def reset
    @result = []
    @owner  = {}
  end

  def tagstart(name, attributes)
    @current_bucket = {} if name == 'Bucket'
  end

  def tagend(name)
    case name
    when 'ID'           then @owner[:owner_id]            = @text
    when 'DisplayName'  then @owner[:owner_display_name]  = @text
    when 'Name'         then @current_bucket[:name]           = @text
    when 'CreationDate' then @current_bucket[:creation_date]  = @text
    when 'Bucket'       then @result << @current_bucket.merge(@owner)
    end
  end
end
1043
+
1044
# Parses a ListBucket response into an array of per-key hashes; each key
# hash also carries the listing-level info under :service.
class S3ListBucketParser < AwsParser # :nodoc:
  def reset
    @result      = []
    @service     = {}
    @current_key = {}
  end

  def tagstart(name, attributes)
    @current_key = {} if name == 'Contents'
  end

  def tagend(name)
    case name
    # listing-level fields
    when 'Name'        then @service['name']      = @text
    when 'Prefix'      then @service['prefix']    = @text
    when 'Marker'      then @service['marker']    = @text
    when 'MaxKeys'     then @service['max-keys']  = @text
    when 'Delimiter'   then @service['delimiter'] = @text
    when 'IsTruncated' then @service['is_truncated'] = (@text =~ /false/ ? false : true)
    # per-key fields
    when 'Key'          then @current_key[:key]                = @text
    when 'LastModified' then @current_key[:last_modified]      = @text
    when 'ETag'         then @current_key[:e_tag]              = @text
    when 'Size'         then @current_key[:size]               = @text.to_i
    when 'StorageClass' then @current_key[:storage_class]      = @text
    when 'ID'           then @current_key[:owner_id]           = @text
    when 'DisplayName'  then @current_key[:owner_display_name] = @text
    when 'Contents'
      @current_key[:service] = @service
      @result << @current_key
    end
  end
end
1090
+
1091
# Parses a ListBucket response into a single hash: listing-level fields at
# the top, key entries under :contents, and delimiter groupings under
# :common_prefixes.
class S3ImprovedListBucketParser < AwsParser # :nodoc:
  def reset
    @result             = { :contents => [], :common_prefixes => [] }
    @contents           = []
    @current_key        = {}
    @common_prefixes    = []
    @in_common_prefixes = false
  end

  def tagstart(name, attributes)
    @current_key        = {}   if name == 'Contents'
    @in_common_prefixes = true if name == 'CommonPrefixes'
  end

  def tagend(name)
    case name
    # listing-level fields
    when 'Name' then @result[:name] = @text
    when 'Prefix'
      # 'Prefix' doubles as the query prefix and as each entry inside
      # CommonPrefixes; the flag tells us which one we are parsing.
      if @in_common_prefixes
        @common_prefixes << @text
      else
        @result[:prefix] = @text
      end
    when 'Marker'      then @result[:marker]       = @text
    when 'MaxKeys'     then @result[:max_keys]     = @text
    when 'Delimiter'   then @result[:delimiter]    = @text
    when 'IsTruncated' then @result[:is_truncated] = (@text =~ /false/ ? false : true)
    when 'NextMarker'  then @result[:next_marker]  = @text
    # per-key fields
    when 'Key'          then @current_key[:key]                = @text
    when 'LastModified' then @current_key[:last_modified]      = @text
    when 'ETag'         then @current_key[:e_tag]              = @text
    when 'Size'         then @current_key[:size]               = @text.to_i
    when 'StorageClass' then @current_key[:storage_class]      = @text
    when 'ID'           then @current_key[:owner_id]           = @text
    when 'DisplayName'  then @current_key[:owner_display_name] = @text
    when 'Contents'     then @result[:contents] << @current_key
    # CommonPrefixes grouping closed: publish and reset the flag
    when 'CommonPrefixes'
      @result[:common_prefixes] = @common_prefixes
      @in_common_prefixes = false
    end
  end
end
1150
+
1151
# Extracts the bucket's region string from a LocationConstraint response
# (empty string for the US classic region).
class S3BucketLocationParser < AwsParser # :nodoc:
  def reset
    @result = ''
  end

  def tagend(name)
    return unless name == 'LocationConstraint'
    @result = @text
  end
end
1160
+
1161
# Parses an AccessControlPolicy document into {:owner => {...},
# :grantees => [...]}, one grantee hash per <Grant> element.
class S3AclParser < AwsParser # :nodoc:
  def reset
    @result = {:grantees => [], :owner => {}}
    @current_grantee = {}
  end

  def tagstart(name, attributes)
    # A fresh <Grantee> starts a new grantee record carrying its XML attributes.
    @current_grantee = {:attributes => attributes} if name == 'Grantee'
  end

  def tagend(name)
    # ID/DisplayName appear both under Owner and under Grantee; dispatch on path.
    owner_scope = (@xmlpath == 'AccessControlPolicy/Owner')
    case name
    when 'ID'
      (owner_scope ? @result[:owner] : @current_grantee)[:id] = @text
    when 'DisplayName'
      (owner_scope ? @result[:owner] : @current_grantee)[:display_name] = @text
    when 'URI'        then @current_grantee[:uri]         = @text
    when 'Permission' then @current_grantee[:permissions] = @text
    when 'Grant'      then @result[:grantees] << @current_grantee
    end
  end
end
1195
+
1196
# Parses a BucketLoggingStatus document; :enabled flips to true when a
# LoggingEnabled section with a target bucket/prefix is present.
class S3LoggingParser < AwsParser # :nodoc:
  def reset
    @result = {:enabled => false, :targetbucket => '', :targetprefix => ''}
    @current_grantee = {}
  end

  def tagend(name)
    # Only elements inside LoggingEnabled matter; their presence implies
    # logging is switched on.
    return unless @xmlpath == 'BucketLoggingStatus/LoggingEnabled'
    case name
    when 'TargetBucket'
      @result[:targetbucket] = @text
      @result[:enabled] = true
    when 'TargetPrefix'
      @result[:targetprefix] = @text
      @result[:enabled] = true
    end
  end
end
1218
+
1219
# Parses a CopyObjectResult into {:last_modified, :e_tag}.
class S3CopyParser < AwsParser # :nodoc:
  def reset
    @result = {}
  end

  def tagend(name)
    @result[:last_modified] = @text if name == 'LastModified'
    @result[:e_tag]         = @text if name == 'ETag'
  end
end
1233
+
1234
+ #-----------------------------------------------------------------
1235
+ # PARSERS: Non XML
1236
+ #-----------------------------------------------------------------
1237
+
1238
+ class S3HttpResponseParser # :nodoc:
1239
+ attr_reader :result
1240
+
1241
+ def parse(response)
1242
+ @result = response
1243
+ end
1244
+
1245
+ def headers_to_string(headers)
1246
+ result = {}
1247
+ headers.each do |key, value|
1248
+ value = value[0] if value.is_a?(Array) && value.size<2
1249
+ result[key] = value
1250
+ end
1251
+ result
1252
+ end
1253
+ end
1254
+
1255
# Result is a hash: the response body (forced to UTF-8) under :object and
# the flattened HTTP headers under :headers.
class S3HttpResponseBodyParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    body = response.body
    body.force_encoding("UTF-8")
    @result = { :object => body, :headers => headers_to_string(response.to_hash) }
  end
end
1266
+
1267
# Result is just the flattened response header hash (used for HEAD requests).
class S3HttpResponseHeadParser < S3HttpResponseParser # :nodoc:
  def parse(response)
    @result = headers_to_string(response.to_hash)
  end
end
1272
+
1273
+ end
1274
+
1275
+ end