hackerdude-aws 2.3.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,216 @@
1
+ module Aws
2
+ require 'xmlsimple'
3
+
4
+ # API Reference: http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/
5
+ class Rds < AwsBase
6
+ include AwsBaseInterface
7
+
8
+
9
+ # Amazon API version being used
10
+ API_VERSION = nil
11
+ DEFAULT_HOST = "rds.amazonaws.com"
12
+ DEFAULT_PATH = '/'
13
+ DEFAULT_PROTOCOL = 'https'
14
+ DEFAULT_PORT = 443
15
+
16
+ @@api = ENV['RDS_API_VERSION'] || API_VERSION
17
+
18
+
19
+ def self.api
20
+ @@api
21
+ end
22
+
23
+
24
+ @@bench = AwsBenchmarkingBlock.new
25
+
26
+
27
+ def self.bench_xml
28
+ @@bench.xml
29
+ end
30
+
31
+
32
+ def self.bench_ec2
33
+ @@bench.service
34
+ end
35
+
36
+
37
+ def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
38
+ uri = ENV['RDS_URL'] ? URI.parse(ENV['RDS_URL']) : nil
39
+ init({ :name => 'RDS',
40
+ :default_host => uri ? uri.host : DEFAULT_HOST,
41
+ :default_port => uri ? uri.port : DEFAULT_PORT,
42
+ :default_service => uri ? uri.path : DEFAULT_PATH,
43
+ :default_protocol => uri ? uri.scheme : DEFAULT_PROTOCOL,
44
+ :api_version => API_VERSION },
45
+ aws_access_key_id || ENV['AWS_ACCESS_KEY_ID'],
46
+ aws_secret_access_key|| ENV['AWS_SECRET_ACCESS_KEY'],
47
+ params)
48
+ end
49
+
50
+
51
+ def do_request(action, params, options={})
52
+ link = generate_request(action, params)
53
+ resp = request_info_xml_simple(:rds_connection, @params, link, @logger,
54
+ :group_tags=>{"DBInstances"=>"DBInstance",
55
+ "DBParameterGroups"=>"DBParameterGroup",
56
+ "DBSecurityGroups"=>"DBSecurityGroup",
57
+ "EC2SecurityGroups"=>"EC2SecurityGroup",
58
+ "IPRanges"=>"IPRange"},
59
+ :force_array=>["DBInstances",
60
+ "DBParameterGroups",
61
+ "DBSecurityGroups",
62
+ "EC2SecurityGroups",
63
+ "IPRanges"],
64
+ :pull_out_array=>options[:pull_out_array],
65
+ :pull_out_single=>options[:pull_out_single],
66
+ :wrapper=>options[:wrapper])
67
+ end
68
+
69
+
70
+ #-----------------------------------------------------------------
71
+ # REQUESTS
72
+ #-----------------------------------------------------------------
73
+
74
+ #
75
+ # identifier: db instance identifier. Must be unique per account per region.
76
+ # instance_class: db.m1.small | db.m1.large | db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge
77
+ # See this for other values: http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/
78
+ #
79
+ # options:
80
+ # db_name: if you want a database created at the same time as the instance, specify :db_name option.
81
+ # availability_zone: default is random zone.
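+ #
+ # A minimal usage sketch (the identifier, credentials and zone below are placeholders):
+ #
+ #  rds = Aws::Rds.new(ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY'])
+ #  rds.create_db_instance('my-db-1', 'db.m1.small', 5, 'master', 's3cr3tpass',
+ #                         :db_name => 'mydb', :availability_zone => 'us-east-1a')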
82
+ def create_db_instance(identifier, instance_class, allocated_storage, master_username, master_password, options={})
83
+ params = {}
84
+ params['DBInstanceIdentifier'] = identifier
85
+ params['DBInstanceClass'] = instance_class
86
+ params['AllocatedStorage'] = allocated_storage
87
+ params['MasterUsername'] = master_username
88
+ params['MasterUserPassword'] = master_password
89
+
90
+ params['Engine'] = options[:engine] || "MySQL5.1"
91
+ params['DBName'] = options[:db_name] if options[:db_name]
92
+ params['AvailabilityZone'] = options[:availability_zone] if options[:availability_zone]
93
+ params['PreferredMaintenanceWindow'] = options[:preferred_maintenance_window] if options[:preferred_maintenance_window]
94
+ params['BackupRetentionPeriod'] = options[:preferred_retention_period] if options[:preferred_retention_period]
95
+ params['PreferredBackupWindow'] = options[:preferred_backup_window] if options[:preferred_backup_window]
96
+
97
+ @logger.info("Creating DB Instance called #{identifier}")
98
+
99
+ link = do_request("CreateDBInstance", params, :pull_out_single=>[:create_db_instance_result, :db_instance])
100
+
101
+ rescue Exception
102
+ on_exception
103
+ end
104
+
105
+
106
+ # options:
107
+ # DBInstanceIdentifier
108
+ # MaxRecords
109
+ # Marker
110
+ #
111
+ # Returns array of instances as hashes.
112
+ # Response metadata can be retrieved by calling array.response_metadata on the returned array.
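+ #
+ # A minimal usage sketch (reusing the +rds+ handle from the example above):
+ #
+ #  rds.describe_db_instances                                    #=> all instances
+ #  rds.describe_db_instances(:db_instance_identifier => 'my-db-1', :max_records => 20)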
113
+ def describe_db_instances(options={})
114
+ params = {}
115
+ params['DBInstanceIdentifier'] = options[:db_instance_identifier] if options[:db_instance_identifier]
116
+ params['MaxRecords'] = options[:max_records] if options[:max_records]
117
+ params['Marker'] = options[:marker] if options[:marker]
118
+
119
+ resp = do_request("DescribeDBInstances", params, :pull_out_array=>[:describe_db_instances_result, :db_instances])
120
+
121
+ rescue Exception
122
+ on_exception
123
+ end
124
+
125
+
126
+ # identifier: identifier of db instance to delete.
127
+ # final_snapshot_identifier: if specified, RDS will create a final snapshot before deleting so you can restore it later.
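+ #
+ # A minimal usage sketch ('my-db-1' and the snapshot name are placeholders):
+ #
+ #  rds.delete_db_instance('my-db-1')                            # skips the final snapshot
+ #  rds.delete_db_instance('my-db-1', 'my-db-1-final')           # takes a final snapshot first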
128
+ def delete_db_instance(identifier, final_snapshot_identifier=nil)
129
+ @logger.info("Deleting DB Instance - " + identifier.to_s)
130
+
131
+ params = {}
132
+ params['DBInstanceIdentifier'] = identifier
133
+ if final_snapshot_identifier
134
+ params['FinalDBSnapshotIdentifier'] = final_snapshot_identifier
135
+ else
136
+ params['SkipFinalSnapshot'] = true
137
+ end
138
+
139
+ link = do_request("DeleteDBInstance", params, :pull_out_single=>[:delete_db_instance_result, :db_instance])
140
+
141
+ rescue Exception
142
+ on_exception
143
+ end
144
+
145
+
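+ # Creates a DB security group. A usage sketch (the names below are placeholders):
+ #
+ #  rds.create_db_security_group('my-db-sg', 'databases for the web tier')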
146
+ def create_db_security_group(group_name, description, options={})
147
+ params = {}
148
+ params['DBSecurityGroupName'] = group_name
149
+ params['DBSecurityGroupDescription'] = description
150
+
151
+ link = do_request("CreateDBSecurityGroup", params, :pull_out_single => [:create_db_security_group_result, :db_security_group])
152
+
153
+ rescue Exception
154
+ on_exception
155
+ end
156
+
157
+
158
+ def delete_db_security_group(group_name, options={})
159
+ params = {}
160
+ params['DBSecurityGroupName'] = group_name
161
+
162
+ link = do_request("DeleteDBSecurityGroup", params)
163
+
164
+ rescue Exception
165
+ on_exception
166
+ end
167
+
168
+
169
+ def describe_db_security_groups(options={})
170
+ params = {}
171
+ params['DBSecurityGroupName'] = options[:DBSecurityGroupName] if options[:DBSecurityGroupName]
172
+ params['MaxRecords'] = options[:MaxRecords] if options[:MaxRecords]
173
+
174
+ link = do_request("DescribeDBSecurityGroups", params, :pull_out_array=>[:describe_db_security_groups_result, :db_security_groups], :wrapper=>:db_security_group)
175
+
176
+
177
+ rescue Exception
178
+ on_exception
179
+ end
180
+
181
+
182
+ def authorize_db_security_group_ingress_ec2group(group_name, ec2_group_name, ec2_group_owner_id, options={})
183
+ params = {}
184
+ params['DBSecurityGroupName'] = group_name
185
+ params['EC2SecurityGroupOwnerId'] = ec2_group_owner_id
186
+ params['EC2SecurityGroupName'] = ec2_group_name
187
+ link = do_request("AuthorizeDBSecurityGroupIngress", params)
188
+ rescue Exception
189
+ on_exception
190
+ end
191
+
192
+
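+ # Authorizes ingress from a CIDR range to a DB security group.
+ # A usage sketch ('my-db-sg' and the CIDR are placeholders):
+ #
+ #  rds.authorize_db_security_group_ingress_range('my-db-sg', '10.0.0.0/8')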
193
+ def authorize_db_security_group_ingress_range(group_name, ip_range, options={})
194
+ params = {}
195
+ params['DBSecurityGroupName'] = group_name
196
+ params['CIDRIP'] = ip_range
197
+ link = do_request("AuthorizeDBSecurityGroupIngress", params)
198
+ rescue Exception
199
+ on_exception
200
+ end
201
+
202
+
203
+ def revoke_db_security_group_ingress(group_name, ip_range, options={})
204
+ params = {}
205
+ params['DBSecurityGroupName'] = group_name
206
+ params['CIDRIP'] = ip_range
207
+ link = do_request("RevokeDBSecurityGroupIngress", params)
208
+ rescue Exception
209
+ on_exception
210
+ end
211
+
212
+
213
+
214
+ end
215
+
216
+ end
@@ -0,0 +1,57 @@
1
+ #
2
+ # Copyright (c) 2007-2008 RightScale Inc
3
+ #
4
+ # Permission is hereby granted, free of charge, to any person obtaining
5
+ # a copy of this software and associated documentation files (the
6
+ # "Software"), to deal in the Software without restriction, including
7
+ # without limitation the rights to use, copy, modify, merge, publish,
8
+ # distribute, sublicense, and/or sell copies of the Software, and to
9
+ # permit persons to whom the Software is furnished to do so, subject to
10
+ # the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be
13
+ # included in all copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19
+ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20
+ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
+ #
23
+
24
+ require 'benchmark'
25
+ require 'net/https'
26
+ require 'uri'
27
+ require 'time'
28
+ require "cgi"
29
+ require "base64"
30
+ require "rexml/document"
31
+ require "openssl"
32
+ require "digest/sha1"
33
+
34
+ require 'rubygems'
35
+ require 'right_http_connection'
36
+
37
+ $:.unshift(File.dirname(__FILE__))
38
+ require 'awsbase/benchmark_fix'
39
+ require 'awsbase/support'
40
+ require 'awsbase/right_awsbase'
41
+ require 'ec2/right_ec2'
42
+ require 'ec2/right_mon_interface'
43
+ require 's3/right_s3_interface'
44
+ require 's3/right_s3'
45
+ require 'sqs/right_sqs_interface'
46
+ require 'sqs/right_sqs'
47
+ require 'sdb/right_sdb_interface'
48
+ require 'acf/right_acf_interface'
49
+ require 'elb/elb_interface'
50
+
51
+
52
+ # Backwards compatible: keeps the old RightAws namespace available.
53
+ # @deprecated
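+ # Code written against the old namespace should keep resolving, since constants
+ # are looked up through the included Aws module. A sketch:
+ #
+ #   s3 = RightAws::S3.new(aws_access_key_id, aws_secret_access_key)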
54
+ module RightAws
55
+ include Aws
56
+ extend Aws
57
+ end
@@ -0,0 +1,1110 @@
1
+ #
2
+ # Copyright (c) 2007-2008 RightScale Inc
3
+ #
4
+ # Permission is hereby granted, free of charge, to any person obtaining
5
+ # a copy of this software and associated documentation files (the
6
+ # "Software"), to deal in the Software without restriction, including
7
+ # without limitation the rights to use, copy, modify, merge, publish,
8
+ # distribute, sublicense, and/or sell copies of the Software, and to
9
+ # permit persons to whom the Software is furnished to do so, subject to
10
+ # the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be
13
+ # included in all copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19
+ # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20
+ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
+ #
23
+
24
+ module Aws
25
+
26
+ # = Aws::S3 -- RightScale's Amazon S3 interface
27
+ # The Aws::S3 class provides a complete interface to Amazon's Simple
28
+ # Storage Service.
29
+ # For explanations of the semantics
30
+ # of each call, please refer to Amazon's documentation at
31
+ # http://developer.amazonwebservices.com/connect/kbcategory.jspa?categoryID=48
32
+ #
33
+ # See examples below for the bucket and buckets methods.
34
+ #
35
+ # Error handling: all operations raise an Aws::AwsError in case
36
+ # of problems. Note that transient errors are automatically retried.
37
+ #
38
+ # It is a good idea to use a domain-name style when naming buckets.
39
+ # See http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingBucket.html
40
+ # for the bucket naming conventions. Buckets named that way can be accessed via virtual host names.
41
+ #
42
+ # Let's assume you have 3 buckets: 'awesome-bucket', 'awesome_bucket' and 'AWEsomE-bucket'.
43
+ # The first one's objects can be accessed as: http:// awesome-bucket.s3.amazonaws.com/key/object
44
+ #
45
+ # But the rest have to be accessed as:
46
+ # http:// s3.amazonaws.com/awesome_bucket/key/object and http:// s3.amazonaws.com/AWEsomE-bucket/key/object
47
+ #
48
+ # See http://docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html for a fuller explanation.
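+ #
+ # A minimal end-to-end sketch (the bucket name and credentials are placeholders):
+ #
+ #  s3     = Aws::S3.new(aws_access_key_id, aws_secret_access_key)
+ #  bucket = s3.bucket('my-unique-bucket-name', true)      # create the bucket if needed
+ #  bucket.put('greetings/hello.txt', 'Hello, S3!')        #=> true
+ #  bucket.key('greetings/hello.txt').data                 #=> 'Hello, S3!'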
49
+ #
50
+ class S3
51
+ attr_reader :interface
52
+
53
+ # Create a new handle to an S3 account. All handles share the same per-process or per-thread
54
+ # HTTP connection to Amazon S3. Each handle is for a specific account.
55
+ # The +params+ are passed through as-is to Aws::S3Interface.new
56
+ #
57
+ # Params is a hash:
58
+ #
59
+ # {:server => 's3.amazonaws.com' # Amazon service host: 's3.amazonaws.com'(default)
60
+ # :port => 443 # Amazon service port: 80 or 443(default)
61
+ # :protocol => 'https' # Amazon service protocol: 'http' or 'https'(default)
62
+ # :connection_mode => :default # options are
63
+ # :default (will use best known safe (as in won't need explicit close) option, may change in the future)
64
+ # :per_request (opens and closes a connection on every request)
65
+ #              :single (one connection shared across the entire app)
66
+ # :per_thread (one connection per thread)
67
+ #    :logger => Logger Object }      # Logger instance: logs to STDOUT if omitted
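+ #
+ # A construction sketch (the values shown mirror the defaults listed above):
+ #
+ #  s3 = Aws::S3.new(aws_access_key_id, aws_secret_access_key,
+ #                   :server          => 's3.amazonaws.com',
+ #                   :port            => 443,
+ #                   :protocol        => 'https',
+ #                   :connection_mode => :default,
+ #                   :logger          => Logger.new(STDOUT))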
68
+ def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
69
+ @interface = S3Interface.new(aws_access_key_id, aws_secret_access_key, params)
70
+ end
71
+
72
+ def close_connection
73
+ @interface.close_connection
74
+ end
75
+
76
+ # Retrieve a list of buckets.
77
+ # Returns an array of Aws::S3::Bucket instances.
78
+ # # Create handle to S3 account
79
+ # s3 = Aws::S3.new(aws_access_key_id, aws_secret_access_key)
80
+ #  my_bucket_names = s3.buckets.map{|b| b.name}
81
+ # puts "Buckets on S3: #{my_bucket_names.join(', ')}"
82
+ def buckets
83
+ @interface.list_all_my_buckets.map! do |entry|
84
+ owner = Owner.new(entry[:owner_id], entry[:owner_display_name])
85
+ Bucket.new(self, entry[:name], entry[:creation_date], owner)
86
+ end
87
+ end
88
+
89
+ # Retrieve an individual bucket.
90
+ # If the bucket does not exist and +create+ is set, a new bucket
91
+ # is created on S3. Launching this method with +create+=+true+ may
92
+ # affect the bucket's ACL if the bucket already exists.
93
+ # Returns an Aws::S3::Bucket instance or +nil+ if the bucket does not exist
94
+ # and +create+ is not set.
95
+ #
96
+ # s3 = Aws::S3.new(aws_access_key_id, aws_secret_access_key)
97
+ # bucket1 = s3.bucket('my_awesome_bucket_1')
98
+ #  bucket1.keys #=> exception here if the bucket does not exist
99
+ # ...
100
+ # bucket2 = s3.bucket('my_awesome_bucket_2', true)
101
+ # bucket2.keys #=> list of keys
102
+ # # create a bucket at the European location with public read access
103
+ # bucket3 = s3.bucket('my-awesome-bucket-3', true, 'public-read', :location => :eu)
104
+ #
105
+ # see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html
106
+ # (section: Canned Access Policies)
107
+ #
108
+ def bucket(name, create=false, perms=nil, headers={})
109
+ headers['x-amz-acl'] = perms if perms
110
+ @interface.create_bucket(name, headers) if create
111
+ return Bucket.new(self, name)
112
+ # The old way below was too slow and unnecessary because it retrieved all the buckets every time.
113
+ # owner = Owner.new(entry[:owner_id], entry[:owner_display_name])
114
+ # buckets.each { |bucket| return bucket if bucket.name == name }
115
+ # nil
116
+ end
117
+
118
+
119
+ class Bucket
120
+ attr_reader :s3, :name, :owner, :creation_date
121
+
122
+ # Create a Bucket instance.
123
+ # If the bucket does not exist and +create+ is set, a new bucket
124
+ # is created on S3. Launching this method with +create+=+true+ may
125
+ # affect the bucket's ACL if the bucket already exists.
126
+ # Returns a Bucket instance or +nil+ if the bucket does not exist
127
+ # and +create+ is not set.
128
+ #
129
+ # s3 = Aws::S3.new(aws_access_key_id, aws_secret_access_key)
130
+ # ...
131
+ # bucket1 = Aws::S3::Bucket.create(s3, 'my_awesome_bucket_1')
132
+ #  bucket1.keys #=> exception here if the bucket does not exist
133
+ # ...
134
+ # bucket2 = Aws::S3::Bucket.create(s3, 'my_awesome_bucket_2', true)
135
+ # bucket2.keys #=> list of keys
136
+ # # create a bucket at the European location with public read access
137
+ # bucket3 = Aws::S3::Bucket.create(s3,'my-awesome-bucket-3', true, 'public-read', :location => :eu)
138
+ #
139
+ # see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html
140
+ # (section: Canned Access Policies)
141
+ #
142
+ def self.create(s3, name, create=false, perms=nil, headers={})
143
+ s3.bucket(name, create, perms, headers)
144
+ end
145
+
146
+
147
+ # Create a bucket instance. In normal use this method should
148
+ # not be called directly.
149
+ # Use Aws::S3::Bucket.create or Aws::S3.bucket instead.
150
+ def initialize(s3, name, creation_date=nil, owner=nil)
151
+ @s3 = s3
152
+ @name = name
153
+ @owner = owner
154
+ @creation_date = creation_date
155
+ if @creation_date && !@creation_date.is_a?(Time)
156
+ @creation_date = Time.parse(@creation_date)
157
+ end
158
+ end
159
+
160
+ # Return bucket name as a String.
161
+ #
162
+ # bucket = Aws::S3.bucket('my_awesome_bucket')
163
+ # puts bucket #=> 'my_awesome_bucket'
164
+ #
165
+ def to_s
166
+ @name.to_s
167
+ end
168
+
169
+ alias_method :full_name, :to_s
170
+
171
+ # Return a public link to bucket.
172
+ #
173
+ # bucket.public_link #=> 'https://s3.amazonaws.com:443/my_awesome_bucket'
174
+ #
175
+ def public_link
176
+ params = @s3.interface.params
177
+ "#{params[:protocol]}://#{params[:server]}:#{params[:port]}/#{full_name}"
178
+ end
179
+
180
+ # Returns the bucket location
181
+ def location
182
+ @location ||= @s3.interface.bucket_location(@name)
183
+ end
184
+
185
+ # Retrieves the logging configuration for a bucket.
186
+ # Returns a hash of {:enabled, :targetbucket, :targetprefix}
187
+ #
188
+ # bucket.logging_info()
189
+ # => {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
190
+ def logging_info
191
+ @s3.interface.get_logging_parse(:bucket => @name)
192
+ end
193
+
194
+ # Enables S3 server access logging on a bucket. The target bucket must have been properly configured to receive server
195
+ # access logs.
196
+ # Params:
197
+ # :targetbucket - either the target bucket object or the name of the target bucket
198
+ # :targetprefix - the prefix under which all logs should be stored
199
+ #
200
+ # bucket.enable_logging(:targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/")
201
+ # => true
202
+ def enable_logging(params)
203
+ AwsUtils.mandatory_arguments([:targetbucket, :targetprefix], params)
204
+ AwsUtils.allow_only([:targetbucket, :targetprefix], params)
205
+ xmldoc = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\"><LoggingEnabled><TargetBucket>#{params[:targetbucket]}</TargetBucket><TargetPrefix>#{params[:targetprefix]}</TargetPrefix></LoggingEnabled></BucketLoggingStatus>"
206
+ @s3.interface.put_logging(:bucket => @name, :xmldoc => xmldoc)
207
+ end
208
+
209
+ # Disables S3 server access logging on a bucket. Takes no arguments.
210
+ def disable_logging
211
+ xmldoc = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\"></BucketLoggingStatus>"
212
+ @s3.interface.put_logging(:bucket => @name, :xmldoc => xmldoc)
213
+ end
214
+
215
+ # Retrieve a group of keys from Amazon.
216
+ # +options+ is a hash: { 'prefix'=>'', 'marker'=>'', 'max-keys'=>5, 'delimiter'=>'' }.
217
+ # Retrieves meta-header information if +head+ is +true+.
218
+ # Returns an array of Key instances.
219
+ #
220
+ # bucket.keys #=> # returns all keys from bucket
221
+ # bucket.keys('prefix' => 'logs') #=> # returns all keys that starts with 'logs'
222
+ #
223
+ def keys(options={}, head=false)
224
+ keys_and_service(options, head)[0]
225
+ end
226
+
227
+ # Same as the +keys+ method but returns an array of [keys, service_data],
228
+ # where +service_data+ is a hash with additional output information.
229
+ #
230
+ # keys, service = bucket.keys_and_service({'max-keys'=> 2, 'prefix' => 'logs'})
231
+ # p keys #=> # 2 keys array
232
+ # p service #=> {"max-keys"=>"2", "prefix"=>"logs", "name"=>"my_awesome_bucket", "marker"=>"", "is_truncated"=>true}
233
+ #
234
+ def keys_and_service(options={}, head=false)
235
+ opt = {}; options.each { |key, value| opt[key.to_s] = value }
236
+ service_data = {}
237
+ thislist = {}
238
+ list = []
239
+ @s3.interface.incrementally_list_bucket(@name, opt) do |thislist|
240
+ thislist[:contents].each do |entry|
241
+ owner = Owner.new(entry[:owner_id], entry[:owner_display_name])
242
+ key = Key.new(self, entry[:key], nil, {}, {}, entry[:last_modified], entry[:e_tag], entry[:size], entry[:storage_class], owner)
243
+ key.head if head
244
+ list << key
245
+ end
246
+ end
247
+ thislist.each_key do |key|
248
+ service_data[key] = thislist[key] unless (key == :contents || key == :common_prefixes)
249
+ end
250
+ [list, service_data]
251
+ end
252
+
253
+ # Retrieve key information from Amazon.
254
+ # The +key_name+ is a +String+ or Key instance.
255
+ # Retrieves meta-header information if +head+ is +true+.
256
+ # Returns new Key instance.
257
+ #
258
+ # key = bucket.key('logs/today/1.log', true) #=> #<Aws::S3::Key:0xb7b1e240 ... >
259
+ # # is the same as:
260
+ # key = Aws::S3::Key.create(bucket, 'logs/today/1.log')
261
+ # key.head
262
+ #
263
+ def key(key_name, head=false)
264
+ raise 'Key name can not be empty.' if key_name.blank?
265
+ key_instance = nil
266
+ # if this key exists - find it ....
267
+ keys({'prefix'=>key_name}, head).each do |key|
268
+ if key.name == key_name.to_s
269
+ key_instance = key
270
+ break
271
+ end
272
+ end
273
+ # .... else this key is unknown
274
+ unless key_instance
275
+ key_instance = Key.create(self, key_name.to_s)
276
+ end
277
+ key_instance
278
+ end
279
+
280
+ # Store object data.
281
+ # The +key+ is a +String+ or Key instance.
282
+ # Returns +true+.
283
+ #
284
+ # bucket.put('logs/today/1.log', 'Olala!') #=> true
285
+ #
286
+ def put(key, data=nil, meta_headers={}, perms=nil, headers={})
287
+ key = Key.create(self, key.to_s, data, meta_headers) unless key.is_a?(Key)
288
+ key.put(data, perms, headers)
289
+ end
290
+
291
+ # Retrieve object data from Amazon.
292
+ # The +key+ is a +String+ or Key.
293
+ # Returns the object data as a +String+.
294
+ #
295
+ #  data = bucket.get('logs/today/1.log')
296
+ #  puts data #=> 'sasfasfasdf'
297
+ #
298
+ def get(key, headers={})
299
+ key = Key.create(self, key.to_s) unless key.is_a?(Key)
300
+ key.get(headers)
301
+ end
302
+
303
+ # Rename object. Returns Aws::S3::Key instance.
304
+ #
305
+ # new_key = bucket.rename_key('logs/today/1.log','logs/today/2.log') #=> #<Aws::S3::Key:0xb7b1e240 ... >
306
+ #  puts new_key.name #=> 'logs/today/2.log'
307
+ #  new_key.exists? #=> true
308
+ #
309
+ def rename_key(old_key_or_name, new_name)
310
+ old_key_or_name = Key.create(self, old_key_or_name.to_s) unless old_key_or_name.is_a?(Key)
311
+ old_key_or_name.rename(new_name)
312
+ old_key_or_name
313
+ end
314
+
315
+ # Create an object copy. Returns a destination Aws::S3::Key instance.
316
+ #
317
+ # new_key = bucket.copy_key('logs/today/1.log','logs/today/2.log') #=> #<Aws::S3::Key:0xb7b1e240 ... >
318
+ #  puts new_key.name #=> 'logs/today/2.log'
319
+ #  new_key.exists? #=> true
320
+ #
321
+ def copy_key(old_key_or_name, new_key_or_name)
322
+ old_key_or_name = Key.create(self, old_key_or_name.to_s) unless old_key_or_name.is_a?(Key)
323
+ old_key_or_name.copy(new_key_or_name)
324
+ end
325
+
326
+ # Move an object to other location. Returns a destination Aws::S3::Key instance.
327
+ #
328
+ #  new_key = bucket.move_key('logs/today/1.log','logs/today/2.log') #=> #<Aws::S3::Key:0xb7b1e240 ... >
329
+ #  puts new_key.name #=> 'logs/today/2.log'
330
+ #  new_key.exists? #=> true
331
+ #
332
+ def move_key(old_key_or_name, new_key_or_name)
333
+ old_key_or_name = Key.create(self, old_key_or_name.to_s) unless old_key_or_name.is_a?(Key)
334
+ old_key_or_name.move(new_key_or_name)
335
+ end
336
+
337
+ # Remove all keys from a bucket.
338
+ # Returns +true+.
339
+ #
340
+ # bucket.clear #=> true
341
+ #
342
+ def clear
343
+ @s3.interface.clear_bucket(@name)
344
+ end
345
+
346
+ # Delete all keys where the 'folder_key' can be interpreted
347
+ # as a 'folder' name.
348
+ # Returns an array of string keys that have been deleted.
349
+ #
350
+ # bucket.keys.map{|key| key.name}.join(', ') #=> 'test, test/2/34, test/3, test1, test1/logs'
351
+ # bucket.delete_folder('test') #=> ['test','test/2/34','test/3']
352
+ #
353
+ def delete_folder(folder, separator='/')
354
+ @s3.interface.delete_folder(@name, folder, separator)
355
+ end
356
+
357
+ # Delete a bucket. Bucket must be empty.
358
+ # If +force+ is set, clears and deletes the bucket.
359
+ # Returns +true+.
360
+ #
361
+ # bucket.delete(true) #=> true
362
+ #
363
+ def delete(force=false)
364
+ force ? @s3.interface.force_delete_bucket(@name) : @s3.interface.delete_bucket(@name)
365
+ end
366
+
367
+ # Return a list of grantees.
368
+ #
369
+ def grantees
370
+ Grantee::grantees(self)
371
+ end
372
+
373
+ end
374
+
375
+
376
+ class Key
377
+ attr_reader :bucket, :name, :last_modified, :e_tag, :size, :storage_class, :owner
378
+ attr_accessor :headers, :meta_headers
379
+ attr_writer :data
380
+
381
+ # Separate Amazon meta headers from other headers
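+      # For example, assuming the standard 'x-amz-meta-' prefix:
+      #
+      #  split_meta('x-amz-meta-family' => 'smith', 'content-type' => 'text/plain')
+      #    #=> [{'content-type' => 'text/plain'}, {'family' => 'smith'}]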
382
+ def self.split_meta(headers) #:nodoc:
383
+ hash = headers.dup
384
+ meta = {}
385
+ hash.each do |key, value|
386
+ if key[/^#{S3Interface::AMAZON_METADATA_PREFIX}/]
387
+ meta[key.gsub(S3Interface::AMAZON_METADATA_PREFIX, '')] = value
388
+ hash.delete(key)
389
+ end
390
+ end
391
+ [hash, meta]
392
+ end
393
+
394
+ def self.add_meta_prefix(meta_headers, prefix=S3Interface::AMAZON_METADATA_PREFIX)
395
+ meta = {}
396
+ meta_headers.each do |meta_header, value|
397
+ if meta_header[/#{prefix}/]
398
+ meta[meta_header] = value
399
+ else
400
+ meta["#{S3Interface::AMAZON_METADATA_PREFIX}#{meta_header}"] = value
401
+ end
402
+ end
403
+ meta
404
+ end
405
+
406
+
407
+ # Create a new Key instance, but do not create the actual key.
408
+ # The +name+ is a +String+.
409
+ # Returns a new Key instance.
410
+ #
411
+ # key = Aws::S3::Key.create(bucket, 'logs/today/1.log') #=> #<Aws::S3::Key:0xb7b1e240 ... >
412
+ # key.exists? #=> true | false
413
+ # key.put('Woohoo!') #=> true
414
+ # key.exists? #=> true
415
+ #
416
+ def self.create(bucket, name, data=nil, meta_headers={})
417
+ new(bucket, name, data, {}, meta_headers)
418
+ end
419
+
420
+ # Create a new Key instance, but do not create the actual key.
421
+ # In normal use this method should not be called directly.
422
+ # Use Aws::S3::Key.create or bucket.key() instead.
423
+ #
424
+ def initialize(bucket, name, data=nil, headers={}, meta_headers={},
425
+ last_modified=nil, e_tag=nil, size=nil, storage_class=nil, owner=nil)
426
+ raise 'Bucket must be a Bucket instance.' unless bucket.is_a?(Bucket)
427
+ @bucket = bucket
428
+ @name = name
429
+ @data = data
430
+ @e_tag = e_tag
431
+ @size = size.to_i
432
+ @storage_class = storage_class
433
+ @owner = owner
434
+ @last_modified = last_modified
435
+ if @last_modified && !@last_modified.is_a?(Time)
436
+ @last_modified = Time.parse(@last_modified)
437
+ end
438
+ @headers, @meta_headers = self.class.split_meta(headers)
439
+ @meta_headers.merge!(meta_headers)
440
+ end
441
+
442
+ # Return key name as a String.
443
+ #
444
+ # key = Aws::S3::Key.create(bucket, 'logs/today/1.log') #=> #<Aws::S3::Key:0xb7b1e240 ... >
445
+ # puts key #=> 'logs/today/1.log'
446
+ #
447
+ def to_s
448
+ @name.to_s
449
+ end
450
+
451
+ # Return the full S3 path to this key (bucket/key).
452
+ #
453
+ # key.full_name #=> 'my_awesome_bucket/cool_key'
454
+ #
455
+ def full_name(separator='/')
456
+ "#{@bucket.to_s}#{separator}#{@name}"
457
+ end
458
+
459
+ # Return a public link to a key.
460
+ #
461
+ # key.public_link #=> 'https://s3.amazonaws.com:443/my_awesome_bucket/cool_key'
462
+ #
463
+ def public_link
464
+ params = @bucket.s3.interface.params
465
+ "#{params[:protocol]}://#{params[:server]}:#{params[:port]}/#{full_name('/')}"
466
+ end
467
+
468
+ # Return the key's data. The data is retrieved from Amazon on the first call and cached.
469
+ # TODO TRB 6/19/07 What does the above mean? Clarify.
470
+ #
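+ # For example:
+ #
+ #  key = bucket.key('logs/today/1.log')
+ #  key.data    # the first call fetches the object body from S3
+ #  key.data    # later calls return the cached data
+ #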
471
+ def data
472
+ get if !@data and exists?
473
+ @data
474
+ end
475
+
476
+ # Retrieve object data and attributes from Amazon.
477
+ # Returns a +String+.
478
+ #
479
+ def get(headers={}, &block)
480
+ response = @bucket.s3.interface.get(@bucket.name, @name, headers, &block)
481
+ @data = response[:object]
482
+ @headers, @meta_headers = self.class.split_meta(response[:headers])
483
+ # refresh(false) Holy moly, this was doing two extra hits to s3 for making 3 hits for every get!!
484
+ @data
485
+ end
486
+
487
+ # Store object data on S3.
488
+ # Parameter +data+ is a +String+ or S3Object instance.
489
+ # Returns +true+.
490
+ #
491
+ # key = Aws::S3::Key.create(bucket, 'logs/today/1.log')
492
+ # key.data = 'Qwerty'
493
+ # key.put #=> true
494
+ # ...
495
+ # key.put('Olala!') #=> true
496
+ #
497
+ def put(data=nil, perms=nil, headers={})
498
+ headers['x-amz-acl'] = perms if perms
499
+ @data = data || @data
500
+ meta = self.class.add_meta_prefix(@meta_headers)
501
+ @bucket.s3.interface.put(@bucket.name, @name, @data, meta.merge(headers))
502
+ end
503
+
504
+ # Rename an object. Returns new object name.
505
+ #
506
+ # key = Aws::S3::Key.create(bucket, 'logs/today/1.log') #=> #<Aws::S3::Key:0xb7b1e240 ... >
507
+ # key.rename('logs/today/2.log') #=> 'logs/today/2.log'
508
+ # puts key.name #=> 'logs/today/2.log'
509
+ # key.exists? #=> true
510
+ #
511
+ def rename(new_name)
512
+ @bucket.s3.interface.rename(@bucket.name, @name, new_name)
513
+ @name = new_name
514
+ end
515
+
516
+ # Create an object copy. Returns a destination Aws::S3::Key instance.
517
+ #
518
+ # # Key instance as destination
519
+ # key1 = Aws::S3::Key.create(bucket, 'logs/today/1.log') #=> #<Aws::S3::Key:0xb7b1e240 ... >
520
+ # key2 = Aws::S3::Key.create(bucket, 'logs/today/2.log') #=> #<Aws::S3::Key:0xb7b5e240 ... >
521
+ # key1.put('Olala!') #=> true
522
+ # key1.copy(key2) #=> #<Aws::S3::Key:0xb7b5e240 ... >
523
+ # key1.exists? #=> true
524
+ # key2.exists? #=> true
525
+ # puts key2.data #=> 'Olala!'
526
+ #
527
+ # # String as destination
528
+ # key = Aws::S3::Key.create(bucket, 'logs/today/777.log') #=> #<Aws::S3::Key:0xb7b1e240 ... >
529
+ # key.put('Olala!') #=> true
530
+ # new_key = key.copy('logs/today/888.log') #=> #<Aws::S3::Key:0xb7b5e240 ... >
531
+ # key.exists? #=> true
532
+ # new_key.exists? #=> true
533
+ #
534
+ def copy(new_key_or_name)
535
+ new_key_or_name = Key.create(@bucket, new_key_or_name.to_s) unless new_key_or_name.is_a?(Key)
536
+ @bucket.s3.interface.copy(@bucket.name, @name, new_key_or_name.bucket.name, new_key_or_name.name)
537
+ new_key_or_name
538
+ end
539
+
540
+ # Move an object to other location. Returns a destination Aws::S3::Key instance.
541
+ #
542
+ # # Key instance as destination
543
+ # key1 = Aws::S3::Key.create(bucket, 'logs/today/1.log') #=> #<Aws::S3::Key:0xb7b1e240 ... >
544
+ # key2 = Aws::S3::Key.create(bucket, 'logs/today/2.log') #=> #<Aws::S3::Key:0xb7b5e240 ... >
545
+ # key1.put('Olala!') #=> true
546
+ # key1.move(key2) #=> #<Aws::S3::Key:0xb7b5e240 ... >
547
+ # key1.exists? #=> false
548
+ # key2.exists? #=> true
549
+ # puts key2.data #=> 'Olala!'
550
+ #
551
+ # # String as destination
552
+ # key = Aws::S3::Key.create(bucket, 'logs/today/777.log') #=> #<Aws::S3::Key:0xb7b1e240 ... >
553
+ # key.put('Olala!') #=> true
554
+ # new_key = key.move('logs/today/888.log') #=> #<Aws::S3::Key:0xb7b5e240 ... >
555
+ # key.exists? #=> false
556
+ # new_key.exists? #=> true
557
+ #
558
+ def move(new_key_or_name)
559
+ new_key_or_name = Key.create(@bucket, new_key_or_name.to_s) unless new_key_or_name.is_a?(Key)
560
+ @bucket.s3.interface.move(@bucket.name, @name, new_key_or_name.bucket.name, new_key_or_name.name)
561
+ new_key_or_name
562
+ end
563
+
564
+ # Retrieve key info from bucket and update attributes.
565
+ # Refresh meta-headers (by calling +head+ method) if +head+ is set.
566
+ # Returns +true+ if the key exists in bucket and +false+ otherwise.
567
+ #
568
+ # key = Aws::S3::Key.create(bucket, 'logs/today/1.log')
569
+ # key.e_tag #=> nil
570
+ # key.meta_headers #=> {}
571
+ # key.refresh #=> true
572
+ # key.e_tag #=> '12345678901234567890bf11094484b6'
573
+ # key.meta_headers #=> {"family"=>"qwerty", "name"=>"asdfg"}
574
+ #
575
+ def refresh(head=true)
576
+ new_key = @bucket.key(self)
577
+ @last_modified = new_key.last_modified
578
+ @e_tag = new_key.e_tag
579
+ @size = new_key.size
580
+ @storage_class = new_key.storage_class
581
+ @owner = new_key.owner
582
+ if @last_modified
583
+ self.head
584
+ true
585
+ else
586
+ @headers = @meta_headers = {}
587
+ false
588
+ end
589
+ end
590
+
591
+ # Updates headers and meta-headers from S3.
592
+ # Returns +true+.
593
+ #
594
+ # key.meta_headers #=> {"family"=>"qwerty"}
595
+ # key.head #=> true
596
+ # key.meta_headers #=> {"family"=>"qwerty", "name"=>"asdfg"}
597
+ #
598
+ def head
599
+ @headers, @meta_headers = self.class.split_meta(@bucket.s3.interface.head(@bucket, @name))
600
+ true
601
+ end
602
+
603
+ # Reload meta-headers only. Returns meta-headers hash.
604
+ #
605
+ # key.reload_meta #=> {"family"=>"qwerty", "name"=>"asdfg"}
606
+ #
607
+ def reload_meta
608
+ @meta_headers = self.class.split_meta(@bucket.s3.interface.head(@bucket, @name)).last
609
+ end
610
+
611
+ # Replace meta-headers by new hash at S3. Returns new meta-headers hash.
612
+ #
613
+ # key.reload_meta #=> {"family"=>"qwerty", "name"=>"asdfg"}
614
+ #  key.save_meta("family"=>"oops", "race" => "troll") #=> {"family"=>"oops", "race" => "troll"}
615
+ # key.reload_meta #=> {"family"=>"oops", "race" => "troll"}
616
+ #
617
+ def save_meta(meta_headers)
618
+ meta = self.class.add_meta_prefix(meta_headers)
619
+ @bucket.s3.interface.copy(@bucket.name, @name, @bucket.name, @name, :replace, meta)
620
+ @meta_headers = self.class.split_meta(meta)[1]
621
+ end
622
+
623
+ # Check for existence of the key in the given bucket.
624
+ # Returns +true+ or +false+.
625
+ #
626
+ # key = Aws::S3::Key.create(bucket,'logs/today/1.log')
627
+ # key.exists? #=> false
628
+ # key.put('Woohoo!') #=> true
629
+ # key.exists? #=> true
630
+ #
631
+ def exists?
632
+ @bucket.key(self).last_modified ? true : false
633
+ end
634
+
635
+ # Remove key from bucket.
636
+ # Returns +true+.
637
+ #
638
+ # key.delete #=> true
639
+ #
640
+ def delete
641
+ raise 'Key name must be specified.' if @name.blank?
642
+ @bucket.s3.interface.delete(@bucket, @name)
643
+ end
644
+
645
+ # Return a list of grantees.
646
+ #
647
+ def grantees
648
+ Grantee::grantees(self)
649
+ end
650
+
651
+ end
652
+
653
+
654
+ class Owner
655
+ attr_reader :id, :name
656
+
657
+ def initialize(id, name)
658
+ @id = id
659
+ @name = name
660
+ end
661
+
662
+ # Return Owner name as a +String+.
663
+ def to_s
664
+ @name
665
+ end
666
+ end
667
+
668
+
669
+ # There are 2 ways to set permissions for a bucket or key (called a +thing+ below):
670
+ #
671
+ # 1 . Use +perms+ param to set 'Canned Access Policies' when calling the <tt>bucket.create</tt>,
672
+ # <tt>bucket.put</tt> and <tt>key.put</tt> methods.
673
+ # The +perms+ param can take these values: 'private', 'public-read', 'public-read-write' and
674
+ # 'authenticated-read'.
675
+ # (see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html).
676
+ #
677
+ # bucket = s3.bucket('bucket_for_kd_test_13', true, 'public-read')
678
+ # key.put('Woohoo!','public-read-write' )
679
+ #
680
+ # 2 . Use Grantee instances (the permission is a +String+ or an +Array+ of: 'READ', 'WRITE',
681
+ # 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL'):
682
+ #
683
+ # bucket = s3.bucket('my_awesome_bucket', true)
684
+ # grantee1 = Aws::S3::Grantee.new(bucket, 'a123b...223c', FULL_CONTROL, :apply)
685
+ # grantee2 = Aws::S3::Grantee.new(bucket, 'xy3v3...5fhp', [READ, WRITE], :apply)
686
+ #
687
+ # There is only one way to get and to remove permission (via Grantee instances):
688
+ #
689
+ # grantees = bucket.grantees # a list of Grantees that have any access for this bucket
690
+ # grantee1 = Aws::S3::Grantee.new(bucket, 'a123b...223c')
691
+ # grantee1.perms #=> returns a list of perms for this grantee to that bucket
692
+ # ...
693
+ # grantee1.drop # remove all perms for this grantee
694
+ # grantee2.revoke('WRITE') # revoke write access only
695
+ #
696
+ class Grantee
697
+ # The bucket or key the grantee has access to.
698
+ attr_reader :thing
699
+ # Grantee Amazon id.
700
+ attr_reader :id
701
+ # Grantee display name.
702
+ attr_reader :name
703
+ # Array of permissions.
704
+ attr_accessor :perms
705
+
706
+ # Retrieve Owner information and a list of Grantee instances that have
707
+ # access to this thing (bucket or key).
708
+ #
709
+ # bucket = s3.bucket('my_awesome_bucket', true, 'public-read')
710
+ # ...
711
+ # Aws::S3::Grantee.owner_and_grantees(bucket) #=> [owner, grantees]
712
+ #
713
+ def self.owner_and_grantees(thing)
714
+ if thing.is_a?(Bucket)
715
+ bucket, key = thing, ''
716
+ else
717
+ bucket, key = thing.bucket, thing
718
+ end
719
+ hash = bucket.s3.interface.get_acl_parse(bucket.to_s, key.to_s)
720
+ owner = Owner.new(hash[:owner][:id], hash[:owner][:display_name])
721
+
722
+ grantees = []
723
+ hash[:grantees].each do |id, params|
724
+ grantees << new(thing, id, params[:permissions], nil, params[:display_name])
725
+ end
726
+ [owner, grantees]
727
+ end
728
+
729
+ # Retrieves a list of Grantee instances that have access to this thing (bucket or key).
730
+ #
731
+ # bucket = s3.bucket('my_awesome_bucket', true, 'public-read')
732
+ # ...
733
+ # Aws::S3::Grantee.grantees(bucket) #=> grantees
734
+ #
735
+ def self.grantees(thing)
736
+ owner_and_grantees(thing)[1]
737
+ end
738
+
739
+ def self.put_acl(thing, owner, grantees) #:nodoc:
740
+ if thing.is_a?(Bucket)
741
+ bucket, key = thing, ''
742
+ else
743
+ bucket, key = thing.bucket, thing
744
+ end
745
+ body = "<AccessControlPolicy>" +
746
+ "<Owner>" +
747
+ "<ID>#{owner.id}</ID>" +
748
+ "<DisplayName>#{owner.name}</DisplayName>" +
749
+ "</Owner>" +
750
+ "<AccessControlList>" +
751
+ grantees.map { |grantee| grantee.to_xml }.join +
752
+ "</AccessControlList>" +
753
+ "</AccessControlPolicy>"
754
+ bucket.s3.interface.put_acl(bucket.to_s, key.to_s, body)
755
+ end
756
+
757
+ # Create a new Grantee instance.
758
+ # Grantee +id+ must exist on S3. If +action+ == :refresh, then retrieve
759
+ # permissions from S3 and update @perms. If +action+ == :apply, then apply
760
+ # perms to +thing+ at S3. If +action+ == :apply_and_refresh, then it performs
761
+ # both actions. This is used for new grantees that had no perms to
762
+ # this thing before. The default action is :refresh.
763
+ #
764
+ # bucket = s3.bucket('my_awesome_bucket', true, 'public-read')
765
+ # grantee1 = Aws::S3::Grantee.new(bucket, 'a123b...223c', FULL_CONTROL)
766
+ # ...
767
+ # grantee2 = Aws::S3::Grantee.new(bucket, 'abcde...asdf', [FULL_CONTROL, READ], :apply)
768
+ # grantee3 = Aws::S3::Grantee.new(bucket, 'aaaaa...aaaa', 'READ', :apply_and_refresh)
769
+ #
770
+ def initialize(thing, id, perms=[], action=:refresh, name=nil)
771
+ @thing = thing
772
+ @id = id
773
+ @name = name
774
+ @perms = perms.to_a
775
+ case action
776
+ when :apply then
777
+ apply
778
+ when :refresh then
779
+ refresh
780
+ when :apply_and_refresh then
781
+ apply; refresh
782
+ end
783
+ end
784
+
785
+ # Return +true+ if the grantee has any permissions to the thing.
786
+ def exists?
787
+ self.class.grantees(@thing).each do |grantee|
788
+ return true if @id == grantee.id
789
+ end
790
+ false
791
+ end
792
+
793
+ # Return Grantee type (+String+): "Group" or "CanonicalUser".
794
+ def type
795
+ @id[/^http:/] ? "Group" : "CanonicalUser"
796
+ end
797
+
798
+ # Return a name or an id.
799
+ def to_s
800
+ @name || @id
801
+ end
802
+
803
+ # Add permissions for grantee.
804
+ # Permissions: 'READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL'.
805
+ # See http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingPermissions.html .
806
+ # Returns +true+.
807
+ #
808
+ # grantee.grant('FULL_CONTROL') #=> true
809
+ # grantee.grant('FULL_CONTROL','WRITE','READ') #=> true
810
+ # grantee.grant(['WRITE_ACP','READ','READ_ACP']) #=> true
811
+ #
812
+ def grant(*permissions)
813
+ permissions.flatten!
814
+ old_perms = @perms.dup
815
+ @perms += permissions
816
+ @perms.uniq!
817
+ return true if @perms == old_perms
818
+ apply
819
+ end
820
+
821
+ # Revoke permissions for grantee.
822
+ # Permissions: 'READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL'
823
+ # See http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingPermissions.html .
824
+ # Default value is 'FULL_CONTROL'.
825
+ # Returns +true+.
826
+ #
827
+ # grantee.revoke('READ') #=> true
828
+ # grantee.revoke('FULL_CONTROL','WRITE') #=> true
829
+ # grantee.revoke(['READ_ACP','WRITE_ACP']) #=> true
830
+ #
831
+ def revoke(*permissions)
832
+ permissions.flatten!
833
+ old_perms = @perms.dup
834
+ @perms -= permissions
835
+ @perms.uniq!
836
+ return true if @perms == old_perms
837
+ apply
838
+ end
839
+
840
+ # Revoke all permissions for this grantee.
841
+ # Returns +true+.
842
+ #
843
+ # grantee.drop #=> true
844
+ #
845
+ def drop
846
+ @perms = []
847
+ apply
848
+ end
849
+
850
+ # Refresh grantee perms for its +thing+.
851
+ # Returns +true+ if the grantee has perms for this +thing+ or
852
+ # +false+ otherwise, and updates @perms value as a side-effect.
853
+ #
854
+ # grantee.grant('FULL_CONTROL') #=> true
855
+ # grantee.refresh #=> true
856
+ # grantee.drop #=> true
857
+ # grantee.refresh #=> false
858
+ #
859
+ def refresh
860
+ @perms = []
861
+ self.class.grantees(@thing).each do |grantee|
862
+ if @id == grantee.id
863
+ @name = grantee.name
864
+ @perms = grantee.perms
865
+ return true
866
+ end
867
+ end
868
+ false
869
+ end
870
+
871
+ # Apply current grantee @perms to +thing+. This method is called internally by the +grant+
872
+ # and +revoke+ methods. In normal use this method should not
873
+ # be called directly.
874
+ #
875
+ # grantee.perms = ['FULL_CONTROL']
876
+ # grantee.apply #=> true
877
+ #
878
+ def apply
879
+ @perms.uniq!
880
+ owner, grantees = self.class.owner_and_grantees(@thing)
881
+ # walk through all the grantees and replace the data for the current one and ...
882
+ grantees.map! { |grantee| grantee.id == @id ? self : grantee }
883
+ # ... if this grantee is not known - add this bad boy to a list
884
+ grantees << self unless grantees.include?(self)
885
+ # set permissions
886
+ self.class.put_acl(@thing, owner, grantees)
887
+ end
888
+
889
+ def to_xml # :nodoc:
890
+ id_str = @id[/^http/] ? "<URI>#{@id}</URI>" : "<ID>#{@id}</ID>"
891
+ grants = ''
892
+ @perms.each do |perm|
893
+ grants << "<Grant>" +
894
+ "<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" " +
895
+ "xsi:type=\"#{type}\">#{id_str}</Grantee>" +
896
+ "<Permission>#{perm}</Permission>" +
897
+ "</Grant>"
898
+ end
899
+ grants
900
+ end
901
+
902
+ end
903
+
904
+ end
905
+
906
+ # Aws::S3Generator and Aws::S3Generator::Bucket methods:
907
+ #
908
+ # s3g = Aws::S3Generator.new('1...2', 'nx...Y6') #=> #<Aws::S3Generator:0xb7b5cc94>
909
+ #
910
+ # # List all buckets(method 'GET'):
911
+ # buckets_list = s3g.buckets #=> 'https://s3.amazonaws.com:443/?Signature=Y...D&Expires=1180941864&AWSAccessKeyId=1...2'
912
+ # # Create bucket link (method 'PUT'):
913
+ # bucket = s3g.bucket('my_awesome_bucket') #=> #<Aws::S3Generator::Bucket:0xb7bcbda8>
914
+ # link_to_create = bucket.create_link(1.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket?Signature=4...D&Expires=1180942132&AWSAccessKeyId=1...2
915
+ # # ... or:
916
+ # bucket = Aws::S3Generator::Bucket.create(s3g, 'my_awesome_bucket') #=> #<Aws::S3Generator::Bucket:0xb7bcbda8>
917
+ # link_to_create = bucket.create_link(1.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket?Signature=4...D&Expires=1180942132&AWSAccessKeyId=1...2
918
+ # # ... or:
919
+ # bucket = Aws::S3Generator::Bucket.new(s3g, 'my_awesome_bucket') #=> #<Aws::S3Generator::Bucket:0xb7bcbda8>
920
+ # link_to_create = bucket.create_link(1.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket?Signature=4...D&Expires=1180942132&AWSAccessKeyId=1...2
921
+ # # List bucket(method 'GET'):
922
+ # bucket.keys(1.day) #=> https://s3.amazonaws.com:443/my_awesome_bucket?Signature=i...D&Expires=1180942620&AWSAccessKeyId=1...2
923
+ # # Create/put key (method 'PUT'):
924
+ # bucket.put('my_cool_key') #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=q...D&Expires=1180943094&AWSAccessKeyId=1...2
925
+ # # Get key data (method 'GET'):
926
+ # bucket.get('logs/today/1.log', 1.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=h...M%3D&Expires=1180820032&AWSAccessKeyId=1...2
927
+ # # Delete bucket (method 'DELETE'):
928
+ # bucket.delete(2.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/logs%2Ftoday%2F1.log?Signature=4...D&Expires=1180820032&AWSAccessKeyId=1...2
929
+ #
930
+ # Aws::S3Generator::Key methods:
931
+ #
932
+ # # Create Key instance:
933
+ #   key = Aws::S3Generator::Key.new(bucket, 'my_cool_key') #=> #<Aws::S3Generator::Key:0xb7b7394c>
934
+ # # Put key data (method 'PUT'):
935
+ # key.put #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=2...D&Expires=1180943302&AWSAccessKeyId=1...2
936
+ # # Get key data (method 'GET'):
937
+ # key.get #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=a...D&Expires=1180820032&AWSAccessKeyId=1...2
938
+ # # Head key (method 'HEAD'):
939
+ # key.head #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=b...D&Expires=1180820032&AWSAccessKeyId=1...2
940
+ # # Delete key (method 'DELETE'):
941
+ # key.delete #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=x...D&Expires=1180820032&AWSAccessKeyId=1...2
942
+ #
943
+ class S3Generator
944
+ attr_reader :interface
945
+
946
+ def initialize(aws_access_key_id, aws_secret_access_key, params={})
947
+ @interface = S3Interface.new(aws_access_key_id, aws_secret_access_key, params)
948
+ end
949
+
950
+ # Generate link to list all buckets
951
+ #
952
+ # s3.buckets(1.hour)
953
+ #
954
+ def buckets(expires=nil, headers={})
955
+ @interface.list_all_my_buckets_link(expires, headers)
956
+ end
957
+
958
+ # Create a new S3Generator::Bucket instance and generate a link to create it at S3.
959
+ #
960
+ #  bucket = s3.bucket('my_awesome_bucket')
961
+ #
962
+ def bucket(name, expires=nil, headers={})
963
+ Bucket.create(self, name.to_s)
964
+ end
965
+
966
+ class Bucket
967
+ attr_reader :s3, :name
968
+
969
+ def to_s
970
+ @name
971
+ end
972
+
973
+ alias_method :full_name, :to_s
974
+
975
+ # Return a public link to bucket.
976
+ #
977
+ # bucket.public_link #=> 'https://s3.amazonaws.com:443/my_awesome_bucket'
978
+ #
979
+ def public_link
980
+ params = @s3.interface.params
981
+ "#{params[:protocol]}://#{params[:server]}:#{params[:port]}/#{full_name}"
982
+ end
983
+
984
+ # Create a new S3Generator::Bucket instance and generate a creation link for it.
985
+ def self.create(s3, name, expires=nil, headers={})
986
+ new(s3, name.to_s)
987
+ end
988
+
989
+ # Create a new S3Generator::Bucket instance.
990
+ def initialize(s3, name)
991
+ @s3, @name = s3, name.to_s
992
+ end
993
+
994
+ # Return a link to create this bucket.
995
+ #
996
+ def create_link(expires=nil, headers={})
997
+ @s3.interface.create_bucket_link(@name, expires, headers)
998
+ end
999
+
1000
+ # Generate link to list keys.
1001
+ #
1002
+ # bucket.keys
1003
+ # bucket.keys('prefix'=>'logs')
1004
+ #
1005
+ def keys(options=nil, expires=nil, headers={})
1006
+ @s3.interface.list_bucket_link(@name, options, expires, headers)
1007
+ end
1008
+
1009
+ # Return a S3Generator::Key instance.
1010
+ #
1011
+ # bucket.key('my_cool_key').get #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=B...D&Expires=1180820032&AWSAccessKeyId=1...2
1012
+ # bucket.key('my_cool_key').delete #=> https://s3.amazonaws.com:443/my_awesome_bucket/my_cool_key?Signature=B...D&Expires=1180820098&AWSAccessKeyId=1...2
1013
+ #
1014
+ def key(name)
1015
+ Key.new(self, name)
1016
+ end
1017
+
1018
+ # Generates link to PUT key data.
1019
+ #
1020
+ #  puts bucket.put('logs/today/1.log', {}, 2.hour)
1021
+ #
1022
+ def put(key, meta_headers={}, expires=nil, headers={})
1023
+ meta = Aws::S3::Key.add_meta_prefix(meta_headers)
1024
+ @s3.interface.put_link(@name, key.to_s, nil, expires, meta.merge(headers))
1025
+ end
1026
+
1027
+ # Generate link to GET key data.
1028
+ #
1029
+ # bucket.get('logs/today/1.log', 1.hour)
1030
+ #
1031
+ def get(key, expires=nil, headers={})
1032
+ @s3.interface.get_link(@name, key.to_s, expires, headers)
1033
+ end
1034
+
1035
+ # Generate link to delete bucket.
1036
+ #
1037
+ # bucket.delete(2.hour)
1038
+ #
1039
+ def delete(expires=nil, headers={})
1040
+ @s3.interface.delete_bucket_link(@name, expires, headers)
1041
+ end
1042
+ end
1043
+
1044
+
1045
+ class Key
1046
+ attr_reader :bucket, :name
1047
+
1048
+ def to_s
1049
+ @name
1050
+ end
1051
+
1052
+ # Return the full S3 name (bucket/key).
1053
+ #
1054
+ # key.full_name #=> 'my_awesome_bucket/cool_key'
1055
+ #
1056
+ def full_name(separator='/')
1057
+ "#{@bucket.to_s}#{separator}#{@name}"
1058
+ end
1059
+
1060
+ # Return a public link to key.
1061
+ #
1062
+ # key.public_link #=> 'https://s3.amazonaws.com:443/my_awesome_bucket/cool_key'
1063
+ #
1064
+ def public_link
1065
+ params = @bucket.s3.interface.params
1066
+ "#{params[:protocol]}://#{params[:server]}:#{params[:port]}/#{full_name('/')}"
1067
+ end
1068
+
1069
+ def initialize(bucket, name, meta_headers={})
1070
+ @bucket = bucket
1071
+ @name = name.to_s
1072
+ @meta_headers = meta_headers
1073
+ raise 'Key name can not be empty.' if @name.blank?
1074
+ end
1075
+
1076
+ # Generate link to PUT key data.
1077
+ #
1078
+ #   puts key.put(2.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/logs%2Ftoday%2F1.log?Signature=B...D&Expires=1180820032&AWSAccessKeyId=1...2
1079
+ #
1080
+ def put(expires=nil, headers={})
1081
+ @bucket.put(@name.to_s, @meta_headers, expires, headers)
1082
+ end
1083
+
1084
+ # Generate link to GET key data.
1085
+ #
1086
+ #  key.get(1.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/logs%2Ftoday%2F1.log?Signature=h...M%3D&Expires=1180820032&AWSAccessKeyId=1...2
1087
+ #
1088
+ def get(expires=nil, headers={})
1089
+ @bucket.s3.interface.get_link(@bucket.to_s, @name, expires, headers)
1090
+ end
1091
+
1092
+ # Generate link to delete key.
1093
+ #
1094
+ #  key.delete(2.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/logs%2Ftoday%2F1.log?Signature=4...D&Expires=1180820032&AWSAccessKeyId=1...2
1095
+ #
1096
+ def delete(expires=nil, headers={})
1097
+ @bucket.s3.interface.delete_link(@bucket.to_s, @name, expires, headers)
1098
+ end
1099
+
1100
+ # Generate link to head key.
1101
+ #
1102
+ #  key.head(2.hour) #=> https://s3.amazonaws.com:443/my_awesome_bucket/logs%2Ftoday%2F1.log?Signature=4...D&Expires=1180820032&AWSAccessKeyId=1...2
1103
+ #
1104
+ def head(expires=nil, headers={})
1105
+ @bucket.s3.interface.head_link(@bucket.to_s, @name, expires, headers)
1106
+ end
1107
+ end
1108
+ end
1109
+
1110
+ end