internuity-awsum 0.2 → 0.3
Sign up to get free protection for your applications and to get access to all the features.
- data/Rakefile +2 -1
- data/lib/awsum.rb +1 -1
- data/lib/ec2/address.rb +118 -0
- data/lib/ec2/availability_zone.rb +68 -0
- data/lib/ec2/keypair.rb +75 -0
- data/lib/ec2/region.rb +96 -0
- data/lib/ec2/security_group.rb +175 -0
- data/lib/error.rb +55 -0
- data/lib/net_fix.rb +100 -0
- data/lib/requestable.rb +2 -0
- data/lib/s3/bucket.rb +66 -0
- data/lib/s3/headers.rb +24 -0
- data/lib/s3/object.rb +138 -0
- data/lib/s3/s3.rb +219 -0
- data/test/fixtures/ec2/addresses.xml +10 -0
- data/test/fixtures/ec2/allocate_address.xml +5 -0
- data/test/fixtures/ec2/associate_address.xml +5 -0
- data/test/fixtures/ec2/authorize_ip_access.xml +5 -0
- data/test/fixtures/ec2/authorize_owner_group_access.xml +5 -0
- data/test/fixtures/ec2/authorize_owner_group_access_error.xml +2 -0
- data/test/fixtures/ec2/availability_zones.xml +16 -0
- data/test/fixtures/ec2/create_key_pair.xml +29 -0
- data/test/fixtures/ec2/create_security_group.xml +5 -0
- data/test/fixtures/ec2/delete_key_pair.xml +5 -0
- data/test/fixtures/ec2/delete_security_group.xml +5 -0
- data/test/fixtures/ec2/deregister_image.xml +5 -0
- data/test/fixtures/ec2/disassociate_address.xml +5 -0
- data/test/fixtures/ec2/internal_error.xml +2 -0
- data/test/fixtures/ec2/invalid_amiid_error.xml +2 -0
- data/test/fixtures/ec2/invalid_request_error.xml +2 -0
- data/test/fixtures/ec2/key_pairs.xml +10 -0
- data/test/fixtures/ec2/regions.xml +14 -0
- data/test/fixtures/ec2/register_image.xml +5 -0
- data/test/fixtures/ec2/release_address.xml +5 -0
- data/test/fixtures/ec2/revoke_ip_access.xml +5 -0
- data/test/fixtures/ec2/revoke_owner_group_access.xml +5 -0
- data/test/fixtures/ec2/security_groups.xml +159 -0
- data/test/fixtures/ec2/unassociated_address.xml +10 -0
- data/test/fixtures/s3/buckets.xml +2 -0
- data/test/fixtures/s3/copy_failure.xml +23 -0
- data/test/fixtures/s3/invalid_request_signature.xml +5 -0
- data/test/fixtures/s3/keys.xml +2 -0
- data/test/units/ec2/test_addresses.rb +60 -0
- data/test/units/ec2/test_keypair.rb +87 -0
- data/test/units/ec2/test_regions.rb +33 -0
- data/test/units/ec2/test_security_group.rb +105 -0
- data/test/units/s3/test_bucket.rb +58 -0
- data/test/units/s3/test_object.rb +111 -0
- data/test/units/s3/test_s3.rb +298 -0
- data/test/units/test_error.rb +101 -0
- data/test/units/test_requestable.rb +241 -0
- data/test/work_out_string_to_sign.rb +7 -0
- metadata +132 -43
data/lib/error.rb
ADDED
@@ -0,0 +1,55 @@
|
|
1
|
+
require 'parser'

module Awsum
  # Raised whenever Amazon returns an error response.
  #
  # Parses the XML error document returned by AWS and exposes:
  # * +response_code+ - the HTTP status code of the response
  # * +code+          - the Amazon error code (e.g. 'InvalidRequest')
  # * +message+       - the human-readable error message
  # * +request_id+    - the AWS request id, useful for support queries
  # * +additional+    - a Hash of any other elements found in the error body
  class Error < StandardError
    attr_reader :response_code, :code, :message, :request_id, :additional

    # +response+ must respond to #code and #body (e.g. a Net::HTTPResponse)
    def initialize(response)
      @response_code = response.code
      parser = ErrorParser.new
      parser.parse(response.body)
      @code = parser.code
      @message = parser.message
      @request_id = parser.request_id
      @additional = parser.additional
    end

    def inspect
      "#<Awsum::Error response_code=#{@response_code} code=#{@code} request_id=#{@request_id} message=#{@message}>"
    end

    private

    # SAX-style parser for the AWS error XML document.
    class ErrorParser < Awsum::Parser #:nodoc:
      attr_reader :code, :message, :request_id, :additional

      def initialize
        @additional = {}
        @text = ""
      end

      def tag_start(tag, attributes)
      end

      def text(text)
        @text << text unless @text.nil?
      end

      def tag_end(tag)
        text = @text.strip
        # FIX: was `text.blank?` - String#blank? is an ActiveSupport
        # extension this library never requires. The text is already
        # stripped, so plain-Ruby String#empty? is equivalent here.
        return if text.empty?

        case tag
        when 'Code'
          @code = text
        when 'Message'
          @message = text
        when 'RequestID', 'RequestId'
          @request_id = text
        else
          @additional[tag] = text
        end
        @text = ''
      end
    end
  end
end
|
data/lib/net_fix.rb
ADDED
@@ -0,0 +1,100 @@
|
|
1
|
+
# Some fixes for the net/http libraries to better suppport S3
#
# NOTE(review): these monkey-patches copy and modify private internals of the
# Ruby 1.8-era net/http implementation (rbuf_fill, exec, send_request_with_body*,
# HTTP#request). Confirm they still match the net/http shipped with the Ruby
# version actually in use before upgrading.
module Net
  class BufferedIO
    #Increase the default read size for streaming from the socket
    # (16KB per sysread instead of net/http's default 1KB)
    def rbuf_fill
      # NOTE(review): bare Kernel#timeout is the legacy spelling of
      # Timeout.timeout - deprecated/removed in newer Rubies; verify.
      timeout(@read_timeout) {
        @rbuf << @io.sysread(1024 * 16)
      }
    end
  end

  class HTTPGenericRequest
    # Read size for local body streams (see send_request_with_body_stream)
    @@local_read_size = 1024 * 16

    # Added limit which can be one of :headers or :body to limit the sending of
    # a request to either the headers or the body in order to make use of
    # 100-continue processing for S3
    def exec(sock, ver, path, limit = nil) #:nodoc: internal use only
      if @body
        send_request_with_body sock, ver, path, @body, limit
      elsif @body_stream
        send_request_with_body_stream sock, ver, path, @body_stream, limit
      else
        # No body at all: limit is irrelevant, just send headers
        write_header sock, ver, path
      end
    end

    private

    # Will send both headers and body unless limit is set to either
    # :headers or :body to restrict to one
    def send_request_with_body(sock, ver, path, body, limit = nil)
      self.content_length = body.length
      delete 'Transfer-Encoding'
      supply_default_content_type
      write_header sock, ver, path unless limit == :body
      sock.write body unless limit == :headers
    end

    # Will send both headers and body unless limit is set to either
    # :headers or :body to restrict to one
    #
    # Increased the default read size for streaming from local streams to 1MB
    def send_request_with_body_stream(sock, ver, path, f, limit = nil)
      unless content_length() or chunked?
        raise ArgumentError,
          "Content-Length not given and Transfer-Encoding is not `chunked'"
      end
      supply_default_content_type
      write_header sock, ver, path unless limit == :body
      if limit != :headers
        if chunked?
          # Chunked transfer encoding: each 1MB chunk is prefixed with its
          # hex length and terminated with CRLF; a zero-length chunk ends it
          while s = f.read(1024 * 1024)
            sock.write(sprintf("%x\r\n", s.length) << s << "\r\n")
          end
          sock.write "0\r\n\r\n"
        else
          while s = f.read(1024 * 1024)
            sock.write s
          end
        end
      end
    end
  end

  class HTTP < Protocol
    # Patched to handle 100-continue processing for S3
    def request(req, body = nil, &block) # :yield: +response+
      unless started?
        start {
          req['connection'] ||= 'close'
          # return exits the whole method with the recursive call's result
          return request(req, body, &block)
        }
      end
      if proxy_user()
        unless use_ssl?
          req.proxy_basic_auth proxy_user(), proxy_pass()
        end
      end

      req.set_body_internal body
      begin_transport req
      # Send only the headers if a 100-continue request
      limit = ((req.is_a?(Post) || req.is_a?(Put)) && req['expect'] == '100-continue') ? :headers : nil
      req.exec @socket, @curr_http_version, edit_path(req.path), limit
      begin
        res = HTTPResponse.read_new(@socket)
        # Got the interim 100 Continue: now send the deferred body, then
        # loop to read the real response
        if res.is_a?(HTTPContinue) && limit && req['content-length'].to_i > 0
          req.exec @socket, @curr_http_version, edit_path(req.path), :body
        end
      end while res.kind_of?(HTTPContinue)
      res.reading_body(@socket, req.response_body_permitted?) {
        yield res if block_given?
      }
      end_transport req, res

      res
    end
  end
end
|
data/lib/requestable.rb
CHANGED
data/lib/s3/bucket.rb
ADDED
@@ -0,0 +1,66 @@
|
|
1
|
+
module Awsum
  class S3
    # A single S3 bucket, identified by name.
    class Bucket
      attr_reader :name, :creation_date

      def initialize(s3, name, creation_date = nil)
        @s3 = s3
        @name = name
        @creation_date = creation_date
      end

      # Delete this Bucket
      def delete
        @s3.delete_bucket(@name)
      end

      # Delete this Bucket, recursively deleting all keys first
      def delete!
        @s3.keys(@name).each(&:delete)
        delete
      end
    end

    # Builds Bucket instances from a ListAllMyBuckets XML response.
    class BucketParser < Awsum::Parser #:nodoc:
      def initialize(s3)
        @s3 = s3
        @buckets = []
        @text = nil
      end

      def tag_start(tag, attributes)
        # Only <Bucket> elements start a new record; text outside them
        # is ignored (@text stays nil)
        return unless tag == 'Bucket'
        @current = {}
        @text = ''
      end

      def text(chunk)
        @text << chunk unless @text.nil?
      end

      def tag_end(tag)
        if tag == 'Bucket'
          @buckets << Bucket.new(
            @s3,
            @current['Name'],
            Time.parse(@current['CreationDate'])
          )
          @text = nil
          @current = nil
        elsif !@current.nil?
          value = @text.strip unless @text.nil?
          @current[tag] = (value == '' ? nil : value)
        end
      end

      def result
        @buckets
      end
    end
  end
end
|
data/lib/s3/headers.rb
ADDED
@@ -0,0 +1,24 @@
|
|
1
|
+
module Awsum
  class S3
    # Thin proxy around a Net::HTTPResponse that exposes only its
    # Net::HTTPHeader behaviour (header readers), never the response body.
    class Headers #:nodoc:
      # Methods deliberately NOT delegated, to keep this object header-only.
      BLOCKED_METHODS = %w(body body_permitted? entity inspect read_body to_ary value).freeze

      # +response+ is the underlying response object (e.g. Net::HTTPResponse)
      def initialize(response)
        @response = response
      end

      # Locking down to HTTPHeader methods only
      def method_missing(method, *args, &block)
        if delegatable?(method)
          @response.send(method, *args, &block)
        else
          raise NoMethodError.new("undefined method `#{method}' for #{inspect}")
        end
      end

      # FIX: method_missing was not paired with respond_to_missing?, so
      # respond_to? and Object#method lied about the delegated header methods.
      def respond_to_missing?(method, include_private = false)
        delegatable?(method) || super
      end

      def inspect
        headers = []
        @response.canonical_each do |h,v| headers << h end
        "#<Awsum::S3::Headers \"#{headers.join('", "')}\">"
      end

      private

      # True when +method+ may be forwarded to the underlying response.
      def delegatable?(method)
        !BLOCKED_METHODS.include?(method.to_s) && @response.respond_to?(method)
      end
    end
  end
end
|
data/lib/s3/object.rb
ADDED
@@ -0,0 +1,138 @@
|
|
1
|
+
module Awsum
  class S3
    # A single key (object) stored in S3, together with the metadata
    # returned by a bucket listing.
    class Object
      attr_reader :key, :bucket, :last_modified, :etag, :size, :owner, :storage_class

      def initialize(s3, bucket, key, last_modified, etag, size, owner, storage_class)
        @s3 = s3
        @bucket = bucket
        @key = key
        @last_modified = last_modified
        @etag = etag
        @size = size
        @owner = owner
        @storage_class = storage_class
      end

      # Get the headers for this Object (memoized after the first call)
      #
      # All header methods map directly to the Net::HTTPHeader module
      def headers
        @headers ||= @s3.object_headers(@bucket, @key)
      end

      # Retrieve the data stored for this Object
      #
      # You can get the data as a single call or add a block to retrieve the data in chunks
      # ==Examples
      #   content = object.data
      #
      # or
      #
      #   object.data do |chunk|
      #     puts chunk
      #   end
      def data(&block)
        @s3.object_data @bucket, @key, &block
      end

      # Delete this Key
      def delete
        @s3.delete_object(@bucket, @key)
      end

      # Make a copy of this Object with a new key
      def copy(new_key, headers = nil, meta_headers = nil)
        @s3.copy_object(@bucket, @key, nil, new_key, headers, meta_headers)
      end

      # Rename or move this Object to a new key
      # (the original is deleted only if the copy succeeded)
      def rename(new_key, headers = nil, meta_headers = nil)
        copied = @s3.copy_object(@bucket, @key, nil, new_key, headers, meta_headers)
        @s3.delete_object(@bucket, @key) if copied
      end
      alias_method :move, :rename

      # Copy this Object to another Bucket
      def copy_to(new_bucket, new_key = nil, headers = nil, meta_headers = nil)
        @s3.copy_object(@bucket, @key, new_bucket, new_key, headers, meta_headers)
      end

      # Move this Object to another Bucket
      # (the original is deleted only if the copy succeeded)
      def move_to(new_bucket, new_key = nil, headers = nil, meta_headers = nil)
        copied = @s3.copy_object(@bucket, @key, new_bucket, new_key, headers, meta_headers)
        @s3.delete_object(@bucket, @key) if copied
      end
    end

    #TODO: Create a more advanced array which can deal with pagination
    class ObjectParser < Awsum::Parser #:nodoc:
      def initialize(s3)
        @s3 = s3
        @bucket = ''
        @objects = []
        @text = nil
        @stack = []
      end

      def tag_start(tag, attributes)
        if tag == 'ListBucketResult'
          @stack.push(tag)
          @text = ''
        elsif tag == 'Contents'
          @stack.push(tag)
          @entry = {}
          @text = ''
        elsif tag == 'Owner'
          @owner = {}
          @stack.push(tag)
        end
      end

      def text(chunk)
        @text << chunk unless @text.nil?
      end

      def tag_end(tag)
        if tag == 'Name'
          # Only the bucket's own <Name> (direct child of ListBucketResult)
          @bucket = @text.strip if @stack.last == 'ListBucketResult'
        elsif tag == 'Contents'
          @objects << Object.new(
            @s3,
            @bucket,
            @entry['Key'],
            Time.parse(@entry['LastModified']),
            @entry['ETag'],
            @entry['Size'].to_i,
            {'id' => @owner['ID'], 'name' => @owner['DisplayName']},
            @entry['StorageClass']
          )
          @entry = nil
          @text = nil
          @stack.pop
        elsif tag == 'Owner'
          @stack.pop
        else
          stripped = @text.strip unless @text.nil?
          if @stack.last == 'Owner'
            @owner[tag] = (stripped == '' ? nil : stripped) unless @owner.nil?
          elsif @stack.last == 'Contents'
            @entry[tag] = (stripped == '' ? nil : stripped) unless @entry.nil?
          end
          @text = ''
        end
      end

      def result
        @objects
      end
    end
  end
end
|
data/lib/s3/s3.rb
ADDED
@@ -0,0 +1,219 @@
|
|
1
|
+
require 's3/bucket'
require 's3/object'
require 's3/headers'

module Awsum
  # Handles all interaction with Amazon S3
  #
  # ==Getting Started
  # Create an Awsum::S3 object and begin calling methods on it.
  #   require 'rubygems'
  #   require 'awsum'
  #   s3 = Awsum::S3.new('your access id', 'your secret key')
  #   buckets = s3.buckets
  #   ...
  #
  # All calls to S3 can be done directly in this class, or through a more
  # object oriented way through the various returned classes
  #
  # ==Examples
  #   s3.bucket('test-bucket').delete!
  #
  #   s3.keys('test-bucket').each do |key|
  #     puts key.key
  #   end
  #
  # ==Errors
  # All methods will raise an Awsum::Error if an error is returned from Amazon
  #
  # ==Missing Methods
  # If you need any of this functionality, please consider getting involved
  # and help complete this library.
  class S3
    include Awsum::Requestable

    # Create an new S3 instance
    #
    # The access_key and secret_key are both required to do any meaningful work.
    #
    # If you want to get these keys from environment variables, you can do that
    # in your code as follows:
    #   s3 = Awsum::S3.new(ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY'])
    def initialize(access_key = nil, secret_key = nil)
      @access_key = access_key
      @secret_key = secret_key
    end

    # List all the Bucket(s)
    def buckets
      response = send_s3_request
      parser = Awsum::S3::BucketParser.new(self)
      parser.parse(response.body)
    end

    # Get a named Bucket handle (no request is made to S3)
    def bucket(name)
      Bucket.new(self, name)
    end

    # Create a new Bucket
    #
    # ===Parameters
    # * <tt>bucket_name</tt> - The name of the new bucket
    # * <tt>location</tt> <i>(optional)</i> - Can be <tt>:default</tt>, <tt>:us</tt> or <tt>:eu</tt>
    #
    # Raises ArgumentError when the name violates S3 bucket naming rules.
    def create_bucket(bucket_name, location = :default)
      raise ArgumentError.new('Bucket name cannot be in an ip address style') if bucket_name =~ /^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/
      raise ArgumentError.new('Bucket name can only have lowercase letters, numbers, periods (.), underscores (_) and dashes (-)') unless bucket_name =~ /^[\w\d][-a-z\d._]+[a-z\d._]$/
      raise ArgumentError.new('Bucket name cannot contain a dash (-) next to a period (.)') if bucket_name =~ /\.-|-\./
      raise ArgumentError.new('Bucket name must be between 3 and 63 characters') if bucket_name.size < 3 || bucket_name.size > 63

      data = nil
      if location == :eu
        data = '<CreateBucketConfiguration><LocationConstraint>EU</LocationConstraint></CreateBucketConfiguration>'
      end

      response = send_s3_request('PUT', :bucket => bucket_name, :data => data)
      response.is_a?(Net::HTTPSuccess)
    end

    # Delete a Bucket (must be empty). Returns true on success.
    def delete_bucket(bucket_name)
      response = send_s3_request('DELETE', :bucket => bucket_name)
      response.is_a?(Net::HTTPSuccess)
    end

    # List the Key(s) of a Bucket
    #
    # ===Parameters
    # * <tt>bucket_name</tt> - The name of the bucket to search for keys
    # ====Options
    # * <tt>:prefix</tt> - Limits the response to keys which begin with the indicated prefix. You can use prefixes to separate a bucket into different sets of keys in a way similar to how a file system uses folders.
    # * <tt>:marker</tt> - Indicates where in the bucket to begin listing. The list will only include keys that occur lexicographically after marker. This is convenient for pagination: To get the next page of results use the last key of the current page as the marker.
    # * <tt>:max_keys</tt> - The maximum number of keys you'd like to see in the response body. The server might return fewer than this many keys, but will not return more.
    # * <tt>:delimiter</tt> - Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response.
    def keys(bucket_name, options = {})
      parameters = {}
      parameters['prefix'] = options[:prefix] if options[:prefix]
      parameters['marker'] = options[:marker] if options[:marker]
      # FIX: the S3 REST API query parameter is 'max-keys' (dash), the
      # original sent 'max_keys' which S3 ignores
      parameters['max-keys'] = options[:max_keys] if options[:max_keys]
      # FIX: the original assigned 'prefix' twice and never sent the
      # documented delimiter option; accept both the correct spelling and
      # the :delimeter typo used in the original docs
      delimiter = options[:delimiter] || options[:delimeter]
      parameters['delimiter'] = delimiter if delimiter

      # NOTE(review): the :paramters spelling is kept byte-identical as it
      # must match the option key expected by send_s3_request in
      # requestable.rb -- confirm before renaming it.
      response = send_s3_request('GET', :bucket => bucket_name, :paramters => parameters)
      parser = Awsum::S3::ObjectParser.new(self)
      parser.parse(response.body)
    end

    # Create a new Object in the specified Bucket
    #
    # ===Parameters
    # * <tt>bucket_name</tt> - The name of the Bucket in which to store the Key
    # * <tt>key</tt> - The name/path of the Key to store
    # * <tt>data</tt> - The data to be stored in this Object
    # * <tt>headers</tt> - Standard HTTP headers to be sent along
    # * <tt>meta_headers</tt> - Meta headers to be stored along with the key
    # * <tt>acl</tt> - A canned access policy, can be one of <tt>:private</tt>, <tt>:public_read</tt>, <tt>:public_read_write</tt> or <tt>:authenticated_read</tt>
    def create_object(bucket_name, key, data, headers = {}, meta_headers = {}, acl = :private)
      headers = headers.dup
      meta_headers.each do |k,v|
        headers[k =~ /^x-amz-meta-/i ? k : "x-amz-meta-#{k}"] = v
      end
      headers['x-amz-acl'] = acl.to_s.gsub(/_/, '-')

      response = send_s3_request('PUT', :bucket => bucket_name, :key => key, :headers => headers, :data => data)
      response.is_a?(Net::HTTPSuccess)
    end

    # Retrieve the headers for this Object
    #
    # All header methods map directly to the Net::HTTPHeader module
    def object_headers(bucket_name, key)
      response = send_s3_request('HEAD', :bucket => bucket_name, :key => key)
      Headers.new(response)
    end

    # Retrieve the data stored for the specified Object
    #
    # You can get the data as a single call or add a block to retrieve the data in chunks
    # ==Examples
    #   data = s3.object_data('test-bucket', 'key')
    #
    # or
    #
    #   s3.object_data('test-bucket', 'key') do |chunk|
    #     puts chunk
    #   end
    def object_data(bucket_name, key, &block)
      send_s3_request('GET', :bucket => bucket_name, :key => key) do |response|
        if block_given?
          response.read_body(&block)
          return true
        else
          return response.body
        end
      end
    end

    # Deletes an Object from a Bucket. Returns true on success.
    def delete_object(bucket_name, key)
      response = send_s3_request('DELETE', :bucket => bucket_name, :key => key)
      response.is_a?(Net::HTTPSuccess)
    end

    # Copy the contents of an Object to another key and/or bucket
    #
    # ===Parameters
    # * <tt>source_bucket_name</tt> - The name of the Bucket from which to copy
    # * <tt>source_key</tt> - The name of the Key from which to copy
    # * <tt>destination_bucket_name</tt> - The name of the Bucket to which to copy (Can be nil if copying within the same bucket, or updating header data of existing Key)
    # * <tt>destination_key</tt> - The name of the Key to which to copy (Can be nil if copying to a new bucket with same key, or updating header data of existing Key)
    # * <tt>headers</tt> - If not nil, the headers are replaced with this information
    # * <tt>meta_headers</tt> - If not nil, the meta headers are replaced with this information
    #
    #--
    # TODO: Need to handle copy-if-... headers
    def copy_object(source_bucket_name, source_key, destination_bucket_name = nil, destination_key = nil, headers = nil, meta_headers = nil)
      raise ArgumentError.new('You must include one of destination_bucket_name, destination_key or headers to be replaced') if destination_bucket_name.nil? && destination_key.nil? && headers.nil? && meta_headers.nil?

      # The RHS references the caller-supplied headers/meta_headers before
      # headers is reassigned to the merged request-header hash
      headers = {
        'x-amz-copy-source' => "/#{source_bucket_name}/#{source_key}",
        'x-amz-metadata-directive' => (((destination_bucket_name.nil? && destination_key.nil?) || !(headers.nil? || meta_headers.nil?)) ? 'REPLACE' : 'COPY')
      }.merge(headers||{})
      meta_headers.each do |k,v|
        headers[k =~ /^x-amz-meta-/i ? k : "x-amz-meta-#{k}"] = v
      end unless meta_headers.nil?

      destination_bucket_name ||= source_bucket_name
      destination_key ||= source_key

      response = send_s3_request('PUT', :bucket => destination_bucket_name, :key => destination_key, :headers => headers, :data => nil)
      if response.is_a?(Net::HTTPSuccess)
        #Check for delayed error (See http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html#RESTObjectCOPY_Response)
        response_body = response.body
        if response_body =~ /<Error>/i
          raise Awsum::Error.new(response)
        else
          true
        end
      else
        # FIX: previously fell through returning nil; normalize to false
        # (same truthiness for callers such as Object#rename)
        false
      end
    end

    #private
    #The host to make all requests against
    def host
      @host ||= 's3.amazonaws.com'
    end

    def host=(host)
      @host = host
    end
  end
end
|