scashin133-s3 0.3.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +7 -0
- data/Gemfile +4 -0
- data/Gemfile.lock +24 -0
- data/LICENSE +20 -0
- data/README.rdoc +22 -0
- data/Rakefile +21 -0
- data/extra/s3_attachment_fu.rb +159 -0
- data/extra/s3_paperclip.rb +157 -0
- data/lib/s3/bucket.rb +172 -0
- data/lib/s3/buckets_extension.rb +27 -0
- data/lib/s3/connection.rb +222 -0
- data/lib/s3/exceptions.rb +110 -0
- data/lib/s3/object.rb +251 -0
- data/lib/s3/objects_extension.rb +37 -0
- data/lib/s3/parser.rb +52 -0
- data/lib/s3/request.rb +31 -0
- data/lib/s3/service.rb +82 -0
- data/lib/s3/signature.rb +229 -0
- data/lib/s3/version.rb +3 -0
- data/lib/s3.rb +27 -0
- data/s3.gemspec +29 -0
- data/test/bucket_test.rb +215 -0
- data/test/connection_test.rb +214 -0
- data/test/object_test.rb +187 -0
- data/test/service_test.rb +111 -0
- data/test/signature_test.rb +218 -0
- data/test/test_helper.rb +3 -0
- metadata +156 -0
data/.gitignore
ADDED
data/Gemfile
ADDED
data/Gemfile.lock
ADDED
@@ -0,0 +1,24 @@
+PATH
+  remote: .
+  specs:
+    scashin133-s3 (0.3.8)
+      proxies
+
+GEM
+  remote: http://rubygems.org/
+  specs:
+    mocha (0.9.8)
+      rake
+    proxies (0.2.1)
+    rake (0.8.7)
+    test-unit (2.1.1)
+
+PLATFORMS
+  ruby
+
+DEPENDENCIES
+  bundler (>= 1.0.0)
+  mocha
+  proxies
+  scashin133-s3!
+  test-unit (>= 2.0)
data/LICENSE
ADDED
@@ -0,0 +1,20 @@
+Copyright (c) 2009 Jakub Kuźma, Mirosław Boruta
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.rdoc
ADDED
@@ -0,0 +1,22 @@
+= S3
+
+The S3 library provides access to {Amazon's Simple Storage Service}[http://aws.amazon.com/s3/].
+It supports both European and US buckets through the {REST API}[http://docs.amazonwebservices.com/AmazonS3/latest/RESTAPI.html].
+
+* homepage[http://jah.pl/projects/s3.html]
+* gemcutter[http://gemcutter.org/gems/s3]
+* repository[http://github.com/qoobaa/s3]
+* {issue tracker}[http://github.com/qoobaa/s3/issues]
+* rdoc[http://qoobaa.github.com/s3]
+
+== Installation
+
+  gem install s3
+
+== Usage
+
+See homepage[http://jah.pl/projects/s3.html] for details.
+
+== Copyright
+
+Copyright (c) 2009 Jakub Kuźma, Mirosław Boruta. See LICENSE[http://github.com/qoobaa/s3/raw/master/LICENSE] for details.
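The README defers usage to the project homepage, so here is a minimal sketch of the gem's API pieced together from the classes added in this diff (S3::Service, the buckets proxy, and the objects proxy). The credentials and bucket name are placeholders, not values from the package:

    require "s3"

    # Placeholder credentials -- substitute a real AWS key pair.
    service = S3::Service.new(:access_key_id => "AKIA...",
                              :secret_access_key => "SECRET")

    bucket = service.buckets.find("example-bucket") # retrieves the bucket, raises if it is missing
    object = bucket.objects.build("greeting.txt")   # built locally, no request yet
    object.content = "Hello, S3!"
    object.save                                     # uploads the object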
data/Rakefile
ADDED
@@ -0,0 +1,21 @@
+require "bundler"
+Bundler::GemHelper.install_tasks
+Bundler.setup
+
+require "rake/testtask"
+require "rake/rdoctask"
+
+Rake::TestTask.new(:test) do |test|
+  test.libs << "lib" << "test"
+  test.pattern = "test/**/*_test.rb"
+  test.verbose = true
+end
+
+Rake::RDocTask.new do |rdoc|
+  rdoc.rdoc_dir = "rdoc"
+  rdoc.title = "s3 #{S3::VERSION}"
+  rdoc.rdoc_files.include("README.rdoc")
+  rdoc.rdoc_files.include("lib/**/*.rb")
+end
+
+task :default => :test
data/extra/s3_attachment_fu.rb
ADDED
@@ -0,0 +1,159 @@
+require "singleton"
+require "s3"
+
+# S3 Backend for attachment-fu plugin. After installing attachment-fu
+# plugin, copy the file to:
+# +vendor/plugins/attachment-fu/lib/technoweenie/attachment_fu/backends+
+#
+# To configure S3Backend create initializer file in your Rails
+# application, e.g. +config/initializers/s3_backend.rb+.
+#
+# Technoweenie::AttachmentFu::Backends::S3Backend.configuration do |config|
+#   config.access_key_id = "..." # your access key id
+#   config.secret_access_key = "..." # your secret access key
+#   config.bucket_name = "..." # default bucket name to store attachments
+#   config.use_ssl = false # pass true if you want to communicate via SSL
+# end
+
+module Technoweenie
+  module AttachmentFu
+    module Backends
+      module S3Backend
+
+        # S3Backend configuration class
+        class Configuration
+          include Singleton
+
+          ATTRIBUTES = [:access_key_id, :secret_access_key, :use_ssl, :bucket_name]
+
+          attr_accessor *ATTRIBUTES
+        end
+
+        # Method used to configure S3Backend, see the example above
+        def self.configuration
+          if block_given?
+            yield Configuration.instance
+          end
+          Configuration.instance
+        end
+
+        # :nodoc:
+        def self.included(base)
+          include S3
+
+          service = Service.new(:access_key_id => configuration.access_key_id,
+                                :secret_access_key => configuration.secret_access_key,
+                                :use_ssl => configuration.use_ssl)
+
+          bucket_name = base.attachment_options[:bucket_name] || configuration.bucket_name
+
+          base.cattr_accessor :bucket
+          base.bucket = service.buckets.build(bucket_name) # don't connect
+
+          base.before_update :rename_file
+        end
+
+        # The attachment ID used in the full path of a file
+        def attachment_path_id
+          ((respond_to?(:parent_id) && parent_id) || id).to_s
+        end
+
+        # The pseudo hierarchy containing the file relative to the bucket name
+        # Example: <tt>:table_name/:id</tt>
+        def base_path
+          [attachment_options[:path_prefix], attachment_path_id].join("/")
+        end
+
+        # The full path to the file relative to the bucket name
+        # Example: <tt>:table_name/:id/:filename</tt>
+        def full_filename(thumbnail = nil)
+          [base_path, thumbnail_name_for(thumbnail)].join("/")
+        end
+
+        # All public objects are accessible via a GET request to the S3 servers. You can generate a
+        # url for an object using the s3_url method.
+        #
+        #   @photo.s3_url
+        #
+        # The resulting url is in the form: <tt>http(s)://:server/:bucket_name/:table_name/:id/:file</tt> where
+        # the <tt>:server</tt> variable defaults to <tt>AWS::S3 URL::DEFAULT_HOST</tt> (s3.amazonaws.com) and can be
+        # set using the configuration parameters in <tt>RAILS_ROOT/config/amazon_s3.yml</tt>.
+        #
+        # The optional thumbnail argument will output the thumbnail's filename (if any).
+        def s3_url(thumbnail = nil)
+          if attachment_options[:cname]
+            ["#{s3_protocol}#{bucket.name}", full_filename(thumbnail)].join("/")
+          else
+            ["#{s3_protocol}#{s3_hostname}#{bucket.path_prefix}", full_filename(thumbnail)].join("/")
+          end
+        end
+        alias :public_url :s3_url
+        alias :public_filename :s3_url
+
+        # Name of the bucket used to store attachments
+        def bucket_name
+          self.class.bucket.name
+        end
+
+        # :nodoc:
+        def create_temp_file
+          write_to_temp_file current_data
+        end
+
+        # :nodoc:
+        def current_data
+          # Object.value full_filename, bucket_name
+          object = self.class.bucket.objects.find(full_filename)
+          object.content
+        end
+
+        # Returns http:// or https:// depending on use_ssl setting
+        def s3_protocol
+          attachment_options[:use_ssl] ? "https://" : "http://"
+        end
+
+        # Returns hostname of the bucket
+        # e.g. +bucketname.com.s3.amazonaws.com+. Additionally you can
+        # pass :cname => true option in has_attachment method to
+        # return CNAME only, e.g. +bucketname.com+
+        def s3_hostname
+          attachment_options[:cname] ? self.class.bucket.name : self.class.bucket.host
+        end
+
+        protected
+
+        # Frees the space in S3 bucket, used by after_destroy callback
+        def destroy_file
+          object = self.class.bucket.objects.find(full_filename)
+          object.destroy
+        end
+
+        # Renames file if filename has been changed - copy the file to
+        # new key and delete old one
+        def rename_file
+          return unless filename_changed?
+
+          old_full_filename = [base_path, filename_was].join("/")
+
+          object = self.class.bucket.objects.find(old_full_filename)
+          new_object = object.copy(:key => full_filename, :acl => attachment_options[:acl])
+          object.destroy
+          true
+        end
+
+        # Saves the file to storage
+        def save_to_storage
+          if save_attachment?
+            object = self.class.bucket.objects.build(full_filename)
+
+            object.content_type = content_type
+            object.acl = attachment_options[:acl]
+            object.content = temp_path ? File.open(temp_path) : temp_data
+            object.save
+          end
+          true
+        end
+      end
+    end
+  end
+end
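For orientation, a model wired to this backend might look like the sketch below. has_attachment is attachment-fu's own macro; the model name, bucket name, and option values are illustrative only, chosen to match the attachment_options keys (:bucket_name, :cname, :acl) the backend reads above:

    class Photo < ActiveRecord::Base
      has_attachment :storage => :s3,              # selects the S3Backend module
                     :bucket_name => "my-photos",  # hypothetical; overrides configuration.bucket_name
                     :cname => true                # s3_url/s3_hostname treat the bucket name as a CNAME
    end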
data/extra/s3_paperclip.rb
ADDED
@@ -0,0 +1,157 @@
+# S3 backend for paperclip plugin. Copy the file to:
+# +config/initializers/+ directory
+#
+# Example configuration for CNAME bucket:
+#
+#   has_attached_file :image,
+#                     :s3_host_alias => "bucket.domain.tld",
+#                     :url => ":s3_alias_url",
+#                     :styles => {
+#                       :medium => "300x300>",
+#                       :thumb => "100x100>"
+#                     },
+#                     :storage => :s3,
+#                     :s3_credentials => {
+#                       :access_key_id => "...",
+#                       :secret_access_key => "..."
+#                     },
+#                     :bucket => "bucket.domain.tld",
+#                     :path => ":attachment/:id/:style.:extension"
+module Paperclip
+  module Storage
+    module S3
+      def self.extended base
+        begin
+          require "s3"
+        rescue LoadError => e
+          e.message << " (You may need to install the s3 gem)"
+          raise e
+        end
+
+        base.instance_eval do
+          @s3_credentials = parse_credentials(@options[:s3_credentials])
+          @bucket_name = @options[:bucket] || @s3_credentials[:bucket]
+          @bucket_name = @bucket_name.call(self) if @bucket_name.is_a?(Proc)
+          @s3_options = @options[:s3_options] || {}
+          @s3_permissions = @options[:s3_permissions] || :public_read
+          @s3_storage_class = @options[:s3_storage_class] || :standard
+          @s3_protocol = @options[:s3_protocol] || (@s3_permissions == :public_read ? "http" : "https")
+          @s3_headers = @options[:s3_headers] || {}
+          @s3_host_alias = @options[:s3_host_alias]
+          @url = ":s3_path_url" unless @url.to_s.match(/^:s3.*url$/)
+          @service = ::S3::Service.new(@s3_options.merge(
+            :access_key_id => @s3_credentials[:access_key_id],
+            :secret_access_key => @s3_credentials[:secret_access_key],
+            :use_ssl => @s3_protocol == "https"
+          ))
+          @bucket = @service.buckets.build(@bucket_name)
+        end
+        Paperclip.interpolates(:s3_alias_url) do |attachment, style|
+          "#{attachment.s3_protocol}://#{attachment.s3_host_alias}/#{attachment.path(style).gsub(%r{^/}, "")}"
+        end
+        Paperclip.interpolates(:s3_path_url) do |attachment, style|
+          "#{attachment.s3_protocol}://s3.amazonaws.com/#{attachment.bucket_name}/#{attachment.path(style).gsub(%r{^/}, "")}"
+        end
+        Paperclip.interpolates(:s3_domain_url) do |attachment, style|
+          "#{attachment.s3_protocol}://#{attachment.bucket_name}.s3.amazonaws.com/#{attachment.path(style).gsub(%r{^/}, "")}"
+        end
+      end
+
+      def expiring_url(style_name = default_style, time = 3600)
+        bucket.objects.build(path(style_name)).temporary_url(Time.now + time)
+      end
+
+      def bucket_name
+        @bucket_name
+      end
+
+      def bucket
+        @bucket
+      end
+
+      def s3_host_alias
+        @s3_host_alias
+      end
+
+      def parse_credentials creds
+        creds = find_credentials(creds).stringify_keys
+        (creds[RAILS_ENV] || creds).symbolize_keys
+      end
+
+      def exists?(style = default_style)
+        if original_filename
+          bucket.objects.build(path(style)).exists?
+        else
+          false
+        end
+      end
+
+      def s3_protocol
+        @s3_protocol
+      end
+
+      # Returns representation of the data of the file assigned to the given
+      # style, in the format most representative of the current storage.
+      def to_file style = default_style
+        return @queued_for_write[style] if @queued_for_write[style]
+        begin
+          file = Tempfile.new(path(style))
+          file.binmode if file.respond_to?(:binmode)
+          file.write(bucket.objects.find(path(style)).content)
+          file.rewind
+        rescue ::S3::Error::NoSuchKey
+          file.close if file.respond_to?(:close)
+          file = nil
+        end
+        file
+      end
+
+      def flush_writes #:nodoc:
+        @queued_for_write.each do |style, file|
+          begin
+            log("saving #{path(style)}")
+            object = bucket.objects.build(path(style))
+            file.rewind
+            object.content = file.read
+            object.acl = @s3_permissions
+            object.storage_class = @s3_storage_class
+            object.content_type = instance_read(:content_type)
+            object.content_disposition = @s3_headers[:content_disposition]
+            object.content_encoding = @s3_headers[:content_encoding]
+            object.save
+          rescue ::S3::Error::ResponseError => e
+            raise
+          end
+        end
+        @queued_for_write = {}
+      end
+
+      def flush_deletes #:nodoc:
+        @queued_for_delete.each do |path|
+          begin
+            log("deleting #{path}")
+            bucket.objects.find(path).destroy
+          rescue ::S3::Error::ResponseError
+            # Ignore this.
+          end
+        end
+        @queued_for_delete = []
+      end
+
+      def find_credentials creds
+        case creds
+        when File
+          YAML::load(ERB.new(File.read(creds.path)).result)
+        when String
+          YAML::load(ERB.new(File.read(creds)).result)
+        when Hash
+          creds
+        else
+          raise ArgumentError, "Credentials are not a path, file, or hash."
+        end
+      end
+      private :find_credentials
+
+    end
+  end
+end
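Beyond the configuration shown in the file's header comment, the public helpers this module adds are easiest to see at a call site. @product is a hypothetical model with has_attached_file :image configured for :s3 storage:

    @product.image.exists?(:thumb)            # HEAD request via bucket.objects.build(...).exists?
    @product.image.expiring_url(:thumb, 600)  # temporary signed URL, valid for 600 seconds
    @product.image.to_file(:medium)           # Tempfile with the object's content, or nil if the key is missing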
data/lib/s3/bucket.rb
ADDED
@@ -0,0 +1,172 @@
+module S3
+  class Bucket
+    include Parser
+    include Proxies
+    extend Forwardable
+
+    attr_reader :name, :service
+
+    def_instance_delegators :service, :service_request
+    private_class_method :new
+
+    # Retrieves the bucket information from the server. Raises an
+    # S3::Error exception if the bucket doesn't exist or you don't
+    # have access to it, etc.
+    def retrieve
+      bucket_headers
+      self
+    end
+
+    # Returns location of the bucket, e.g. "EU"
+    def location(reload = false)
+      if reload or @location.nil?
+        @location = location_constraint
+      else
+        @location
+      end
+    end
+
+    # Compares the bucket with other bucket. Returns true if the names
+    # of the buckets are the same, and both have the same services
+    # (see Service equality)
+    def ==(other)
+      self.name == other.name and self.service == other.service
+    end
+
+    # Similar to retrieve, but catches S3::Error::NoSuchBucket
+    # exceptions and returns false instead.
+    def exists?
+      retrieve
+      true
+    rescue Error::NoSuchBucket
+      false
+    end
+
+    # Destroys given bucket. Raises an S3::Error::BucketNotEmpty
+    # exception if the bucket is not empty. You can destroy non-empty
+    # bucket passing true (to force destroy)
+    def destroy(force = false)
+      delete_bucket
+      true
+    rescue Error::BucketNotEmpty
+      if force
+        objects.destroy_all
+        retry
+      else
+        raise
+      end
+    end
+
+    # Saves the newly built bucket.
+    #
+    # ==== Options
+    # * <tt>:location</tt> - location of the bucket
+    #   (<tt>:eu</tt> or <tt>:us</tt>)
+    # * Any other options are passed through to
+    #   Connection#request
+    def save(options = {})
+      options = {:location => options} unless options.is_a?(Hash)
+      create_bucket_configuration(options)
+      true
+    end
+
+    # Returns true if the name of the bucket can be used like +VHOST+
+    # name. If the bucket contains characters like underscore it can't
+    # be used as +VHOST+ (e.g. <tt>bucket_name.s3.amazonaws.com</tt>)
+    def vhost?
+      self.class.vhost?(@name)
+    end
+
+    # Returns host name of the bucket (see #vhost? method)
+    def host
+      vhost? ? "#@name.#{HOST}" : "#{HOST}"
+    end
+
+    # Returns path prefix for non +VHOST+ bucket. Path prefix is used
+    # instead of +VHOST+ name, e.g. "bucket_name/"
+    def path_prefix
+      vhost? ? "" : "#@name/"
+    end
+
+    # Returns the objects in the bucket and caches the result (see
+    # #reload method).
+    def objects
+      Proxy.new(lambda { list_bucket }, :owner => self, :extend => ObjectsExtension)
+    end
+
+    def inspect #:nodoc:
+      "#<#{self.class}:#{name}>"
+    end
+
+    def self.vhost?(name)
+      "#{name}.#{HOST}" =~ /\A#{URI::REGEXP::PATTERN::HOSTNAME}\Z/
+    end
+
+    private
+
+    attr_writer :service
+
+    def location_constraint
+      response = bucket_request(:get, :params => {:location => nil})
+      parse_location_constraint(response.body)
+    end
+
+    def list_bucket(options = {})
+      response = bucket_request(:get, :params => options)
+      objects_attributes = parse_list_bucket_result(response.body)
+
+      # If there are more than 1000 objects S3 truncates listing
+      # and we need to request another listing for the remaining objects.
+      while parse_is_truncated(response.body)
+        marker = objects_attributes.last[:key]
+        response = bucket_request(:get, :params => options.merge(:marker => marker))
+        objects_attributes += parse_list_bucket_result(response.body)
+      end
+
+      objects_attributes.map { |object_attributes| Object.send(:new, self, object_attributes) }
+    end
+
+    def bucket_headers(options = {})
+      response = bucket_request(:head, :params => options)
+    rescue Error::ResponseError => e
+      if e.response.code.to_i == 404
+        raise Error::ResponseError.exception("NoSuchBucket").new("The specified bucket does not exist.", nil)
+      else
+        raise e
+      end
+    end
+
+    def create_bucket_configuration(options = {})
+      location = options[:location].to_s.upcase if options[:location]
+      options[:headers] ||= {}
+      if location and location != "US"
+        options[:body] = "<CreateBucketConfiguration><LocationConstraint>#{location}</LocationConstraint></CreateBucketConfiguration>"
+        options[:headers][:content_type] = "application/xml"
+      end
+      bucket_request(:put, options)
+    end
+
+    def delete_bucket
+      bucket_request(:delete)
+    end
+
+    def initialize(service, name) #:nodoc:
+      self.service = service
+      self.name = name
+    end
+
+    def name=(name)
+      raise ArgumentError.new("Invalid bucket name: #{name}") unless name_valid?(name)
+      @name = name
+    end
+
+    def bucket_request(method, options = {})
+      path = "#{path_prefix}#{options[:path]}"
+      service_request(method, options.merge(:host => host, :path => path))
+    end
+
+    def name_valid?(name)
+      name =~ /\A[a-z0-9][a-z0-9\._-]{2,254}\Z/i and name !~ /\A#{URI::REGEXP::PATTERN::IPV4ADDR}\Z/
+    end
+  end
+end
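A short sketch of the bucket lifecycle these methods imply; service stands for an S3::Service instance and the bucket name is a placeholder:

    bucket = service.buckets.build("example-bucket")  # new is private; built locally, no request
    bucket.save(:location => :eu)                     # PUT, adds CreateBucketConfiguration for non-US locations
    bucket.exists?                                    # retrieve, rescuing Error::NoSuchBucket
    bucket.objects                                    # lazy proxy over list_bucket, follows truncated listings
    bucket.destroy(true)                              # on BucketNotEmpty, destroys all objects and retries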
data/lib/s3/buckets_extension.rb
ADDED
@@ -0,0 +1,27 @@
+module S3
+  module BucketsExtension
+    # Builds new bucket with given name
+    def build(name)
+      Bucket.send(:new, proxy_owner, name)
+    end
+
+    # Finds the bucket with given name
+    def find_first(name)
+      bucket = build(name)
+      bucket.retrieve
+    end
+    alias :find :find_first
+
+    # Finds all buckets in the service
+    def find_all
+      proxy_target
+    end
+
+    # Destroys all buckets in the service. Doesn't destroy non-empty
+    # buckets by default, pass true to force destroy (USE WITH
+    # CARE!).
+    def destroy_all(force = false)
+      proxy_target.each { |bucket| bucket.destroy(force) }
+    end
+  end
+end
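Finally, the practical difference between the finders above, again assuming a service instance: build never touches the network, while find issues a request and raises for a missing bucket:

    service.buckets.build("new-bucket")  # local Bucket object only
    service.buckets.find("new-bucket")   # build + retrieve; raises S3::Error::NoSuchBucket if absent
    service.buckets.find_all             # every bucket in the service
    service.buckets.destroy_all(true)    # force-destroys all buckets (USE WITH CARE!)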