scashin133-s3 0.3.8 → 0.3.11

data/Gemfile CHANGED
@@ -1,4 +1,2 @@
- source :gemcutter
-
- # Specify your gem's dependencies in s3.gemspec
+ source "http://rubygems.org"
  gemspec
data/Gemfile.lock CHANGED
@@ -1,8 +1,8 @@
  PATH
  remote: .
  specs:
- scashin133-s3 (0.3.8)
- proxies
+ scashin133-s3 (0.3.11)
+ proxies (~> 0.2.0)

  GEM
  remote: http://rubygems.org/
@@ -19,6 +19,5 @@ PLATFORMS
  DEPENDENCIES
  bundler (>= 1.0.0)
  mocha
- proxies
  scashin133-s3!
  test-unit (>= 2.0)
data/README.rdoc CHANGED
@@ -1,21 +1,78 @@
  = S3

  S3 library provides access to {Amazon's Simple Storage Service}[http://aws.amazon.com/s3/].
- It supports both: European and US buckets through {REST API}[http://docs.amazonwebservices.com/AmazonS3/latest/RESTAPI.html].

- * homepage[http://jah.pl/projects/s3.html]
- * gemcutter[http://gemcutter.org/gems/s3]
- * repository[http://github.com/qoobaa/s3]
- * {issue tracker}[http://github.com/qoobaa/s3/issues]
- * rdoc[http://qoobaa.github.com/s3]
+ It supports both: European and US buckets through the {REST API}[http://docs.amazonwebservices.com/AmazonS3/latest/API/APIRest.html].

  == Installation

- gem install s3
+ gem install s3

  == Usage

- See homepage[http://jah.pl/projects/s3.html] for details.
+ === Initialize the service
+
+ require "s3"
+ service = S3::Service.new(:access_key_id => "...",
+ :secret_access_key => "...")
+ #=> #<S3::Service:...>
+
+ === List buckets
+
+ service.buckets
+ #=> [#<S3::Bucket:first-bucket>,
+ # #<S3::Bucket:second-bucket>]
+
+ === Find bucket
+
+ first_bucket = service.buckets.find("first-bucket")
+ #=> #<S3::Bucket:first-bucket>
+
+ === List objects in a bucket
+
+ first_bucket.objects
+ #=> [#<S3::Object:/first-bucket/lenna.png>,
+ # #<S3::Object:/first-bucket/lenna_mini.png>]
+
+ === Find object in a bucket
+
+ object = first_bucket.objects.find("lenna.png")
+ #=> #<S3::Object:/first-bucket/lenna.png>
+
+ === Access object metadata (cached from find)
+
+ object.content_type
+ #=> "image/png"
+
+ === Access object content (downloads the object)
+
+ object.content
+ #=> "\x89PNG\r\n\x1A\n\x00\x00\x00\rIHDR\x00..."
+
+ === Delete an object
+
+ object.destroy
+ #=> true
+
+ === Create an object
+
+ new_object = bucket.objects.build("bender.png")
+ #=> #<S3::Object:/synergy-staging/bender.png>
+
+ new_object.content = open("bender.png")
+
+ new_object.save
+ #=> true
+
+ Please note that new objects are created with "public-read" ACL by
+ default.
+
+ == See also
+
+ * rubygems[http://rubygems.org/gems/s3]
+ * repository[http://github.com/qoobaa/s3]
+ * {issue tracker}[http://github.com/qoobaa/s3/issues]
+ * documentation[http://rubydoc.info/github/qoobaa/s3/master/frames]

  == Copyright

data/lib/s3/bucket.rb CHANGED
@@ -19,11 +19,8 @@ module S3

  # Returns location of the bucket, e.g. "EU"
  def location(reload = false)
- if reload or @location.nil?
- @location = location_constraint
- else
- @location
- end
+ return @location if defined?(@location) and not reload
+ @location = location_constraint
  end

  # Compares the bucket with other bucket. Returns true if the names
@@ -88,12 +85,18 @@ module S3
  vhost? ? "" : "#@name/"
  end

- # Returns the objects in the bucket and caches the result (see
- # #reload method).
+ # Returns the objects in the bucket and caches the result
  def objects
  Proxy.new(lambda { list_bucket }, :owner => self, :extend => ObjectsExtension)
  end

+ # Returns the object with the given key. Does not check whether the
+ # object exists. But also does not issue any HTTP requests, so it's
+ # much faster than objects.find
+ def object(key)
+ Object.send(:new, self, :key => key)
+ end
+
  def inspect #:nodoc:
  "#<#{self.class}:#{name}>"
  end
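The new Bucket#object accessor builds a reference without any network round trip, while objects.find issues a HEAD request to verify that the key exists. A minimal usage sketch (credentials and names are placeholders):

    require "s3"

    service = S3::Service.new(:access_key_id => "...",
                              :secret_access_key => "...")
    bucket = service.buckets.find("first-bucket")

    # No HTTP request here; the key's existence is not checked.
    object = bucket.object("lenna.png")

    # The first request happens only when data is actually needed.
    object.content  # issues the GET that downloads the object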
@@ -113,13 +116,20 @@ module S3

  def list_bucket(options = {})
  response = bucket_request(:get, :params => options)
+ max_keys = options[:max_keys]
  objects_attributes = parse_list_bucket_result(response.body)

- # If there are more than 1000 objects S3 truncates listing
- # and we need to request another listing for the remaining objects.
+ # If there are more than 1000 objects S3 truncates listing and
+ # we need to request another listing for the remaining objects.
  while parse_is_truncated(response.body)
- marker = objects_attributes.last[:key]
- response = bucket_request(:get, :params => options.merge(:marker => marker))
+ next_request_options = {:marker => objects_attributes.last[:key]}
+
+ if max_keys
+ break if objects_attributes.length >= max_keys
+ next_request_options[:max_keys] = max_keys - objects_attributes.length
+ end
+
+ response = bucket_request(:get, :params => options.merge(next_request_options))
  objects_attributes += parse_list_bucket_result(response.body)
  end

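For reference, the loop above is standard S3 marker pagination with a new client-side cap; a condensed sketch of the pattern, where list_page is a hypothetical stand-in for one GET bucket request returning [attributes, truncated?]:

    def list_all(max_keys = nil)
      attributes, truncated = list_page({})                  # first page
      while truncated
        break if max_keys && attributes.length >= max_keys   # client-side cap
        options = {:marker => attributes.last[:key]}         # resume after last key
        options[:max_keys] = max_keys - attributes.length if max_keys
        page, truncated = list_page(options)
        attributes += page
      end
      attributes
    end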
data/lib/s3/buckets_extension.rb CHANGED
@@ -12,14 +12,13 @@ module S3
  end
  alias :find :find_first

- # Find all buckets in the service
+ # Finds all buckets in the service
  def find_all
  proxy_target
  end

- # Destroy all buckets in the service. Doesn't destroy non-empty
- # buckets by default, pass true to force destroy (USE WITH
- # CARE!).
+ # Destroys all buckets in the service. Doesn't destroy non-empty
+ # buckets by default, pass true to force destroy (USE WITH CARE!).
  def destroy_all(force = false)
  proxy_target.each { |bucket| bucket.destroy(force) }
  end
data/lib/s3/connection.rb CHANGED
@@ -61,12 +61,15 @@ module S3
  params = options.fetch(:params, {})
  headers = options.fetch(:headers, {})

+ # Must be done before adding params
+ # Encodes all characters except forward-slash (/) and explicitly legal URL characters
+ path = URI.escape(path, /[^#{URI::REGEXP::PATTERN::UNRESERVED}\/]/)
+
  if params
  params = params.is_a?(String) ? params : self.class.parse_params(params)
  path << "?#{params}"
  end

- path = URI.escape(path)
  request = Request.new(@chunk_size, method.to_s.upcase, !!body, method.to_s.upcase != "HEAD", path)

  headers = self.class.parse_headers(headers)
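Escaping now happens before the query string is appended, so the ?, = and & separators added for params are no longer percent-encoded away, and the stricter character class also encodes characters such as + that the permissive one-argument URI.escape treats as safe. A quick illustration (URI.escape exists on the Rubies this gem targets; it was later deprecated and removed in Ruby 3):

    require "uri"

    path = "my photos/cat+dog.png"

    # Stricter form used above: everything except unreserved characters and "/".
    URI.escape(path, /[^#{URI::REGEXP::PATTERN::UNRESERVED}\/]/)
    #=> "my%20photos/cat%2Bdog.png"

    # The old call treated "+" as safe and left it unescaped:
    URI.escape(path) #=> "my%20photos/cat+dog.png"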
data/lib/s3/object.rb CHANGED
@@ -6,7 +6,7 @@ module S3
  extend Forwardable

  attr_accessor :content_type, :content_disposition, :content_encoding, :cache_control
- attr_reader :last_modified, :etag, :size, :bucket, :key, :acl, :storage_class
+ attr_reader :last_modified, :etag, :size, :bucket, :key, :acl, :storage_class, :metadata
  attr_writer :content

  def_instance_delegators :bucket, :name, :service, :bucket_request, :vhost?, :host, :path_prefix
@@ -70,10 +70,11 @@ module S3
  false
  end

- # Download the content of the object, and caches it. Pass true
- # to clear the cache and download the object again.
+ # Downloads the content of the object, and caches it. Pass true to
+ # clear the cache and download the object again.
  def content(reload = false)
- get_object if reload or @content.nil?
+ return @content if defined?(@content) and not reload
+ get_object
  @content
  end

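location, content, and Service#connection (below) all switch to the same defined?-based memoization. Unlike an @ivar.nil? check, it caches even a nil result instead of retrying the request on every call, and it avoids uninitialized-instance-variable warnings under ruby -w. The pattern in isolation (expensive_call is a hypothetical placeholder):

    def value(reload = false)
      # defined?(@value) is true once @value has been assigned, even to nil,
      # so expensive_call runs at most once unless reload is passed.
      return @value if defined?(@value) and not reload
      @value = expensive_call
    end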
@@ -150,7 +151,7 @@ module S3
  headers[:content_disposition] = options[:content_disposition] if options[:content_disposition]
  headers[:cache_control] = options[:cache_control] if options[:cache_control]
  headers[:x_amz_copy_source] = full_key
- headers[:x_amz_metadata_directive] = "REPLACE"
+ headers[:x_amz_metadata_directive] = options[:replace] == false ? "COPY" : "REPLACE"
  headers[:x_amz_copy_source_if_match] = options[:if_match] if options[:if_match]
  headers[:x_amz_copy_source_if_none_match] = options[:if_none_match] if options[:if_none_match]
  headers[:x_amz_copy_source_if_unmodified_since] = options[:if_modified_since] if options[:if_modified_since]
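Object#copy now honors a :replace option: passing :replace => false sends x-amz-metadata-directive: COPY, so the new object keeps the source object's metadata; the default remains REPLACE. Usage, mirroring the new test added below:

    # Default behaviour, unchanged: directive REPLACE.
    copy = object.copy(:key => "Lena-copy.png")

    # New in 0.3.11: keep the source object's metadata.
    copy = object.copy(:key => "Lena-copy.png", :replace => false)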
@@ -234,6 +235,7 @@ module S3
  end

  def parse_headers(response)
+ @metadata = response.to_hash.select { |k, v| k.to_s.start_with?("x-amz-meta") }
  self.etag = response["etag"] if response.key?("etag")
  self.content_type = response["content-type"] if response.key?("content-type")
  self.content_disposition = response["content-disposition"] if response.key?("content-disposition")
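Combined with the new attr_reader above, any retrieved object now exposes its x-amz-meta-* response headers. Values are arrays, as Net::HTTP's to_hash returns them; a usage sketch based on the new "retrieve headers" test:

    object = bucket.objects.find("lenna.png") # HEAD request populates headers
    object.metadata
    #=> {"x-amz-meta-test" => ["metadata"]}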
data/lib/s3/parser.rb CHANGED
@@ -3,7 +3,7 @@ module S3
  include REXML

  def rexml_document(xml)
- xml.force_encoding(Encoding::UTF_8) if xml.respond_to? :force_encoding
+ xml.force_encoding(::Encoding::UTF_8) if xml.respond_to? :force_encoding
  Document.new(xml)
  end

@@ -44,7 +44,7 @@ module S3
  message = document.elements["Error/Message"].text
  [code, message]
  end
-
+
  def parse_is_truncated xml
  rexml_document(xml).elements["ListBucketResult/IsTruncated"].text =='true'
  end
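The :: prefix matters because this module mixes in REXML, whose REXML::Encoding module shadows Ruby's top-level Encoding class during constant lookup. A minimal reproduction of the bug being fixed:

    require "rexml/document"

    class Parser
      include REXML

      def utf8
        # Bare Encoding resolves to REXML::Encoding here, because included
        # modules are searched before the top level; ::Encoding forces the
        # lookup to start at Object and reach Ruby's Encoding class.
        ::Encoding::UTF_8
      end
    end

    Parser.new.utf8 #=> #<Encoding:UTF-8>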
data/lib/s3/service.rb CHANGED
@@ -39,6 +39,13 @@ module S3
  Proxy.new(lambda { list_all_my_buckets }, :owner => self, :extend => BucketsExtension)
  end

+ # Returns the bucket with the given name. Does not check whether the
+ # bucket exists. But also does not issue any HTTP requests, so it's
+ # much faster than buckets.find
+ def bucket(name)
+ Bucket.send(:new, self, name)
+ end
+
  # Returns "http://" or "https://", depends on <tt>:use_ssl</tt>
  # value from initializer
  def protocol
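Service#bucket is the counterpart to Bucket#object above: it skips the GET Service request that buckets.find performs, so a known bucket and key can be addressed with no preliminary HTTP traffic. A sketch (names are placeholders):

    # No requests yet: both calls only build in-memory references.
    object = service.bucket("first-bucket").object("lenna.png")

    # The single round trip happens here.
    object.content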
@@ -68,15 +75,13 @@ module S3
  end

  def connection
- if @connection.nil?
- @connection = Connection.new(:access_key_id => @access_key_id,
- :secret_access_key => @secret_access_key,
- :use_ssl => @use_ssl,
- :timeout => @timeout,
- :debug => @debug,
- :proxy => @proxy)
- end
- @connection
+ return @connection if defined?(@connection)
+ @connection = Connection.new(:access_key_id => @access_key_id,
+ :secret_access_key => @secret_access_key,
+ :use_ssl => @use_ssl,
+ :timeout => @timeout,
+ :debug => @debug,
+ :proxy => @proxy)
  end
  end
  end
data/lib/s3/signature.rb CHANGED
@@ -52,9 +52,9 @@ module S3
  expires = options[:expires_at]

  headers = options[:headers] || {}
- headers.merge!('date' => expires.to_i.to_s)
+ headers.merge!("date" => expires.to_i.to_s)

- options.merge!(:resource => "/#{bucket}/#{resource}",
+ options.merge!(:resource => "/#{bucket}/#{URI.escape(resource)}",
  :method => options[:method] || :get,
  :headers => headers)
  signature = canonicalized_signature(options)
@@ -119,7 +119,7 @@ module S3
  string_to_sign << canonicalized_amz_headers
  string_to_sign << canonicalized_resource

- digest = OpenSSL::Digest::Digest.new('sha1')
+ digest = OpenSSL::Digest::Digest.new("sha1")
  hmac = OpenSSL::HMAC.digest(digest, secret_access_key, string_to_sign)
  base64 = Base64.encode64(hmac)
  base64.chomp
@@ -139,7 +139,7 @@ module S3
  headers = []

  # 1. Convert each HTTP header name to lower-case. For example,
- # 'X-Amz-Date' becomes 'x-amz-date'.
+ # "X-Amz-Date" becomes "x-amz-date".
  request.each { |key, value| headers << [key.downcase, value] if key =~ /\Ax-amz-/io }
  #=> [["c", 0], ["a", 1], ["a", 2], ["b", 3]]

@@ -152,9 +152,9 @@ module S3
  # "header-name:comma-separated-value-list" pair as prescribed by
  # RFC 2616, section 4.2, without any white-space between
  # values. For example, the two metadata headers
- # 'x-amz-meta-username: fred' and 'x-amz-meta-username: barney'
- # would be combined into the single header 'x-amz-meta-username:
- # fred,barney'.
+ # "x-amz-meta-username: fred" and "x-amz-meta-username: barney"
+ # would be combined into the single header "x-amz-meta-username:
+ # fred,barney".
  combined_headers = headers.inject([]) do |new_headers, header|
  existing_header = new_headers.find { |h| h.first == header.first }
  if existing_header
@@ -176,8 +176,8 @@ module S3
  end

  # 5. Trim any white-space around the colon in the header. For
- # example, the header 'x-amz-meta-username: fred,barney' would
- # become 'x-amz-meta-username:fred,barney'
+ # example, the header "x-amz-meta-username: fred,barney" would
+ # become "x-amz-meta-username:fred,barney"
  joined_headers = unfolded_headers.map do |header|
  key = header.first.strip
  value = header.last.strip
@@ -222,7 +222,23 @@ module S3
  # 4. If the request addresses a sub-resource, like ?location,
  # ?acl, or ?torrent, append the sub-resource including question
  # mark.
- string << "?#{$1}" if uri.query =~ /&?(acl|torrent|logging|location)(?:&|=|\Z)/
+ sub_resources = [
+ "acl",
+ "location",
+ "logging",
+ "notification",
+ "partNumber",
+ "policy",
+ "requestPayment",
+ "torrent",
+ "uploadId",
+ "uploads",
+ "versionId",
+ "versioning",
+ "versions",
+ "website"
+ ]
+ string << "?#{$1}" if uri.query =~ /&?(#{sub_resources.join("|")})(?:&|=|\Z)/
  string
  end
  end
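Previously only acl, torrent, logging, and location survived into the canonicalized resource, so signatures for other sub-resource requests (multipart uploads, versioned reads, and so on) did not match what S3 computed. A quick check of the new regexp against sample query strings:

    sub_resources = %w(acl location logging notification partNumber policy
                       requestPayment torrent uploadId uploads versionId
                       versioning versions website)
    pattern = /&?(#{sub_resources.join("|")})(?:&|=|\Z)/

    "uploads" =~ pattern                # matches, $1 == "uploads"
    "versionId=xyz" =~ pattern          # matches, $1 == "versionId"
    "prefix=a&max-keys=10" =~ pattern   #=> nil, no sub-resource to append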
data/lib/s3/version.rb CHANGED
@@ -1,3 +1,3 @@
  module S3
- VERSION = "0.3.8"
+ VERSION = "0.3.11"
  end
data/s3.gemspec CHANGED
@@ -1,6 +1,6 @@
  # -*- encoding: utf-8 -*-

- # Load version requiring the canonical "s3/version", otherwise Ruby will think
+ # Load version requiring the canonical "s3/version", otherwise Ruby will think
  # is a different file and complaint about a double declaration of S3::VERSION.
  $LOAD_PATH.unshift File.expand_path("../lib", __FILE__)
  require "s3/version"
@@ -18,7 +18,7 @@ Gem::Specification.new do |s|
  s.required_rubygems_version = ">= 1.3.6"
  s.rubyforge_project = "scashin133-s3"

- s.add_dependency "proxies"
+ s.add_dependency "proxies", "~> 0.2.0"
  s.add_development_dependency "test-unit", ">= 2.0"
  s.add_development_dependency "mocha"
  s.add_development_dependency "bundler", ">= 1.0.0"
data/test/object_test.rb CHANGED
@@ -22,6 +22,7 @@ class ObjectTest < Test::Unit::TestCase
  @response_binary["content-encoding"] = nil
  @response_binary["last-modified"] = Time.now.httpdate
  @response_binary["content-length"] = 20
+ @response_binary["x-amz-meta-test"] = "metadata"

  @xml_body = <<-EOXML
  <?xml version="1.0" encoding="UTF-8"?>
@@ -132,6 +133,14 @@ class ObjectTest < Test::Unit::TestCase
  assert @object_lena.retrieve
  end

+ test "retrieve headers" do
+ @object_lena.expects(:object_request).twice.with(:head, {}).returns(@response_binary)
+ assert @object_lena.retrieve
+
+ meta = {"x-amz-meta-test" => ["metadata"]}
+ assert_equal meta, @object_lena.retrieve.metadata
+ end
+
  test "exists" do
  @object_lena.expects(:retrieve).returns(true)
  assert @object_lena.exists?
@@ -157,18 +166,18 @@ class ObjectTest < Test::Unit::TestCase
  actual = @object_lena.acl
  assert_equal expected, actual
  end
-
+
  test "storage-class writer" do
  expected = nil
  actual = @object_lena.storage_class
  assert_equal expected, actual
-
+
  assert @object_lena.storage_class = :standard
-
+
  expected = "STANDARD"
  actual = @object_lena.storage_class
  assert_equal expected, actual
-
+
  assert @object_lena.storage_class = :reduced_redundancy

  expected = "REDUCED_REDUNDANCY"
@@ -176,7 +185,7 @@ class ObjectTest < Test::Unit::TestCase
  assert_equal expected, actual
  end

- test "copy" do
+ test "replace" do
  @bucket_images.expects(:bucket_request).with(:put, :path => "Lena-copy.png", :headers => { :x_amz_acl => "public-read", :content_type => "application/octet-stream", :x_amz_copy_source => "images/Lena.png", :x_amz_metadata_directive => "REPLACE" }).returns(@response_xml)

  new_object = @object_lena.copy(:key => "Lena-copy.png")
@@ -184,4 +193,13 @@ class ObjectTest < Test::Unit::TestCase
  assert_equal "Lena-copy.png", new_object.key
  assert_equal "Lena.png", @object_lena.key
  end
+
+ test "copy" do
+ @bucket_images.expects(:bucket_request).with(:put, :path => "Lena-copy.png", :headers => { :x_amz_acl => "public-read", :content_type => "application/octet-stream", :x_amz_copy_source => "images/Lena.png", :x_amz_metadata_directive => "COPY" }).returns(@response_xml)
+
+ new_object = @object_lena.copy(:key => "Lena-copy.png", :replace => false)
+
+ assert_equal "Lena-copy.png", new_object.key
+ assert_equal "Lena.png", @object_lena.key
+ end
  end
metadata CHANGED
@@ -1,13 +1,13 @@
  --- !ruby/object:Gem::Specification
  name: scashin133-s3
  version: !ruby/object:Gem::Version
- hash: 3
- prerelease: false
+ hash: 5
+ prerelease:
  segments:
  - 0
  - 3
- - 8
- version: 0.3.8
+ - 11
+ version: 0.3.11
  platform: ruby
  authors:
  - "Jakub Ku\xC5\xBAma"
@@ -16,22 +16,23 @@ autorequire:
  bindir: bin
  cert_chain: []

- date: 2010-10-23 00:00:00 -07:00
- default_executable:
+ date: 2012-01-30 00:00:00 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  version_requirements: &id001 !ruby/object:Gem::Requirement
  none: false
  requirements:
- - - ">="
+ - - ~>
  - !ruby/object:Gem::Version
- hash: 3
+ hash: 23
  segments:
  - 0
- version: "0"
+ - 2
+ - 0
+ version: 0.2.0
  requirement: *id001
- name: proxies
  prerelease: false
+ name: proxies
  type: :runtime
  - !ruby/object:Gem::Dependency
  version_requirements: &id002 !ruby/object:Gem::Requirement
@@ -45,8 +46,8 @@ dependencies:
  - 0
  version: "2.0"
  requirement: *id002
- name: test-unit
  prerelease: false
+ name: test-unit
  type: :development
  - !ruby/object:Gem::Dependency
  version_requirements: &id003 !ruby/object:Gem::Requirement
@@ -59,8 +60,8 @@ dependencies:
  - 0
  version: "0"
  requirement: *id003
- name: mocha
  prerelease: false
+ name: mocha
  type: :development
  - !ruby/object:Gem::Dependency
  version_requirements: &id004 !ruby/object:Gem::Requirement
@@ -75,8 +76,8 @@ dependencies:
  - 0
  version: 1.0.0
  requirement: *id004
- name: bundler
  prerelease: false
+ name: bundler
  type: :development
  description: "S3 library provides access to Amazon's Simple Storage Service. It supports both: European and US buckets through REST API."
  email:
@@ -95,8 +96,6 @@ files:
  - LICENSE
  - README.rdoc
  - Rakefile
- - extra/s3_attachment_fu.rb
- - extra/s3_paperclip.rb
  - lib/s3.rb
  - lib/s3/bucket.rb
  - lib/s3/buckets_extension.rb
@@ -116,7 +115,6 @@ files:
  - test/service_test.rb
  - test/signature_test.rb
  - test/test_helper.rb
- has_rdoc: true
  homepage: http://jah.pl/projects/s3.html
  licenses: []

@@ -148,7 +146,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  requirements: []

  rubyforge_project: scashin133-s3
- rubygems_version: 1.3.7
+ rubygems_version: 1.8.10
  signing_key:
  specification_version: 3
  summary: Library for accessing S3 objects and buckets
data/extra/s3_attachment_fu.rb DELETED
@@ -1,159 +0,0 @@
- require "singleton"
- require "s3"
-
- # S3 Backend for attachment-fu plugin. After installing attachment-fu
- # plugin, copy the file to:
- # +vendor/plugins/attachment-fu/lib/technoweenie/attachment_fu/backends+
- #
- # To configure S3Backend create initializer file in your Rails
- # application, e.g. +config/initializers/s3_backend.rb+.
- #
- # Technoweenie::AttachmentFu::Backends::S3Backend.configuration do |config|
- # config.access_key_id = "..." # your access key id
- # config.secret_access_key = "..." # your secret access key
- # config.bucket_name = "..." # default bucket name to store attachments
- # config.use_ssl = false # pass true if you want to communicate via SSL
- # end
-
- module Technoweenie
- module AttachmentFu
- module Backends
- module S3Backend
-
- # S3Backend configuration class
- class Configuration
- include Singleton
-
- ATTRIBUTES = [:access_key_id, :secret_access_key, :use_ssl, :bucket_name]
-
- attr_accessor *ATTRIBUTES
- end
-
- # Method used to configure S3Backend, see the example above
- def self.configuration
- if block_given?
- yield Configuration.instance
- end
- Configuration.instance
- end
-
- # :nodoc:
- def self.included(base)
- include S3
-
- service = Service.new(:access_key_id => configuration.access_key_id,
- :secret_access_key => configuration.secret_access_key,
- :use_ssl => configuration.use_ssl)
-
- bucket_name = base.attachment_options[:bucket_name] || configuration.bucket_name
-
- base.cattr_accessor :bucket
- base.bucket = service.buckets.build(bucket_name) # don't connect
-
- base.before_update :rename_file
- end
-
- # The attachment ID used in the full path of a file
- def attachment_path_id
- ((respond_to?(:parent_id) && parent_id) || id).to_s
- end
-
- # The pseudo hierarchy containing the file relative to the bucket name
- # Example: <tt>:table_name/:id</tt>
- def base_path
- [attachment_options[:path_prefix], attachment_path_id].join("/")
- end
-
- # The full path to the file relative to the bucket name
- # Example: <tt>:table_name/:id/:filename</tt>
- def full_filename(thumbnail = nil)
- [base_path, thumbnail_name_for(thumbnail)].join("/")
- end
-
- # All public objects are accessible via a GET request to the S3 servers. You can generate a
- # url for an object using the s3_url method.
- #
- # @photo.s3_url
- #
- # The resulting url is in the form: <tt>http(s)://:server/:bucket_name/:table_name/:id/:file</tt> where
- # the <tt>:server</tt> variable defaults to <tt>AWS::S3 URL::DEFAULT_HOST</tt> (s3.amazonaws.com) and can be
- # set using the configuration parameters in <tt>RAILS_ROOT/config/amazon_s3.yml</tt>.
- #
- # The optional thumbnail argument will output the thumbnail's filename (if any).
- def s3_url(thumbnail = nil)
- if attachment_options[:cname]
- ["#{s3_protocol}#{bucket.name}", full_filename(thumbnail)].join("/")
- else
- ["#{s3_protocol}#{s3_hostname}#{bucket.path_prefix}", full_filename(thumbnail)].join("/")
- end
- end
- alias :public_url :s3_url
- alias :public_filename :s3_url
-
- # Name of the bucket used to store attachments
- def bucket_name
- self.class.bucket.name
- end
-
- # :nodoc:
- def create_temp_file
- write_to_temp_file current_data
- end
-
- # :nodoc:
- def current_data
- # Object.value full_filename, bucket_name
- object = self.class.bucket.objects.find(full_filename)
- object.content
- end
-
- # Returns http:// or https:// depending on use_ssl setting
- def s3_protocol
- attachment_options[:use_ssl] ? "https://" : "http://"
- end
-
- # Returns hostname of the bucket
- # e.g. +bucketname.com.s3.amazonaws.com+. Additionally you can
- # pass :cname => true option in has_attachment method to
- # return CNAME only, e.g. +bucketname.com+
- def s3_hostname
- attachment_options[:cname] ? self.class.bucket.name : self.class.bucket.host
- end
-
- protected
-
- # Frees the space in S3 bucket, used by after_destroy callback
- def destroy_file
- object = self.class.bucket.objects.find(full_filename)
- object.destroy
- end
-
- # Renames file if filename has been changed - copy the file to
- # new key and delete old one
- def rename_file
- return unless filename_changed?
-
- old_full_filename = [base_path, filename_was].join("/")
-
- object = self.class.bucket.objects.find(old_full_filename)
- new_object = object.copy(:key => full_filename, :acl => attachment_options[:acl])
- object.destroy
- true
- end
-
- # Saves the file to storage
- def save_to_storage
- if save_attachment?
- object = self.class.bucket.objects.build(full_filename)
-
- object.content_type = content_type
- object.acl = attachment_options[:acl]
- object.content = temp_path ? File.open(temp_path) : temp_data
- object.save
- end
- true
- end
- end
- end
- end
- end
data/extra/s3_paperclip.rb DELETED
@@ -1,157 +0,0 @@
- # S3 backend for paperclip plugin. Copy the file to:
- # +config/initializers/+ directory
- #
- # Example configuration for CNAME bucket:
- #
- # has_attached_file :image,
- # :s3_host_alias => "bucket.domain.tld",
- # :url => ":s3_alias_url",
- # :styles => {
- # :medium => "300x300>",
- # :thumb => "100x100>"
- # },
- # :storage => :s3,
- # :s3_credentials => {
- # :access_key_id => "...",
- # :secret_access_key => "..."
- # },
- # :bucket => "bucket.domain.tld",
- # :path => ":attachment/:id/:style.:extension"
- module Paperclip
- module Storage
- module S3
- def self.extended base
- begin
- require "s3"
- rescue LoadError => e
- e.message << " (You may need to install the s3 gem)"
- raise e
- end
-
- base.instance_eval do
- @s3_credentials = parse_credentials(@options[:s3_credentials])
- @bucket_name = @options[:bucket] || @s3_credentials[:bucket]
- @bucket_name = @bucket_name.call(self) if @bucket_name.is_a?(Proc)
- @s3_options = @options[:s3_options] || {}
- @s3_permissions = @options[:s3_permissions] || :public_read
- @s3_storage_class = @options[:s3_storage_class] || :standard
- @s3_protocol = @options[:s3_protocol] || (@s3_permissions == :public_read ? "http" : "https")
- @s3_headers = @options[:s3_headers] || {}
- @s3_host_alias = @options[:s3_host_alias]
- @url = ":s3_path_url" unless @url.to_s.match(/^:s3.*url$/)
- @service = ::S3::Service.new(@s3_options.merge(
- :access_key_id => @s3_credentials[:access_key_id],
- :secret_access_key => @s3_credentials[:secret_access_key],
- :use_ssl => @s3_protocol == "https"
- ))
- @bucket = @service.buckets.build(@bucket_name)
- end
- Paperclip.interpolates(:s3_alias_url) do |attachment, style|
- "#{attachment.s3_protocol}://#{attachment.s3_host_alias}/#{attachment.path(style).gsub(%r{^/}, "")}"
- end
- Paperclip.interpolates(:s3_path_url) do |attachment, style|
- "#{attachment.s3_protocol}://s3.amazonaws.com/#{attachment.bucket_name}/#{attachment.path(style).gsub(%r{^/}, "")}"
- end
- Paperclip.interpolates(:s3_domain_url) do |attachment, style|
- "#{attachment.s3_protocol}://#{attachment.bucket_name}.s3.amazonaws.com/#{attachment.path(style).gsub(%r{^/}, "")}"
- end
- end
-
- def expiring_url(style_name = default_style, time = 3600)
- bucket.objects.build(path(style_name)).temporary_url(Time.now + time)
- end
-
- def bucket_name
- @bucket_name
- end
-
- def bucket
- @bucket
- end
-
- def s3_host_alias
- @s3_host_alias
- end
-
- def parse_credentials creds
- creds = find_credentials(creds).stringify_keys
- (creds[RAILS_ENV] || creds).symbolize_keys
- end
-
- def exists?(style = default_style)
- if original_filename
- bucket.objects.build(path(style)).exists?
- else
- false
- end
- end
-
- def s3_protocol
- @s3_protocol
- end
-
- # Returns representation of the data of the file assigned to the given
- # style, in the format most representative of the current storage.
- def to_file style = default_style
- return @queued_for_write[style] if @queued_for_write[style]
- begin
- file = Tempfile.new(path(style))
- file.binmode if file.respond_to?(:binmode)
- file.write(bucket.objects.find(path(style)).content)
- file.rewind
- rescue ::S3::Error::NoSuchKey
- file.close if file.respond_to?(:close)
- file = nil
- end
- file
- end
-
- def flush_writes #:nodoc:
- @queued_for_write.each do |style, file|
- begin
- log("saving #{path(style)}")
- object = bucket.objects.build(path(style))
- file.rewind
- object.content = file.read
- object.acl = @s3_permissions
- object.storage_class = @s3_storage_class
- object.content_type = instance_read(:content_type)
- object.content_disposition = @s3_headers[:content_disposition]
- object.content_encoding = @s3_headers[:content_encoding]
- object.save
- rescue ::S3::Error::ResponseError => e
- raise
- end
- end
- @queued_for_write = {}
- end
-
- def flush_deletes #:nodoc:
- @queued_for_delete.each do |path|
- begin
- log("deleting #{path}")
- bucket.objects.find(path).destroy
- rescue ::S3::Error::ResponseError
- # Ignore this.
- end
- end
- @queued_for_delete = []
- end
-
- def find_credentials creds
- case creds
- when File
- YAML::load(ERB.new(File.read(creds.path)).result)
- when String
- YAML::load(ERB.new(File.read(creds)).result)
- when Hash
- creds
- else
- raise ArgumentError, "Credentials are not a path, file, or hash."
- end
- end
- private :find_credentials
-
- end
- end
- end