aws-s3 0.4.0 → 0.5.0

Sign up to get free protection for your applications and to get access to all the features.
data/COPYING CHANGED
@@ -1,5 +1,5 @@
1
1
  #
2
- # Copyright (c) 2006 Marcel Molina Jr. <marcel@vernix.org>
2
+ # Copyright (c) 2006-2008 Marcel Molina Jr. <marcel@vernix.org>
3
3
  #
4
4
  # Permission is hereby granted, free of charge, to any person obtaining a copy of
5
5
  # this software and associated documentation files (the "Software"), to deal in the
@@ -16,4 +16,4 @@
16
16
  # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
17
17
  # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
18
18
  # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
19
- # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19
+ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README CHANGED
@@ -128,7 +128,7 @@ You can store an object on S3 by specifying a key, its data and the name of the
128
128
  S3Object.store('me.jpg', open('headshot.jpg'), 'photos')
129
129
 
130
130
  The content type of the object will be inferred by its extension. If the appropriate content type can not be inferred, S3 defaults
131
- to <tt>binary/octect-stream</tt>.
131
+ to <tt>binary/octet-stream</tt>.
132
132
 
133
133
  If you want to override this, you can explicitly indicate what content type the object should have with the <tt>:content_type</tt> option:
134
134
 
@@ -202,7 +202,7 @@ A bevy of information about an object can be had using the <tt>about</tt> metho
202
202
 
203
203
  pp song.about
204
204
  {"last-modified" => "Sat, 28 Oct 2006 21:29:26 GMT",
205
- "content-type" => "binary/octect-stream",
205
+ "content-type" => "binary/octet-stream",
206
206
  "etag" => "\"dc629038ffc674bee6f62eb64ff3a\"",
207
207
  "date" => "Sat, 28 Oct 2006 21:30:41 GMT",
208
208
  "x-amz-request-id" => "B7BC68F55495B1C8",
data/Rakefile CHANGED
@@ -111,22 +111,23 @@ namespace :dist do
111
111
 
112
112
  desc 'Tag release'
113
113
  task :tag do
114
- svn_root = 'svn+ssh://marcel@rubyforge.org/var/svn/amazon/s3'
115
- sh %(svn cp #{svn_root}/trunk #{svn_root}/tags/rel-#{spec.version} -m "Tag #{spec.name} release #{spec.version}")
114
+ sh %(git tag -a '#{spec.version}-release' -m 'Tagging #{spec.version} release')
115
+ sh 'git push --tags'
116
116
  end
117
117
 
118
118
  desc 'Update changelog to include a release marker'
119
119
  task :add_release_marker_to_changelog do
120
120
  changelog = IO.read('CHANGELOG')
121
- changelog.sub!(/^trunk:/, "#{spec.version}:")
121
+ changelog.sub!(/^head:/, "#{spec.version}:")
122
122
 
123
123
  open('CHANGELOG', 'w') do |file|
124
- file.write "trunk:\n\n#{changelog}"
124
+ file.write "head:\n\n#{changelog}"
125
125
  end
126
126
  end
127
127
 
128
128
  task :commit_changelog do
129
- sh %(svn ci CHANGELOG -m "Bump changelog version marker for release")
129
+ sh %(git commit CHANGELOG -m "Bump changelog version marker for release")
130
+ sh 'git push'
130
131
  end
131
132
 
132
133
  package_name = lambda {|specification| File.join('pkg', "#{specification.name}-#{specification.version}")}
@@ -136,20 +137,25 @@ namespace :dist do
136
137
  require 'rubyforge'
137
138
  package = package_name[spec]
138
139
 
139
- rubyforge = RubyForge.new
140
+ rubyforge = RubyForge.new.configure
140
141
  rubyforge.login
141
-
142
+
143
+ user_config = rubyforge.userconfig
144
+ user_config['release_changes'] = YAML.load_file('CHANGELOG')[spec.version.to_s].join("\n")
145
+
142
146
  version_already_released = lambda do
143
- releases = rubyforge.userconfig['rubyforge']['release_ids']
144
- releases.has_key?(spec.name) && releases[spec.name][spec.version]
147
+ releases = rubyforge.autoconfig['release_ids']
148
+ releases.has_key?(spec.name) && releases[spec.name][spec.version.to_s]
145
149
  end
146
150
 
147
151
  abort("Release #{spec.version} already exists!") if version_already_released.call
148
152
 
149
- if release_id = rubyforge.add_release(spec.rubyforge_project, spec.name, spec.version, "#{package}.tar.gz")
150
- rubyforge.add_file(spec.rubyforge_project, spec.name, release_id, "#{package}.gem")
151
- else
153
+ begin
154
+ rubyforge.add_release(spec.rubyforge_project, spec.name, spec.version, "#{package}.tar.gz", "#{package}.gem")
155
+ puts "Version #{spec.version} released!"
156
+ rescue Exception => exception
152
157
  puts 'Release failed!'
158
+ raise
153
159
  end
154
160
  end
155
161
 
@@ -50,7 +50,7 @@ module AWS
50
50
  # parameterize these computations and arrange them in a string form appropriate to how they are used, in one case a http request
51
51
  # header value, and in the other case key/value query string parameter pairs.
52
52
  class Signature < String #:nodoc:
53
- attr_reader :request, :access_key_id, :secret_access_key
53
+ attr_reader :request, :access_key_id, :secret_access_key, :options
54
54
 
55
55
  def initialize(request, access_key_id, secret_access_key, options = {})
56
56
  super()
@@ -99,10 +99,9 @@ module AWS
99
99
  # More details about the various authentication schemes can be found in the docs for its containing module, Authentication.
100
100
  class QueryString < Signature #:nodoc:
101
101
  constant :DEFAULT_EXPIRY, 300 # 5 minutes
102
-
103
102
  def initialize(*args)
104
103
  super
105
- @options[:url_encode] = true
104
+ options[:url_encode] = true
106
105
  self << build
107
106
  end
108
107
 
@@ -115,8 +114,12 @@ module AWS
115
114
  # the +:expires_in+ option
116
115
  # 3) The current time in seconds since the epoch plus the default number of seconds (60 seconds)
117
116
  def expires
118
- return @options[:expires] if @options[:expires]
119
- date.to_i + (@options[:expires_in] || DEFAULT_EXPIRY)
117
+ return options[:expires] if options[:expires]
118
+ date.to_i + expires_in
119
+ end
120
+
121
+ def expires_in
122
+ options.has_key?(:expires_in) ? Integer(options[:expires_in]) : DEFAULT_EXPIRY
120
123
  end
121
124
 
122
125
  # Keep in alphabetical order
@@ -225,7 +225,11 @@ module AWS #:nodoc:
225
225
  end
226
226
 
227
227
  def method_missing(method, *args, &block)
228
- attributes[method.to_s] || attributes[method] || super
228
+ case
229
+ when attributes.has_key?(method.to_s): attributes[method.to_s]
230
+ when attributes.has_key?(method): attributes[method]
231
+ else super
232
+ end
229
233
  end
230
234
  end
231
235
  end
@@ -157,11 +157,10 @@ module AWS
157
157
  #
158
158
  # Only the owner of a bucket can delete a bucket, regardless of the bucket's access control policy.
159
159
  def delete(name = nil, options = {})
160
- name = path(name)
161
160
  find(name).delete_all if options[:force]
162
- # A bit confusing. Calling super actually makes makes an HTTP DELETE request. The delete method is
163
- # defined in the Base class. It happens to have the same name.
164
- super(name).success?
161
+
162
+ name = path(name)
163
+ Base.delete(name).success?
165
164
  end
166
165
 
167
166
  # List all your buckets. This is a convenient wrapper around AWS::S3::Service.buckets.
@@ -27,18 +27,20 @@ module AWS
27
27
  body.rewind if body.respond_to?(:rewind) unless attempts.zero?
28
28
 
29
29
  requester = Proc.new do
30
- path = self.class.prepare_path(path)
30
+ path = self.class.prepare_path(path) if attempts.zero? # Only escape the path once
31
31
  request = request_method(verb).new(path, headers)
32
32
  ensure_content_type!(request)
33
33
  add_user_agent!(request)
34
34
  authenticate!(request)
35
35
  if body
36
36
  if body.respond_to?(:read)
37
- request.body_stream = body
38
- request.content_length = body.respond_to?(:lstat) ? body.lstat.size : body.size
37
+ request.body_stream = body
39
38
  else
40
39
  request.body = body
41
- end
40
+ end
41
+ request.content_length = body.respond_to?(:lstat) ? body.stat.size : body.size
42
+ else
43
+ request.content_length = 0
42
44
  end
43
45
  http.request(request, &block)
44
46
  end
@@ -49,7 +51,7 @@ module AWS
49
51
  else
50
52
  http.start(&requester)
51
53
  end
52
- rescue Errno::EPIPE, Timeout::Error, Errno::EPIPE, Errno::EINVAL
54
+ rescue Errno::EPIPE, Timeout::Error, Errno::EINVAL, EOFError
53
55
  @http = create_connection
54
56
  attempts == 3 ? raise : (attempts += 1; retry)
55
57
  end
@@ -75,7 +77,14 @@ module AWS
75
77
  end
76
78
 
77
79
  def protocol(options = {})
78
- (options[:use_ssl] || http.use_ssl?) ? 'https://' : 'http://'
80
+ # This always trumps http.use_ssl?
81
+ if options[:use_ssl] == false
82
+ 'http://'
83
+ elsif options[:use_ssl] || http.use_ssl?
84
+ 'https://'
85
+ else
86
+ 'http://'
87
+ end
79
88
  end
80
89
 
81
90
  private
@@ -178,7 +187,7 @@ module AWS
178
187
  # will be implicitly set to 443, unless specified otherwise. Defaults to false.
179
188
  # * <tt>:persistent</tt> - Whether to use a persistent connection to the server. Having this on provides around a two fold
180
189
  # performance increase but for long running processes some firewalls may find the long lived connection suspicious and close the connection.
181
- # If you run into connection errors, try setting <tt>:persistent</tt> to false. Defaults to true.
190
+ # If you run into connection errors, try setting <tt>:persistent</tt> to false. Defaults to false.
182
191
  # * <tt>:proxy</tt> - If you need to connect through a proxy, you can specify your proxy settings by specifying a <tt>:host</tt>, <tt>:port</tt>, <tt>:user</tt>, and <tt>:password</tt>
183
192
  # with the <tt>:proxy</tt> option.
184
193
  # The <tt>:host</tt> setting is required if specifying a <tt>:proxy</tt>.
@@ -278,7 +287,7 @@ module AWS
278
287
  end
279
288
 
280
289
  def extract_persistent!
281
- self[:persistent] = options.has_key?(:persitent) ? options[:persitent] : true
290
+ self[:persistent] = options.has_key?(:persitent) ? options[:persitent] : false
282
291
  end
283
292
 
284
293
  def extract_proxy_settings!
@@ -311,4 +320,4 @@ module AWS
311
320
  end
312
321
  end
313
322
  end
314
- end
323
+ end
@@ -42,9 +42,10 @@ class String
42
42
  # ActiveSupport adds an underscore method to String so let's just use that one if
43
43
  # we find that the method is already defined
44
44
  def underscore
45
- gsub(/([A-Z]+)([A-Z][a-z])/,'\1_\2').
46
- gsub(/([a-z\d])([A-Z])/,'\1_\2').
47
- downcase
45
+ gsub(/::/, '/').
46
+ gsub(/([A-Z]+)([A-Z][a-z])/,'\1_\2').
47
+ gsub(/([a-z\d])([A-Z])/,'\1_\2').
48
+ tr("-", "_").downcase
48
49
  end unless public_method_defined? :underscore
49
50
 
50
51
  def utf8?
@@ -102,7 +103,7 @@ end
102
103
  module Kernel
103
104
  def __method__(depth = 0)
104
105
  caller[depth][/`([^']+)'/, 1]
105
- end if RUBY_VERSION < '1.9'
106
+ end if RUBY_VERSION < '1.8.7'
106
107
 
107
108
  def memoize(reload = false, storage = nil)
108
109
  storage = "@#{storage || __method__(1)}"
@@ -102,8 +102,12 @@ module AWS
102
102
  end
103
103
  memoized :lines
104
104
 
105
+ def path
106
+ log.path
107
+ end
108
+
105
109
  def inspect #:nodoc:
106
- "#<%s:0x%s '%s'>" % [self.class.name, object_id, log.path]
110
+ "#<%s:0x%s '%s'>" % [self.class.name, object_id, path]
107
111
  end
108
112
 
109
113
  private
@@ -154,9 +158,10 @@ module AWS
154
158
 
155
159
  # Time.parse doesn't like %d/%B/%Y:%H:%M:%S %z so we have to transform it unfortunately
156
160
  def typecast_time(datetime) #:nodoc:
157
- month = datetime[/[a-z]+/i]
161
+ month = datetime[/[a-z]+/i]
162
+ month_names = [nil, "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
158
163
  datetime.sub!(%r|^(\w{2})/(\w{3})|, '\2/\1')
159
- datetime.sub!(month, Date::ABBR_MONTHS[month.downcase].to_s)
164
+ datetime.sub!(month, month_names.index(month).to_s)
160
165
  datetime.sub!(':', ' ')
161
166
  Time.parse(datetime)
162
167
  end
@@ -8,7 +8,7 @@ module AWS
8
8
  # S3Object.store('me.jpg', open('headshot.jpg'), 'photos')
9
9
  #
10
10
  # The content type of the object will be inferred by its extension. If the appropriate content type can not be inferred, S3 defaults
11
- # to <tt>binary/octect-stream</tt>.
11
+ # to <tt>binary/octet-stream</tt>.
12
12
  #
13
13
  # If you want to override this, you can explicitly indicate what content type the object should have with the <tt>:content_type</tt> option:
14
14
  #
@@ -82,7 +82,7 @@ module AWS
82
82
  #
83
83
  # pp song.about
84
84
  # {"last-modified" => "Sat, 28 Oct 2006 21:29:26 GMT",
85
- # "content-type" => "binary/octect-stream",
85
+ # "content-type" => "binary/octet-stream",
86
86
  # "etag" => "\"dc629038ffc674bee6f62eb64ff3a\"",
87
87
  # "date" => "Sat, 28 Oct 2006 21:30:41 GMT",
88
88
  # "x-amz-request-id" => "B7BC68F55495B1C8",
@@ -178,13 +178,15 @@ module AWS
178
178
  end
179
179
  end
180
180
 
181
- # Makes a copy of the object with <tt>key</tt> to <tt>copy_name</tt>.
181
+ # Makes a copy of the object with <tt>key</tt> to <tt>copy_key</tt>, preserving the ACL of the existing object if the <tt>:copy_acl</tt> option is true (default false).
182
182
  def copy(key, copy_key, bucket = nil, options = {})
183
183
  bucket = bucket_name(bucket)
184
- original = open(url_for(key, bucket))
185
- default_options = {:content_type => original.content_type}
186
- store(copy_key, original, bucket, default_options.merge(options))
187
- acl(copy_key, bucket, acl(key, bucket))
184
+ source_key = path!(bucket, key)
185
+ default_options = {'x-amz-copy-source' => source_key}
186
+ target_key = path!(bucket, copy_key)
187
+ returning put(target_key, default_options) do
188
+ acl(copy_key, bucket, acl(key, bucket)) if options[:copy_acl]
189
+ end
188
190
  end
189
191
 
190
192
  # Rename the object with key <tt>from</tt> to have key in <tt>to</tt>.
@@ -494,7 +496,7 @@ module AWS
494
496
  # pp some_object.about
495
497
  # {"last-modified" => "Sat, 28 Oct 2006 21:29:26 GMT",
496
498
  # "x-amz-id-2" => "LdcQRk5qLwxJQiZ8OH50HhoyKuqyWoJ67B6i+rOE5MxpjJTWh1kCkL+I0NQzbVQn",
497
- # "content-type" => "binary/octect-stream",
499
+ # "content-type" => "binary/octet-stream",
498
500
  # "etag" => "\"dc629038ffc674bee6f62eb68454ff3a\"",
499
501
  # "date" => "Sat, 28 Oct 2006 21:30:41 GMT",
500
502
  # "x-amz-request-id" => "B7BC68F55495B1C8",
@@ -502,7 +504,7 @@ module AWS
502
504
  # "content-length" => "3418766"}
503
505
  #
504
506
  # some_object.content_type
505
- # # => "binary/octect-stream"
507
+ # # => "binary/octet-stream"
506
508
  # some_object.content_type = 'audio/mpeg'
507
509
  # some_object.content_type
508
510
  # # => 'audio/mpeg'
@@ -2,7 +2,7 @@ module AWS
2
2
  module S3
3
3
  module VERSION #:nodoc:
4
4
  MAJOR = '0'
5
- MINOR = '4'
5
+ MINOR = '5'
6
6
  TINY = '0'
7
7
  BETA = nil # Time.now.to_i.to_s
8
8
  end
@@ -30,7 +30,25 @@ class QueryStringAuthenticationTest < Test::Unit::TestCase
30
30
  query_string = Authentication::QueryString.new(request, key_id, secret, :expires => expires)
31
31
  assert_equal expires, query_string.send(:canonical_string).instance_variable_get(:@options)[:expires]
32
32
  assert_equal AmazonDocExampleData::Example3.query_string, query_string
33
- end
33
+ end
34
+
35
+ def test_expires_in_is_coerced_to_being_an_integer_in_case_it_is_a_special_integer_proxy
36
+ # References bug: http://rubyforge.org/tracker/index.php?func=detail&aid=17458&group_id=2409&atid=9356
37
+ integer_proxy = Class.new do
38
+ attr_reader :integer
39
+ def initialize(integer)
40
+ @integer = integer
41
+ end
42
+
43
+ def to_int
44
+ integer
45
+ end
46
+ end
47
+
48
+ actual_integer = 25
49
+ query_string = Authentication::QueryString.new(request, key_id, secret, :expires_in => integer_proxy.new(actual_integer))
50
+ assert_equal actual_integer, query_string.send(:expires_in)
51
+ end
34
52
 
35
53
  private
36
54
  def request; AmazonDocExampleData::Example3.request end
@@ -23,58 +23,52 @@ class BaseTest < Test::Unit::TestCase
23
23
  end
24
24
 
25
25
  def test_request_tries_again_when_encountering_an_internal_error
26
- Bucket.in_test_mode do
27
- Bucket.request_returns [
28
- # First request is an internal error
29
- {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
30
- # Second request is a success
31
- {:body => Fixtures::Buckets.empty_bucket, :code => 200}
32
- ]
33
- bucket = nil # Block scope hack
34
- assert_nothing_raised do
35
- bucket = Bucket.find('marcel')
36
- end
37
- # Don't call objects 'cause we don't want to make another request
38
- assert bucket.object_cache.empty?
39
- end
26
+ mock_connection_for(Bucket, :returns => [
27
+ # First request is an internal error
28
+ {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
29
+ # Second request is a success
30
+ {:body => Fixtures::Buckets.empty_bucket, :code => 200}
31
+ ])
32
+ bucket = nil # Block scope hack
33
+ assert_nothing_raised do
34
+ bucket = Bucket.find('marcel')
35
+ end
36
+ # Don't call objects 'cause we don't want to make another request
37
+ assert bucket.object_cache.empty?
40
38
  end
41
39
 
42
40
  def test_request_tries_up_to_three_times
43
- Bucket.in_test_mode do
44
- Bucket.request_returns [
45
- # First request is an internal error
46
- {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
47
- # Second request is also an internal error
48
- {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
49
- # Ditto third
50
- {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
51
- # Fourth works
52
- {:body => Fixtures::Buckets.empty_bucket, :code => 200}
53
- ]
54
- bucket = nil # Block scope hack
55
- assert_nothing_raised do
56
- bucket = Bucket.find('marcel')
57
- end
58
- # Don't call objects 'cause we don't want to make another request
59
- assert bucket.object_cache.empty?
41
+ mock_connection_for(Bucket, :returns => [
42
+ # First request is an internal error
43
+ {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
44
+ # Second request is also an internal error
45
+ {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
46
+ # Ditto third
47
+ {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
48
+ # Fourth works
49
+ {:body => Fixtures::Buckets.empty_bucket, :code => 200}
50
+ ])
51
+ bucket = nil # Block scope hack
52
+ assert_nothing_raised do
53
+ bucket = Bucket.find('marcel')
60
54
  end
55
+ # Don't call objects 'cause we don't want to make another request
56
+ assert bucket.object_cache.empty?
61
57
  end
62
58
 
63
59
  def test_request_tries_again_three_times_and_gives_up
64
- Bucket.in_test_mode do
65
- Bucket.request_returns [
66
- # First request is an internal error
67
- {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
68
- # Second request is also an internal error
69
- {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
70
- # Ditto third
71
- {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
72
- # Ditto fourth
73
- {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
74
- ]
75
- assert_raises(InternalError) do
76
- Bucket.find('marcel')
77
- end
60
+ mock_connection_for(Bucket, :returns => [
61
+ # First request is an internal error
62
+ {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
63
+ # Second request is also an internal error
64
+ {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
65
+ # Ditto third
66
+ {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
67
+ # Ditto fourth
68
+ {:body => Fixtures::Errors.internal_error, :code => 500, :error => true},
69
+ ])
70
+ assert_raises(InternalError) do
71
+ Bucket.find('marcel')
78
72
  end
79
73
  end
80
74
  end
@@ -87,9 +81,8 @@ class MultiConnectionsTest < Test::Unit::TestCase
87
81
  def setup
88
82
  Base.send(:connections).clear
89
83
  end
90
- alias_method :teardown, :setup
91
84
 
92
- def test_default_connection_options_are_used_for_subsequent_connections
85
+ def test_default_connection_options_are_used_for_subsequent_connections
93
86
  assert !Base.connected?
94
87
 
95
88
  assert_raises(MissingAccessKey) do
@@ -113,7 +106,7 @@ class MultiConnectionsTest < Test::Unit::TestCase
113
106
  end
114
107
 
115
108
  # All subclasses are currently using the default connection
116
- assert Base.connection == Bucket.connection
109
+ assert_equal Base.connection, Bucket.connection
117
110
 
118
111
  # No need to pass in the required options. The default connection will supply them
119
112
  assert_nothing_raised do