smooth_s3 0.1.1 → 0.2.0

data/CHANGELOG ADDED
@@ -0,0 +1,12 @@
+ ### 0.2.0 ###
+ - Refactoring run
+ - Connection errors and failed uploads are now properly rescued and handled.
+ - Uploads will now be retried up to 3 times before moving on to the next file.
+ - Service buckets are now cached at initialization, saving a large number of requests. This speeds up directory sync operations considerably.
+ - Renamed sync_directory(!) to directory_sync(!) for consistency. The old method names are aliased for backward compatibility.
+
+ ### 0.1.1 ###
+ - Removed dependency on jeweler (oops!)
+
+ ### 0.1.0 ###
+ - First release
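
The 0.2.0 entries above mainly change how directory syncing is invoked and how failed uploads are handled. For orientation only (this sketch is not part of the diff), here is a minimal usage example; the credentials, bucket name, and directory path are placeholders, while the method names and option keys are taken from the signatures visible in the diffs below.

  require 'smooth_s3'

  # Placeholder credentials -- substitute your own AWS key pair.
  service = SmoothS3::Service.new(
    :aws_key    => "AKIA_EXAMPLE",
    :aws_secret => "example-secret",
    :ssl        => true
  )

  # New name introduced in 0.2.0; each failed upload is retried up to 3 times.
  service.directory_sync("my-backup-bucket", "/var/backups")

  # The old name still works thanks to the backward-compatibility alias.
  service.sync_directory("my-backup-bucket", "/var/backups")

Because the service caches its bucket list at initialization, repeated directory syncs against the same Service instance avoid re-fetching bucket metadata on every call.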
data/VERSION CHANGED
@@ -1 +1 @@
- 0.1.1
+ 0.2.0
data/lib/smooth_s3/bucket.rb CHANGED
@@ -2,16 +2,16 @@ module SmoothS3
   class Bucket
 
     def self.exists?(bucket, service)
-      service.buckets.include? bucket
+      service.refresh.buckets.keys.include? bucket
     end
 
     def self.file_exists?(file, bucket, service)
-      b = service.proxy_service.buckets.find_first(bucket)
+      b = service.buckets[bucket]
 
       begin
         b.objects.find_first(file)
         return true
-      rescue S3::Error::NoSuchKey
+      rescue
        return false
       end
     end
@@ -27,14 +27,15 @@ module SmoothS3
       rescue S3::Error::BucketAlreadyExists
         raise SmoothS3::Error, "A bucket named '#{bucket_name}' already exists in the Global S3 Namespace. Please select one of you existing buckets or try a new name."
       end
+
+      Service.new_buckets[service.aws_key] << new_bucket
     end
 
     def self.store_file(file, remote_file, bucket, service, prefix, overwrite)
-      b = service.proxy_service.buckets.find_first(bucket)
+      b = service.refresh.buckets[bucket]
 
       if prefix
-        remote_file = prefix + remote_file if prefix =~ /\/$/
-        remote_file = prefix + "/" + remote_file unless prefix =~ /\/$/
+        remote_file = prefix =~ /\/$/ ? (prefix + remote_file) : prefix + "/" + remote_file
       end
 
       unless overwrite == true
@@ -46,13 +47,34 @@ module SmoothS3
 
       bo = b.objects.build(remote_file)
       bo.content = open(file)
-
-      if bo.save
+
+      if Bucket.save_with_retries(3, bo, file)
         puts "'#{file}' was uploaded to S3 bucket '#{bucket}' under the name '#{remote_file}'."
       else
-        puts "There was a problem trying to upload '#{file}' to S3"
+        puts "Impossible to upload '#{file}' to S3 (retries were attempted). Please check file permissions."
       end
     end
 
+    private
+
+    def self.save_with_retries(tries, bucket_object, file)
+      done, attempts_left = false, tries
+
+      until done
+        break if attempts_left <= 0
+
+        begin
+          bucket_object.save
+          done = true
+        rescue
+          puts "There was a problem trying to upload '#{file}' to S3. Retrying..."
+        end
+
+        attempts_left -= 1
+      end
+
+      done
+    end
+
   end
 end
data/lib/smooth_s3/service.rb CHANGED
@@ -1,6 +1,9 @@
 module SmoothS3
   class Service
     attr_reader :aws_key, :aws_secret, :ssl, :proxy_service
+    attr_accessor :buckets
+
+    @@new_buckets = {}
 
     def initialize(opts={})
       @aws_key = opts.delete(:aws_key)
@@ -10,6 +13,30 @@ module SmoothS3
       @proxy_service = S3::Service.new(:access_key_id => @aws_key, :secret_access_key => @aws_secret, :use_ssl => @ssl)
 
       test_connection
+
+      @buckets = gather_buckets
+      @@new_buckets[@aws_key] = []
+    end
+
+    def gather_buckets
+      service_buckets = {}
+      begin
+        self.proxy_service.buckets.each { |b| service_buckets.merge!(b.name => b) }
+      rescue
+        puts "There was an error trying to fetch the service's buckets. Retrying..."
+        sleep 1
+
+        self.gather_buckets
+      end
+
+      service_buckets
+    end
+
+    def refresh
+      new_buckets = @@new_buckets[self.aws_key]
+      new_buckets.each {|nb| self.buckets[nb.name] = nb}
+
+      return self
     end
 
     def upload(bucket, files, options={})
@@ -20,12 +47,12 @@ module SmoothS3
       Uploader.upload!(self, bucket, files, options)
     end
 
-    def sync_directory(bucket, directory, options={})
-      Uploader.sync_directory(self, bucket, directory, options)
+    def directory_sync(bucket, directory, options={})
+      Uploader.directory_sync(self, bucket, directory, options)
     end
 
-    def sync_directory!(bucket, directory, options={})
-      Uploader.sync_directory!(self, bucket, directory, options)
+    def directory_sync!(bucket, directory, options={})
+      Uploader.directory_sync!(self, bucket, directory, options)
     end
 
     def timestamped_upload(bucket, files, options={})
@@ -44,11 +71,19 @@ module SmoothS3
       Uploader.timestamped_directory_sync!(self, bucket, directory, options)
     end
 
-    # Utility Methods
-    def buckets
-      @proxy_service.buckets.map { |b| b.name }
+    # Make @@new_buckets accessible outside of the class
+    def self.new_buckets
+      @@new_buckets
     end
 
+    def self.new_buckets=(value)
+      @@new_buckets = value
+    end
+
+    # Preserve backwards compatibility
+    alias_method :sync_directory, :directory_sync
+    alias_method :sync_directory!, :directory_sync!
+
     private
 
     def test_connection
data/lib/smooth_s3/uploader.rb CHANGED
@@ -2,87 +2,61 @@ module SmoothS3
   class Uploader
 
     def self.upload(service, bucket, files, options={})
-      options[:overwrite] = false unless options[:overwrite]
+      [:overwrite, :timestamped].each {|s| options[s] = false unless options[s]}
       Bucket.select(bucket, service)
 
       valid_files = Uploader.validate_files(files)
       valid_files.each do |vf|
-        Bucket.store_file(vf, vf.split("/")[-1], bucket, service, options[:prefix], options[:overwrite])
+        remote_file_name = options[:timestamped] ? (options[:timestamp] + "_" + vf.split("/")[-1]) : vf.split("/")[-1]
+        Bucket.store_file(vf, remote_file_name, bucket, service, options[:prefix], options[:overwrite])
       end
     end
 
     def self.upload!(service, bucket, files, options={})
-      options.merge!(:overwrite => true)
-      Uploader.upload(service, bucket, files, options)
+      Uploader.upload(service, bucket, files, options.merge!(:overwrite => true))
     end
 
-    def self.sync_directory(service, bucket, directory, options={})
-      options[:overwrite] = false unless options[:overwrite]
+    def self.directory_sync(service, bucket, directory, options={})
+      [:overwrite, :timestamped].each {|s| options[s] = false unless options[s]}
       Bucket.select(bucket, service)
 
       valid_files = Uploader.validate_files_in_directory(directory)
       valid_files.each do |vf|
-        Bucket.store_file(vf[0], vf[1], bucket, service, options[:prefix], options[:overwrite])
+        remote_file_name = options[:timestamped] ? (options[:timestamp] + "_" + vf[1]) : vf[1]
+        Bucket.store_file(vf[0], remote_file_name, bucket, service, options[:prefix], options[:overwrite])
       end
     end
 
-    def self.sync_directory!(service, bucket, directory, options={})
-      options.merge!(:overwrite => true)
-      Uploader.sync_directory(service, bucket, directory, options)
+    def self.directory_sync!(service, bucket, directory, options={})
+      Uploader.directory_sync(service, bucket, directory, options.merge!(:overwrite => true))
     end
 
     def self.timestamped_upload(service, bucket, files, options={})
       options[:overwrite] = false unless options[:overwrite]
-      Bucket.select(bucket, service)
-
-      if options[:timestamp_type] == :epoch
-        timestamp = Time.now.strftime("%s")
-      elsif options[:timestamp_type] == :strftime
-        if options[:timestamp_format]
-          timestamp = Time.now.strftime(options[:timestamp_format])
-        else
-          timestamp = Uploader.default_timestamp
-        end
-      else
-        timestamp = Uploader.default_timestamp
-      end
-
-      valid_files = Uploader.validate_files(files)
-      valid_files.each do |vf|
-        Bucket.store_file(vf, timestamp + "_" + vf.split("/")[-1], bucket, service, options[:prefix], options[:overwrite])
-      end
+
+      timestamp = Uploader.calculate_timestamp(options)
+      Uploader.upload(service, bucket, files, options.merge!(:timestamped => true, :timestamp => timestamp))
     end
 
     def self.timestamped_upload!(service, bucket, files, options={})
-      options.merge!(:overwrite => true)
-      Uploader.timestamped_upload(service, bucket, files, options)
+      Uploader.timestamped_upload(service, bucket, files, options.merge!(:overwrite => true))
     end
 
     def self.timestamped_directory_sync(service, bucket, directory, options={})
       options[:overwrite] = false unless options[:overwrite]
-      Bucket.select(bucket, service)
-
-      if options[:timestamp_type] == :epoch
-        timestamp = Time.now.strftime("%s")
-      elsif options[:timestamp_type] == :strftime
-        if options[:timestamp_format]
-          timestamp = Time.now.strftime(options[:timestamp_format])
-        else
-          timestamp = Uploader.default_timestamp
-        end
-      else
-        timestamp = Uploader.default_timestamp
-      end
 
-      valid_files = Uploader.validate_files_in_directory(directory)
-      valid_files.each do |vf|
-        Bucket.store_file(vf[0], timestamp + "_" + vf[1], bucket, service, options[:prefix], options[:overwrite])
-      end
+      timestamp = Uploader.calculate_timestamp(options)
+      Uploader.directory_sync(service, bucket, directory, options.merge!(:timestamped => true, :timestamp => timestamp))
     end
 
     def self.timestamped_directory_sync!(service, bucket, directory, options={})
-      options.merge!(:overwrite => true)
-      Uploader.timestamped_directory_sync(service, bucket, directory, options)
+      Uploader.timestamped_directory_sync(service, bucket, directory, options.merge!(:overwrite => true))
+    end
+
+    # Preserve backwards compatibility
+    class << self
+      alias_method :sync_directory, :directory_sync
+      alias_method :sync_directory!, :directory_sync!
     end
 
     private
@@ -121,6 +95,18 @@ module SmoothS3
       valid_files
     end
 
+    def self.calculate_timestamp(options)
+      timestamp = nil
+
+      if options[:timestamp_type] == :epoch
+        timestamp = Time.now.strftime("%s")
+      elsif options[:timestamp_type] == :strftime && options[:timestamp_format]
+        timestamp = Time.now.strftime(options[:timestamp_format])
+      end
+
+      timestamp || Uploader.default_timestamp
+    end
+
     def self.default_timestamp
       Time.now.strftime("%Y%m%d%H%M%S")
     end
data/lib/smooth_s3.rb CHANGED
@@ -11,5 +11,5 @@ require 'smooth_s3/uploader.rb'
 require 'smooth_s3/error.rb'
 
 module SmoothS3
-  VERSION = "0.1.0"
+  VERSION = "0.2.0"
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: smooth_s3
 version: !ruby/object:Gem::Version
-  version: 0.1.1
+  version: 0.2.0
 prerelease:
 platform: ruby
 authors:
@@ -13,7 +13,7 @@ date: 2011-12-08 00:00:00.000000000Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: s3
-  requirement: &2156974900 !ruby/object:Gem::Requirement
+  requirement: &2153519260 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
@@ -21,7 +21,7 @@ dependencies:
         version: 0.3.9
   type: :runtime
   prerelease: false
-  version_requirements: *2156974900
+  version_requirements: *2153519260
 description: A user-friendly superset of the S3 gem geared towards file system backup
   operations. Simplifies standard actions such as basic uploads, for example allowing
   multiple files to be uploaded in one operation and adds new functionality such as
@@ -37,6 +37,7 @@ files:
 - Gemfile.lock
 - README.textile
 - VERSION
+- CHANGELOG
 - lib/smooth_s3.rb
 - lib/smooth_s3/bucket.rb
 - lib/smooth_s3/error.rb