s3_dir 0.0.1 → 0.0.2

Sign up to get free protection for your applications and to get access to all the features.
Files changed (4)
  1. checksums.yaml +4 -4
  2. data/lib/s3_dir/version.rb +1 -1
  3. data/lib/s3_dir.rb +61 -35
  4. metadata +2 -3
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 6a17c51ae36e976e230825df8db63b8b29ff2a01
4
- data.tar.gz: c9135601df89eb46385f1d079fb67f6a51a59e63
3
+ metadata.gz: 92a816d5e2e14b3c1238da1987a697f5748be223
4
+ data.tar.gz: b7aca8ce2710278a087d70ba5bb63e599a010ab4
5
5
  SHA512:
6
- metadata.gz: e0ab809198e1182d1ef12ec2940372f8be210a7a7d40316ac7680ef19e1f523e6abaf3f83a41606134a85b0198d168ece6b324b8363b5bc40c1295841fa2ee8c
7
- data.tar.gz: 9d033deadc24d90f77a3ed7bf36a2041d29afcf2ad6b0a86284ed3793532b158a689cb380912ef67cb1ae6b0c82bd6e1c49fc79888d83ee8dd05fbd11ebb0022
6
+ metadata.gz: a83953cdbdd9118062ca6c564eb6469e9107b8fd5ff0a89eb213c20ad35530ad5fb44c0e4a54f16c7dc64632f20298059fa4843b52edc2dae37a5b36973b8e69
7
+ data.tar.gz: c6225bc8585036a0920d716dab714348053da2d4761042844b0d8a29a6f61f40fe999948a785004ab3d3ee47e134dfeefcc31f86699953bf8ef814a65777611c
@@ -1,3 +1,3 @@
1
1
  module S3Dir
2
- VERSION = "0.0.1"
2
+ VERSION = "0.0.2"
3
3
  end
data/lib/s3_dir.rb CHANGED
@@ -1,5 +1,6 @@
1
1
  require_relative 's3_dir/version'
2
2
  require 'fog/aws/storage'
3
+ require 'digest/md5'
3
4
 
4
5
  # S3Dir uploads files to S3
5
6
  #
@@ -26,51 +27,76 @@ require 'fog/aws/storage'
26
27
  # of its contents not public.
27
28
  #
28
29
  module S3Dir
30
+
29
31
  # Upload files to S3
30
32
  def self.upload dir, key, options={}
31
- files_path = File.expand_path(dir)
33
+ uploader = Uploader.new(dir, key, options)
34
+ uploader.upload
35
+ end
36
+
37
+ class Uploader
38
+ attr_reader :key
39
+ attr_reader :bucket
40
+ attr_reader :storage
41
+ attr_reader :is_public
42
+ attr_reader :files_path
43
+
44
+ def initialize dir, key, options
45
+ @files_path = File.expand_path(dir)
32
46
 
33
- # Merge defaults with passed-in options
34
- settings = {credential: ENV['FOG_CREDENTIAL'],
35
- private: false}.merge(options)
47
+ # Merge defaults with passed-in options
48
+ settings = {credential: ENV['FOG_CREDENTIAL'],
49
+ private: false}.merge(options)
36
50
 
37
- # We have to manually extract fog credentials here
38
- # because we'll need those settings when creating a new
39
- # Fog::Storage object with custom settings
40
- all_credentials = YAML::load_file(File.join(ENV['HOME'], '.fog'))
41
- credential = settings[:credential]
42
- credentials = all_credentials[credential]
43
- access_key = credentials[:aws_access_key_id]
44
- secret_key = credentials[:aws_secret_access_key]
45
- region = credentials[:region] || 'us-west-2'
51
+ # Configure Fog
52
+ Fog.credential = settings[:credential]
46
53
 
47
- # If we don't specify this endpoint, Fog will complain about
48
- # not using the correct endpoint if the bucket has dots in
49
- # the name (i.e. website bucket)
50
- endpoint = 'http://s3.amazonaws.com'
54
+ # Get a region
55
+ region = Fog.credentials[:region] || 'us-west-2'
51
56
 
52
- # This may be a public bucket
53
- is_public = !settings[:private]
57
+ # If we don't specify this endpoint, Fog will complain about
58
+ # not using the correct endpoint if the bucket has dots in
59
+ # the name (i.e. website bucket)
60
+ endpoint = 'http://s3.amazonaws.com'
54
61
 
55
- # Set up our storage object
56
- # We have to specify path_style here because Fog will complain about
57
- # our website bucket (if we're using a bucket with dots in the name)
58
- # not being covered by the SSL certificate.
59
- storage = Fog::Storage.new(provider: 'aws', aws_access_key_id: access_key,
60
- aws_secret_access_key: secret_key,
61
- path_style: true, region: region,
62
- endpoint: endpoint)
63
- bucket = storage.directories.get(key)
64
- bucket ||= storage.directories.create(key: key, public: is_public)
62
+ # This may be a public bucket
63
+ @is_public = !settings[:private]
65
64
 
66
- Dir.chdir(files_path) do
67
- Dir['**/*'].each do |entry|
68
- if File.directory?(entry)
69
- bucket.files.create(key: entry, public: is_public)
70
- else
71
- bucket.files.create(key: entry, public: is_public, body: File.open(entry))
65
+ # Set up our storage object
66
+ # We have to specify path_style here because Fog will complain about
67
+ # our website bucket (if we're using a bucket with dots in the name)
68
+ # not being covered by the SSL certificate.
69
+ fog_options = Fog.credentials.merge({provider: 'aws', path_style: true,
70
+ region: region, endpoint: endpoint})
71
+ fog_options.delete(:key_name)
72
+ @storage = Fog::Storage.new(fog_options)
73
+ @bucket = storage.directories.get(key)
74
+ @bucket ||= storage.directories.create(key: key, public: is_public)
75
+ @key = key
76
+ end
77
+
78
+ def upload
79
+ Dir.chdir(files_path) do
80
+ Dir['**/*'].each do |entry|
81
+ File.directory?(entry) ? create_directory(entry) : create_file(entry)
72
82
  end
73
83
  end
74
84
  end
85
+
86
+ private
87
+
88
+ def create_directory entry
89
+ bucket.files.create(key: entry, public: is_public)
90
+ end
91
+
92
+ def create_file entry
93
+ storage.head_object(key, entry, {'If-None-Match' => md5(entry)})
94
+ rescue Excon::Errors::NotFound
95
+ bucket.files.create(key: entry, public: is_public, body: File.open(entry))
96
+ end
97
+
98
+ def md5 entry
99
+ Digest::MD5.digest(entry)
100
+ end
75
101
  end
76
102
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: s3_dir
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.0.1
4
+ version: 0.0.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - nuex
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2013-12-07 00:00:00.000000000 Z
11
+ date: 2013-12-18 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: bundler
@@ -106,4 +106,3 @@ signing_key:
106
106
  specification_version: 4
107
107
  summary: Uploads a directory of files to an AWS S3 bucket
108
108
  test_files: []
109
- has_rdoc: