cknife 0.1.8 → 1.1.0

--- /dev/null
+++ b/bin/cknifemon
@@ -0,0 +1,5 @@
1
+ #!/usr/bin/env ruby
2
+
3
+ require 'cknife/cknife_mon'
4
+
5
+ CKnife::CKnifeMon.start
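The new executable follows the same pattern as the gem's other bin stubs: require the class from lib/ and hand ARGV to its .start class method, which dispatches to that class's tasks. The tasks themselves live in lib/cknife/cknife_mon.rb, which is not included in this diff; assuming it is a Thor class like CKnifeAws further down, the quickest way to see what it offers is the standard help listing, either by running `cknifemon help` once the gem is installed or from Ruby:

    require 'cknife/cknife_mon'

    # Thor's .start takes an argv-style array; "help" prints whatever tasks
    # CKnife::CKnifeMon defines (they are not shown in this diff).
    CKnife::CKnifeMon.start(%w[help])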
--- a/cknife.gemspec
+++ b/cknife.gemspec
@@ -2,19 +2,19 @@
2
2
  # DO NOT EDIT THIS FILE DIRECTLY
3
3
  # Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
4
4
  # -*- encoding: utf-8 -*-
5
- # stub: cknife 0.1.8 ruby lib
5
+ # stub: cknife 1.1.0 ruby lib
6
6
 
7
7
  Gem::Specification.new do |s|
8
- s.name = "cknife"
9
- s.version = "0.1.8"
8
+ s.name = "cknife".freeze
9
+ s.version = "1.1.0"
10
10
 
11
- s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
12
- s.require_paths = ["lib"]
13
- s.authors = ["Mike De La Loza"]
14
- s.date = "2015-08-01"
15
- s.description = "Cali Army Knife, a collection of command line tools, especially for popular API services."
16
- s.email = "mikedll@mikedll.com"
17
- s.executables = ["cknifeaws", "cknifedub", "cknifemail", "cknifemysql", "cknifenowtimestamp", "cknifepg", "cknifewcdir", "cknifezerigo"]
11
+ s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
12
+ s.require_paths = ["lib".freeze]
13
+ s.authors = ["Michael Rivera".freeze]
14
+ s.date = "2018-10-06"
15
+ s.description = "A collection of command line tools, especially for popular API services.".freeze
16
+ s.email = "soymrmike@gmail.com".freeze
17
+ s.executables = ["cknifeaws".freeze, "cknifedub".freeze, "cknifemail".freeze, "cknifemon".freeze, "cknifemysql".freeze, "cknifenowtimestamp".freeze, "cknifepg".freeze, "cknifewcdir".freeze, "cknifezerigo".freeze]
18
18
  s.extra_rdoc_files = [
19
19
  "LICENSE",
20
20
  "README.md"
@@ -31,6 +31,7 @@ Gem::Specification.new do |s|
31
31
  "bin/cknifeaws",
32
32
  "bin/cknifedub",
33
33
  "bin/cknifemail",
34
+ "bin/cknifemon",
34
35
  "bin/cknifemysql",
35
36
  "bin/cknifenowtimestamp",
36
37
  "bin/cknifepg",
@@ -38,59 +39,68 @@ Gem::Specification.new do |s|
38
39
  "bin/cknifezerigo",
39
40
  "cknife.gemspec",
40
41
  "cknife.yml.sample",
42
+ "doc/remote_ubuntu_machine.md",
43
+ "lib/cknife/backgrounded_polling.rb",
44
+ "lib/cknife/cknife_aws.rb",
45
+ "lib/cknife/cknife_mon.rb",
41
46
  "lib/cknife/cknife_mysql.rb",
42
47
  "lib/cknife/cknife_pg.rb",
43
48
  "lib/cknife/command_line.rb",
44
- "lib/cknife/config.rb"
49
+ "lib/cknife/config.rb",
50
+ "lib/cknife/monitor.rb",
51
+ "lib/cknife/repetition.rb"
45
52
  ]
46
- s.homepage = "http://github.com/mikedll/cali-army-knife"
47
- s.licenses = ["MIT"]
48
- s.rubygems_version = "2.4.7"
49
- s.summary = "Cali Army Knife"
53
+ s.homepage = "http://github.com/mikedll/cknife".freeze
54
+ s.licenses = ["".freeze]
55
+ s.rubygems_version = "2.6.14.1".freeze
56
+ s.summary = "CKnife".freeze
50
57
 
51
58
  if s.respond_to? :specification_version then
52
59
  s.specification_version = 4
53
60
 
54
61
  if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
55
- s.add_runtime_dependency(%q<rest-client>, [">= 1.6", "~> 1"])
56
- s.add_runtime_dependency(%q<nokogiri>, [">= 1.6", "~> 1"])
57
- s.add_runtime_dependency(%q<i18n>, ["~> 0"])
58
- s.add_runtime_dependency(%q<activesupport>, ["~> 3"])
59
- s.add_runtime_dependency(%q<actionpack>, ["~> 3"])
60
- s.add_runtime_dependency(%q<mail>, ["~> 2.4"])
61
- s.add_runtime_dependency(%q<thor>, [">= 0.14", "~> 0"])
62
- s.add_runtime_dependency(%q<builder>, ["~> 3.0"])
63
- s.add_runtime_dependency(%q<fog>, ["<= 1.17", ">= 1.15"])
64
- s.add_runtime_dependency(%q<unf>, [">= 0.1", "~> 0"])
65
- s.add_development_dependency(%q<bundler>, ["~> 1.0"])
66
- s.add_development_dependency(%q<jeweler>, ["~> 2.0"])
62
+ s.add_runtime_dependency(%q<rest-client>.freeze, [">= 1.8.0", "~> 1"])
63
+ s.add_runtime_dependency(%q<nokogiri>.freeze, [">= 1.8.2", "~> 1"])
64
+ s.add_runtime_dependency(%q<i18n>.freeze, [">= 0.6.6"])
65
+ s.add_runtime_dependency(%q<activesupport>.freeze, ["~> 4.2.9"])
66
+ s.add_runtime_dependency(%q<actionpack>.freeze, ["~> 4.2.9"])
67
+ s.add_runtime_dependency(%q<mail>.freeze, ["~> 2.5.5"])
68
+ s.add_runtime_dependency(%q<thor>.freeze, [">= 0.14", "~> 0"])
69
+ s.add_runtime_dependency(%q<builder>.freeze, ["~> 3.0"])
70
+ s.add_runtime_dependency(%q<fog-aws>.freeze, ["< 3.0"])
71
+ s.add_runtime_dependency(%q<unf>.freeze, [">= 0.1", "~> 0"])
72
+ s.add_runtime_dependency(%q<daemons>.freeze, [">= 0"])
73
+ s.add_development_dependency(%q<bundler>.freeze, ["~> 1.0"])
74
+ s.add_development_dependency(%q<jeweler>.freeze, ["~> 2.1"])
67
75
  else
68
- s.add_dependency(%q<rest-client>, [">= 1.6", "~> 1"])
69
- s.add_dependency(%q<nokogiri>, [">= 1.6", "~> 1"])
70
- s.add_dependency(%q<i18n>, ["~> 0"])
71
- s.add_dependency(%q<activesupport>, ["~> 3"])
72
- s.add_dependency(%q<actionpack>, ["~> 3"])
73
- s.add_dependency(%q<mail>, ["~> 2.4"])
74
- s.add_dependency(%q<thor>, [">= 0.14", "~> 0"])
75
- s.add_dependency(%q<builder>, ["~> 3.0"])
76
- s.add_dependency(%q<fog>, ["<= 1.17", ">= 1.15"])
77
- s.add_dependency(%q<unf>, [">= 0.1", "~> 0"])
78
- s.add_dependency(%q<bundler>, ["~> 1.0"])
79
- s.add_dependency(%q<jeweler>, ["~> 2.0"])
76
+ s.add_dependency(%q<rest-client>.freeze, [">= 1.8.0", "~> 1"])
77
+ s.add_dependency(%q<nokogiri>.freeze, [">= 1.8.2", "~> 1"])
78
+ s.add_dependency(%q<i18n>.freeze, [">= 0.6.6"])
79
+ s.add_dependency(%q<activesupport>.freeze, ["~> 4.2.9"])
80
+ s.add_dependency(%q<actionpack>.freeze, ["~> 4.2.9"])
81
+ s.add_dependency(%q<mail>.freeze, ["~> 2.5.5"])
82
+ s.add_dependency(%q<thor>.freeze, [">= 0.14", "~> 0"])
83
+ s.add_dependency(%q<builder>.freeze, ["~> 3.0"])
84
+ s.add_dependency(%q<fog-aws>.freeze, ["< 3.0"])
85
+ s.add_dependency(%q<unf>.freeze, [">= 0.1", "~> 0"])
86
+ s.add_dependency(%q<daemons>.freeze, [">= 0"])
87
+ s.add_dependency(%q<bundler>.freeze, ["~> 1.0"])
88
+ s.add_dependency(%q<jeweler>.freeze, ["~> 2.1"])
80
89
  end
81
90
  else
82
- s.add_dependency(%q<rest-client>, [">= 1.6", "~> 1"])
83
- s.add_dependency(%q<nokogiri>, [">= 1.6", "~> 1"])
84
- s.add_dependency(%q<i18n>, ["~> 0"])
85
- s.add_dependency(%q<activesupport>, ["~> 3"])
86
- s.add_dependency(%q<actionpack>, ["~> 3"])
87
- s.add_dependency(%q<mail>, ["~> 2.4"])
88
- s.add_dependency(%q<thor>, [">= 0.14", "~> 0"])
89
- s.add_dependency(%q<builder>, ["~> 3.0"])
90
- s.add_dependency(%q<fog>, ["<= 1.17", ">= 1.15"])
91
- s.add_dependency(%q<unf>, [">= 0.1", "~> 0"])
92
- s.add_dependency(%q<bundler>, ["~> 1.0"])
93
- s.add_dependency(%q<jeweler>, ["~> 2.0"])
91
+ s.add_dependency(%q<rest-client>.freeze, [">= 1.8.0", "~> 1"])
92
+ s.add_dependency(%q<nokogiri>.freeze, [">= 1.8.2", "~> 1"])
93
+ s.add_dependency(%q<i18n>.freeze, [">= 0.6.6"])
94
+ s.add_dependency(%q<activesupport>.freeze, ["~> 4.2.9"])
95
+ s.add_dependency(%q<actionpack>.freeze, ["~> 4.2.9"])
96
+ s.add_dependency(%q<mail>.freeze, ["~> 2.5.5"])
97
+ s.add_dependency(%q<thor>.freeze, [">= 0.14", "~> 0"])
98
+ s.add_dependency(%q<builder>.freeze, ["~> 3.0"])
99
+ s.add_dependency(%q<fog-aws>.freeze, ["< 3.0"])
100
+ s.add_dependency(%q<unf>.freeze, [">= 0.1", "~> 0"])
101
+ s.add_dependency(%q<daemons>.freeze, [">= 0"])
102
+ s.add_dependency(%q<bundler>.freeze, ["~> 1.0"])
103
+ s.add_dependency(%q<jeweler>.freeze, ["~> 2.1"])
94
104
  end
95
105
  end
96
106
 
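The dependency changes are the substance of this bump: the Rails-3-era activesupport/actionpack pins move to 4.2.x, fog is swapped for fog-aws (< 3.0), and daemons appears as a new runtime dependency (presumably for the new monitor executable). A consuming project picks all of this up by pinning the new release, e.g. in its Gemfile:

    # Gemfile
    source 'https://rubygems.org'

    gem 'cknife', '1.1.0'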
--- a/cknife.yml.sample
+++ b/cknife.yml.sample
@@ -2,6 +2,9 @@
2
2
  key: AKIAblahblahb...
3
3
  secret: 8xILhOsecretsecretsecretsecret...
4
4
 
5
+ mon:
6
+ url: http://some.server.com/monitored_computers/heartbeat
7
+ api_key: someapikey
5
8
 
6
9
  mail:
7
10
  from: me@mydomain.com
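The new mon block supplies the heartbeat endpoint and API key used by the monitoring tool added in this release. Exactly how cknifemon transmits the key is defined in lib/cknife/cknife_mon.rb, which this diff does not include, so the following is only a sketch of the kind of request the BackgroundedPolling concern (added later in this diff) would issue against that endpoint, assuming the api_key travels as an ordinary form field:

    require 'rest-client'
    require 'yaml'

    # Sketch only: assumes a form-encoded POST carrying api_key.
    mon = YAML.load_file('cknife.yml')['mon']
    RestClient.post(mon['url'], 'api_key' => mon['api_key'])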
--- /dev/null
+++ b/doc/remote_ubuntu_machine.md
@@ -0,0 +1,87 @@
1
+
2
+ The example deployment user here has username `alexan`. (Alexa N.)
3
+
4
+ This presumes an Ubuntu 14.x server base installation.
5
+
6
+ > lsb_release -a
7
+ No LSB modules are available.
8
+ Distributor ID: Ubuntu
9
+ Description: Ubuntu 14.04.5 LTS
10
+ Release: 14.04
11
+ Codename: trusty
12
+
13
+ # Non-root user creation
14
+
15
+ Applications are expected to be served from that user's home
16
+ directory.
17
+
18
+ > adduser alexan
19
+
20
+ Provide a password. Answer some questions.
21
+
22
+ > usermod -a -G sudo alexan
23
+ > groups alexan
24
+ alexan : alexan sudo
25
+
26
+ You may inspect the sudoers file:
27
+
28
+ > less /etc/sudoers
29
+
30
+ ...
31
+
32
+ # Members of the admin group may gain root privileges.
33
+ %admin ALL=(ALL) ALL
34
+
35
+ # Allow members of group sudo to execute any command
36
+ %sudo ALL=(ALL:ALL) ALL
37
+
38
+ # Upgrade to latest release.
39
+
40
+ This is not recommended at this time. This could upgrade to Ubuntu 16.x.
41
+
42
+ # > sudo do-release-upgrade
43
+
44
+ # Directory Structure
45
+
46
+ Ensure you create these directories:
47
+
48
+ mkdir ~/backups
49
+ mkdir ~/packages
50
+
51
+ Use the ~/packages directory created above when building
52
+ packages.
53
+
54
+ # Passwordless Login Setup
55
+
56
+ Ensure you have a key installed for this user. You're about
57
+ to lose access to the root account.
58
+
59
+ > cd ~
60
+ > mkdir .ssh
61
+ > chmod 700 .ssh
62
+ > touch ~/.ssh/authorized_keys
63
+ > chmod 644 ~/.ssh/authorized_keys
64
+ > vi ~/.ssh/authorized_keys
65
+
66
+ Paste in your .pub contents. Save the file.
67
+
68
+ Test the machine to verify that you can do a passwordless login.
69
+
70
+ Then you are ready to turn off password logins.
71
+
72
+ # Disable Password Logins
73
+
74
+ **Requires** that you've completed Passwordless Login Setup, or you'll
75
+ lose control of your machine.
76
+
77
+ > sudo vi /etc/ssh/sshd_config
78
+
79
+ ...
80
+
81
+ PubkeyAuthentication yes
82
+ ...
83
+ ChallengeResponseAuthentication no
84
+ ...
85
+ PasswordAuthentication no # you'll likely change this line.
86
+
87
+ You may further set up this machine for your purposes.
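One step the guide leaves implicit: the sshd_config changes only take effect once the SSH daemon has been restarted. On Ubuntu 14.04:

    > sudo service ssh restart

Keep your current session open and confirm key-based login from a second terminal before logging out.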
--- /dev/null
+++ b/lib/cknife/backgrounded_polling.rb
@@ -0,0 +1,71 @@
1
+ require 'active_support/concern'
2
+
3
+ # mixin client must define:
4
+ #
5
+ # before_poll
6
+ # handle_poll_result(response, request, result)
7
+ # target_endpoint
8
+ #
9
+ # And these fields:
10
+ #
11
+ # last_error
12
+ # last_polled_at
13
+ # active
14
+ # consecutive_error_count
15
+ #
16
+ # And implement a loop that calls poll_background.
17
+ # This is optional:
18
+ #
19
+ # payload
20
+ #
21
+ module CKnife
22
+ module BackgroundedPolling
23
+ extend ActiveSupport::Concern
24
+
25
+ class IneligibleToPoll < StandardError
26
+ end
27
+
28
+ BACKGROUND_POLL = 'background_poll'
29
+
30
+ included do
31
+ def payload
32
+ {}
33
+ end
34
+
35
+ def poll_background
36
+ if active && (last_polled_at.nil? || (last_polled_at < Time.now - 15.minutes))
37
+ before_poll
38
+ self.last_error = ""
39
+
40
+ begin
41
+ result = RestClient.post(target_endpoint, payload) do |response, request, result|
42
+ if ![200, 201].include?(response.net_http_res.code.to_i)
43
+ self.last_error = "Unexpected HTTP Result: #{response.net_http_res.code.to_i}"
44
+ else
45
+ handle_poll_result(response, request, result)
46
+ end
47
+ end
48
+ rescue => e
49
+ self.last_error = e.message
50
+ end
51
+
52
+ if !last_error.blank?
53
+ self.consecutive_error_count += 1
54
+ self.active = false if consecutive_error_count >= Repetition::MAX_CONSECUTIVE
55
+ puts "#{Time.now}: Failed to ping home url. Last error: #{last_error}."
56
+ else
57
+ self.consecutive_error_count = 0
58
+ puts "#{Time.now}: Pinged home url with result #{result}."
59
+ end
60
+
61
+ self.last_polled_at = Time.now
62
+ end
63
+ end
64
+
65
+ def reset_last_poll
66
+ self.last_polled_at = Time.now
67
+ end
68
+ end
69
+
70
+ end
71
+ end
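The comment header above spells out the contract an including class must meet. The gem's own includer is presumably lib/cknife/monitor.rb (listed in the gemspec but not shown in this diff). For reference, a minimal sketch of a class that would satisfy that contract; every name here is hypothetical:

    require 'socket'
    require 'active_support/all'
    require 'rest-client'
    require 'cknife/backgrounded_polling'
    require 'cknife/repetition' # expected to define Repetition::MAX_CONSECUTIVE (not shown in this diff)

    # Hypothetical includer satisfying the BackgroundedPolling contract.
    class HeartbeatClient
      include CKnife::BackgroundedPolling

      attr_accessor :last_error, :last_polled_at, :active, :consecutive_error_count

      def initialize(endpoint)
        @endpoint = endpoint
        @active = true
        @consecutive_error_count = 0
      end

      # Required by the concern.
      def target_endpoint
        @endpoint
      end

      def before_poll
        # e.g. reload configuration before each heartbeat
      end

      def handle_poll_result(response, request, result)
        # inspect the server's answer to the heartbeat here
      end

      # Optional override; the concern defaults to an empty payload hash.
      def payload
        { 'hostname' => Socket.gethostname }
      end

      # The concern throttles itself to one POST per 15 minutes, so the
      # surrounding loop only needs to call poll_background on an interval.
      def run
        loop do
          poll_background
          sleep 60
        end
      end
    end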
--- /dev/null
+++ b/lib/cknife/cknife_aws.rb
@@ -0,0 +1,572 @@
1
+ require 'fog/aws'
2
+ require 'thor'
3
+ require 'active_support/all'
4
+ require 'zlib'
5
+ require 'digest/md5'
6
+ require 'pathname'
7
+
8
+ class CKnifeAws < Thor
9
+
10
+ FILE_BUFFER_SIZE = 10.megabytes
11
+ LOCAL_MOD_KEY = "x-amz-meta-mtime"
12
+ EPSILON = 1.second
13
+
14
+ no_tasks do
15
+
16
+ def config
17
+ return @config if @config
18
+
19
+ @config = {
20
+ :key => ENV["KEY"] || ENV['AMAZON_ACCESS_KEY_ID'],
21
+ :secret => ENV["SECRET"] || ENV['AMAZON_SECRET_ACCESS_KEY']
22
+ }
23
+
24
+ config_file = nil
25
+ Pathname.new(Dir.getwd).tap do |here|
26
+ config_file = [["cknife.yml"], ["tmp", "cknife.yml"]].map { |args|
27
+ here.join(*args)
28
+ }.select { |path|
29
+ File.exists?(path)
30
+ }.first
31
+ end
32
+
33
+ if config_file
34
+ begin
35
+ @config.merge!(YAML.load(config_file.read).symbolize_keys!)
36
+ rescue
37
+ say ("Found, but could not parse config: #{config_file}")
38
+ end
39
+ end
40
+
41
+ @config
42
+ end
43
+
44
+ def fog_opts
45
+ opts = {
46
+ :provider => 'AWS',
47
+ :aws_access_key_id => config[:key],
48
+ :aws_secret_access_key => config[:secret]
49
+ }
50
+ opts.merge!({ :region => options[:region] }) if !options[:region].blank?
51
+ opts
52
+ end
53
+
54
+ def fog_storage
55
+ return @storage if @storage
56
+ @storage = Fog::Storage.new(fog_opts)
57
+ begin
58
+ @storage.directories.count # test login
59
+ rescue Excon::Errors::Forbidden => e
60
+ say("Received Forbidden error while accessing account info. Is your key/secret correct?")
61
+ raise SystemExit
62
+ end
63
+ @storage
64
+ end
65
+
66
+ def fog_compute
67
+ @compute ||= Fog::Compute.new(fog_opts)
68
+ end
69
+
70
+ def fog_cdn
71
+ @cdn ||= Fog::CDN.new(fog_opts)
72
+ end
73
+
74
+ def show_buckets
75
+ fog_storage.directories.sort { |a,b| a.key <=> b.key }.each { |b| puts "#{b.key}" }
76
+ end
77
+
78
+ def show_servers
79
+ fog_compute.servers.sort { |a,b| a.key_name <=> b.key_name }.each do |s|
80
+ puts "#{s.tags['Name']} (state: #{s.state}): id=#{s.id} keyname=#{s.key_name} dns=#{s.dns_name} flavor=#{s.flavor_id}"
81
+ end
82
+ end
83
+
84
+ def show_cdns
85
+ puts fog_cdn.get_distribution_list.body['DistributionSummary'].to_yaml
86
+ end
87
+
88
+ def with_bucket(bucket_name)
89
+ d = fog_storage.directories.select { |d| d.key == bucket_name }.first
90
+ if d.nil?
91
+ say ("Could not find bucket with name #{bucket_name}")
92
+ return
93
+ end
94
+
95
+ say ("Found bucket named #{bucket_name}")
96
+ yield d
97
+ end
98
+
99
+ def s3_download(s3_file)
100
+ dir_path = Pathname.new(s3_file.key).dirname
101
+ dir_path.mkpath
102
+ File.open(s3_file.key, "w") do |f|
103
+ f.write s3_file.body
104
+ end
105
+ end
106
+
107
+
108
+ def content_hash(file)
109
+ md5 = Digest::MD5.new
110
+
111
+ while !file.eof?
112
+ md5.update(file.read(FILE_BUFFER_SIZE))
113
+ end
114
+
115
+ md5.hexdigest
116
+ end
117
+
118
+ def fresh_file_upload(to_upload, d, key, is_public)
119
+ File.open(to_upload) do |localfile|
120
+ file = d.files.create(
121
+ :key => key,
122
+ :public => is_public,
123
+ :body => ""
124
+ )
125
+ file.metadata = { LOCAL_MOD_KEY => localfile.mtime.to_s }
126
+ file.multipart_chunk_size = FILE_BUFFER_SIZE # creates multipart_save
127
+ file.body = localfile
128
+ file.save
129
+ end
130
+ end
131
+
132
+ def n_file_heads(directory, glob=nil, max=30)
133
+ found = []
134
+
135
+ n = 0
136
+ directory.files.each do |f|
137
+ if glob.nil? || File.fnmatch(glob, f.key)
138
+ found.push(directory.files.head(f.key))
139
+ break if n >= max
140
+ n += 1
141
+ end
142
+ end
143
+
144
+ found
145
+ end
146
+ end
147
+
148
+ desc "list_servers", "Show all servers"
149
+ def list_servers
150
+ show_servers
151
+ end
152
+
153
+ desc "start_server [SERVER_ID]", "Start a given EC2 server"
154
+ def start_server(server_id)
155
+ s = fog_compute.servers.select { |s| s.id == server_id}.first
156
+ if s
157
+ say("found server. starting/resuming. #{s.id}")
158
+ s.start
159
+ show_servers
160
+ else
161
+ say("no server with that id found. nothing done.")
162
+ end
163
+ end
164
+
165
+ desc "stop_server [SERVER_ID]", "Stop a given EC2 server (does not terminate it)"
166
+ def stop_server(server_id)
167
+ s = fog_compute.servers.select { |s| s.id == server_id}.first
168
+ if s
169
+ say("found server. stopping. #{s.id}")
170
+ s.stop
171
+ else
172
+ say("no server with that id found. nothing done.")
173
+ end
174
+ end
175
+
176
+ desc "list_cloudfront", "List cloudfront distributions (CDNs)"
177
+ def list_cloudfront
178
+ show_cdns
179
+ end
180
+
181
+ desc "create_cloudfront [BUCKET_NAME]", "Create a cloudfront distribution (a CDN)"
182
+ def create_cloudfront(bucket_id)
183
+ fog_cdn.post_distribution({
184
+ 'S3Origin' => {
185
+ 'DNSName' => "#{bucket_id}.s3.amazonaws.com"
186
+ },
187
+ 'Enabled' => true
188
+ })
189
+
190
+ show_cdns
191
+ end
192
+
193
+ desc "list", "Show all buckets"
194
+ method_options :region => "us-east-1"
195
+ def list
196
+ show_buckets
197
+ end
198
+
199
+ desc "afew [BUCKET_NAME]", "Show first 5 files in bucket"
200
+ method_options :count => "5"
201
+ method_options :glob => "*"
202
+ def afew(bucket_name)
203
+ d = fog_storage.directories.select { |d| d.key == bucket_name }.first
204
+ if d.nil?
205
+ say ("Found no bucket by name #{bucket_name}")
206
+ return
207
+ end
208
+
209
+ found = n_file_heads(d, options[:glob], options[:count].to_i)
210
+
211
+ unit_to_mult = {
212
+ 'B' => 1,
213
+ 'K' => 2**10,
214
+ 'M' => 2**20,
215
+ 'G' => 2**30
216
+ }
217
+
218
+ found.map { |f|
219
+ matching = unit_to_mult.keys.select { |k|
220
+ f.content_length >= unit_to_mult[k]
221
+ }.last
222
+
223
+ [f.key,
224
+ "#{f.content_length == 0 ? 0 : (f.content_length.to_f / unit_to_mult[matching]).round(2)}#{matching}",
225
+ f.content_type,
226
+ f.last_modified
227
+ ]
228
+ }.tap do |tabular|
229
+ print_table(tabular, :indent => 2)
230
+ end
231
+
232
+ end
233
+
234
+ desc "download [BUCKET_NAME]", "Download all files in a bucket to CWD. Or one file."
235
+ method_options :region => "us-east-1"
236
+ method_options :one => nil
237
+ def download(bucket_name)
238
+ with_bucket bucket_name do |d|
239
+ if options[:one].nil?
240
+ if yes?("Are you sure you want to download all files into the CWD?", :red)
241
+ d.files.each do |s3_file|
242
+ say("Creating path for and downloading #{s3_file.key}")
243
+ s3_download(s3_file)
244
+ end
245
+ else
246
+ say("No action taken.")
247
+ end
248
+ else
249
+ s3_file = d.files.get(options[:one])
250
+ if !s3_file.nil?
251
+ s3_download(s3_file)
252
+ else
253
+ say("Could not find #{options[:one]}. No action taken.")
254
+ end
255
+ end
256
+ end
257
+ end
258
+
259
+ desc "upsync [BUCKET_NAME] [DIRECTORY]", "Push local files matching glob PATTERN into bucket. Ignore unchanged files."
260
+ method_options :public => false
261
+ method_options :region => "us-east-1"
262
+ method_options :noprompt => nil
263
+ method_options :glob => "**/*"
264
+ method_options :backups_retain => false
265
+ method_options :days_retain => 30
266
+ method_options :months_retain => 3
267
+ method_options :weeks_retain => 5
268
+ method_options :dry_run => false
269
+ def upsync(bucket_name, directory)
270
+
271
+ say("This is a dry run.") if options[:dry_run]
272
+
273
+ if !File.exists?(directory) || !File.directory?(directory)
274
+ say("'#{directory}' does not exist or is not a directory.")
275
+ return
276
+ end
277
+
278
+ target_root = Pathname.new(directory)
279
+
280
+ files = Dir.glob(target_root.join(options[:glob])).select { |f| !File.directory?(f) }.map(&:to_s)
281
+ if !options[:backups_retain] && files.count == 0
282
+ say("No files to upload and no backups retain requested.")
283
+ return
284
+ end
285
+
286
+ say("Found #{files.count} candidate file upload(s).")
287
+
288
+ spn = dn = sn = un = cn = 0
289
+ with_bucket bucket_name do |d|
290
+
291
+ # having a brain fart and can't get this to simplify
292
+ go = false
293
+ if options[:noprompt] != nil
294
+ go = true
295
+ else
296
+ go = yes?("Proceed?", :red)
297
+ end
298
+
299
+ if go
300
+ time_marks = []
301
+ immediate_successors = {}
302
+ if options[:backups_retain]
303
+ # inclusive lower bound, exclusive upper bound
304
+ time_marks = []
305
+ Time.now.beginning_of_day.tap do |start|
306
+ options[:days_retain].times do |i|
307
+ time_marks.push(start - i.days)
308
+ end
309
+ end
310
+
311
+ Time.now.beginning_of_week.tap do |start|
312
+ options[:weeks_retain].times do |i|
313
+ time_marks.push(start - i.weeks)
314
+ end
315
+ end
316
+
317
+ Time.now.beginning_of_month.tap do |start|
318
+ options[:months_retain].times do |i|
319
+ time_marks.push(start - i.months)
320
+ end
321
+ end
322
+
323
+ time_marks.each do |tm|
324
+ files.each do |to_upload|
325
+ File.open(to_upload) do |localfile|
326
+ if localfile.mtime >= tm && (immediate_successors[tm].nil? || localfile.mtime < immediate_successors[tm][:last_modified])
327
+ immediate_successors[tm] = { :local_path => to_upload, :last_modified => localfile.mtime }
328
+ end
329
+ end
330
+ end
331
+ end
332
+ end
333
+
334
+ # don't pointlessly upload large files if we already know we're going to delete them!
335
+ if options[:backups_retain]
336
+ immediate_successors.values.map { |h| h[:local_path] }.tap do |kept_files|
337
+ before_reject = files.count # blah...lame
338
+ files.reject! { |to_upload| !kept_files.include?(to_upload) }
339
+ sn += before_reject - files.count
340
+
341
+ say("Found #{files.count} file(s) that meet backups retention criteria for upload. Comparing against bucket...")
342
+
343
+ end
344
+ end
345
+
346
+ files.each do |to_upload|
347
+ say("#{to_upload} (no output if skipped)...")
348
+ k = File.basename(to_upload)
349
+
350
+ existing_head = d.files.head(k)
351
+
352
+ time_mismatch = false
353
+ content_hash_mismatched = false
354
+ File.open(to_upload) do |localfile|
355
+ time_mismatch = !existing_head.nil? && (existing_head.metadata[LOCAL_MOD_KEY].nil? || (Time.parse(existing_head.metadata[LOCAL_MOD_KEY]) - localfile.mtime).abs > EPSILON)
356
+ if time_mismatch
357
+ content_hash_mismatched = existing_head.etag != content_hash(localfile)
358
+ end
359
+ end
360
+
361
+ if existing_head && time_mismatch && content_hash_mismatched
362
+ if !options[:dry_run]
363
+ File.open(to_upload) do |localfile|
364
+ existing_head.metadata = { LOCAL_MOD_KEY => localfile.mtime.to_s }
365
+ existing_head.body = localfile
366
+ existing_head.multipart_chunk_size = FILE_BUFFER_SIZE # creates multipart_save
367
+ existing_head.save
368
+ end
369
+ end
370
+ say("updated.")
371
+ un += 1
372
+ elsif existing_head && time_mismatch
373
+ if !options[:dry_run]
374
+ File.open(to_upload) do |localfile|
375
+ existing_head.metadata = { LOCAL_MOD_KEY => localfile.mtime.to_s }
376
+ existing_head.save
377
+ end
378
+ end
379
+ say("updated.")
380
+ un += 1
381
+ elsif existing_head.nil?
382
+ if !options[:dry_run]
383
+ fresh_file_upload(to_upload, d, k, options[:public])
384
+ end
385
+ say("created.")
386
+ cn += 1
387
+ else
388
+ sn += 1
389
+ # skipped
390
+ end
391
+ end
392
+
393
+
394
+ if options[:backups_retain]
395
+
396
+ # This array of hashes is computed because we need to do
397
+ # nested for loops of M*N complexity, where M=time_marks
398
+ # and N=files. We also need to do an remote get call to
399
+ # fetch the metadata of all N remote files (d.files.each
400
+ # will not do this). so, for performance sanity, we cache
401
+ # all the meta data for all the N files.
402
+ file_keys_modtimes = []
403
+ d.files.each { |f|
404
+ if File.fnmatch(options[:glob], f.key)
405
+ existing_head = d.files.head(f.key)
406
+ md = existing_head.metadata
407
+ file_keys_modtimes.push({
408
+ :key => f.key,
409
+ :last_modified => md[LOCAL_MOD_KEY] ? Time.parse(md[LOCAL_MOD_KEY]) : f.last_modified,
410
+ :existing_head => existing_head
411
+ })
412
+ end
413
+ }
414
+
415
+ say("#{file_keys_modtimes.length} file(s) found to consider for remote retention or remote deletion.")
416
+
417
+ # this generates as many 'kept files' as there are time marks...which seems wrong.
418
+ immediate_successors = {}
419
+ time_marks.each do |tm|
420
+ file_keys_modtimes.each do |fkm|
421
+ if fkm[:last_modified] >= tm && (immediate_successors[tm].nil? || fkm[:last_modified] < immediate_successors[tm][:last_modified])
422
+ immediate_successors[tm] = fkm
423
+ end
424
+ end
425
+ end
426
+
427
+ immediate_successors.values.map { |v| v[:key] }.tap do |kept_keys|
428
+ file_keys_modtimes.each do |fkm|
429
+ if kept_keys.include?(fkm[:key])
430
+ say("Remote retained #{fkm[:key]}.")
431
+ spn += 1
432
+ else
433
+ fkm[:existing_head].destroy if !options[:dry_run]
434
+ say("Remote deleted #{fkm[:key]}.")
435
+ dn += 1
436
+ end
437
+ end
438
+ end
439
+ end
440
+ else
441
+ say ("No action taken.")
442
+ end
443
+ end
444
+ say("Done. #{cn} created. #{un} updated. #{sn} local skipped. #{dn} deleted remotely. #{spn} retained remotely.")
445
+ end
446
+
447
+ desc "fdelete [BUCKET_NAME] [FILE_NAME]", "Delete a file in a bucket."
448
+ method_options :noprompt => false
449
+ method_options :region => "us-east-1"
450
+ def fdelete(bucket_name, file_name)
451
+ d = fog_storage.directories.select { |d| d.key == bucket_name }.first
452
+
453
+ if d.nil?
454
+ say ("Found no bucket by name #{bucket_name}")
455
+ return
456
+ end
457
+
458
+ f = d.files.select { |f| f.key == file_name }.first
459
+ if f.nil?
460
+ say("Found no file in #{d.key} having name #{file_name}.")
461
+ return
462
+ end
463
+
464
+ if options[:noprompt] || yes?("Are you sure you want to delete #{f.key} in #{d.key}?", :red)
465
+ f.destroy
466
+ say "Destroyed #{f.key} in #{d.key}."
467
+ else
468
+ say "No action taken."
469
+ end
470
+ end
471
+
472
+ desc "fupload [BUCKET_NAME] [LOCAL_FILE]", "Upload a file to a bucket. Path to file is ignored."
473
+ method_options :public => false
474
+ method_options :region => "us-east-1"
475
+ def fupload(bucket_name, file_name)
476
+ d = fog_storage.directories.select { |d| d.key == bucket_name }.first
477
+
478
+ if d.nil?
479
+ say ("Found no bucket by name #{bucket_name}")
480
+ return
481
+ end
482
+
483
+ if !File.exists?(file_name)
484
+ say("Found no such file #{file_name} on the local disk.")
485
+ return
486
+ end
487
+
488
+ key = File.basename(file_name)
489
+ f = d.files.select { |f| f.key == key }.first
490
+ if !f.nil?
491
+ if !yes?("There is already a file named #{key} in #{d.key}. Do you want to overwrite it with this upload?", :red)
492
+ say("No action taken.")
493
+ return
494
+ end
+ f.destroy
+ say "Destroyed #{f.key} in #{d.key}."
495
+ end
496
+
497
+ fresh_file_upload(file_name, d, key, options[:public])
498
+ say "Uploaded #{key} to #{d.key}."
499
+ end
500
+
501
+ desc "delete [BUCKET_NAME]", "Destroy a bucket"
502
+ method_options :noprompt => false
503
+ method_options :region => "us-east-1"
504
+ method_options :deep => false
505
+ def delete(bucket_name)
506
+ d = fog_storage.directories.select { |d| d.key == bucket_name }.first
507
+
508
+ if d.nil?
509
+ say ("Found no bucket by name #{bucket_name}")
510
+ return
511
+ end
512
+
513
+ if options[:noprompt] || yes?("Are you sure you want to delete this bucket #{d.key}?", :red)
514
+
515
+ if d.files.length > 0
516
+ if !options[:deep]
517
+ say "Bucket has #{d.files.length} files. Please empty before destroying."
518
+ return
519
+ end
520
+
521
+ found = n_file_heads(d)
522
+ while found.length > 0
523
+ found.each do |f|
524
+ f.destroy
525
+ say("Deleted file #{f.key}.")
526
+ end
527
+ found = n_file_heads(d)
528
+ end
529
+ end
530
+
531
+ d.destroy
532
+ say "Destroyed bucket named #{bucket_name}."
533
+ show_buckets
534
+ else
535
+ say "No action taken."
536
+ end
537
+
538
+ end
539
+
540
+ desc "create [BUCKET_NAME]", "Create a bucket"
541
+ method_options :region => "us-east-1"
542
+ def create(bucket_name = nil)
543
+ if !bucket_name
544
+ puts "No bucket name given."
545
+ return
546
+ end
547
+
548
+ fog_storage.directories.create(
549
+ :key => bucket_name,
550
+ :location => options[:region]
551
+ )
552
+
553
+ puts "Created bucket #{bucket_name}."
554
+ show_buckets
555
+ end
556
+
557
+ desc "show [BUCKET_NAME]", "Show info about bucket"
558
+ method_options :region => "us-east-1"
559
+ def show(bucket_name = nil)
560
+ if !bucket_name
561
+ puts "No bucket name given."
562
+ return
563
+ end
564
+
565
+ with_bucket(bucket_name) do |d|
566
+ say "#{d}: "
567
+ say d.location
568
+ end
569
+ end
570
+
571
+ end
572
+
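For orientation: cknife_aws.rb is the bulk of the release, a Thor CLI backed by fog-aws that reads its credentials from the KEY/SECRET (or AMAZON_ACCESS_KEY_ID/AMAZON_SECRET_ACCESS_KEY) environment variables, or from a cknife.yml in the working directory. The installed cknifeaws binary presumably just calls CKnifeAws.start, as bin/cknifemon does for its class, so the tasks can also be driven from Ruby. A short sketch, with a placeholder bucket name:

    require 'cknife/cknife_aws'

    # Same dispatch the `cknifeaws` executable performs with ARGV.
    CKnifeAws.start(%w[list])                                  # list buckets
    CKnifeAws.start(%w[afew my-backups --count 10])            # peek at a bucket ("my-backups" is a placeholder)
    CKnifeAws.start(%w[upsync my-backups ./backups --dry-run]) # report what an upload sync would change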