cult 0.1.1.pre
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +9 -0
- data/Gemfile +4 -0
- data/LICENSE.txt +21 -0
- data/README.md +240 -0
- data/Rakefile +6 -0
- data/cult +1 -0
- data/cult.gemspec +38 -0
- data/doc/welcome.txt +1 -0
- data/exe/cult +86 -0
- data/lib/cult/artifact.rb +45 -0
- data/lib/cult/cli/common.rb +265 -0
- data/lib/cult/cli/console_cmd.rb +124 -0
- data/lib/cult/cli/cri_extensions.rb +84 -0
- data/lib/cult/cli/init_cmd.rb +116 -0
- data/lib/cult/cli/load.rb +26 -0
- data/lib/cult/cli/node_cmd.rb +205 -0
- data/lib/cult/cli/provider_cmd.rb +123 -0
- data/lib/cult/cli/role_cmd.rb +149 -0
- data/lib/cult/cli/task_cmd.rb +140 -0
- data/lib/cult/commander.rb +103 -0
- data/lib/cult/config.rb +22 -0
- data/lib/cult/definition.rb +112 -0
- data/lib/cult/driver.rb +88 -0
- data/lib/cult/drivers/common.rb +192 -0
- data/lib/cult/drivers/digital_ocean_driver.rb +179 -0
- data/lib/cult/drivers/linode_driver.rb +282 -0
- data/lib/cult/drivers/load.rb +26 -0
- data/lib/cult/drivers/script_driver.rb +27 -0
- data/lib/cult/drivers/vultr_driver.rb +217 -0
- data/lib/cult/named_array.rb +129 -0
- data/lib/cult/node.rb +62 -0
- data/lib/cult/project.rb +169 -0
- data/lib/cult/provider.rb +134 -0
- data/lib/cult/role.rb +213 -0
- data/lib/cult/skel.rb +85 -0
- data/lib/cult/task.rb +64 -0
- data/lib/cult/template.rb +92 -0
- data/lib/cult/transferable.rb +61 -0
- data/lib/cult/version.rb +3 -0
- data/lib/cult.rb +4 -0
- data/skel/.cultconsolerc +4 -0
- data/skel/.cultrc.erb +29 -0
- data/skel/README.md.erb +22 -0
- data/skel/keys/.keep +0 -0
- data/skel/nodes/.keep +0 -0
- data/skel/providers/.keep +0 -0
- data/skel/roles/all/role.json +4 -0
- data/skel/roles/all/tasks/00000-do-something-cool +27 -0
- data/skel/roles/bootstrap/files/cult-motd +45 -0
- data/skel/roles/bootstrap/role.json +4 -0
- data/skel/roles/bootstrap/tasks/00000-set-hostname +22 -0
- data/skel/roles/bootstrap/tasks/00001-add-cult-user +21 -0
- data/skel/roles/bootstrap/tasks/00002-install-cult-motd +9 -0
- metadata +183 -0
data/lib/cult/commander.rb
ADDED
@@ -0,0 +1,103 @@
+require 'net/ssh'
+require 'net/scp'
+require 'shellwords'
+require 'rainbow'
+require 'rubygems/package'
+require 'rubygems/package/tar_writer'
+
+module Cult
+
+  class Bundle
+    attr_reader :tar
+    def initialize(io, &block)
+      @tar = Gem::Package::TarWriter.new(io)
+      if block_given?
+        begin
+          yield self
+        ensure
+          @tar.close
+          @tar = nil
+        end
+      end
+    end
+
+    def add_file(project, role, node, transferable)
+      data = transferable.contents(project, role, node, pwd: role.path)
+      tar.add_file(transferable.remote_path, transferable.file_mode) do |io|
+        io.write(data)
+      end
+    end
+  end
+
+  class Commander
+    attr_reader :project
+    attr_reader :node
+
+    def initialize(project:, node:)
+      @project = project
+      @node = node
+    end
+
+    def esc(s)
+      Shellwords.escape(s)
+    end
+
+    def send_bundle(ssh, role)
+      io = StringIO.new
+      Bundle.new(io) do |bundle|
+        puts "Building bundle..."
+        role.build_order.each do |r|
+          (r.artifacts + r.tasks).each do |transferable|
+            bundle.add_file(project, r, node, transferable)
+          end
+        end
+      end
+      filename = "cult-#{role.name}.tar"
+      puts "Uploading bundle #{filename}..."
+
+      scp = Net::SCP.new(ssh)
+      io.rewind
+      scp.upload!(io, filename)
+      ssh.exec! "tar -xf #{esc(filename)} && rm #{esc(filename)}"
+    end
+
+    def install!(role)
+      connect(user: role.definition['user']) do |ssh|
+        send_bundle(ssh, role)
+
+        role.build_order.each do |r|
+          puts "Installing role: #{Rainbow(r.name).blue}"
+          working_dir = r.remote_path
+          r.tasks.each do |t|
+            puts "Executing: #{t.remote_path}"
+            task_bin = r.relative_path(t.path)
+            res = ssh.exec! <<~BASH
+              cd #{esc(working_dir)}; \
+              if [ ! -f ./#{esc(task_bin)}.success ]; then \
+                touch ./#{esc(task_bin)}.attempt && \
+                ./#{esc(task_bin)} && \
+                mv ./#{esc(task_bin)}.attempt ./#{esc(task_bin)}.success; \
+              fi
+            BASH
+            unless res.empty?
+              puts Rainbow(res.gsub(/^/, ' ')).darkgray.italic
+            end
+          end
+        end
+      end
+    end
+
+    def bootstrap!
+      bootstrap_role = CLI.fetch_item('bootstrap', from: Role)
+      install!(bootstrap_role)
+    end
+
+    def connect(user:, &block)
+      puts "Connecting with user=#{user}"
+      Net::SSH.start(node.host, user) do |ssh|
+        yield ssh
+      end
+    end
+  end
+
+end
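Cult::Bundle above only asks its transferable argument for remote_path, file_mode, and contents, so the tar stream can be exercised in isolation. A minimal sketch, assuming the gem and its dependencies (net-ssh, net-scp, rainbow) are installed; FakeRole and FakeTask are hypothetical stand-ins, not part of the package:

    require 'stringio'
    require 'cult/commander'   # defines Cult::Bundle and Cult::Commander

    # Hypothetical stand-ins that quack like a role and a task/artifact.
    FakeRole = Struct.new(:path)
    FakeTask = Struct.new(:remote_path, :file_mode) do
      def contents(*args, **opts)
        "#!/bin/sh\necho hello\n"
      end
    end

    io = StringIO.new
    Cult::Bundle.new(io) do |bundle|
      bundle.add_file(nil, FakeRole.new('.'), nil,
                      FakeTask.new('tasks/00000-hello', 0755))
    end
    io.string   # => an in-memory tar stream, ready for Net::SCP#upload!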
data/lib/cult/config.rb
ADDED
@@ -0,0 +1,22 @@
+module Cult
+  module_function
+  def project=(project)
+    @project = project
+  end
+
+  def project
+    @project
+  end
+
+  # This is a mode we're considering: when it's set, certain objects
+  # will be frozen when they're created, and creating a "logically the same"
+  # instance (e.g., role with same name in the same project) will return
+  # the actual same object each time.
+  #
+  # I'm not sure if we'll commit to this, but having this toggle lets us
+  # easily see what breaks.
+  def immutable?
+    ENV['CULT_IMMUTABLE'] == '1'
+  end
+
+end
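The CULT_IMMUTABLE toggle above is read straight from the environment, so it can be flipped per invocation. A quick sketch, assuming the gem is on the load path:

    require 'cult/config'

    ENV['CULT_IMMUTABLE'] = '1'
    Cult.immutable?    # => true

    ENV.delete('CULT_IMMUTABLE')
    Cult.immutable?    # => false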
data/lib/cult/definition.rb
ADDED
@@ -0,0 +1,112 @@
+require 'yaml'
+require 'json'
+require 'forwardable'
+
+require 'cult/template'
+
+module Cult
+  class Definition
+    attr_reader :object
+    attr_reader :bag
+
+    extend Forwardable
+    def_delegators :object, :definition_parameters, :definition_path,
+                            :definition_parents
+
+
+    def initialize(object)
+      @object = object
+    end
+
+
+    def inspect
+      "\#<#{self.class.name} " +
+        "object: #{object.inspect}, " +
+        "params: #{definition_parameters}, " +
+        "parents: #{definition_parents}, " +
+        "direct_values: #{bag}>"
+    end
+    alias_method :to_s, :inspect
+
+
+    def filenames
+      Array(definition_path).map do |dp|
+        attempt = [ "#{dp}.yaml", "#{dp}.yml", "#{dp}.json" ]
+        existing = attempt.select do |filename|
+          File.exist?(filename)
+        end
+        if existing.size > 1
+          raise RuntimeError, "conflicting definition files: #{existing}"
+        end
+        existing[0]
+      end.compact
+    end
+
+
+    def decoder_for(filename)
+      @decoder_for ||= begin
+        case filename
+          when nil
+            nil
+          when /\.json\z/
+            JSON.method(:parse)
+          when /\.ya?ml\z/
+            YAML.method(:safe_load)
+          else
+            fail RuntimeError, "No decoder for file type: #{filename}"
+        end
+      end
+    end
+
+
+    def bag
+      @bag ||= begin
+        result = {}
+        filenames.each do |filename|
+          erb = Template.new(definition_parameters)
+          contents = erb.process(File.read(filename))
+          result.merge! decoder_for(filename).call(contents)
+        end
+        result
+      end
+    end
+    alias_method :to_h, :bag
+
+
+    def direct(k)
+      fail "Use string keys" unless k.is_a?(String)
+      bag[k]
+    end
+
+
+    def [](k)
+      fail "Use string keys" unless k.is_a?(String)
+      if bag.key?(k)
+        bag[k]
+      else
+        parent_responses = definition_parents.map do |p|
+          [p, p.definition[k]]
+        end.reject do |k, v|
+          v.nil?
+        end
+        consensus = parent_responses.group_by(&:last)
+        if consensus.empty?
+          return nil
+        elsif consensus.size != 1
+          msg = "#{object.inspect}: I didn't have key '#{k}', and " +
+                "my parents had conflicting answers: " +
+                "[answer, parents]: #{consensus}"
+          fail KeyError, msg
+        end
+        consensus.keys[0]
+      end
+    end
+
+
+    def []=(k, v)
+      fail "Use string keys" unless k.is_a?(String)
+      bag[k] = v
+    end
+
+  end
+end
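The notable behavior in Definition is the parent fallback in #[]: a key missing from an object's own bag is answered by its definition_parents, and they must agree. A minimal sketch, assuming the gem is installed; FakeRole is a hypothetical stand-in that just responds to the three delegated methods:

    require 'cult/definition'

    # Hypothetical stand-in with no definition files on disk, so its bag
    # starts out empty and values must come from []= or from parents.
    FakeRole = Struct.new(:name, :definition_parents) do
      attr_accessor :definition
      def definition_parameters; {}; end
      def definition_path; "/nonexistent/#{name}"; end
    end

    parent = FakeRole.new('all', [])
    parent.definition = Cult::Definition.new(parent)
    parent.definition['user'] = 'cult'

    child = FakeRole.new('web', [parent])
    child.definition = Cult::Definition.new(child)

    child.definition['user']   # => "cult", inherited from the single agreeing parent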
data/lib/cult/driver.rb
ADDED
@@ -0,0 +1,88 @@
+require 'cult/definition'
+require 'cult/drivers/common'
+
+module Cult
+  class Driver
+
+    # This is raised when a Driver is instantiated, but the required
+    # gems are not installed.
+    class GemNeededError < RuntimeError
+      attr_reader :gems
+      def initialize(gems)
+        @gems = gems
+        super(message)
+      end
+
+      def message
+        "gems required: #{gems.inspect}"
+      end
+    end
+
+
+    class << self
+      attr_accessor :required_gems
+
+      def driver_name
+        name.split('::')
+            .last
+            .sub(/Driver\z/, '')
+            .gsub(/([a-z])([A-Z])/, '\1-\2')
+            .downcase
+      end
+
+
+      def inspect
+        self == Driver ? super : "#{super}/#{driver_name}"
+      end
+      alias_method :to_s, :inspect
+
+
+      def named_array_identifier
+        driver_name
+      end
+    end
+
+    def inspect
+      "\#<#{self.class.name} \"#{self.class.driver_name}\">"
+    end
+
+
+    def to_s
+      self.class.driver_name
+    end
+
+
+    # Attempts to load all of the required gems before doing any real work
+    def self.try_requires!
+      req = Array(required_gems).map do |gem|
+        begin
+          require gem
+          nil
+        rescue LoadError
+          gem
+        end
+      end.compact
+
+      unless req.empty?
+        fail GemNeededError.new(req)
+      end
+    end
+
+
+    # These are helpers that most implementations will need, but Driver itself
+    # doesn't depend on. Things like exponential back-off, awaiting an SSH
+    # port to open, etc.
+    include ::Cult::Drivers::Common
+
+
+    def self.setup!
+      try_requires!
+    end
+
+
+    def self.new(api_key:)
+      try_requires!
+      super
+    end
+  end
+end
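Everything above is class-level plumbing, so a hypothetical third-party driver only needs to set required_gems and inherit; the naming helpers derive the rest from the class name. A sketch (ExampleCloudDriver and example_cloud_sdk are made-up names, not part of the package):

    require 'cult/driver'

    module Cult
      module Drivers
        class ExampleCloudDriver < ::Cult::Driver
          # Made-up gem; Driver.new and setup! call try_requires! first and
          # raise GemNeededError if it can't be required.
          self.required_gems = 'example_cloud_sdk'
        end
      end
    end

    Cult::Drivers::ExampleCloudDriver.driver_name
    # => "example-cloud"  (class name minus "Driver", dash-separated, downcased)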
data/lib/cult/drivers/common.rb
ADDED
@@ -0,0 +1,192 @@
+require 'socket'
+require 'net/ssh'
+
+module Cult
+  module Drivers
+
+    module Common
+      module ClassMethods
+        # Lets us write a method "something_map" that returns {'ident' => ...},
+        # and also get a function "something" that returns the keys.
+        def with_id_mapping(method_name)
+          new_method = method_name.to_s.sub(/_map\z/, '')
+          define_method(new_method) do
+            send(method_name).keys
+          end
+        end
+
+
+        def memoize(method_name)
+          old_method_name = "#{method_name}_unmemoized".to_sym
+
+          alias_method old_method_name, method_name
+
+          var_name = "@#{method_name}".to_sym
+
+          define_method(method_name) do
+            if !instance_variable_defined?(var_name)
+              instance_variable_set(var_name, send(old_method_name))
+            end
+            instance_variable_get(var_name)
+          end
+
+          define_method("#{method_name}_unmemo!") do
+            remove_instance_variable(var_name)
+          end
+        end
+      end
+
+
+      def self.included(cls)
+        cls.extend(ClassMethods)
+      end
+
+
+      # Works with with_id_mapping to convert a human-readable/normalized key
+      # to the id the backend service expects. Allows '=value' to force a
+      # literal value, and gives better error messages.
+      def fetch_mapped(name:, from:, key:)
+        # Allow for the override.
+        key = key.to_s
+        return key[1..-1] if key[0] == '='
+
+        begin
+          from.fetch(key)
+        rescue KeyError => e
+          raise ArgumentError, "Invalid #{name}: \"#{key}\". " +
+                               "Use \"=#{key}\" to force, or use one of: " +
+                               from.keys.inspect
+        end
+      end
+
+
+      def ssh_key_info(data: nil, file: nil)
+        if data.nil?
+          fail ArgumentError if file.nil?
+          data = File.read(file)
+        else
+          fail ArgumentError unless file.nil?
+        end
+
+        data = data.chomp
+        key = Net::SSH::KeyFactory.load_data_public_key(data, file)
+
+        fields = data.split(/ /)
+        return {
+          name: fields[-1],
+          fingerprint: key.fingerprint,
+          data: data,
+          file: file
+        }
+      end
+
+
+      # Enter this block once a node has been created. It makes sure it's
+      # destroyed if there's an error later in the procedure.
+      def rollback_on_error(id:, &block)
+        begin
+          yield
+        rescue Exception => e
+          begin
+            destroy!(id: id)
+          ensure
+            raise e
+          end
+        end
+      end
+
+
+      def slugify(s)
+        s.gsub(/[^a-z0-9]+/i, '-').gsub(/(^\-)|(-\z)/, '').downcase
+      end
+
+
+      def distro_name(s)
+        s = s.gsub(/\bx64\b/i, '')
+        # People sometimes add "LTS" to the name of Ubuntu LTS releases
+        s = s.gsub(/\blts\b/i, '') if s.match(/ubuntu/i)
+
+        # We don't particularly need the debian codename
+        s = s.gsub(/(\d)[\s-]+(\S+)/, '\1') if s.match(/^debian/i)
+        s
+      end
+
+
+      # Does back-off retrying. Defaults to not-exponential.
+      # The block must throw :done to signal it is done.
+      def backoff_loop(wait = 3, scale = 1.2, &block)
+        times = 0
+        total_wait = 0.0
+
+        catch :done do
+          loop do
+            yield times, total_wait
+            sleep wait
+            times += 1
+            total_wait += wait
+            wait *= scale
+          end
+        end
+      end
+
+
+      # Waits until SSH is available at host. "available" just means
+      # "listening"/accepting connections.
+      def await_ssh(host)
+        backoff_loop do
+          begin
+            sock = connect_timeout(host, 22, 1)
+            throw :done
+          rescue Errno::ETIMEDOUT, Errno::ECONNREFUSED
+            # Nothing, these are expected
+          ensure
+            sock.close if sock
+          end
+        end
+      end
+
+
+      # This should not be needed, but it is:
+      # https://spin.atomicobject.com/2013/09/30/socket-connection-timeout-ruby/
+      def connect_timeout(host, port, timeout = 5)
+        # Convert the passed host into structures the non-blocking calls
+        # can deal with
+        addr = Socket.getaddrinfo(host, nil)
+        sockaddr = Socket.pack_sockaddr_in(port, addr[0][3])
+
+        Socket.new(Socket.const_get(addr[0][0]), Socket::SOCK_STREAM, 0).tap do |socket|
+          socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
+
+          begin
+            # Initiate the socket connection in the background. If it doesn't
+            # fail immediately it will raise an IO::WaitWritable
+            # (Errno::EINPROGRESS) indicating the connection is in progress.
+            socket.connect_nonblock(sockaddr)
+
+          rescue IO::WaitWritable
+            # IO.select will block until the socket is writable or the timeout
+            # is exceeded - whichever comes first.
+            if IO.select(nil, [socket], nil, timeout)
+              begin
+                # Verify there is now a good connection
+                socket.connect_nonblock(sockaddr)
+              rescue Errno::EISCONN
+                # Good news everybody, the socket is connected!
+              rescue
+                # An unexpected exception was raised - the connection is no good.
+                socket.close
+                raise
+              end
+            else
+              # IO.select returns nil when the socket is not ready before
+              # timeout seconds have elapsed
+              socket.close
+              raise Errno::ETIMEDOUT
+            end
+          end
+        end
+      end
+
+    end
+  end
+end
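The memoize / with_id_mapping pair above is the convention the drivers lean on: define a *_map method returning {key => backend_id}, and the mixin caches it and adds a reader for just the keys. A small sketch, assuming the gem and net-ssh are installed; ExampleCatalog is hypothetical:

    require 'cult/drivers/common'

    class ExampleCatalog
      include Cult::Drivers::Common

      def sizes_map
        puts "(pretend this hits a remote API)"
        { 'small' => 'size-1', 'large' => 'size-2' }
      end
      memoize :sizes_map            # only computed once per instance
      with_id_mapping :sizes_map    # also defines #sizes => the keys
    end

    catalog = ExampleCatalog.new
    catalog.sizes                                                # => ["small", "large"]
    catalog.fetch_mapped(name: :size, from: catalog.sizes_map, key: 'small')    # => "size-1"
    catalog.fetch_mapped(name: :size, from: catalog.sizes_map, key: '=custom')  # => "custom"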
data/lib/cult/drivers/digital_ocean_driver.rb
ADDED
@@ -0,0 +1,179 @@
+require 'cult/driver'
+require 'cult/drivers/common'
+require 'net/ssh'
+require 'json'
+
+module Cult
+  module Drivers
+
+    class DigitalOceanDriver < ::Cult::Driver
+      self.required_gems = 'droplet_kit'
+
+      include Common
+
+      attr_reader :client
+
+      def initialize(api_key:)
+        @client = DropletKit::Client.new(access_token: api_key)
+      end
+
+
+      def sizes_map
+        client.sizes.all.to_a.map do |s|
+          [s.slug, s.slug]
+        end.to_h
+      end
+      memoize :sizes_map
+      with_id_mapping :sizes_map
+
+
+      def images_map
+        distros = %w(ubuntu coreos centos freebsd fedora debian).join '|'
+        re = /^(#{distros})\-.*\-x64$/
+        client.images.all.to_a.select do |image|
+          image.public && image.slug && image.slug.match(re)
+        end.map do |image|
+          [slugify(distro_name(image.slug)), image.slug]
+        end.to_h
+      end
+      memoize :images_map
+      with_id_mapping :images_map
+
+
+      def zones_map
+        client.regions.all.map do |zone|
+          [zone.slug, zone.slug]
+        end.to_h
+      end
+      memoize :zones_map
+      with_id_mapping :zones_map
+
+
+      def ssh_keys
+        client.ssh_keys.all.to_a.map(&:to_h)
+      end
+      memoize :ssh_keys
+
+
+      def upload_ssh_key(file:)
+        key = ssh_key_info(file: file)
+        # If we already have one with this fingerprint, use it.
+        if (exist = ssh_keys.find {|dk| dk[:fingerprint] == key[:fingerprint]})
+          exist
+        else
+          ssh_keys_unmemo!
+          client.ssh_keys.create \
+            DropletKit::SSHKey.new(fingerprint: key[:fingerprint],
+                                   public_key: key[:data],
+                                   name: key[:name])
+        end
+      end
+
+
+      def await_creation(droplet)
+        d = nil
+        backoff_loop do
+          d = client.droplets.find(id: droplet.id)
+          throw :done if d.status == 'active'
+        end
+        return d
+      end
+
+
+      def destroy!(id:)
+        client.droplets.delete(id: id)
+      end
+
+
+      def provision!(name:, size:, zone:, image:, ssh_key_files:)
+        fingerprints = Array(ssh_key_files).map do |file|
+          upload_ssh_key(file: file)[:fingerprint]
+        end
+
+        begin
+          params = {
+            name: name,
+            size: fetch_mapped(name: :size, from: sizes_map, key: size),
+            image: fetch_mapped(name: :image, from: images_map, key: image),
+            region: fetch_mapped(name: :zone, from: zones_map, key: zone),
+            ssh_keys: fingerprints,
+
+            private_networking: true,
+            ipv6: true
+          }
+        rescue KeyError => e
+          fail ArgumentError, "Invalid argument: #{e.message}"
+        end
+
+        droplet = DropletKit::Droplet.new(params)
+
+        if droplet.nil?
+          fail "Droplet was nil: #{params.inspect}"
+        end
+
+        rollback_on_error(id: droplet.id) do
+          droplet = client.droplets.create(droplet)
+          droplet = await_creation(droplet)
+
+          ipv4_public = droplet.networks.v4.find {|n| n.type == 'public' }
+          ipv4_private = droplet.networks.v4.find {|n| n.type == 'private' }
+          ipv6_public = droplet.networks.v6.find {|n| n.type == 'public' }
+          ipv6_private = droplet.networks.v6.find {|n| n.type == 'private' }
+
+          await_ssh(ipv4_public.ip_address)
+          return {
+            name: droplet.name,
+            size: size,
+            zone: zone,
+            image: image,
+            ssh_key_files: ssh_key_files,
+            ssh_keys: fingerprints,
+
+            id: droplet.id,
+            created_at: droplet.created_at,
+            host: ipv4_public&.ip_address,
+            ipv4_public: ipv4_public&.ip_address,
+            ipv4_private: ipv4_private&.ip_address,
+            ipv6_public: ipv6_public&.ip_address,
+            ipv6_private: ipv6_private&.ip_address,
+            # Get rid of magic in droplet.
+            meta: JSON.parse(droplet.to_json)
+          }
+        end
+      end
+
+
+      def self.setup!
+        super
+        url = "https://cloud.digitalocean.com/settings/api/tokens/new"
+
+        puts "Cult needs a read/write Access Token created for your " +
+             "DigitalOcean account."
+        puts "One can be generated at the following URL:"
+        puts
+        puts " #{url}"
+        puts
+
+        CLI.launch_browser(url) if CLI.yes_no?("Open Browser?")
+
+        api_key = CLI.prompt("Access Token")
+        unless api_key.match(/\A[0-9a-f]{64}\z/)
+          puts "That doesn't look like an access token, but we'll take it."
+        end
+
+        inst = new(api_key: api_key)
+
+        return {
+          api_key: api_key,
+          driver: driver_name,
+          configurations: {
+            sizes: inst.sizes,
+            zones: inst.zones,
+            images: inst.images,
+          }
+        }
+      end
+    end
+
+  end
+end
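Tying it together, provisioning through this driver looks roughly like the sketch below. It assumes droplet_kit is installed and a real API token is available; the env var name and the size/zone/image keys are placeholders and must exist in the maps the driver builds (or be forced with a leading '='):

    require 'cult/drivers/digital_ocean_driver'

    driver = Cult::Drivers::DigitalOceanDriver.new(api_key: ENV.fetch('DIGITALOCEAN_TOKEN'))

    node = driver.provision!(
      name:          'web-1',
      size:          '1gb',            # placeholder size key
      zone:          'nyc3',           # placeholder region slug
      image:         'ubuntu-16-04',   # normalized from a slug like "ubuntu-16-04-x64"
      ssh_key_files: [File.expand_path('~/.ssh/id_rsa.pub')]
    )

    node[:host]   # public IPv4; provision! returns only after SSH is accepting connections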