souffle 0.0.1 → 0.0.2
- data/Gemfile +9 -3
- data/README.md +6 -0
- data/bin/{souffle-server → souffle} +0 -0
- data/lib/souffle.rb +8 -8
- data/lib/souffle/application.rb +15 -10
- data/lib/souffle/application/souffle-server.rb +90 -5
- data/lib/souffle/config.rb +88 -59
- data/lib/souffle/daemon.rb +156 -0
- data/lib/souffle/exceptions.rb +29 -17
- data/lib/souffle/http.rb +43 -0
- data/lib/souffle/log.rb +11 -14
- data/lib/souffle/node.rb +91 -53
- data/lib/souffle/node/runlist.rb +16 -18
- data/lib/souffle/node/runlist_item.rb +43 -36
- data/lib/souffle/node/runlist_parser.rb +60 -62
- data/lib/souffle/polling_event.rb +110 -0
- data/lib/souffle/provider.rb +231 -23
- data/lib/souffle/provider/aws.rb +654 -7
- data/lib/souffle/provider/vagrant.rb +42 -5
- data/lib/souffle/provisioner.rb +55 -0
- data/lib/souffle/provisioner/node.rb +157 -0
- data/lib/souffle/provisioner/system.rb +195 -0
- data/lib/souffle/redis_client.rb +8 -0
- data/lib/souffle/redis_mixin.rb +40 -0
- data/lib/souffle/server.rb +42 -8
- data/lib/souffle/ssh_monkey.rb +8 -0
- data/lib/souffle/state.rb +16 -0
- data/lib/souffle/system.rb +139 -37
- data/lib/souffle/template.rb +30 -0
- data/lib/souffle/templates/Vagrantfile.erb +41 -0
- data/lib/souffle/version.rb +6 -0
- data/spec/config_spec.rb +20 -0
- data/spec/log_spec.rb +24 -0
- data/spec/{runlist_parser_spec.rb → node/runlist_parser_spec.rb} +1 -1
- data/spec/{runlist_spec.rb → node/runlist_spec.rb} +1 -1
- data/spec/node_spec.rb +43 -8
- data/spec/provider_spec.rb +56 -0
- data/spec/providers/aws_provider_spec.rb +114 -0
- data/spec/providers/vagrant_provider_spec.rb +22 -0
- data/spec/provisioner_spec.rb +47 -0
- data/spec/spec_helper.rb +8 -0
- data/spec/system_spec.rb +242 -13
- data/spec/template_spec.rb +20 -0
- data/spec/templates/example_template.erb +1 -0
- metadata +125 -30
- data/bin/souffle-worker +0 -7
- data/lib/souffle/application/souffle-worker.rb +0 -46
- data/lib/souffle/providers.rb +0 -2
- data/lib/souffle/worker.rb +0 -14
data/lib/souffle/polling_event.rb
ADDED
@@ -0,0 +1,110 @@
+require 'eventmachine'
+
+# Eventmachine polling event helper.
+class Souffle::PollingEvent
+  # The node to run the polling event against.
+  attr_accessor :node
+
+  # The current state of the polling event.
+  attr_accessor :state
+
+  # The interval to run the periodic timer against the event_loop.
+  attr_reader :interval
+
+  # The timeout (in seconds) for the periodic timer.
+  attr_reader :timeout
+
+  # The proc to run prior to the periodic event loop.
+  attr_accessor :pre_event
+
+  # The event loop proc, should call event_complete on success.
+  attr_accessor :event_loop
+
+  # The proc to run when the timeout has occurred.
+  attr_accessor :error_handler
+
+  # Create a new polling event instance.
+  #
+  # @param [ Souffle::Node ] node The node to run the polling event against.
+  # @param [ Proc ] blk The block to evaluate in the instance context.
+  #
+  # @example
+  #   node = Souffle::Node.new
+  #   node.name = "example_node"
+  #
+  #   EM.run do
+  #     evt = PollingEvent.new(node) do
+  #       interval 1
+  #       timeout 5
+  #       pre_event { puts "at the beginning" }
+  #       event_loop { puts "inside of the event loop" }
+  #       error_handler { puts "in error handler"; EM.stop }
+  #     end
+  #   end
+  #
+  def initialize(node, &blk)
+    @state = Hash.new
+    @node = node
+    instance_eval(&blk) if block_given?
+    initialize_defaults
+    initialize_state
+    start_event
+  end
+
+  # Changes or returns the setting for a parameter.
+  %w( interval timeout ).each do |setting|
+    class_eval %[
+      def #{setting}(value=nil)
+        return @#{setting} if value.nil?
+        @#{setting} = value unless @#{setting} == value
+      end
+    ]
+  end
+
+  # Sets the callback proc or runs the callback proc with the current state.
+  %w( pre_event event_loop error_handler ).each do |type|
+    class_eval %[
+      def #{type}(&blk)
+        if block_given?
+          @#{type} = blk
+        else
+          @#{type}.call(@state)
+        end
+      end
+    ]
+  end
+
+  # Begin the polling event.
+  def start_event
+    pre_event
+    @event_timer = EM.add_periodic_timer(interval) { event_loop }
+    @timeout_timer = EM::Timer.new(timeout) do
+      @event_timer.cancel
+      error_handler
+    end
+  end
+
+  # Helper for the event block to notify the timers that the event is complete.
+  def event_complete
+    @event_timer.cancel
+    @timeout_timer.cancel
+  end
+
+  private
+
+  # Initialize default values for the event.
+  def initialize_defaults
+    @timeout ||= 100
+    @interval ||= 2
+    @pre_event ||= Proc.new { |state| nil }
+    @event_loop ||= Proc.new { |state| nil }
+    @error_handler ||= Proc.new { |state| nil }
+  end
+
+  # Initialize the default values for the state of the event.
+  def initialize_state
+    @state[:node] = @node
+    @state[:interval] = interval
+    @state[:timeout] = timeout
+  end
+end
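For orientation, here is a minimal usage sketch of the new polling-event DSL, adapted from the @example in the class documentation above. The require lines and the tick-counting logic are illustrative assumptions and are not part of the diff.

require 'eventmachine'
require 'souffle'   # assumed entry point that loads Souffle::Node and Souffle::PollingEvent

node = Souffle::Node.new
node.name = "example_node"

EM.run do
  Souffle::PollingEvent.new(node) do
    interval 1   # run event_loop once per second
    timeout 5    # give up after five seconds

    pre_event  { |state| state[:ticks] = 0 }
    event_loop do |state|
      state[:ticks] += 1
      if state[:ticks] >= 3
        event_complete   # cancels both the periodic timer and the timeout timer
        EM.stop
      end
    end
    error_handler { |state| puts "timed out after #{state[:timeout]}s"; EM.stop }
  end
end

The blocks are stored on first call and invoked with the shared state hash afterwards, so the same method names act as both setters and callbacks.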
data/lib/souffle/provider.rb
CHANGED
@@ -1,27 +1,235 @@
-
-
-
-  # The setup method for the provider. Intended to be overridden.
-  #
-  # @raise [Souffle::Exceptions::Provider] This definition must be overridden.
-  def setup
-    error_msg = "#{self.to_s}: you must override setup"
-    raise Souffle::Exceptions::Provider, error_msg
-  end
+require 'fileutils'
+require 'tmpdir'

-
-
-  #
-
-
-
-
+# A metal provider module (Describes AWS, Softlayer, etc).
+module Souffle::Provider
+  # The souffle cloud provider class.
+  class Base
+    attr_accessor :system
+
+    # Initialize a new provider for a given system.
+    #
+    # @param [ Souffle::System ] system The system to provision.
+    def initialize(system=Souffle::System.new)
+      @system ||= system
+      create_ssh_dir_if_missing
+    end
+
+    # The name of the given provider.
+    #
+    # @return [ String ] The name of the given provider.
+    def name
+      self.class.name.split('::').last
+    end
+
+    # Wait until ssh is available for the node and then connect.
+    def boot(node, retries=50)
+    end
+
+    # Creates a system for a given provider. Intended to be overridden.
+    #
+    # @raise [Souffle::Exceptions::Provider] This definition must be
+    # overridden.
+    #
+    # @param [ Souffle::System ] system The system to instantiate.
+    # @param [ String ] tag The tag to use for the system.
+    def create_system(system, tag="souffle")
+      error_msg = "#{self.class.to_s}: you must override create_system"
+      raise Souffle::Exceptions::Provider, error_msg
+    end
+
+    # Takes a node definition and begins the provisioning process.
+    #
+    # @param [ Souffle::Node ] node The node to instantiate.
+    # @param [ String ] tag The tag to use for the node.
+    def create_node(node, tag=nil)
+      error_msg = "#{self.class.to_s}: you must override create_node"
+      raise Souffle::Exceptions::Provider, error_msg
+    end
+
+    # Creates a raid array for a given provider. Intended to be overridden.
+    #
+    # @raise [Souffle::Exceptions::Provider] This definition must be
+    # overridden.
+    def create_raid
+      error_msg = "#{self.class.to_s}: you must override create_raid"
+      raise Souffle::Exceptions::Provider, error_msg
+    end
+
+    # Generates the json required for chef-solo to run on a node.
+    #
+    # @param [ Souffle::Node ] node The node to generate chef-solo json for.
+    #
+    # @return [ String ] The chef-solo json for the particular node.
+    def generate_chef_json(node)
+      json_info = Hash.new
+      json_info[:domain] = "souffle"
+      json_info.merge!(node.options[:attributes])
+      json_info[:run_list] = node.run_list
+      JSON.pretty_generate(json_info)
+    end
+
+    # Waits for ssh to be accessible for a node for the initial connection and
+    # yields an ssh object to manage the commands naturally from there.
+    #
+    # @param [ String ] address The address of the machine to connect to.
+    # @param [ String ] user The user to connect as.
+    # @param [ String, NilClass ] pass By default publickey and password auth
+    # will be attempted.
+    # @param [ Hash ] opts The options hash.
+    # @param [ Fixnum ] timeout The timeout for ssh boot.
+    # @option opts [ Hash ] :net_ssh Options to pass to Net::SSH,
+    # see Net::SSH.start
+    # @option opts [ Hash ] :timeout (TIMEOUT) default timeout for all
+    # #wait_for and #send_wait calls.
+    # @option opts [ Boolean ] :reconnect When disconnected reconnect.
+    #
+    # @yield [ EventMachine::Ssh::Session ] The ssh session.
+    def wait_for_boot(address, user="root", pass=nil, opts={},
+                      timeout=200)
+      Souffle::Log.info "Waiting for ssh for #{address}..."
+      is_booted = false
+      timer = EM::PeriodicTimer.new(EM::Ssh::Connection::TIMEOUT) do
+        opts[:password] = pass unless pass.nil?
+        opts[:paranoid] = false
+        EM::Ssh.start(address, user, opts) do |connection|
+          connection.errback { |err| nil }
+          connection.callback do |ssh|
+            is_booted = true
+            yield(ssh) if block_given?
+            ssh.close
+          end
+        end
+      end
+
+      EM::Timer.new(timeout) do
+        unless is_booted
+          Souffle::Log.error "SSH Boot timeout for #{address}..."
+          timer.cancel
+        end
+      end
+    end

-
-
-
-
-
-
+    # Yields an ssh object to manage the commands naturally from there.
+    #
+    # @param [ String ] address The address of the machine to connect to.
+    # @param [ String ] user The user to connect as.
+    # @param [ String, NilClass ] pass By default publickey and password auth
+    # will be attempted.
+    # @param [ Hash ] opts The options hash.
+    # @option opts [ Hash ] :net_ssh Options to pass to Net::SSH,
+    # see Net::SSH.start
+    # @option opts [ Hash ] :timeout (TIMEOUT) default timeout for all
+    # #wait_for and #send_wait calls.
+    # @option opts [ Boolean ] :reconnect When disconnected reconnect.
+    #
+    # @yield [ EventMachine::Ssh::Session ] The ssh session.
+    def ssh_block(address, user="root", pass=nil, opts={})
+      opts[:password] = pass unless pass.nil?
+      opts[:paranoid] = false
+      EM::Ssh.start(address, user, opts) do |connection|
+        connection.errback do |err|
+          Souffle::Log.error "SSH Error: #{err} (#{err.class})"
+        end
+        connection.callback { |ssh| yield(ssh) if block_given?; ssh.close }
+      end
+    end
+
+    # The path to the ssh key with the given name.
+    #
+    # @param [ String ] key_name The name of the ssh key to lookup.
+    #
+    # @return [ String ] The path to the ssh key with the given name.
+    def ssh_key(key_name)
+      "#{ssh_key_path}/#{key_name}"
+    end
+
+    # Grabs an ssh key for a given aws node.
+    #
+    # @param [ String ] key_name The name of the ssh key to lookup.
+    #
+    # @return [ Boolean ] Whether or not the ssh_key exists
+    # for the node.
+    def ssh_key_exists?(key_name)
+      File.exists? ssh_key(key_name)
+    end
+
+    # Creates the ssh directory for a given provider if it does not exist.
+    def create_ssh_dir_if_missing
+      FileUtils.mkdir_p(ssh_key_path) unless Dir.exists?(ssh_key_path)
+    rescue
+      error_msg = "The ssh key directory does not have write permissions: "
+      error_msg << ssh_key_path
+      raise PermissionErrorSshKeys, error_msg
+    end
+
+    # The path to the ssh keys for the provider.
+    #
+    # @return [ String ] The path to the ssh keys for the provider.
+    def ssh_key_path
+      File.join(File.dirname(
+        Souffle::Config[:config_file]), "ssh", name.downcase)
+    end
+
+    # Rsync's a file to a remote node.
+    #
+    # @param [ String ] ipaddress The ipaddress of the node to connect to.
+    # @param [ String ] file The file to rsync.
+    # @param [ String ] path The remote path to rsync.
+    def rsync_file(ipaddress, file, path='.')
+      ssh_command = "ssh -o UserKnownHostsFile=/dev/null "
+      ssh_command << "-o StrictHostKeyChecking=no -o LogLevel=quiet"
+      rsync_command = "rsync -qar -e \"#{ssh_command}\" "
+      rsync_command << "#{file} root@#{ipaddress}:#{path}"
+      if EM.reactor_running?
+        EM.system(rsync_command)
+      else
+        IO.popen(rsync_command)
+      end
+    end
+
+    # The list of cookbooks and their full paths.
+    #
+    # @return [ Array ] The list of cookbooks and their full paths.
+    def cookbook_paths
+      Array(Souffle::Config[:chef_cookbook_path]).inject([]) do |_paths, path|
+        Dir.glob("#{File.expand_path(path)}/*").each do |cb|
+          _paths << cb if File.directory? cb
+        end
+        _paths
+      end
+    end
+
+    # Creates a new cookbook tarball for the deployment.
+    #
+    # @return [ String ] The path to the created tarball.
+    def create_cookbooks_tarball
+      tarball_name = "cookbooks-latest.tar.gz"
+      temp_dir = File.join(Dir.tmpdir, "chef-cookbooks-latest")
+      temp_cookbook_dir = File.join(temp_dir, "cookbooks")
+      tarball_dir = "#{File.dirname(Souffle::Config[:config_file])}/tarballs"
+      tarball_path = File.join(tarball_dir, tarball_name)
+
+      FileUtils.mkdir_p(tarball_dir) unless File.exists?(tarball_dir)
+      FileUtils.mkdir_p(temp_dir) unless File.exists?(temp_dir)
+      FileUtils.mkdir(temp_cookbook_dir) unless File.exists?(temp_cookbook_dir)
+      cookbook_paths.each { |pkg| FileUtils.cp_r(pkg, temp_cookbook_dir) }
+
+      tar_command = "tar -C #{temp_dir} -czf #{tarball_path} ./cookbooks"
+      if EM.reactor_running?
+        EM::DeferrableChildProcess.open(tar_command) do
+          FileUtils.rm_rf temp_dir
+        end
+      else
+        Kernel.system(tar_command)
+        FileUtils.rm_rf temp_dir
+      end
+      tarball_path
+    end
   end
 end
+
+_provider_dir = File.join(File.dirname(__FILE__), "provider")
+Dir.glob("#{_provider_dir}/*").each do |s|
+  require "souffle/provider/#{File.basename(s)}"
+end
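To make the role of the new Souffle::Provider::Base class concrete, a hypothetical subclass might look like the sketch below. The Example class, its method bodies, and the calls at the end are illustrative assumptions only and do not appear in the gem.

require 'souffle'   # assumed entry point that loads Souffle::Provider::Base

class Souffle::Provider::Example < Souffle::Provider::Base
  # create_system and create_node must be overridden; the Base
  # implementations raise Souffle::Exceptions::Provider.
  def create_system(system, tag="souffle")
    system.nodes.each { |node| create_node(node, tag) }
  end

  def create_node(node, tag=nil)
    Souffle::Log.info "Pretending to create #{node.name} (tag: #{tag.inspect})"
  end
end

# Instantiating a provider creates its ssh key directory under
# File.dirname(Souffle::Config[:config_file])/ssh/example (see ssh_key_path).
provider = Souffle::Provider::Example.new
provider.name   # => "Example"

The name method derives the provider name from the class name, which is why the AWS provider below no longer needs its own hard-coded name definition.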
data/lib/souffle/provider/aws.rb
CHANGED
@@ -1,16 +1,663 @@
-require '
+require 'right_aws'
+require 'securerandom'
+
+require 'souffle/polling_event'
+
+# Monkeypatch RightAws to support EBS delete on termination.
+class RightAws::Ec2
+  def modify_block_device_delete_on_termination_attribute(instance_id,
+    device_name, delete_on_termination)
+    request_hash = {'InstanceId' => instance_id}
+    prefix = "BlockDeviceMapping.1"
+    request_hash["#{prefix}.DeviceName"] = device_name
+    request_hash["#{prefix}.Ebs.DeleteOnTermination"] = delete_on_termination
+    link = generate_request('ModifyInstanceAttribute', request_hash)
+    request_info(link, RightAws::RightBoolResponseParser.new(
+      :logger => @logger))
+  rescue Exception
+    on_exception
+  end
+end

 # The AWS souffle provider.
-class Souffle::Provider::AWS < Souffle::Provider
+class Souffle::Provider::AWS < Souffle::Provider::Base
+  attr_reader :access_key, :access_secret

   # Setup the internal AWS configuration and object.
-  def
+  def initialize
+    super()
+    @access_key = @system.try_opt(:aws_access_key)
+    @access_secret = @system.try_opt(:aws_access_secret)
+    @newest_cookbooks = create_cookbooks_tarball
+
+    if Souffle::Config[:debug]
+      logger = Souffle::Log.logger
+    else
+      logger = Logger.new('/dev/null')
+    end
+
+    @ec2 = RightAws::Ec2.new(
+      @access_key, @access_secret,
+      :region => @system.try_opt(:aws_region),
+      :logger => logger)
+  rescue
+    raise Souffle::Exceptions::InvalidAwsKeys,
+      "AWS access keys are required to operate on EC2"
+  end
+
+  # Generates a prefixed unique tag.
+  #
+  # @param [ String ] tag_prefix The tag prefix to use.
+  #
+  # @return [ String ] The unique tag with prefix.
+  def generate_tag(tag_prefix="souffle")
+    "#{tag_prefix}-#{SecureRandom.hex(6)}"
+  end
+
+  # Creates a system using aws as the provider.
+  #
+  # @param [ Souffle::System ] system The system to instantiate.
+  # @param [ String ] tag_prefix The tag prefix to use for the system.
+  def create_system(system, tag_prefix="souffle")
+    system.options[:tag] = generate_tag(tag_prefix)
+    system.provisioner = Souffle::Provisioner::System.new(system, self)
+    system.provisioner.initialized
+  end
+
+  # Takes a list of nodes and returns the list of their aws instance_ids.
+  #
+  # @param [ Array ] nodes The list of nodes to get instance_id's from.
+  def instance_id_list(nodes)
+    Array(nodes).map { |n| n.options[:aws_instance_id] }
+  end
+
+  # Takes a node definition and begins the provisioning process.
+  #
+  # @param [ Souffle::Node ] node The node to instantiate.
+  # @param [ String ] tag The tag to use for the node.
+  def create_node(node, tag=nil)
+    opts = prepare_node_options(node)
+    node.options[:tag] = tag unless tag.nil?
+
+    create_ebs(node)
+    instance_info = @ec2.launch_instances(
+      node.try_opt(:aws_image_id), opts).first
+
+    node.options[:aws_instance_id] = instance_info[:aws_instance_id]
+    wait_until_node_running(node) { tag_node(node, node.try_opt(:tag)) }
+  end
+
+  # Tags a node and its volumes.
+  #
+  # @param [ Souffle::Node ] node The node to tag.
+  # @param [ String ] tag The tag to use for the node.
+  def tag_node(node, tag="")
+    @ec2.create_tags(Array(node.options[:aws_instance_id]), {
+      :Name => node.name,
+      :souffle => tag
+    })
+    volume_ids = node.options[:volumes].map { |vol| vol[:aws_id] }
+    @ec2.create_tags(Array(volume_ids), {
+      :instance_id => node.options[:aws_instance_id],
+      :souffle => tag
+    }) unless Array(volume_ids).empty?
+  end
+
+  # Takes a list of nodes and stops the instances.
+  #
+  # @param [ Souffle::Node, Array ] nodes The list of nodes to stop.
+  def stop_nodes(nodes)
+    @ec2.stop_instances(instance_id_list(nodes))
+  end
+
+  # Stops all nodes in a given system.
+  #
+  # @param [ Souffle::System ] system The system to stop.
+  def stop_system(system)
+    stop_nodes(system.nodes)
+  end
+
+  # Takes a list of nodes and kills them. (Haha)
+  #
+  # @param [ Souffle::Node ] nodes The list of nodes to terminate.
+  def kill(nodes)
+    @ec2.terminate_instances(instance_id_list(nodes))
+  end
+
+  # Takes a list of nodes, kills them, and then recreates them.
+  #
+  # @param [ Souffle::Node ] nodes The list of nodes to kill and recreate.
+  def kill_and_recreate(nodes)
+    kill(nodes)
+    @provisioner.reclaimed
   end
-
-  # The name of the given provider.
-  def name; "AWS"; end

   # Creates a raid array with the given requirements.
-
+  #
+  # @param [ Souffle::Node ] node The node to create the raid for.
+  # @param [ Array ] devices The list of devices to use for the raid.
+  # @param [ Fixnum ] md_device The md device number.
+  # @param [ Fixnum ] chunk The chunk size in kilobytes.
+  # @param [ String ] level The raid level to use.
+  # options are: linear, raid0, 0, stripe, raid1, 1, mirror,
+  # raid4, 4, raid5, 5, raid6, 6, multipath, mp
+  def create_raid(node, devices=[], md_device=0, chunk=64, level="raid0")
+    dev_list = devices.map { |s| "#{s}1" }
+    mdadm_string = "/sbin/mdadm --create /dev/md#{md_device} "
+    mdadm_string << "--chunk=#{chunk} --level=#{level} "
+    mdadm_string << "--raid-devices=#{devices.size} #{dev_list.join(' ')}"
+
+    export_mdadm = "/sbin/mdadm --detail --scan > /etc/mdadm.conf"
+
+    ssh_block(node) do |ssh|
+      ssh.exec!(mdadm_string)
+      ssh.exec!(export_mdadm)
+      yield if block_given?
+    end
+  end
+
+  # Wait for the machine to boot up.
+  #
+  # @param [ Souffle::Node ] node The node to boot up.
+  def boot(node)
+    wait_for_boot(node)
+  end
+
+  # Formats all of the devices on a given node for the provisioner interface.
+  #
+  # @param [ Souffle::Node ] node The node whose new partitions will be formatted.
+  def format_device(node)
+    partition_device(node, "/dev/md0", "8e") do
+      _format_device(node, "/dev/md0p1")
+    end
+  end
+
+  # Formats a device on a given node with the provided filesystem.
+  #
+  # @param [ Souffle::Node ] node The node to format a device on.
+  # @param [ String ] device The device to format.
+  # @param [ String ] filesystem The filesystem to use when formatting.
+  def _format_device(node, device, filesystem="ext4")
+    return if node.options[:volumes].nil?
+    setup_lvm(node)
+    ssh_block(node) do |ssh|
+      ssh.exec!("#{fs_formatter(filesystem)} #{device}")
+      mount_lvm(node) { node.provisioner.device_formatted }
+    end
+  end
+
+  # Partition each of the volumes with raid for the node.
+  #
+  # @param [ Souffle::Node ] node The node to partition the volumes on.
+  # @param [ Fixnum ] iteration The current retry iteration.
+  def partition(node, iteration=0)
+    return node.provisioner.error_occurred if iteration == 3
+    Souffle::PollingEvent.new(node) do
+      timeout 30
+
+      pre_event do
+        @partitions = 0
+        @provider = node.provisioner.provider
+        node.options[:volumes].each_with_index do |volume, index|
+          @provider.partition_device(
+            node, @provider.volume_id_to_device(index)) do |count|
+            @partitions += count
+          end
+        end
+      end
+
+      event_loop do
+        if @partitions == node.options[:volumes].size
+          event_complete
+          node.provisioner.partitioned_device
+        end
+      end
+
+      error_handler do
+        error_msg = "#{node.log_prefix} Timeout during partitioning..."
+        Souffle::Log.error error_msg
+        @provider.partition(node, iteration+1)
+      end
+    end
+  end
+
+  # Partitions a device on a given node with the given partition_type.
+  #
+  # @note Currently this is a naive implementation and uses the full disk.
+  #
+  # @param [ Souffle::Node ] node The node to partition a device on.
+  # @param [ String ] device The device to partition.
+  # @param [ String ] partition_type The type of partition to create.
+  def partition_device(node, device, partition_type="fd")
+    partition_cmd = "echo \",,#{partition_type}\""
+    partition_cmd << "| /sbin/sfdisk #{device}"
+    ssh_block(node) do |ssh|
+      ssh.exec!("#{partition_cmd}")
+      yield(1) if block_given?
+    end
+  end
+
+  # Sets up the lvm partition for the raid devices.
+  #
+  # @param [ Souffle::Node ] node The node to setup lvm on.
+  def setup_lvm(node)
+    return if node.options[:volumes].nil?
+    ssh_block(node) do |ssh|
+      ssh.exec!("pvcreate /dev/md0p1")
+      ssh.exec!("vgcreate VolGroup00 /dev/md0p1")
+      ssh.exec!("lvcreate -l 100%vg VolGroup00 -n data")
+    end
+  end
+
+  # Mounts the newly created lvm configuration and adds it to fstab.
+  #
+  # @param [ Souffle::Node ] node The node to mount lvm on.
+  def mount_lvm(node)
+    fstab_str = "/dev/md0p1 /data"
+    fstab_str << " ext4 noatime,nodiratime 1 1"
+
+    mount_str = "mount -o rw,noatime,nodiratime"
+    mount_str << " /dev/mapper/VolGroup00-data /data"
+    ssh_block(node) do |ssh|
+      ssh.exec!("mkdir /data")
+      ssh.exec!(mount_str)
+      ssh.exec!("echo #{fstab_str} >> /etc/fstab")
+      ssh.exec!("echo #{fstab_str} >> /etc/mtab")
+      yield if block_given?
+    end
+  end
+
+  # Installs mdadm (multiple device administration) to manage raid.
+  #
+  # @param [ Souffle::Node ] node The node to install mdadm on.
+  def setup_mdadm(node)
+    ssh_block(node) do |ssh|
+      ssh.exec!("/usr/bin/yum install -y mdadm")
+    end
+    node.provisioner.mdadm_installed
+  end
+
+  # Sets up software raid for the given node.
+  #
+  # @param [ Souffle::Node ] node The node to setup raid for.
+  def setup_raid(node)
+    volume_list = []
+    node.options[:volumes].each_with_index do |volume, index|
+      volume_list << volume_id_to_device(index)
+    end
+    create_raid(node, volume_list) { node.provisioner.raid_initialized }
+  end
+
+  # Creates ebs volumes for the given node.
+  #
+  # @param [ Souffle::Node ] node The node to create ebs volumes for.
+  #
+  # @return [ Array ] The list of created ebs volumes.
+  def create_ebs(node)
+    volumes = Array.new
+    node.options.fetch(:volume_count, 0).times do
+      volumes << @ec2.create_volume(
+        node.try_opt(:aws_snapshot_id),
+        node.try_opt(:aws_ebs_size),
+        node.try_opt(:aws_availability_zone) )
+    end
+    node.options[:volumes] = volumes
+    volumes
+  end
+
+  # Polls the EC2 instance information until it is in the running state.
+  #
+  # @param [ Souffle::Node ] node The node to wait until running on.
+  # @param [ Fixnum ] poll_timeout The maximum number of seconds to wait.
+  # @param [ Fixnum ] poll_interval The interval in seconds to poll EC2.
+  def wait_until_node_running(node, poll_timeout=100, poll_interval=2, &blk)
+    ec2 = @ec2; Souffle::PollingEvent.new(node) do
+      timeout poll_timeout
+      interval poll_interval
+
+      pre_event do
+        Souffle::Log.info "#{node.log_prefix} Waiting for node running..."
+        @provider = node.provisioner.provider
+        @blk = blk
+      end
+
+      event_loop do
+        instance = ec2.describe_instances(
+          node.options[:aws_instance_id]).first
+        if instance[:aws_state].downcase == "running"
+          event_complete
+          @blk.call unless @blk.nil?
+          @provider.wait_until_ebs_ready(node)
+        end
+      end
+
+      error_handler do
+        error_msg = "#{node.log_prefix} Wait for node running timeout..."
+        Souffle::Log.error error_msg
+        node.provisioner.error_occurred
+      end
+    end
+  end
+
+  # Polls the EBS volume status until they're ready, then runs the given block.
+  #
+  # @param [ Souffle::Node ] node The node to wait for EBS on.
+  # @param [ Fixnum ] poll_timeout The maximum number of seconds to wait.
+  # @param [ Fixnum ] poll_interval The interval in seconds to poll EC2.
+  def wait_until_ebs_ready(node, poll_timeout=100, poll_interval=2)
+    ec2 = @ec2; Souffle::PollingEvent.new(node) do
+      timeout poll_timeout
+      interval poll_interval
+
+      pre_event do
+        Souffle::Log.info "#{node.log_prefix} Waiting for EBS to be ready..."
+        @provider = node.provisioner.provider
+        @volume_ids = node.options[:volumes].map { |v| v[:aws_id] }
+      end
+
+      event_loop do
+        vol_status = ec2.describe_volumes(@volume_ids)
+        avail = Array(vol_status).select { |v| v[:aws_status] == "available" }
+        if avail.size == vol_status.size
+          event_complete
+          @provider.attach_ebs(node)
+          node.provisioner.created
+        end
+      end
+
+      error_handler do
+        error_msg = "#{node.log_prefix} Waiting for EBS Timed out..."
+        Souffle::Log.error error_msg
+        node.provisioner.error_occurred
+      end
+    end
+  end
+
+  # Attaches ebs volumes to the given node.
+  #
+  # @param [ Souffle::Node ] node The node to attach ebs volumes onto.
+  def attach_ebs(node)
+    Souffle::Log.info "#{node.log_prefix} Attaching EBS..."
+    node.options[:volumes].each_with_index do |volume, index|
+      @ec2.attach_volume(
+        volume[:aws_id],
+        node.options[:aws_instance_id],
+        volume_id_to_aws_device(index) )
+      @ec2.modify_block_device_delete_on_termination_attribute(
+        node.options[:aws_instance_id],
+        volume_id_to_aws_device(index),
+        node.try_opt(:delete_on_termination) )
+    end
+  end
+
+  # Detach and delete all volumes from a given node.
+  #
+  # @param [ Souffle::Node ] node The node to destroy ebs volumes from.
+  def detach_and_delete_ebs(node)
+    detach_ebs(node, force=true)
+    delete_ebs(node)
+  end
+
+  # Detaches all ebs volumes from a given node.
+  #
+  # @param [ Souffle::Node ] node The node to detach volumes from.
+  # @param [ Boolean ] force Whether or not to force the
+  # detachment.
+  def detach_ebs(node, force=false)
+    node.options[:volumes].each_with_index do |volume, index|
+      @ec2.detach_volume(
+        volume[:aws_id],
+        node.options[:aws_instance_id],
+        volume_id_to_aws_device(index),
+        force)
+    end
+  end
+
+  # Deletes the ebs volumes from a given node.
+  #
+  # @param [ Souffle::Node ] node The node to delete volumes from.
+  def delete_ebs(node)
+    node.options[:volumes].each do |volume|
+      @ec2.delete_volume(volume[:aws_id])
+    end
+  end
+
+  # Whether or not to use a vpc instance and subnet for provisioning.
+  #
+  # @param [ Souffle::Node ] node The node to check vpc information for.
+  # @return [ Boolean ] Whether to use a vpc instance and
+  # specific subnet.
+  def using_vpc?(node)
+    !!node.try_opt(:aws_vpc_id) and
+    !!node.try_opt(:aws_subnet_id)
+  end
+
+  # Checks whether or not the vpc and subnet are setup properly.
+  #
+  # @param [ Souffle::Node ] node The node to check vpc information for.
+  #
+  # @return [ Boolean ] Whether or not the vpc is setup.
+  def vpc_setup?(node)
+    vpc_exists?(node) and subnet_exists?(node)
+  end
+
+  # Checks whether or not the vpc currently exists.
+  #
+  # @param [ Souffle::Node ] node The node to check vpc information for.
+  #
+  # @return [ Boolean ] Whether or not the vpc exists.
+  def vpc_exists?(node)
+    @ec2.describe_vpcs({:filters =>
+      { 'vpc-id' => node.try_opt(:aws_vpc_id) } }).any?
+  end
+
+  # Checks whether or not the subnet currently exists.
+  #
+  # @param [ Souffle::Node ] node The node to check vpc information for.
+  #
+  # @return [ Boolean ] Whether or not the subnet exists.
+  def subnet_exists?(node)
+    @ec2.describe_subnets({:filters =>
+      { 'subnet-id' => node.try_opt(:aws_subnet_id) } }).any?
+  end
+
+  # Provisions a node with the chef/chef-solo configuration.
+  #
+  # @todo Setup the chef/chef-solo tar gzip and ssh connections.
+  def provision(node)
+    if node.try_opt(:chef_provisioner) == :solo
+      provision_chef_solo(node, generate_chef_json(node))
+    else
+      provision_chef_client(node)
+    end
+    node.provisioner.provisioned
+  end
+
+  # Waits for ssh to be accessible for a node for the initial connection and
+  # yields an ssh object to manage the commands naturally from there.
+  #
+  # @param [ Souffle::Node ] node The node to run commands against.
+  # @param [ String ] user The user to connect as.
+  # @param [ String, NilClass ] pass By default publickey and password auth
+  # will be attempted.
+  # @param [ Hash ] opts The options hash.
+  # @param [ Fixnum ] poll_timeout The maximum number of seconds to wait.
+  # @param [ Fixnum ] iteration The current retry iteration.
+  #
+  # @option opts [ Hash ] :net_ssh Options to pass to Net::SSH,
+  # see Net::SSH.start
+  # @option opts [ Hash ] :timeout (TIMEOUT) default timeout for all #wait_for
+  # and #send_wait calls.
+  # @option opts [ Boolean ] :reconnect When disconnected reconnect.
+  #
+  # @yield [ EventMachine::Ssh::Session ] The ssh session.
+  def wait_for_boot(node, user="root", pass=nil, opts={},
+                    poll_timeout=100, iteration=0, &blk)
+    return node.provisioner.error_occurred if iteration == 3
+
+    ec2 = @ec2; Souffle::PollingEvent.new(node) do
+      timeout poll_timeout
+      interval EM::Ssh::Connection::TIMEOUT
+
+      pre_event do
+        Souffle::Log.info "#{node.log_prefix} Waiting for ssh..."
+        @provider = node.provisioner.provider
+        @blk = blk
+      end
+
+      event_loop do
+        n = ec2.describe_instances(node.options[:aws_instance_id]).first
+        unless n.nil?
+          key = n[:ssh_key_name]
+          if @provider.ssh_key_exists?(key)
+            opts[:keys] = @provider.ssh_key(key)
+          end
+          opts[:password] = pass unless pass.nil?
+          opts[:paranoid] = false
+          address = n[:private_ip_address]
+
+          EM::Ssh.start(address, user, opts) do |connection|
+            connection.errback { |err| nil }
+            connection.callback do |ssh|
+              event_complete
+              node.provisioner.booted
+              @blk.call(ssh) unless @blk.nil?
+              ssh.close
+            end
+          end
+        end
+      end
+
+      error_handler do
+        Souffle::Log.error "#{node.log_prefix} SSH Boot timeout..."
+        @provider.wait_for_boot(node, user, pass, opts,
+          poll_timeout, iteration+1, &blk)
+      end
+    end
+  end
+
+  # Provisions a box using the chef_solo provisioner.
+  #
+  # @param [ Souffle::Node ] node The node to provision.
+  # @param [ String ] solo_json The chef solo json string to use.
+  def provision_chef_solo(node, solo_json)
+    rsync_file(node, @newest_cookbooks, "/tmp")
+    solo_config = "node_name \"#{node.name}.souffle\"\n"
+    solo_config << 'cookbook_path "/tmp/cookbooks"'
+    ssh_block(node) do |ssh|
+      ssh.exec!("sleep 2; tar -zxf /tmp/cookbooks-latest.tar.gz -C /tmp")
+      ssh.exec!("echo '#{solo_config}' >/tmp/solo.rb")
+      ssh.exec!("echo '#{solo_json}' >/tmp/solo.json")
+      ssh.exec!("chef-solo -c /tmp/solo.rb -j /tmp/solo.json")
+      rm_files = "/tmp/cookbooks /tmp/cookbooks-latest.tar.gz"
+      rm_files << " /tmp/solo.rb /tmp/solo.json > /tmp/chef_bootstrap"
+      ssh.exec!("rm -rf #{rm_files}")
+    end
+  end
+
+  # Provisions a box using the chef_client provisioner.
+  #
+  # @todo Chef client provisioner needs to be completed.
+  def provision_chef_client(node)
+    ssh_block(node) do |ssh|
+      ssh.exec!("chef-client")
+    end
+  end
+
+  # Rsync's a file to a remote node.
+  #
+  # @param [ Souffle::Node ] node The node to connect to.
+  # @param [ String ] file The file to rsync.
+  # @param [ String ] path The remote path to rsync.
+  def rsync_file(node, file, path='.')
+    n = @ec2.describe_instances(node.options[:aws_instance_id]).first
+    super(n[:private_ip_address], file, path)
+  end
+
+  # Yields an ssh object to manage the commands naturally from there.
+  #
+  # @param [ Souffle::Node ] node The node to run commands against.
+  # @param [ String ] user The user to connect as.
+  # @param [ String, NilClass ] pass By default publickey and password auth
+  # will be attempted.
+  # @param [ Hash ] opts The options hash.
+  # @option opts [ Hash ] :net_ssh Options to pass to Net::SSH,
+  # see Net::SSH.start
+  # @option opts [ Hash ] :timeout (TIMEOUT) default timeout for all #wait_for
+  # and #send_wait calls.
+  # @option opts [ Boolean ] :reconnect When disconnected reconnect.
+  #
+  # @yield [ EventMachine::Ssh::Session ] The ssh session.
+  def ssh_block(node, user="root", pass=nil, opts={})
+    n = @ec2.describe_instances(node.options[:aws_instance_id]).first
+    if n.nil?
+      raise AwsInstanceDoesNotExist,
+        "The AWS instance (#{node.options[:aws_instance_id]}) does not exist."
+    else
+      key = n[:ssh_key_name]
+      opts[:keys] = ssh_key(key) if ssh_key_exists?(key)
+      super(n[:private_ip_address], user, pass, opts)
+    end
+  end
+
+  # Prepares the node options using the system or global defaults.
+  #
+  # @param [ Souffle::Node ] node The node you wish to prepare options for.
+  #
+  # @return [ Hash ] The options hash to pass into ec2 launch instance.
+  def prepare_node_options(node)
+    opts = Hash.new
+    opts[:instance_type] = node.try_opt(:aws_instance_type)
+    opts[:min_count] = 1
+    opts[:max_count] = 1
+    if using_vpc?(node)
+      opts[:subnet_id] = node.try_opt(:aws_subnet_id)
+      opts[:aws_subnet_id] = node.try_opt(:aws_subnet_id)
+      opts[:aws_vpc_id] = Array(node.try_opt(:aws_vpc_id))
+      opts[:group_ids] = Array(node.try_opt(:group_ids))
+    else
+      opts[:group_names] = node.try_opt(:group_names)
+    end
+    opts[:key_name] = node.try_opt(:key_name)
+    opts
+  end
+
+  # Takes the volume count in the array and converts it to a device name.
+  #
+  # @note This starts at /dev/xvda and goes to /dev/xvdb, etc.
+  # And due to the special case on AWS, skips /dev/xvde.
+  #
+  # @param [ Fixnum ] volume_id The count in the array for the volume id.
+  #
+  # @return [ String ] The device string to mount to.
+  def volume_id_to_device(volume_id)
+    if volume_id >= 4
+      volume_id += 1
+    end
+    "/dev/xvd#{(volume_id + "a".ord).chr}"
+  end
+
+  # Takes the volume count in the array and converts it to a device name.
+  #
+  # @note This starts at /dev/hda and goes to /dev/hdb, etc.
+  # And due to the special case on AWS, skips /dev/hde.
+  #
+  # @param [ Fixnum ] volume_id The count in the array for the volume id.
+  #
+  # @return [ String ] The device string to mount to.
+  def volume_id_to_aws_device(volume_id)
+    if volume_id >= 4
+      volume_id += 1
+    end
+    "/dev/hd#{(volume_id + "a".ord).chr}"
+  end
+
+  # Chooses the appropriate formatter for the given filesystem.
+  #
+  # @param [ String ] filesystem The filesystem you intend to use.
+  #
+  # @return [ String ] The filesystem formatter.
+  def fs_formatter(filesystem)
+    "mkfs.#{filesystem}"
   end
 end
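The two volume_id_to_*_device helpers near the end of the file map a volume's index in node.options[:volumes] to a device path, skipping the fifth letter because of the AWS special case noted in their comments. A standalone sketch of the same arithmetic follows; device_for is a hypothetical helper for illustration and is not part of the gem.

# Mirrors the arithmetic of volume_id_to_device / volume_id_to_aws_device above.
def device_for(volume_id, prefix)
  volume_id += 1 if volume_id >= 4   # skip the "e" slot (/dev/xvde, /dev/hde)
  "#{prefix}#{(volume_id + "a".ord).chr}"
end

(0..5).map { |i| device_for(i, "/dev/xvd") }
# => ["/dev/xvda", "/dev/xvdb", "/dev/xvdc", "/dev/xvdd", "/dev/xvdf", "/dev/xvdg"]
(0..5).map { |i| device_for(i, "/dev/hd") }
# => ["/dev/hda", "/dev/hdb", "/dev/hdc", "/dev/hdd", "/dev/hdf", "/dev/hdg"]

The xvd form is what the instance sees for its attached EBS volumes, while the hd form is the device name passed to the EC2 attach and delete-on-termination calls.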