stack-kicker 0.0.1 → 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.rvmrc +48 -0
- data/Gemfile +1 -1
- data/LICENSE.txt +202 -22
- data/README.md +127 -10
- data/README.rdoc +5 -1
- data/bin/stack-kicker +84 -0
- data/doc/examples/Stackfile +41 -0
- data/features/stack-kicker.feature +27 -0
- data/features/step_definitions/{kicker_steps.rb → stack-kicker_steps.rb} +0 -0
- data/lib/stack-kicker/version.rb +5 -0
- data/lib/stack-kicker.rb +7 -0
- data/lib/stack.rb +733 -158
- data/stack-kicker.gemspec +26 -0
- metadata +37 -27
- data/bin/kicker +0 -77
- data/features/kicker.feature +0 -13
- data/kicker.gemspec +0 -32
- data/lib/kicker/version.rb +0 -3
- data/lib/kicker.rb +0 -5
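Release 0.0.2 renames the gem from kicker to stack-kicker and rewrites data/lib/stack.rb around a Stackfile that is read and eval'd to populate StackConfig::Stacks (a shipped example is added as data/doc/examples/Stackfile above). A minimal sketch of that shape, using only keys referenced in the diff below — the stack name, credentials and role layout here are placeholders, not values taken from the package:

    # Stackfile (illustrative sketch, not the shipped example)
    module StackConfig
      Stacks = {
        'my-stack' => {
          'REGION'      => 'region-1',            # OpenStack region / AZ
          'USERNAME'    => 'user@example.com',
          'PASSWORD'    => 'secret',
          'AUTH_URL'    => 'https://identity.example.com/v2.0/',
          'TENANT_NAME' => 'my-tenant',
          'flavor_id'   => 100,
          'image_id'    => 12345,
          :key_pair     => 'my-keypair',
          :key_public   => 'my-keypair.pub',
          :global_service_name => 'svc',
          :roles => {
            :chef => { :chef_server => true },
            :web  => { :count => 2 }
          }
        }
      }
    end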
data/lib/stack.rb
CHANGED
@@ -1,212 +1,787 @@
-
-
+#!/usr/bin/env ruby
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Author: Simon McCartney <simon.mccartney@hp.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+require 'bundler'
+require 'rubygems'

+require 'base64'
+require 'erb'
+require 'openstack' # https://github.com/ruby-openstack/ruby-openstack
+require 'json' # http://www.ruby-doc.org/stdlib-1.9.3/libdoc/json/rdoc/JSON.html
+require 'tempfile'
+
+
+#
+# This really needs to be converted into a class....
+#
 module Stack

-
-
-
-
-
+  # Shadow the global constant Logger with Stack::Logger
+  # (if you want access to the global constant, use ::Logger from inside the Stack module)
+  Logger = Logger.new(STDOUT)
+  Logger.level = ::Logger::INFO
+  Logger.datetime_format = "%Y-%m-%d %H:%M:%S"
+  Logger.formatter = proc do |severity, datetime, progname, msg|
+    "#{datetime} #{severity}: #{msg}\n"
   end

-
-
-
-
-
-
+  # location of gem, where config[:gemhome]/lib contains our default cloud-init templates
+  @@gemhome = File.absolute_path(File.realpath(File.dirname(File.expand_path(__FILE__)) + '/..'))
+
+  # Methadone::CLILogger is a Class, Stack is still a module, so we can't include it
+  # so this is a QADH to propagate the log_level
+  def Stack.log_level(level)
+    Logger.debug { "Setting the Logger.level to #{level}" }
+    Logger.level = level
   end

-def Stack.
-#
-
-
+  def Stack.show_stacks(stackfile = 'Stackfile')
+    # our local config file
+    config_raw = File.read(stackfile)
+    eval(config_raw)

-
-
-
-config[:node_details][fqdn] = {
-# set the node details from the role, if not specified in the role, use the config global
-# (takes advantage of left to right evaluation of ||)
-:flavor_id => (role_details[:flavor_id] || config[:flavor_id]),
-:count => (role_details[:count] || 1),
-:publish_private_ip => (role_details[:publish_private_ip] || false),
-:dns_wildcard => (role_details[:dns_wildcard] || false)
-}
+    Logger.info { "Stacks:" }
+    StackConfig::Stacks.each do |name, details|
+      Logger.info { " #{name}" }
     end
   end
+
+  def Stack.show_stack(config)
+    # syntax_check is a light weight check that doesn't talk to OpenStalk
+    Stack.syntax_check(config)
+    # generate an array of hostnames that this stack would create
+    hostnames = Stack.generate_server_names(config)

-
-stack_hostnames = Array.new
-config[:roles].each do |role, role_details|
-fqdn = role.to_s + '.' + config[:dns_domain]
-stack_hostnames << fqdn
-end
-stack_hostnames
+    hostnames.each { |hostname| Logger.info " #{hostname}" }
   end

-def Stack.
-#
-
+  def Stack.select_stack(stackfile = 'Stackfile', stack_name)
+    # our local config file
+    config_raw = File.read(stackfile)
+    eval(config_raw)

-
-
-
-
+    # if there is only one stack defined in the Stackfile, load it:
+    if StackConfig::Stacks.count == 1 && stack_name.nil?
+      stack_name = StackConfig::Stacks.keys[0]
+      Logger.debug { "defaulting to #{stack_name} as there is a single stack defined and no stack named" }
+    end

-
-
-
+    # returns a config object, injecting the name into the returned config
+    if StackConfig::Stacks[stack_name].nil?
+      Logger.error { "#{stack_name} is invalid, defined stacks are:" }
+      StackConfig::Stacks.each do |name, details|
+        Logger.error { " #{name}" }
       end
+      exit 2
+    end
+
+    config = StackConfig::Stacks[stack_name]
+    config[:name] = stack_name
+    # set the stackhome to the directory containing the Stackfile
+    config[:stackhome] = File.dirname(File.expand_path(stackfile))
+    Logger.info "stackhome is #{config[:stackhome]}"
+
+    config
+  end
+
+  def Stack.connect(config, region = nil)
+    # region & az concepts are confused in HPCS land
+    region = config['REGION'] if (region.nil? || region.length() < 1)
+
+    Logger.info "Connecting to OpenStack with region = #{region}"
+
+    OpenStack::Connection.create({
+      :auth_method=> 'password',
+      :username => config['USERNAME'],
+      :api_key=> config['PASSWORD'],
+      :auth_url => config['AUTH_URL'],
+      :authtenant_name => config['TENANT_NAME'],
+      :region => region,
+      :service_type=>"compute"
+    })
+  end
+
+  # expects server to be OpenStack::Compute::Server object
+  def Stack.get_addresses(server)
+
+    # get the addressess associated with an OpenStack::Compute::Server object
+    address_description = String.new
+    server.addresses.each do |address|
+      address_description << "#{address.address}(#{address.label}) "
+    end
+    address_description
+  end
+
+  # check that all the required config items are set
+  def Stack.syntax_check(config)
+    if config['REGION'].nil? || config['USERNAME'].nil? || config['PASSWORD'].nil? || config['AUTH_URL'].nil? || config['TENANT_NAME'].nil? &&
+       config['REGION'].empty? || config['USERNAME'].empty? || config['PASSWORD'].empty? || config['AUTH_URL'].empty? || config['TENANT_NAME'].empty?
+      Logger.error { "REGION, USERNAME, PASSWORD, AUTH_URL & TENANT_NAME must all be set" }
+      exit
+    end

-
-
-
-
-
-#
-#
-
-
-
-
-:availability_zone => config[:availability_zone],
-:flavor_id => config[:node_details][fqdn],
-:image_id => config[:image_id],
-:key_name => config[:keypair],
-:user_data => user_data,
-:tags => { 'Name' => fqdn },
-})
-
-print "Waiting for instance to be ready..."
-server.wait_for { ready? }
-puts "#{role.to_s} is booted, #{server.public_ip_address}/#{server.private_ip_address}"
-
-# create/update the public & private DNS for this host
-Stack.update_dns(role.to_s + '-public.' + config[:dns_domain], server.public_ip_address, config)
-Stack.update_dns(role.to_s + '-private.' + config[:dns_domain], server.private_ip_address, config)
+    # load defaults for any items not configured
+    Stack.populate_config(config)
+
+    if config[:provisioner] == 'chef'
+      # check that we have semi-sensible Chef setup
+      # at a bare minimum, we need the directory where we're going to download
+      # validation.pem to to exist
+      dot_chef_abs = File.absolute_path(config[:stackhome] + '/' + config[:dot_chef])
+      if !File.directory?(dot_chef_abs)
+        Logger.warn "#{dot_chef_abs} doesn't exist"
+      end

-#
-
-
+      # Check we have a #{dot_chef_abs}/.chef/knife.rb
+      knife_rb_abs = dot_chef_abs + '/knife.rb'
+      if File.exists?(knife_rb_abs)
+        Logger.info "Found #{knife_rb_abs}, lets hope it contains something sensible"
       else
-
-end
-Stack.update_dns(fqdn, ip_address, config)
-#
-# is this a wildcard DNS host, then claim the *.domain.net
-if (role_details[:dns_wildcard] == true && (!role_details[:dns_wildcard].nil?))
-wildcard = "*." + config[:dns_domain]
-Stack.update_dns(wildcard, ip_address, config)
+        Logger.warn "#{knife_rb_abs} doesn't exist, please run './stack.rb configure-knife <stack-name>'"
       end
     end
   end

-
-
-
-
-
-
-#
+  # validate that all our OpenStack creds, image_id, flavors, keys etc are valid
+  def Stack.validate(config)
+
+    Stack.syntax_check(config)
+
+    # check that the ssh-key is loaded, otherwise most post-install scripts will fail
+    # this lazily assumes that the :key_pair name matches the file the keys were loaded
+    # from
+    if (0 == 1)
+      ssh_keys_loaded = `ssh-add -L`
+      Logger.debug "ssh_keys_loaded: #{ssh_keys_loaded}"
+      Logger.debug "Looking for #{config[:key_pair]}"
+      if ssh_keys_loaded.include?(config[:key_pair])
+        Logger.info "Found #{config[:key_pair]} in the ssh-agent key list"
+      else
+        Logger.error "Couldn't find #{config[:key_pair]} key in the ssh-agent key list! Aborting!"
+        Logger.erroLogger.error "ssh_keys_loaded: #{ssh_keys_loaded}"
+        exit 2
+      end
+    end
+
+    # populate the config & then walk through the AZs verifying the config
+    Stack.populate_config(config)
+
+    # Check that we have valid details for each AZ
+    config[:azs].each do |az|
+
+      # check that credentials, flavor & image are valid
+      os = connect(config, az)
+
+      Logger.info "Checking that flavor #{config['flavor_id']} exists in #{az}..."
+      flavor = os.get_flavor(config['flavor_id'])
+      Logger.info "#{config['flavor_id']} is #{flavor.name}"
+
+      Logger.info "Checking that image #{config[az]['image_id']} exists in #{az}..."
+      image = os.get_image(config[az]['image_id'])
+      Logger.info "#{config[az]['image_id']} is #{image.name}"
+
+      Logger.info "Checking that keypair #{config[:key_pair]} exists in #{az}...."
+      keypairs = os.keypairs()
+      if (keypairs[config[:key_pair]].nil? && keypairs[config[:key_pair].to_sym].nil?)
+        Logger.warn "#{config[:key_pair]} isn't available, uploading the key"

-
-
-
-
-
-
-
+        # upload the key
+        key = os.create_keypair({:name=> config[:key_pair], :public_key=> File.read(config[:key_public])})
+        Logger.warn "#{config[:key_pair]} fingerprint=#{key[:fingerprint]}"
+      else
+        Logger.info "#{config[:key_pair]} fingerprint=#{keypairs[config[:key_pair].to_sym][:fingerprint]}"
+      end
+
+      # TODO: check that security group exists
+      # we should have a security group that matches each role
+      # get all the secgroups
+      security_groups = os.security_groups()
+      # extract the names
+      sg_names = security_groups.map { |secgroup, secgroup_details| secgroup_details[:name] }
+
+      config[:roles].each do |role, role_details|
+        # is does the secgroup exist?
+        if sg_names.include?(role.to_s)
+          Logger.info "security group #{role} exists in #{az}"
+        else
+          Logger.error "security group #{role} is missing in #{az}"
+        end
+      end
     end
   end

-def Stack.
-#
-
-
-
-
-
-
-
-
-
-
-
+  def Stack.generate_knife_rb(config)
+    # generate a project/.chef/knife.rb from our config
+    # (assumes the chef server is running for public IP access etc)
+
+    # find the chef server, if we need to
+    if config[:chef_server_hostname].nil? || config[:chef_server_private].nil? || config[:chef_server_public]
+      Logger.debug { "Attempting to discover the chef server details" }
+      ours = Stack.get_our_instances(config)
+      ours.each do |node, node_details|
+        if node_details[:role] == :chef
+          Logger.debug { "Found the Chef server: #{node} #{node_details}" }
+          Stack.set_chef_server(config, node)
+        end
       end
-end
-
+    end
+
+    # CWD shoud be chef-repo/bootstrap, so the project .chef directory should be
+    dot_chef_abs = File.absolute_path(config[:stackhome] + '/' + config[:dot_chef])
+
+    if !File.directory?(dot_chef_abs)
+      Logger.warn "#{dot_chef_abs} doesn't exist, creating it..."
+      Dir.mkdir(dot_chef_abs)
+    end

-
-
-connection = Stack.connect(config)
+    client_key = dot_chef_abs + '/' + config[:name] + '-' + ENV['USER'] + '.pem'
+    validation_key = dot_chef_abs + '/' + config[:name] + '-' + 'validation.pem'

-
-
-
-
-
-
-
-
-
-
-
+    Logger.debug "stackhome: #{config[:stackhome]}"
+    Logger.debug "Current user client key: #{client_key}"
+    Logger.debug "New Host Validation key: #{validation_key}"
+
+    knife_rb_template = %q{
+    log_level :info
+    log_location STDOUT
+    node_name '<%=ENV['USER']%>'
+    # use the HPCS_ENV environment name to pick the correct key
+    client_key '<%=dot_chef_abs%>/' + ENV['HPCS_ENV'] + '-' + ENV['USER'] + '.pem'
+    validation_client_name 'chef-validator'
+    validation_key '<%=dot_chef_abs%>/' + ENV['HPCS_ENV'] + '-validation.pem'
+    chef_server_url '<%=config[:chef_server_public]%>'
+    cache_type 'BasicFile'
+    cache_options( :path => '<%=dot_chef_abs%>/checksums' )
+    cookbook_path [ '<%=config[:stackhome]%>/cookbooks' ]
+    }
+
+    knife_rb_erb = ERB.new(knife_rb_template)
+    knife_rb = knife_rb_erb.result(binding)
+
+    krb = File.new(dot_chef_abs + '/knife.rb', "w")
+    krb.truncate(0)
+    krb.puts knife_rb
+    krb.close
+  end
+
+  # position is really the node number in a role, i.e. 1..count
+  def Stack.generate_hostname(config, role, position)
+    role_details = config[:roles][role]
+
+    # TODO: don't calculate this everytime, shift out to a hash lookup
+    Logger.debug config
+    Logger.debug config[:site_template]
+    Logger.debug role_details
+    Logger.debug role_details[:azs]
+
+    site = sprintf(config[:site_template], role_details[:azs][position-1].split('.')[0].sub(/-/, ''))
+
+    # generate the hostname
+    hostname = sprintf(config[:name_template], config[:global_service_name], site, role, position)
+
+    hostname
+  end
+
+  def Stack.generate_server_names(config)
+    Stack.populate_config(config)
+    config[:hostnames] = config[:node_details].keys
+    config[:hostnames]
+  end
+
+  def Stack.populate_config(config)
+    # config[:role_details] contains built out role details with defaults filled in from stack defaults
+    # config[:node_details] contains node details built out from role_details
+
+    # set some sensible defaults to the stack-wide defaults if they haven't been set in the Stackfile.
+    if config[:provisioner].nil?
+      Logger.warn { "Defaulting to chef for config[:provisioner] "}
+      config[:provisioner] = 'chef'
+    end
+
+    if config[:dot_chef].nil?
+      Logger.warn { "Defaulting to .chef for config[:dot_chef] "}
+      config[:dot_chef] = '.chef'
+    end
+
+    if config[:chef_environment].nil?
+      Logger.warn { "Defaulting to _default for config[:chef_environment]" }
+      config[:chef_environment] = '_default'
+    end
+
+    if config[:chef_validation_pem].nil?
+      Logger.warn { "Defaulting to .chef/validation.pem for config[:chef_validation_pem]" }
+      config[:chef_validation_pem] = '.chef/validation.pem'
+    end
+
+    if config[:name_template].nil?
+      Logger.warn { "Defaulting to '%s-%s-%s%04d' for config[:name_template]" }
+      config[:name_template] = '%s-%s-%s%04d'
+    end
+
+    if config[:site_template].nil?
+      Logger.warn { "Defaulting to '%s' for config[:site_template]" }
+      config[:site_template] = '%s'
+    end
+
+    if config[:global_service_name].nil?
+      Logger.error { "Defaulting to 'UNKNOWN' for config[:global_service_name]" }
+      config[:site_template] = 'UNKNOWN'
+    end
+
+
+    if config[:node_details].nil?
+      Logger.debug { "Initializing config[:node_details] and config[:azs]" }
+      config[:node_details] = Hash.new
+      config[:azs] = Array.new
+
+      config[:roles].each do |role,role_details|
+        Logger.debug { "Setting defaults for #{role}" }
+
+        # default to 1 node of this role if :count isn't set
+        if role_details[:count].nil?
+          role_details[:count] = 1
         end
+
+        if (role_details[:data_dir].nil?)
+          role_details[:data_dir] = '/dummy'
+        end
+
+        # Has the cloud_config_yaml been overridden?
+        if (role_details[:cloud_config_yaml])
+          role_details[:cloud_config_yaml] = Stack.find_file(config, role_details[:cloud_config_yaml])
+        else
+          role_details[:cloud_config_yaml] = Stack.find_file(config, 'cloud-config.yaml')
+        end
+
+        # Has the default bootstrap script been overridden
+        if (role_details[:bootstrap])
+          if (role_details[:bootstrap].empty?)
+            Logger.debug { "role_details[:bootstrap] is empty, ignoring" }
+          else
+            role_details[:bootstrap] = Stack.find_file(config, role_details[:bootstrap])
+          end
+        else
+          role_details[:bootstrap] = Stack.find_file(config, 'chef-client-bootstrap-excl-validation-pem.sh')
+        end
+
+        # we default to the role name for the security group unless explicitly set
+        if role_details[:security_group].nil?
+          role_details[:security_group] = role.to_s
+        end
+
+        (1..role_details[:count]).each do |p|
+          Logger.debug { "Populating the config[:role_details][:azs] array with AZ" }
+          role_details[:azs] = Array.new if role_details[:azs].nil?
+
+          # is there an az set for this node?
+          if role_details[:azs][p-1].nil?
+            # inherit the global az
+            Logger.debug { "Inheriting the AZ for #{role} (#{config['REGION']})" }
+            role_details[:azs][p-1] = config['REGION']
+          end
+
+          # add this AZ to the AZ list, we'll dedupe later
+          config[:azs] << role_details[:azs][p-1]
+
+          hostname = Stack.generate_hostname(config, role, p)
+          Logger.debug { "Setting node_details for #{hostname}, using element #{p}-1 from #{role_details[:azs]}" }
+          config[:node_details][hostname] = { :az => role_details[:azs][p-1], :region => role_details[:azs][p-1], :role => role }
+        end
       end
     end
-
+    config[:azs].uniq!
+
+    # if set the region specific settings from the global settings if not already specified
+    config[:azs].each do |az|
+      # we store region spefic stuff in hash
+      config[az] = Hash.new if config[az].nil?
+
+      config[az]['image_id'] = config['image_id'] if config[az]['image_id'].nil?
+    end
+
+    config[:node_details]
+  end
+
+  # get all instances running in the current config
+  # return a hash where key is the instance name, value is another hash containing :region, :id, :addresses
+  def Stack.get_all_instances(config, refresh = false)
+    if config[:all_instances].nil? || refresh
+      # we need to get the server list for each AZ mentioned in the config[:roles][:role][:azs], this is populated by Stack.populate_config
+      Stack.populate_config(config)
+
+      # get the current list of servers from OpenStack & generate a hash, keyed on name
+      servers = Hash.new
+      config[:azs].each do |az|
+        os = Stack.connect(config, az)
+        os.servers.each do |server|
+          servers[server[:name]] = {
+            :region => az,
+            :id => server[:id],
+            :addresses => os.server(server[:id]).addresses
+          }
+        end
+      end
+      config[:all_instances] = servers
+    end
+    config[:all_instances]
   end

   def Stack.show_running(config)
-
-
-
-
+    # TODO: optionally show the hosts that are missing
+    ours = Stack.get_our_instances(config)
+
+    ours.each do |node, node_details|
+      printf("%-30s %20s %8d %16s %s\n", node, node_details[:region], node_details[:id], node_details[:role], node_details[:addresses].map { |address| address.address })
     end
   end
-
-
-
-
-
-
+
+  # Add an instance to the :all_instances hash, instead of having to poll the whole lot again
+  def Stack.add_instance(config, hostname, region, id, addresses)
+    config[:all_instances][hostname] = { :region => region, :id => id, :addresses => addresses}
+  end
+
+  def Stack.ssh(config, hostname = nil, user = ENV['USER'], command = nil)
+    # ssh to a host, or all hosts
+
+    # get all running instances
+    servers = Stack.get_our_instances(config)
+
+    if hostname.nil?
+      Logger.debug { "request to SSH to all hosts" }
+      servers.each do |host, details|
+        public_ip = Stack.get_public_ip(config, host)
+        Logger.info { "#{host} #{public_ip}" }
+        cmd_output = `ssh -oStrictHostKeyChecking=no -l #{user} #{public_ip} "#{command}"`
+        Logger.info { "#{host} #{public_ip} #{cmd_output}" }
+      end
     else
-
-pp running_instances[fqdn]
-running_instances[fqdn].destroy
+      Logger.debug { "request to SSH to #{servers[hostname]}" }
     end
   end

-def Stack.show_details(config)
-# create a connection
-connection = Stack.connect(config)
-
-pp connection.describe_regions
-pp connection.describe_availability_zones

-
+  def Stack.get_our_instances(config)
+    # build an hash of running instances that match our generated hostnames
+    node_details = Stack.populate_config(config)

-
-
+    # get all of our hostnames
+    hostnames = Stack.generate_server_names(config)
+
+    # get all running instances
+    servers = Stack.get_all_instances(config)
+
+    running = Hash.new
+    # do any of the list of servers in OpenStack match one of our hostnames?
+    hostnames.each do |hostname|
+      if (servers.include?(hostname))
+        # return the instance details merged with the node_details (info like role)
+        running[hostname] = servers[hostname].merge(node_details[hostname])
+      end
+    end
+
+    running
   end

-def
-
-
+  def Stack.delete_node(config, node)
+    # this also populates out unspecified defaults, like az
+    Stack.populate_config(config)
+    # get info about all instances running in our account & AZs
+    Stack.get_all_instances(config)
+
+    if (config[:all_instances][node].nil?)
+      Logger.info "Sorry, #{node} doesn't exist or isn't running"
     else
-
+      Logger.info "Deleting node #{node} in #{config[:all_instances][node][:region]}..."
+      os = Stack.connect(config, config[:all_instances][node][:region])
+      instance = os.get_server(config[:all_instances][node][:id])
+      instance.delete!
+    end
+  end
+
+  def Stack.delete_all(config)
+    # this also populates out unspecified defaults, like az
+    Stack.populate_config(config)
+
+    # get the list of nodes we consider 'ours', i.e. with hostnames that match
+    # those generated by this stack
+    ours = Stack.get_our_instances(config)
+
+    # do any of the list of servers in OpenStack match one of our hostnames?
+    ours.each do |node, node_details|
+      Logger.info "Deleting #{node}"
+      os = Stack.connect(config, config[:all_instances][node][:region])
+      d = os.get_server(config[:all_instances][node][:id])
+      d.delete!
+    end
+  end
+
+  def Stack.get_public_ip(config, hostname)
+    # get a public address from the instance
+    # (could be either the dynamic or one of our floating IPs
+    config[:all_instances][hostname][:addresses].each do |address|
+      if address.label == 'public'
+        return address.address
+      end
+    end
+  end
+
+  def Stack.set_chef_server(config, chef_server)
+    # set the private & public URLs for the chef server,
+    # called either after we create the Chef Server, or skip over it
+    Logger.debug "Setting :chef_server_hostname, chef_server_private & chef_server_public details (using #{chef_server})"
+
+    config[:chef_server_hostname] = chef_server
+    # get the internal IP of this instance....which we should have stored in config[:all_instances]
+    if config[:all_instances][chef_server] && config[:all_instances][chef_server][:addresses]
+      config[:all_instances][chef_server][:addresses].each do |address|
+        # find the private IP, any old private IP will do...
+        if (address.label == 'private')
+          config[:chef_server_private] = "http://#{address.address}:4000/"
+          Logger.info "Setting the internal Chef URL to #{config[:chef_server_private]}"
+        end
+
+        # only set the public url if it hasn't been set in the config
+        if ((config[:chef_server_public].nil? || config[:chef_server_public].empty?) && address.label == 'public')
+          config[:chef_server_public] = "http://#{address.address}:4000/"
+          Logger.info "Setting the public Chef URL to #{config[:chef_server_public]}"
+        end
+      end
+    end
+  end
+
+  def Stack.secgroup_sync(config)
+    # 1) get all the IP information we have
+    # 2) generate the json to describe that to the "stackhelper secgroup-sync" tool
+    # 3) run "stackhelper secgroup-sync --some-file our-ips.json"
+    ours = Stack.get_our_instances(config)
+
+    secgroup_ips = Hash.new
+    # walk the list of hosts, dumping the IPs into role buckets
+    ours.each do |instance, instance_details|
+      secgroup_ips[instance_details[:role]] = Array.new if secgroup_ips[instance_details[:role]].nil?
+
+      #secgroup_ips[instance_details[:role]] << instance_details[:addresses].map { |address| address.address }
+      secgroup_ips[instance_details[:role]] << instance_details[:addresses].map do |address|
+        if (address.label == 'public')
+          address.address
+        else
+          next
+        end
+      end
+
+      # we potentially have an array of arrays, so flatten them
+      secgroup_ips[instance_details[:role]].flatten!
+
+      # delete any nil's that we collected due to skipping private ips
+      secgroup_ips[instance_details[:role]].delete_if {|x| x.nil? }
+    end
+
+    # dump the json to a temp file
+    #sg_json = Tempfile.new(['secgroup_ips', '.json'])
+    sg_json = File.new('secgroup_ips.json', "w")
+    sg_json.write(secgroup_ips.to_json)
+    sg_json.close
+
+    # run the secgroup-sync tool, across each AZ/REGION
+    config[:azs].each do |az|
+      Logger.info "Syncing security groups in #{az}"
+      system("stackhelper --os-region-name #{az} secgroup-sync --secgroup-json secgroups.json --additional-group-json #{sg_json.path}")
+    end
+  end
+
+  # if we're passed a role, only deploy this role.
+  def Stack.deploy_all(config, role_to_deploy = nil)
+    Stack.validate(config)
+
+    # this also populates out unspecified defaults, like az
+    node_details = Stack.populate_config(config)
+    # get info about all instances running in our account & AZs
+    servers = Stack.get_all_instances(config)
+
+    # this is our main loop iterator, generates each host
+    config[:roles].each do |role,role_details|
+      Logger.debug { "Iterating over roles, this is #{role}, role_details = #{role_details}" }
+
+      (1..role_details[:count]).each do |p|
+        hostname = Stack.generate_hostname(config, role, p)
+        Logger.debug { "Iterating over nodes in #{role}, this is #{hostname}" }
+
+        # configure the global :chef_server details if this the chef server
+        if role_details[:chef_server]
+          Stack.set_chef_server(config, hostname)
+        end
+
+        # does this node already exist?
+        if (!servers[hostname].nil?)
+          Logger.info { "#{hostname} already exists, skipping.." }
+          next
+        end
+
+        Logger.debug { "Deploying #{role}, role_to_deploy = #{role_to_deploy}" }
+        if ((role_to_deploy.nil?) || (role_to_deploy.to_s == role.to_s))
+          if (role_details[:skip_chef_prereg] == true || role_details[:chef_server])
+            Logger.debug "Skipping Chef pre-reg for #{hostname}"
+          else
+            # Prepare Chef
+            # 1) delete the client if it exists
+            knife_client_list = `knife client list | grep #{hostname}`
+            knife_client_list.sub!(/\s/,'')
+            if knife_client_list.length() > 0
+              # we should delete the client to make way for this new machine
+              Logger.info `knife client delete --yes #{hostname}`
+            end
+
+            # knife node create -d --environment $CHEF_ENVIRONMENT $SERVER_NAME
+            # knife node run_list add -d --environment $CHEF_ENVIRONMENT $SERVER_NAME "role[${ROLE}]"
+            # this relies on .chef matching the stacks config (TODO: poke the Chef API directly?)
+            cmd = "EDITOR=\"perl -p -i -e 's/_default/#{config[:chef_environment]}/'\" knife node create --server-url #{config[:chef_server_public]} #{hostname}"
+            Logger.debug cmd
+            knife_node_create = `#{cmd}`
+            Logger.info "Priming Chef Server: #{knife_node_create}"
+
+            cmd = "knife node run_list add -d --environment #{config[:chef_environment]} #{hostname} \"role[#{role}]\""
+            Logger.info cmd
+            knife_node_run_list = `#{cmd}`
+            Logger.info "Priming Chef Server: #{knife_node_run_list}"
+          end
+
+          # build the user-data content for this host
+          # (we have a local copy of https://github.com/lovelysystems/cloud-init/blob/master/tools/write-mime-multipart)
+          # 1) generate the mimi-multipart file
+          # libdir = where our shipped scripts live
+          # (use config[:stackhome] for "project" config/scripts)
+          libdir = File.realpath(@@gemhome + '/lib')
+          multipart_cmd = "#{libdir}/write-mime-multipart #{role_details[:bootstrap]} #{role_details[:cloud_config_yaml]}"
+          Logger.debug { "multipart_cmd = #{multipart_cmd}" }
+          multipart = `#{multipart_cmd}`
+          # 2) replace the tokens (CHEF_SERVER, CHEF_ENVIRONMENT, SERVER_NAME, ROLE)
+          multipart.gsub!(%q!%HOSTNAME%!, hostname)
+
+          Logger.info "Chef server is #{config[:chef_server_hostname]}, which is in #{config[:node_details][config[:chef_server_hostname]][:region]}"
+          Logger.info "#{hostname}'s region is #{config[:node_details][hostname][:region]}"
+          # if this host is in the same region/az, use the private URL, if not, use the public url
+          if (config[:node_details][hostname][:region] == config[:node_details][config[:chef_server_hostname]][:region]) && !config[:chef_server_private].nil?
+            multipart.gsub!(%q!%CHEF_SERVER%!, config[:chef_server_private])
+          elsif !config[:chef_server_public].nil?
+            multipart.gsub!(%q!%CHEF_SERVER%!, config[:chef_server_public])
+          else
+            Logger.warn { "Not setting the chef url for #{hostname} as neither chef_server_private or chef_server_public are valid yet" }
+          end
+          multipart.gsub!(%q!%CHEF_ENVIRONMENT%!, config[:chef_environment])
+          if File.exists?(config[:chef_validation_pem])
+            multipart.gsub!(%q!%CHEF_VALIDATION_PEM%!, File.read(config[:chef_validation_pem]))
+          else
+            Logger.warn "Skipping #{config[:chef_validation_pem]} substitution in user-data"
+          end
+          multipart.gsub!(%q!%SERVER_NAME%!, hostname)
+          multipart.gsub!(%q!%ROLE%!, role.to_s)
+          multipart.gsub!(%q!%DATA_DIR%!, role_details[:data_dir])
+
+          Logger.info "Creating #{hostname} in #{node_details[hostname][:az]} with role #{role}"
+
+          # this will get put in /meta.js
+          metadata = { 'region' => node_details[hostname][:az], 'chef_role' => role }
+
+          os = Stack.connect(config, node_details[hostname][:az])
+          newserver = os.create_server(:name => hostname,
+            :imageRef => config[node_details[hostname][:az]]['image_id'],
+            :flavorRef => config['flavor_id'],
+            :security_groups=>[role_details[:security_group]],
+            :user_data => Base64.encode64(multipart),
+            :metadata => metadata,
+            :key_name => config[:key_pair])
+
+          # wait for the server to become ACTIVE before proceeding
+          while (newserver.status != 'ACTIVE') do
+            print '.'
+            sleep 1
+            # refresh the status
+            newserver.refresh
+          end
+          puts
+
+          # refresh the config[:all_instances] with the newly built server
+          # TODO: we should be able to just add this server, instead of re-polling everything
+          Stack.get_all_instances(config, true)
+
+          # refresh the chef_server details..we should have IPs now
+          if role_details[:chef_server]
+            Stack.set_chef_server(config, hostname)
+            Stack.generate_knife_rb(config)
+          end
+
+          # attach a floating IP to this if we have one
+          if role_details[:floating_ips] && role_details[:floating_ips][p-1]
+            floating_ip = role_details[:floating_ips][p-1]
+            Logger.info "Attaching #{floating_ip} to #{hostname}\n"
+            # nova --os-region-name $REGION add-floating-ip $SERVER_NAME $FLOATING_IP
+            floating_ip_add = `nova --os-region-name #{node_details[hostname][:az]} add-floating-ip #{hostname} #{floating_ip}`
+            Logger.info floating_ip_add
+          end
+
+          # refresh the secgroups ASAP
+          Stack.secgroup_sync(config)
+
+          # run any post-install scripts, these are run from the current host, not the nodes
+          if role_details[:post_install_script]
+            # convert when we got passed to an absolute path
+            post_install_script_abs = File.realpath(config[:stackhome] + '/' + role_details[:post_install_script])
+            post_install_cwd_abs = File.realpath(config[:stackhome] + '/' + role_details[:post_install_cwd])
+
+            # replace any tokens in the argument
+            public_ip = Stack.get_public_ip(config, hostname)
+            role_details[:post_install_args].sub!(%q!%PUBLIC_IP%!, public_ip)
+            # we system this, as they are can give live feed back
+            Logger.info "Executing '#{post_install_script_abs} #{role_details[:post_install_args]}' as the post_install_script"
+            system("cd #{post_install_cwd_abs} ; #{post_install_script_abs} #{role_details[:post_install_args]}")
+          end
+        else
+          Logger.info "Skipped role #{role}"
+        end
+      end
     end
   end

-def
-#
-
-
-
-
+  def Stack.find_file(config, filename)
+    # find a file, using the standard path precedence
+    # 1) cwd
+    # 2) stackhome
+    # 3) gemhome/lib
+    dirs = [ './' ]
+    dirs.push(config[:stackhome])
+    dirs.push(@@gemhome + '/lib')
+
+    Logger.debug "find_file, looking for #{filename} in #{dirs}"
+    filename_fqp = ''
+    dirs.each do |dir|
+      fqp = dir + '/' + filename
+      Logger.debug "find_file: checking #{fqp}"
+      if File.file?(fqp)
+        Logger.debug "find_file: found #{fqp}!"
+        filename_fqp = File.expand_path(fqp)
+      end
     end
+
+    if filename_fqp.empty?
+      Logger.warn "couldn't find #{filename} in #{dirs}"
+    end
+    filename_fqp
   end

 end
+
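The new entry points in lib/stack.rb chain together roughly as follows; this is an illustrative sketch of a driver script, not the actual bin/stack-kicker source:

    require 'stack'

    # pick a stack out of ./Stackfile (the name may be omitted when only one stack is defined)
    config = Stack.select_stack('Stackfile', 'my-stack')   # 'my-stack' is a placeholder name

    Stack.validate(config)      # check creds, flavor, image, keypair and security groups per AZ
    Stack.show_stack(config)    # print the hostnames this stack would create
    Stack.deploy_all(config)    # boot any missing nodes (pass a role name to limit the deploy)
    Stack.show_running(config)  # list the matching instances that are running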