chef-workflow 0.1.1 → 0.2.0
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- data/CHANGELOG.md +19 -0
- data/LICENSE.txt +2 -2
- data/README.md +31 -142
- data/bin/chef-workflow-bootstrap +6 -0
- data/chef-workflow.gemspec +4 -1
- data/lib/chef-workflow.rb +41 -39
- data/lib/chef-workflow/support/attr.rb +28 -26
- data/lib/chef-workflow/support/db.rb +26 -0
- data/lib/chef-workflow/support/db/basic.rb +225 -0
- data/lib/chef-workflow/support/db/group.rb +72 -0
- data/lib/chef-workflow/support/debug.rb +47 -45
- data/lib/chef-workflow/support/ec2.rb +136 -134
- data/lib/chef-workflow/support/general.rb +46 -54
- data/lib/chef-workflow/support/generic.rb +27 -23
- data/lib/chef-workflow/support/ip.rb +89 -103
- data/lib/chef-workflow/support/knife-plugin.rb +26 -24
- data/lib/chef-workflow/support/knife.rb +76 -102
- data/lib/chef-workflow/support/scheduler.rb +319 -324
- data/lib/chef-workflow/support/ssh.rb +100 -0
- data/lib/chef-workflow/support/vagrant.rb +34 -30
- data/lib/chef-workflow/support/vm.rb +25 -54
- data/lib/chef-workflow/support/vm/chef_server.rb +28 -19
- data/lib/chef-workflow/support/vm/ec2.rb +135 -106
- data/lib/chef-workflow/support/vm/helpers/knife.rb +26 -0
- data/lib/chef-workflow/support/vm/knife.rb +218 -189
- data/lib/chef-workflow/support/vm/vagrant.rb +90 -74
- data/lib/chef-workflow/version.rb +3 -5
- metadata +57 -4
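The bulk of the rewrite is in data/lib/chef-workflow/support/scheduler.rb, whose hunk follows. The user-visible change there is that the scheduler no longer persists its own state: the constructor loses its `at_exit_hook` argument and the `write_state`/`IPSupport` bookkeeping, and waiter state moves into a database-backed set from `chef-workflow/support/db/basic`. A minimal before/after construction sketch, inferred from the hunk rather than taken from the package's own docs:

```ruby
# 0.1.1: Scheduler is a top-level class; the constructor takes an at_exit_hook
# flag (default true) and installs `at_exit { write_state }` to flush state to disk.
scheduler = Scheduler.new            # same as Scheduler.new(true)

# 0.2.0: the class is namespaced under ChefWorkflow, the constructor takes no
# arguments, and waiters live in ChefWorkflow::DatabaseSupport::Set, so there is
# no write_state hook to install.
scheduler = ChefWorkflow::Scheduler.new
```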
@@ -4,400 +4,395 @@ require 'timeout'
 require 'chef-workflow/support/attr'
 require 'chef-workflow/support/debug'
 require 'chef-workflow/support/vm'
+require 'chef-workflow/support/db/basic'
+
+module ChefWorkflow
+  #
+  # This is a scheduler for provisioners. It can run in parallel or serial mode,
+  # and is dependency-based, that is, it will only schedule items for execution
+  # which have all their dependencies satisfied and items that haven't will wait
+  # to execute until that happens.
+  #
+  class Scheduler
+    extend ChefWorkflow::AttrSupport
+    include ChefWorkflow::DebugSupport
+
+    ##
+    # :attr:
+    #
+    # Turn serial mode on (off by default). This forces the scheduler to execute
+    # every provision in order, even if it could handle multiple provisions at
+    # the same time.
+    #
+    fancy_attr :serial
+
+    ##
+    # :attr:
+    #
+    # Ignore exceptions while deprovisioning. Default is false.
+    #
+
+    fancy_attr :force_deprovision
+
+    #
+    # Constructor. If the first argument is true, will install an `at_exit` hook
+    # to write out the VM and IP databases.
+    #
+    def initialize
+      @force_deprovision = false
+      @solved_mutex = Mutex.new
+      @waiters_mutex = Mutex.new
+      @serial = false
+      @solver_thread = nil
+      @working = { }
+      @waiters = ChefWorkflow::DatabaseSupport::Set.new('vm_scheduler', 'waiters')
+      @queue = Queue.new
+      @vm = VM.new
+    end
 
-#
-#
-#
-
-
-#
-class Scheduler
-  extend AttrSupport
-  include DebugSupport
-
-  ##
-  # :attr:
-  #
-  # Turn serial mode on (off by default). This forces the scheduler to execute
-  # every provision in order, even if it could handle multiple provisions at
-  # the same time.
-  #
-  fancy_attr :serial
-
-  ##
-  # :attr:
-  #
-  # Ignore exceptions while deprovisioning. Default is false.
-  #
-
-  fancy_attr :force_deprovision
-
-  #
-  # Constructor. If the first argument is true, will install an `at_exit` hook
-  # to write out the VM and IP databases.
-  #
-  def initialize(at_exit_hook=true)
-    @force_deprovision = false
-    @solved_mutex = Mutex.new
-    @waiters_mutex = Mutex.new
-    @serial = false
-    @solver_thread = nil
-    @working = { }
-    @waiters = Set.new
-    @queue = Queue.new
-    @vm = VM.load_from_file || VM.new
-
-    if at_exit_hook
-      at_exit { write_state }
+    #
+    # Helper to assist with dealing with a VM object
+    #
+    def solved
+      @vm.provisioned
     end
-  end
 
-
-
-
-
-
-
-    # we don't
-    IPSupport.singleton.write
-  end
+    #
+    # Helper to assist with dealing with a VM object
+    #
+    def vm_groups
+      @vm.groups
+    end
 
-
-
-
-
-
-
+    #
+    # Helper to assist with dealing with a VM object
+    #
+    def vm_dependencies
+      @vm.dependencies
+    end
 
-
-
-
-
-
-
-
-  #
-  # Helper to assist with dealing with a VM object
-  #
-  def vm_dependencies
-    @vm.dependencies
-  end
+    #
+    # Helper to assist with dealing with a VM object
+    #
+    def vm_working
+      @vm.working
+    end
 
-
-
-
-
-
-
+    #
+    # Schedule a group of VMs for provision. This takes a group name, which is a
+    # string, an array of provisioner objects, and a list of string dependencies.
+    # If anything in the dependencies list hasn't been pre-declared, it refuses
+    # to continue.
+    #
+    # This method will return nil if the server group is already provisioned.
+    #
+    def schedule_provision(group_name, provisioner, dependencies=[])
+      return nil if vm_groups[group_name]
+      provisioner = [provisioner] unless provisioner.kind_of?(Array)
+      provisioner.each { |x| x.name = group_name }
+      vm_groups[group_name] = provisioner
+
+      unless dependencies.all? { |x| vm_groups.has_key?(x) }
+        raise "One of your dependencies for #{group_name} has not been pre-declared. Cannot continue"
+      end
 
-
-
-
-
-  # to continue.
-  #
-  # This method will return nil if the server group is already provisioned.
-  #
-  def schedule_provision(group_name, provisioner, dependencies=[])
-    return nil if vm_groups[group_name]
-    provisioner = [provisioner] unless provisioner.kind_of?(Array)
-    provisioner.each { |x| x.name = group_name }
-    vm_groups[group_name] = provisioner
-
-    unless dependencies.all? { |x| vm_groups.has_key?(x) }
-      raise "One of your dependencies for #{group_name} has not been pre-declared. Cannot continue"
+      vm_dependencies[group_name] = dependencies.to_set
+      @waiters_mutex.synchronize do
+        @waiters.add(group_name)
+      end
     end
 
-
-
-
+    #
+    # Sleep until this list of dependencies are resolved. In parallel mode, will
+    # raise if an exeception occurred while waiting for these resources. In
+    # serial mode, wait_for just returns nil.
+    #
+    def wait_for(*dependencies)
+      return nil if @serial
+      return nil if dependencies.empty?
+
+      dep_set = dependencies.to_set
+      until dep_set & solved == dep_set
+        sleep 1
+        @solver_thread.join unless @solver_thread.alive?
+      end
     end
-  end
 
-
-
-
-
-
-
-
-
-
-
-
-      sleep 1
-      @solver_thread.join unless @solver_thread.alive?
-    end
-  end
+    #
+    # Helper method for scheduling. Wraps items in a timeout and immediately
+    # checks all running workers for exceptions, which are immediately bubbled up
+    # if there are any. If do_loop is true, it will retry the timeout.
+    #
+    def with_timeout(do_loop=true)
+      Timeout.timeout(10) do
+        dead_working = @working.values.reject(&:alive?)
+        if dead_working.size > 0
+          dead_working.map(&:join)
+        end
 
-
-  # Helper method for scheduling. Wraps items in a timeout and immediately
-  # checks all running workers for exceptions, which are immediately bubbled up
-  # if there are any. If do_loop is true, it will retry the timeout.
-  #
-  def with_timeout(do_loop=true)
-    Timeout.timeout(10) do
-      dead_working = @working.values.reject(&:alive?)
-      if dead_working.size > 0
-        dead_working.map(&:join)
+        yield
       end
-
-
+    rescue TimeoutError
+      retry if do_loop
     end
-  rescue TimeoutError
-    retry if do_loop
-  end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    #
+    # Start the scheduler. In serial mode this call will block until the whole
+    # dependency graph is satisfied, or one of the provisions fails, at which
+    # point an exception will be raised. In parallel mode, this call completes
+    # immediately, and you should use #wait_for to control main thread flow.
+    #
+    # This call also installs a SIGINFO (Ctrl+T in the terminal on macs) and
+    # SIGUSR2 handler which can be used to get information on the status of
+    # what's solved and what's working.
+    #
+    # Immediately returns if in threaded mode and the solver is already running.
+    #
+    def run
+      # short circuit if we're not serial and already running
+      return if @solver_thread and !@serial
+
+      handler = lambda do |*args|
+        p ["solved:", solved.to_a]
+        p ["working:", vm_working.to_a]
+        p ["waiting:", @waiters.to_a]
+      end
 
-
+      %w[USR2 INFO].each { |sig| trap(sig, &handler) if Signal.list[sig] }
 
-
-
+      queue_runner = lambda do
+        run = true
 
-
-
+        while run
+          service_resolved_waiters
 
-
+          ready = []
 
-
-
-
-
-
-
-
-
-
+          if @queue.empty?
+            if @serial
+              return
+            else
+              with_timeout do
+                # this is where most of the execution time is spent, so ensure
+                # waiters get considered here.
+                service_resolved_waiters
+                ready << @queue.shift
+              end
             end
           end
-        end
 
-
-
-
+          while !@queue.empty?
+            ready << @queue.shift
+          end
 
-
-
-
-
-
-
+          ready.each do |r|
+            if r
+              @solved_mutex.synchronize do
+                solved.add(r)
+                @working.delete(r)
+                vm_working.delete(r)
+              end
+            else
+              run = false
             end
-          else
-            run = false
          end
        end
      end
-    end
 
-
-
-      queue_runner.call
-    else
-      @solver_thread = Thread.new do
-        with_timeout(false) { service_resolved_waiters }
+      if @serial
+        service_resolved_waiters
        queue_runner.call
-
+      else
+        @solver_thread = Thread.new do
+          with_timeout(false) { service_resolved_waiters }
+          queue_runner.call
+        end
 
-
-
-
-
-
-
-
-
-
-
-
+        # we depend on at_exit hooks being fired, and Thread#abort_on_exception
+        # doesn't fire them. This solution bubbles up the exceptions in a similar
+        # fashion without actually sacrificing the at_exit functionality.
+        Thread.new do
+          begin
+            @solver_thread.join
+          rescue Exception => e
+            $stderr.puts "Solver thread encountered an exception:"
+            $stderr.puts "#{e.class.name}: #{e.message}"
+            $stderr.puts e.backtrace.join("\n")
+            Kernel.exit 1
+          end
        end
      end
    end
-  end
 
-
-
-
-
-
-
-
-
-
-
-
-
-  end
-
-  #
-  # This method determines what 'waiters', or provisioners that cannot
-  # provision yet because of unresolved dependencies, can be executed.
-  #
-  def service_resolved_waiters
-    @waiters_mutex.synchronize do
-      @waiters -= (@working.keys.to_set + solved)
+    #
+    # Instructs the scheduler to stop. Note that this is not an interrupt, and
+    # the queue will still be exhausted before terminating.
+    #
+    def stop
+      if @serial
+        @queue << nil
+      else
+        @working.values.map { |v| v.join rescue nil }
+        @queue << nil
+        @solver_thread.join rescue nil
+      end
    end
 
-
-
-
-
-
-
+    #
+    # This method determines what 'waiters', or provisioners that cannot
+    # provision yet because of unresolved dependencies, can be executed.
+    #
+    def service_resolved_waiters
+      @waiters_mutex.synchronize do
+        @waiters.replace(@waiters.to_set - (@working.keys.to_set + solved))
+      end
 
-
+      waiter_iteration = lambda do
+        @waiters.each do |group_name|
+          if (solved.to_set & vm_dependencies[group_name]).to_a == vm_dependencies[group_name]
+            if_debug do
+              $stderr.puts "Provisioning #{group_name}"
+            end
 
-
-
-
-
-
-
-
+            provisioner = vm_groups[group_name]
+
+            provision_block = lambda do
+              # FIXME maybe a way to specify initial args?
+              args = nil
+              provisioner.each do |this_prov|
+                vm_groups[group_name] = provisioner # force a write to the db
+                unless args = this_prov.startup(args)
+                  $stderr.puts "Could not provision #{group_name} with provisioner #{this_prov.class.name}"
+                  raise "Could not provision #{group_name} with provisioner #{this_prov.class.name}"
+                end
              end
+              @queue << group_name
            end
-          @queue << group_name
-        end
 
-
+            vm_working.add(group_name)
 
-
-
-
-
-
-
-
+            if @serial
+              # HACK: just give the working check something that will always work.
+              # Probably should just mock it.
+              @working[group_name] = Thread.new { sleep }
+              provision_block.call
+            else
+              @working[group_name] = Thread.new(&provision_block)
+            end
          end
        end
      end
-  end
 
-
-
-
-
+      if @serial
+        waiter_iteration.call
+      else
+        @waiters_mutex.synchronize(&waiter_iteration)
+      end
    end
-  end
 
-
-
-
-
-
-
-
-
-
-
+    #
+    # Teardown a single group -- modifies the solved formula. Be careful to
+    # resupply dependencies if you use this, as nothing will resolve until you
+    # resupply it.
+    #
+    # This takes an optional argument to wait for the group to be solved before
+    # attempting to tear it down. Setting this to false effectively says, "I know
+    # what I'm doing", and you should feel bad if you file an issue because you
+    # supplied it.
+    #
 
-
-
+    def teardown_group(group_name, wait=true)
+      wait_for(group_name) if wait
 
-
+      dependent_items = vm_dependencies.partition { |k,v| v.include?(group_name) }.first.map(&:first)
 
-
-
-
+      if_debug do
+        if dependent_items.length > 0
+          $stderr.puts "Trying to terminate #{group_name}, found #{dependent_items.inspect} depending on it"
+        end
      end
-    end
 
-
-
+      @solved_mutex.synchronize do
+        dependent_and_working = @working.keys & dependent_items
+
+        if dependent_and_working.count > 0
+          $stderr.puts "#{dependent_and_working.inspect} are depending on #{group_name}, which you are trying to deprovision."
+          $stderr.puts "We can't resolve this problem for you, and future converges may fail during this run that would otherwise work."
+          $stderr.puts "Consider using wait_for to better control the dependencies, or turning serial provisioning on."
+        end
 
-
-        $stderr.puts "#{dependent_and_working.inspect} are depending on #{group_name}, which you are trying to deprovision."
-        $stderr.puts "We can't resolve this problem for you, and future converges may fail during this run that would otherwise work."
-        $stderr.puts "Consider using wait_for to better control the dependencies, or turning serial provisioning on."
+        deprovision_group(group_name)
      end
 
-      deprovision_group(group_name)
    end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        $stderr.puts "Attempting to deprovision group #{group_name}"
-      end
+    #
+    # Performs the deprovision of a group by replaying its provision strategy
+    # backwards and applying the #shutdown method instead of the #startup method.
+    # Removes it from the various state tables if true is set as the second
+    # argument, which is the default.
+    #
+    def deprovision_group(group_name, clean_state=true)
+      provisioner = vm_groups[group_name]
+
+      # if we can't find the provisioner, we probably got asked to clean up
+      # something we never scheduled. Just ignore that.
+      if provisioner and ((solved.to_set + vm_working.to_set).include?(group_name) or @force_deprovision)
+        if_debug do
+          $stderr.puts "Attempting to deprovision group #{group_name}"
+        end
 
-
-
-
-
+        perform_deprovision = lambda do |this_prov|
+          result = this_prov.shutdown
+          unless result
+            if_debug do
+              $stderr.puts "Could not deprovision group #{group_name}."
+            end
          end
+          result
        end
-      end
 
-
-
-
-
-
-
-
-
+        provisioner.reverse.each do |this_prov|
+          if @force_deprovision
+            begin
+              perform_deprovision.call(this_prov)
+            rescue Exception => e
+              if_debug do
+                $stderr.puts "Deprovision #{this_prov.class.name}/#{group_name} had errors:"
+                $stderr.puts "#{e.message}"
+              end
+            end
+          else
+            unless perform_deprovision.call(this_prov)
+              raise "Could not deprovision #{group_name}/#{this_prov.inspect}"
            end
          end
-        else
-          perform_deprovision.call(this_prov)
        end
      end
-    end
 
-
-
-
-
-
+      if clean_state
+        solved.delete(group_name)
+        @waiters_mutex.synchronize do
+          @waiters.delete(group_name)
+        end
+        vm_working.delete(group_name)
+        vm_dependencies.delete(group_name)
+        vm_groups.delete(group_name)
+      end
    end
-  end
 
-
-
-
-
-
-
-
-
-
-
-
+    #
+    # Instruct all provisioners except ones in the exception list to tear down.
+    # Calls #stop as its first action.
+    #
+    # This is always done serially. For sanity.
+    #
+    def teardown(exceptions=[])
+      stop
+
+      (vm_groups.keys.to_set - exceptions.to_set).each do |group_name|
+        deprovision_group(group_name) # clean this after everything finishes
+      end
    end
-
-    write_state
  end
 end