chef-workflow 0.1.1 → 0.2.0

@@ -4,400 +4,395 @@ require 'timeout'
  require 'chef-workflow/support/attr'
  require 'chef-workflow/support/debug'
  require 'chef-workflow/support/vm'
+ require 'chef-workflow/support/db/basic'
+
+ module ChefWorkflow
+ #
+ # This is a scheduler for provisioners. It can run in parallel or serial mode,
+ # and is dependency-based, that is, it will only schedule items for execution
+ # which have all their dependencies satisfied and items that haven't will wait
+ # to execute until that happens.
+ #
+ class Scheduler
+ extend ChefWorkflow::AttrSupport
+ include ChefWorkflow::DebugSupport
+
+ ##
+ # :attr:
+ #
+ # Turn serial mode on (off by default). This forces the scheduler to execute
+ # every provision in order, even if it could handle multiple provisions at
+ # the same time.
+ #
+ fancy_attr :serial
+
+ ##
+ # :attr:
+ #
+ # Ignore exceptions while deprovisioning. Default is false.
+ #
+
+ fancy_attr :force_deprovision
+
+ #
+ # Constructor. Sets up the scheduler's internal state: the mutexes, the work
+ # queue, the database-backed waiters set, and the VM object.
+ #
+ def initialize
+ @force_deprovision = false
+ @solved_mutex = Mutex.new
+ @waiters_mutex = Mutex.new
+ @serial = false
+ @solver_thread = nil
+ @working = { }
+ @waiters = ChefWorkflow::DatabaseSupport::Set.new('vm_scheduler', 'waiters')
+ @queue = Queue.new
+ @vm = VM.new
+ end

- #
- # This is a scheduler for provisioners. It can run in parallel or serial mode,
- # and is dependency-based, that is, it will only schedule items for execution
- # which have all their dependencies satisfied and items that haven't will wait
- # to execute until that happens.
- #
- class Scheduler
- extend AttrSupport
- include DebugSupport
-
- ##
- # :attr:
- #
- # Turn serial mode on (off by default). This forces the scheduler to execute
- # every provision in order, even if it could handle multiple provisions at
- # the same time.
- #
- fancy_attr :serial
-
- ##
- # :attr:
- #
- # Ignore exceptions while deprovisioning. Default is false.
- #
-
- fancy_attr :force_deprovision
-
- #
- # Constructor. If the first argument is true, will install an `at_exit` hook
- # to write out the VM and IP databases.
- #
- def initialize(at_exit_hook=true)
- @force_deprovision = false
- @solved_mutex = Mutex.new
- @waiters_mutex = Mutex.new
- @serial = false
- @solver_thread = nil
- @working = { }
- @waiters = Set.new
- @queue = Queue.new
- @vm = VM.load_from_file || VM.new
-
- if at_exit_hook
- at_exit { write_state }
+ #
+ # Helper to assist with dealing with a VM object
+ #
+ def solved
+ @vm.provisioned
  end
- end

- #
- # Write out the VM and IP databases.
- #
- def write_state
- @vm.save_to_file
- # FIXME not the best place to do this, but we have additional problems if
- # we don't
- IPSupport.singleton.write
- end
+ #
+ # Helper to assist with dealing with a VM object
+ #
+ def vm_groups
+ @vm.groups
+ end

- #
- # Helper to assist with dealing with a VM object
- #
- def solved
- @vm.provisioned
- end
+ #
+ # Helper to assist with dealing with a VM object
+ #
+ def vm_dependencies
+ @vm.dependencies
+ end

- #
- # Helper to assist with dealing with a VM object
- #
- def vm_groups
- @vm.groups
- end
-
- #
- # Helper to assist with dealing with a VM object
- #
- def vm_dependencies
- @vm.dependencies
- end
+ #
+ # Helper to assist with dealing with a VM object
+ #
+ def vm_working
+ @vm.working
+ end

- #
- # Helper to assist with dealing with a VM object
- #
- def vm_working
- @vm.working
- end
+ #
+ # Schedule a group of VMs for provision. This takes a group name, which is a
+ # string, an array of provisioner objects, and a list of string dependencies.
+ # If anything in the dependencies list hasn't been pre-declared, it refuses
+ # to continue.
+ #
+ # This method will return nil if the server group is already provisioned.
+ #
+ def schedule_provision(group_name, provisioner, dependencies=[])
+ return nil if vm_groups[group_name]
+ provisioner = [provisioner] unless provisioner.kind_of?(Array)
+ provisioner.each { |x| x.name = group_name }
+ vm_groups[group_name] = provisioner
+
+ unless dependencies.all? { |x| vm_groups.has_key?(x) }
+ raise "One of your dependencies for #{group_name} has not been pre-declared. Cannot continue"
+ end

- #
- # Schedule a group of VMs for provision. This takes a group name, which is a
- # string, an array of provisioner objects, and a list of string dependencies.
- # If anything in the dependencies list hasn't been pre-declared, it refuses
- # to continue.
- #
- # This method will return nil if the server group is already provisioned.
- #
- def schedule_provision(group_name, provisioner, dependencies=[])
- return nil if vm_groups[group_name]
- provisioner = [provisioner] unless provisioner.kind_of?(Array)
- provisioner.each { |x| x.name = group_name }
- vm_groups[group_name] = provisioner
-
- unless dependencies.all? { |x| vm_groups.has_key?(x) }
- raise "One of your dependencies for #{group_name} has not been pre-declared. Cannot continue"
+ vm_dependencies[group_name] = dependencies.to_set
+ @waiters_mutex.synchronize do
+ @waiters.add(group_name)
+ end
  end

- vm_dependencies[group_name] = dependencies.to_set
- @waiters_mutex.synchronize do
- @waiters.add(group_name)
+ #
+ # Sleep until this list of dependencies is resolved. In parallel mode, will
+ # raise if an exception occurred while waiting for these resources. In
+ # serial mode, wait_for just returns nil.
+ #
+ def wait_for(*dependencies)
+ return nil if @serial
+ return nil if dependencies.empty?
+
+ dep_set = dependencies.to_set
+ until dep_set & solved == dep_set
+ sleep 1
+ @solver_thread.join unless @solver_thread.alive?
+ end
  end
- end

- #
- # Sleep until this list of dependencies are resolved. In parallel mode, will
- # raise if an exeception occurred while waiting for these resources. In
- # serial mode, wait_for just returns nil.
- #
- def wait_for(*dependencies)
- return nil if @serial
- return nil if dependencies.empty?
-
- dep_set = dependencies.to_set
- until dep_set & solved == dep_set
- sleep 1
- @solver_thread.join unless @solver_thread.alive?
- end
- end
+ #
+ # Helper method for scheduling. Wraps items in a timeout and immediately
+ # checks all running workers for exceptions, which are immediately bubbled up
+ # if there are any. If do_loop is true, it will retry the timeout.
+ #
+ def with_timeout(do_loop=true)
+ Timeout.timeout(10) do
+ dead_working = @working.values.reject(&:alive?)
+ if dead_working.size > 0
+ dead_working.map(&:join)
+ end

- #
- # Helper method for scheduling. Wraps items in a timeout and immediately
- # checks all running workers for exceptions, which are immediately bubbled up
- # if there are any. If do_loop is true, it will retry the timeout.
- #
- def with_timeout(do_loop=true)
- Timeout.timeout(10) do
- dead_working = @working.values.reject(&:alive?)
- if dead_working.size > 0
- dead_working.map(&:join)
+ yield
  end
-
- yield
+ rescue TimeoutError
+ retry if do_loop
  end
- rescue TimeoutError
- retry if do_loop
- end

- #
- # Start the scheduler. In serial mode this call will block until the whole
- # dependency graph is satisfied, or one of the provisions fails, at which
- # point an exception will be raised. In parallel mode, this call completes
- # immediately, and you should use #wait_for to control main thread flow.
- #
- # This call also installs a SIGINFO (Ctrl+T in the terminal on macs) and
- # SIGUSR2 handler which can be used to get information on the status of
- # what's solved and what's working.
- #
- # Immediately returns if in threaded mode and the solver is already running.
- #
- def run
- # short circuit if we're not serial and already running
- return if @solver_thread and !@serial
-
- handler = lambda do |*args|
- p ["solved:", solved]
- p ["working:", @working]
- p ["waiting:", @waiters]
- end
+ #
+ # Start the scheduler. In serial mode this call will block until the whole
+ # dependency graph is satisfied, or one of the provisions fails, at which
+ # point an exception will be raised. In parallel mode, this call completes
+ # immediately, and you should use #wait_for to control main thread flow.
+ #
+ # This call also installs a SIGINFO (Ctrl+T in the terminal on macs) and
+ # SIGUSR2 handler which can be used to get information on the status of
+ # what's solved and what's working.
+ #
+ # Immediately returns if in threaded mode and the solver is already running.
+ #
+ def run
+ # short circuit if we're not serial and already running
+ return if @solver_thread and !@serial
+
+ handler = lambda do |*args|
+ p ["solved:", solved.to_a]
+ p ["working:", vm_working.to_a]
+ p ["waiting:", @waiters.to_a]
+ end

- %w[USR2 INFO].each { |sig| trap(sig, &handler) if Signal.list[sig] }
+ %w[USR2 INFO].each { |sig| trap(sig, &handler) if Signal.list[sig] }

- queue_runner = lambda do
- run = true
+ queue_runner = lambda do
+ run = true

- while run
- service_resolved_waiters
+ while run
+ service_resolved_waiters

- ready = []
+ ready = []

- if @queue.empty?
- if @serial
- return
- else
- with_timeout do
- # this is where most of the execution time is spent, so ensure
- # waiters get considered here.
- service_resolved_waiters
- ready << @queue.shift
+ if @queue.empty?
+ if @serial
+ return
+ else
+ with_timeout do
+ # this is where most of the execution time is spent, so ensure
+ # waiters get considered here.
+ service_resolved_waiters
+ ready << @queue.shift
+ end
  end
  end
- end

- while !@queue.empty?
- ready << @queue.shift
- end
+ while !@queue.empty?
+ ready << @queue.shift
+ end

- ready.each do |r|
- if r
- @solved_mutex.synchronize do
- solved.add(r)
- @working.delete(r)
- vm_working.delete(r)
+ ready.each do |r|
+ if r
+ @solved_mutex.synchronize do
+ solved.add(r)
+ @working.delete(r)
+ vm_working.delete(r)
+ end
+ else
+ run = false
  end
- else
- run = false
  end
  end
  end
- end

- if @serial
- service_resolved_waiters
- queue_runner.call
- else
- @solver_thread = Thread.new do
- with_timeout(false) { service_resolved_waiters }
+ if @serial
+ service_resolved_waiters
  queue_runner.call
- end
+ else
+ @solver_thread = Thread.new do
+ with_timeout(false) { service_resolved_waiters }
+ queue_runner.call
+ end

- # we depend on at_exit hooks being fired, and Thread#abort_on_exception
- # doesn't fire them. This solution bubbles up the exceptions in a similar
- # fashion without actually sacrificing the at_exit functionality.
- Thread.new do
- begin
- @solver_thread.join
- rescue Exception => e
- $stderr.puts "Solver thread encountered an exception:"
- $stderr.puts "#{e.class.name}: #{e.message}"
- $stderr.puts e.backtrace.join("\n")
- Kernel.exit 1
+ # we depend on at_exit hooks being fired, and Thread#abort_on_exception
+ # doesn't fire them. This solution bubbles up the exceptions in a similar
+ # fashion without actually sacrificing the at_exit functionality.
+ Thread.new do
+ begin
+ @solver_thread.join
+ rescue Exception => e
+ $stderr.puts "Solver thread encountered an exception:"
+ $stderr.puts "#{e.class.name}: #{e.message}"
+ $stderr.puts e.backtrace.join("\n")
+ Kernel.exit 1
+ end
  end
  end
  end
- end

- #
- # Instructs the scheduler to stop. Note that this is not an interrupt, and
- # the queue will still be exhausted before terminating.
- #
- def stop
- if @serial
- @queue << nil
- else
- @working.values.map { |v| v.join rescue nil }
- @queue << nil
- @solver_thread.join rescue nil
- end
- end
-
- #
- # This method determines what 'waiters', or provisioners that cannot
- # provision yet because of unresolved dependencies, can be executed.
- #
- def service_resolved_waiters
- @waiters_mutex.synchronize do
- @waiters -= (@working.keys.to_set + solved)
+ #
+ # Instructs the scheduler to stop. Note that this is not an interrupt, and
+ # the queue will still be exhausted before terminating.
+ #
+ def stop
+ if @serial
+ @queue << nil
+ else
+ @working.values.map { |v| v.join rescue nil }
+ @queue << nil
+ @solver_thread.join rescue nil
+ end
  end

- waiter_iteration = lambda do
- @waiters.each do |group_name|
- if (solved & vm_dependencies[group_name]) == vm_dependencies[group_name]
- if_debug do
- $stderr.puts "Provisioning #{group_name}"
- end
+ #
+ # This method determines what 'waiters', or provisioners that cannot
+ # provision yet because of unresolved dependencies, can be executed.
+ #
+ def service_resolved_waiters
+ @waiters_mutex.synchronize do
+ @waiters.replace(@waiters.to_set - (@working.keys.to_set + solved))
+ end

- provisioner = vm_groups[group_name]
+ waiter_iteration = lambda do
+ @waiters.each do |group_name|
+ if (solved.to_set & vm_dependencies[group_name]).to_a == vm_dependencies[group_name]
+ if_debug do
+ $stderr.puts "Provisioning #{group_name}"
+ end

- provision_block = lambda do
- # FIXME maybe a way to specify initial args?
- args = nil
- provisioner.each do |this_prov|
- unless args = this_prov.startup(args)
- $stderr.puts "Could not provision #{group_name} with provisioner #{this_prov.class.name}"
- raise "Could not provision #{group_name} with provisioner #{this_prov.class.name}"
+ provisioner = vm_groups[group_name]
+
+ provision_block = lambda do
+ # FIXME maybe a way to specify initial args?
+ args = nil
+ provisioner.each do |this_prov|
+ vm_groups[group_name] = provisioner # force a write to the db
+ unless args = this_prov.startup(args)
+ $stderr.puts "Could not provision #{group_name} with provisioner #{this_prov.class.name}"
+ raise "Could not provision #{group_name} with provisioner #{this_prov.class.name}"
+ end
  end
+ @queue << group_name
  end
- @queue << group_name
- end

- vm_working.add(group_name)
+ vm_working.add(group_name)

- if @serial
- # HACK: just give the working check something that will always work.
- # Probably should just mock it.
- @working[group_name] = Thread.new { sleep }
- provision_block.call
- else
- @working[group_name] = Thread.new(&provision_block)
+ if @serial
+ # HACK: just give the working check something that will always work.
+ # Probably should just mock it.
+ @working[group_name] = Thread.new { sleep }
+ provision_block.call
+ else
+ @working[group_name] = Thread.new(&provision_block)
+ end
  end
  end
  end
- end

- if @serial
- waiter_iteration.call
- else
- @waiters_mutex.synchronize(&waiter_iteration)
+ if @serial
+ waiter_iteration.call
+ else
+ @waiters_mutex.synchronize(&waiter_iteration)
+ end
  end
- end

- #
- # Teardown a single group -- modifies the solved formula. Be careful to
- # resupply dependencies if you use this, as nothing will resolve until you
- # resupply it.
- #
- # This takes an optional argument to wait for the group to be solved before
- # attempting to tear it down. Setting this to false effectively says, "I know
- # what I'm doing", and you should feel bad if you file an issue because you
- # supplied it.
- #
+ #
+ # Teardown a single group -- modifies the solved formula. Be careful to
+ # resupply dependencies if you use this, as nothing will resolve until you
+ # resupply it.
+ #
+ # This takes an optional argument to wait for the group to be solved before
+ # attempting to tear it down. Setting this to false effectively says, "I know
+ # what I'm doing", and you should feel bad if you file an issue because you
+ # supplied it.
+ #

- def teardown_group(group_name, wait=true)
- wait_for(group_name) if wait
+ def teardown_group(group_name, wait=true)
+ wait_for(group_name) if wait

- dependent_items = vm_dependencies.partition { |k,v| v.include?(group_name) }.first.map(&:first)
+ dependent_items = vm_dependencies.partition { |k,v| v.include?(group_name) }.first.map(&:first)

- if_debug do
- if dependent_items.length > 0
- $stderr.puts "Trying to terminate #{group_name}, found #{dependent_items.inspect} depending on it"
+ if_debug do
+ if dependent_items.length > 0
+ $stderr.puts "Trying to terminate #{group_name}, found #{dependent_items.inspect} depending on it"
+ end
  end
- end

- @solved_mutex.synchronize do
- dependent_and_working = @working.keys & dependent_items
+ @solved_mutex.synchronize do
+ dependent_and_working = @working.keys & dependent_items
+
+ if dependent_and_working.count > 0
+ $stderr.puts "#{dependent_and_working.inspect} are depending on #{group_name}, which you are trying to deprovision."
+ $stderr.puts "We can't resolve this problem for you, and future converges may fail during this run that would otherwise work."
+ $stderr.puts "Consider using wait_for to better control the dependencies, or turning serial provisioning on."
+ end

- if dependent_and_working.count > 0
- $stderr.puts "#{dependent_and_working.inspect} are depending on #{group_name}, which you are trying to deprovision."
- $stderr.puts "We can't resolve this problem for you, and future converges may fail during this run that would otherwise work."
- $stderr.puts "Consider using wait_for to better control the dependencies, or turning serial provisioning on."
+ deprovision_group(group_name)
  end

- deprovision_group(group_name)
  end

- end
-
- #
- # Performs the deprovision of a group by replaying its provision strategy
- # backwards and applying the #shutdown method instead of the #startup method.
- # Removes it from the various state tables if true is set as the second
- # argument, which is the default.
- #
- def deprovision_group(group_name, clean_state=true)
- provisioner = vm_groups[group_name]
-
- # if we can't find the provisioner, we probably got asked to clean up
- # something we never scheduled. Just ignore that.
- if provisioner
- if_debug do
- $stderr.puts "Attempting to deprovision group #{group_name}"
- end
+ #
+ # Performs the deprovision of a group by replaying its provision strategy
+ # backwards and applying the #shutdown method instead of the #startup method.
+ # Removes it from the various state tables if true is set as the second
+ # argument, which is the default.
+ #
+ def deprovision_group(group_name, clean_state=true)
+ provisioner = vm_groups[group_name]
+
+ # if we can't find the provisioner, we probably got asked to clean up
+ # something we never scheduled. Just ignore that.
+ if provisioner and ((solved.to_set + vm_working.to_set).include?(group_name) or @force_deprovision)
+ if_debug do
+ $stderr.puts "Attempting to deprovision group #{group_name}"
+ end

- perform_deprovision = lambda do |this_prov|
- unless this_prov.shutdown
- if_debug do
- $stderr.puts "Could not deprovision group #{group_name}."
+ perform_deprovision = lambda do |this_prov|
+ result = this_prov.shutdown
+ unless result
+ if_debug do
+ $stderr.puts "Could not deprovision group #{group_name}."
+ end
  end
+ result
  end
- end

- provisioner.reverse.each do |this_prov|
- if @force_deprovision
- begin
- perform_deprovision.call(this_prov)
- rescue Exception => e
- if_debug do
- $stderr.puts "Deprovision #{this_prov.class.name}/#{group_name} had errors:"
- $stderr.puts "#{e.message}"
+ provisioner.reverse.each do |this_prov|
+ if @force_deprovision
+ begin
+ perform_deprovision.call(this_prov)
+ rescue Exception => e
+ if_debug do
+ $stderr.puts "Deprovision #{this_prov.class.name}/#{group_name} had errors:"
+ $stderr.puts "#{e.message}"
+ end
+ end
+ else
+ unless perform_deprovision.call(this_prov)
+ raise "Could not deprovision #{group_name}/#{this_prov.inspect}"
  end
  end
- else
- perform_deprovision.call(this_prov)
  end
  end
- end

- if clean_state
- solved.delete(group_name)
- vm_working.delete(group_name)
- vm_dependencies.delete(group_name)
- vm_groups.delete(group_name)
+ if clean_state
+ solved.delete(group_name)
+ @waiters_mutex.synchronize do
+ @waiters.delete(group_name)
+ end
+ vm_working.delete(group_name)
+ vm_dependencies.delete(group_name)
+ vm_groups.delete(group_name)
+ end
  end
- end

- #
- # Instruct all provisioners except ones in the exception list to tear down.
- # Calls #stop as its first action.
- #
- # This is always done serially. For sanity.
- #
- def teardown(exceptions=[])
- stop
-
- (vm_groups.keys.to_set - exceptions.to_set).each do |group_name|
- deprovision_group(group_name) # clean this after everything finishes
+ #
+ # Instruct all provisioners except ones in the exception list to tear down.
+ # Calls #stop as its first action.
+ #
+ # This is always done serially. For sanity.
+ #
+ def teardown(exceptions=[])
+ stop
+
+ (vm_groups.keys.to_set - exceptions.to_set).each do |group_name|
+ deprovision_group(group_name) # clean this after everything finishes
+ end
  end
-
- write_state
  end
  end
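
For orientation, here is a rough sketch of how the 0.2.0 scheduler shown above can be driven. The NullProvisioner class is a made-up stand-in: the diff only requires that a provisioner respond to name=, #startup(args) (returning something truthy, which is handed to the next provisioner in the group), and #shutdown. The require path is a guess based on the other support/ requires in the hunk.

    require 'chef-workflow/support/scheduler' # assumed path for the file in this diff

    # Minimal stand-in provisioner; only the duck type exercised above matters.
    class NullProvisioner
      attr_accessor :name

      def startup(args)
        [name]   # must be truthy; passed to the next provisioner in the group
      end

      def shutdown
        true     # a falsy value makes deprovision_group raise unless forced
      end
    end

    scheduler = ChefWorkflow::Scheduler.new
    scheduler.schedule_provision('db',  NullProvisioner.new)
    scheduler.schedule_provision('app', NullProvisioner.new, %w[db])

    scheduler.run             # parallel mode by default; returns immediately
    scheduler.wait_for('app') # block until 'app' (and therefore 'db') is solved
    scheduler.teardown        # stop the solver and deprovision every group

Note that in 0.2.0 the waiters set is a ChefWorkflow::DatabaseSupport::Set and the VM object is created with VM.new, which is why the 0.1.1 write_state/at_exit persistence step no longer appears in the constructor or in teardown.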