furnish 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +19 -0
- data/Gemfile +6 -0
- data/Guardfile +10 -0
- data/LICENSE.txt +22 -0
- data/README.md +110 -0
- data/Rakefile +31 -0
- data/furnish.gemspec +29 -0
- data/lib/furnish/logger.rb +110 -0
- data/lib/furnish/provisioner.rb +46 -0
- data/lib/furnish/provisioner_group.rb +132 -0
- data/lib/furnish/provisioners/dummy.rb +87 -0
- data/lib/furnish/scheduler.rb +410 -0
- data/lib/furnish/version.rb +4 -0
- data/lib/furnish/vm.rb +39 -0
- data/lib/furnish.rb +51 -0
- data/test/helper.rb +15 -0
- data/test/mt_cases.rb +267 -0
- data/test/test_dummy.rb +95 -0
- data/test/test_logger.rb +68 -0
- data/test/test_provisioner_group.rb +51 -0
- data/test/test_scheduler_basic.rb +33 -0
- data/test/test_scheduler_serial.rb +16 -0
- data/test/test_scheduler_threaded.rb +44 -0
- data/test/test_vm.rb +18 -0
- metadata +212 -0
@@ -0,0 +1,410 @@
|
|
1
|
+
require 'timeout'
|
2
|
+
require 'furnish/vm'
|
3
|
+
require 'furnish/logger'
|
4
|
+
require 'furnish/provisioner_group'
|
5
|
+
|
6
|
+
module Furnish
|
7
|
+
#
|
8
|
+
# This is a scheduler for provisioners. It can run in parallel or serial mode,
|
9
|
+
# and is dependency-based, that is, it will only schedule items for execution
|
10
|
+
# which have all their dependencies satisfied and items that haven't will wait
|
11
|
+
# to execute until that happens.
|
12
|
+
#
|
13
|
+
class Scheduler
|
14
|
+
|
15
|
+
include Furnish::Logger::Mixins
|
16
|
+
|
17
|
+
#
|
18
|
+
# Access the VM object.
|
19
|
+
#
|
20
|
+
attr_reader :vm
|
21
|
+
|
22
|
+
#
|
23
|
+
# Turn serial mode on (off by default). This forces the scheduler to execute
|
24
|
+
# every provision in order, even if it could handle multiple provisions at
|
25
|
+
# the same time.
|
26
|
+
#
|
27
|
+
attr_accessor :serial
|
28
|
+
|
29
|
+
##
|
30
|
+
#
|
31
|
+
# Ignore exceptions while deprovisioning. Default is false.
|
32
|
+
#
|
33
|
+
attr_accessor :force_deprovision
|
34
|
+
|
35
|
+
#
|
36
|
+
# Instantiate the Scheduler.
|
37
|
+
#
|
38
|
+
#
# Construct a Scheduler in its default state: parallel mode, no forced
# deprovisioning, an empty work queue, and a fresh Furnish::VM state object.
#
def initialize
  @serial            = false
  @force_deprovision = false
  @solver_thread     = nil
  @working_threads   = { }
  @solved_mutex      = Mutex.new
  @queue             = Queue.new
  @vm                = Furnish::VM.new
end
|
47
|
+
|
48
|
+
#
|
49
|
+
# Ask the scheduler if it's running. Returns nil in serial mode.
|
50
|
+
#
|
51
|
+
# If there's an exception waiting and the scheduler has stopped, it will be
|
52
|
+
# raised here.
|
53
|
+
#
|
54
|
+
#
# Reports whether the solver loop is alive. Serial mode has no solver
# thread, so this always returns nil there. If the solver thread has died,
# it is joined here, which re-raises any exception it terminated with.
#
def running?
  return nil if @serial || !@solver_thread

  return true if @solver_thread.alive?

  # joining a dead thread surfaces any pending exception from the solver
  @solver_thread.join
  nil
end
|
65
|
+
|
66
|
+
#
|
67
|
+
# Schedule a group of VMs for provision. This takes a group name, which is a
|
68
|
+
# string, an array of provisioner objects, and a list of string dependencies.
|
69
|
+
# If anything in the dependencies list hasn't been pre-declared, it refuses
|
70
|
+
# to continue.
|
71
|
+
#
|
72
|
+
# This method will return nil if the server group is already provisioned.
|
73
|
+
#
|
74
|
+
#
# Convenience wrapper: builds a Furnish::ProvisionerGroup from the given
# group name (a string), list of provisioner objects, and list of string
# dependencies, then hands it to #schedule_provisioner_group. Returns nil
# if the group is already provisioned.
#
def schedule_provision(group_name, provisioners, dependencies=[])
  schedule_provisioner_group(
    Furnish::ProvisionerGroup.new(provisioners, group_name, dependencies)
  )
end
|
78
|
+
|
79
|
+
#
|
80
|
+
# Schedule a provision with a Furnish::ProvisionerGroup. Works exactly like
|
81
|
+
# Furnish::Scheduler#schedule_provision otherwise.
|
82
|
+
#
|
83
|
+
#
# Schedule a provision with a Furnish::ProvisionerGroup. Works exactly like
# Furnish::Scheduler#schedule_provision otherwise.
#
# Returns nil if the group is already scheduled. Raises if any dependency
# has not been pre-declared; in that case no scheduler state is modified.
#
def schedule_provisioner_group(group)
  return nil if vm.groups[group.name]

  # Validate before touching any state. Previously the group was registered
  # in vm.groups first, so a raised dependency error left a stale
  # registration behind and later reschedule attempts returned nil.
  unless group.dependencies.all? { |x| vm.groups.has_key?(x) }
    raise "One of your dependencies for #{group.name} has not been pre-declared. Cannot continue"
  end

  vm.groups[group.name]       = group
  vm.dependencies[group.name] = group.dependencies

  vm.sync_waiters do |waiters|
    waiters.add(group.name)
  end
end
|
98
|
+
|
99
|
+
#
|
100
|
+
# Sleep until this list of dependencies are resolved. In parallel mode, will
|
101
|
+
# raise if an exception occurred while waiting for these resources. In
|
102
|
+
# serial mode, wait_for just returns nil.
|
103
|
+
#
|
104
|
+
#
# Block until every named dependency is in the solved set. No-op (returns
# nil) in serial mode or when called with no arguments. If the solver
# thread dies while we poll, joining it here re-raises its exception.
#
def wait_for(*dependencies)
  return nil if @serial || dependencies.empty?

  needed = Set[*dependencies]

  loop do
    break if (needed & vm.solved) == needed
    sleep 0.1
    @solver_thread.join unless @solver_thread.alive?
  end
end
|
115
|
+
|
116
|
+
#
|
117
|
+
# Start the scheduler. In serial mode this call will block until the whole
|
118
|
+
# dependency graph is satisfied, or one of the provisions fails, at which
|
119
|
+
# point an exception will be raised. In parallel mode, this call completes
|
120
|
+
# immediately, and you should use #wait_for to control main thread flow,
|
121
|
+
# and #running? and #stop to control and monitor the threads this class
|
122
|
+
# manages.
|
123
|
+
#
|
124
|
+
# This call also installs a SIGINFO (Ctrl+T in the terminal on macs) and
|
125
|
+
# SIGUSR2 handler which can be used to get information on the status of
|
126
|
+
# what's solved and what's working. You can disable this functionality by
|
127
|
+
# passing `false` as the first argument.
|
128
|
+
#
|
129
|
+
#
# Start the scheduler; blocks in serial mode, returns immediately in
# parallel mode (see the full description above the original definition).
# Pass false to skip installing the SIGUSR2/SIGINFO status handler.
#
def run(install_handler=true)
  # short circuit if we're not serial and already running
  return if @solver_thread and !@serial

  if install_handler
    # dump scheduler state (solved / working / waiting) to the Furnish
    # logger when the signal fires
    handler = lambda do |*args|
      Furnish.logger.puts ["solved:", vm.solved.to_a].inspect
      Furnish.logger.puts ["working:", vm.working.to_a].inspect
      Furnish.logger.puts ["waiting:", vm.waiters.to_a].inspect
    end

    # SIGINFO only exists on BSD-derived platforms, hence the Signal.list guard
    %w[USR2 INFO].each { |sig| trap(sig, &handler) if Signal.list[sig] }
  end

  if @serial
    # serial mode: do all work inline on the calling thread until the
    # dependency graph is satisfied or a provision fails
    service_resolved_waiters
    queue_loop
  else
    # parallel mode: the solver runs on its own thread; callers use
    # #wait_for / #running? / #stop to coordinate with it
    @solver_thread = Thread.new do
      with_timeout(false) { service_resolved_waiters }
      queue_loop
    end
  end
end
|
153
|
+
|
154
|
+
#
|
155
|
+
# Instructs the scheduler to stop. Note that this is not an interrupt, and
|
156
|
+
# the queue will still be exhausted before terminating.
|
157
|
+
#
|
158
|
+
# It is a good idea to check #running? before calling this to ensure the
|
159
|
+
# scheduler did not halt with an exception.
|
160
|
+
#
|
161
|
+
#
# Ask the scheduler to stop. This is not an interrupt: the queue is
# drained before the solver terminates. Check #running? first to surface
# any exception the solver stopped with.
#
def stop
  if @serial
    # the serial queue consumer exits when it sees the nil sentinel
    @queue << nil
  else
    # let in-flight provisions finish; join may raise if a worker failed,
    # which is deliberately swallowed here -- use #running? beforehand to
    # observe failures
    @working_threads.values.map { |v| v.join rescue nil }
    if @solver_thread and @solver_thread.alive?
      @queue << nil
      # wait for the solver to drain the queue before killing it
      sleep 0.1 until @queue.empty?
      @solver_thread.kill
    end

    @solver_thread = nil
  end
end
|
175
|
+
|
176
|
+
#
|
177
|
+
# Teardown a single group -- modifies the solved formula. Be careful to
|
178
|
+
# resupply dependencies if you use this, as nothing will resolve until you
|
179
|
+
# resupply it.
|
180
|
+
#
|
181
|
+
# This takes an optional argument to wait for the group to be solved before
|
182
|
+
# attempting to tear it down. Setting this to false effectively says, "I know
|
183
|
+
# what I'm doing", and you should feel bad if you file an issue because you
|
184
|
+
# supplied it.
|
185
|
+
#
|
186
|
+
# If #force_provision is true, failed shutdowns from provisioners will not
|
187
|
+
# halt the deprovisioning process.
|
188
|
+
#
|
189
|
+
#
# Tear down a single group, modifying the solved set. Waits for the group
# to be solved first unless +wait+ is false. Warns (at debug level) about
# any groups still depending on it, then deprovisions under the
# solved-set mutex so the solver cannot race the state change.
#
def teardown_group(group_name, wait=true)
  wait_for(group_name) if wait

  # every group that lists group_name among its dependencies
  dependent_items = vm.dependencies.partition { |k,v| v.include?(group_name) }.first.map(&:first)

  if_debug do
    if dependent_items.length > 0
      puts "Trying to terminate #{group_name}, found #{dependent_items.inspect} depending on it"
    end
  end

  @solved_mutex.synchronize do
    # dependents that are mid-provision right now -- nothing we can do but warn
    dependent_and_working = @working_threads.keys & dependent_items

    if dependent_and_working.count > 0
      if_debug do
        puts "#{dependent_and_working.inspect} are depending on #{group_name}, which you are trying to deprovision."
        puts "We can't resolve this problem for you, and future converges may fail during this run that would otherwise work."
        puts "Consider using wait_for to better control the dependencies, or turning serial provisioning on."
      end
    end

    deprovision_group(group_name)
  end

end
|
215
|
+
|
216
|
+
#
|
217
|
+
# Instruct all provisioners except ones in the exception list to tear down.
|
218
|
+
# Calls #stop as its first action.
|
219
|
+
#
|
220
|
+
# This is always done serially. For sanity.
|
221
|
+
#
|
222
|
+
# If #force_provision is true, failed shutdowns from provisioners will not
|
223
|
+
# halt the deprovisioning process.
|
224
|
+
#
|
225
|
+
#
# Stop the scheduler, then deprovision every group except those named in
# +exceptions+. Always runs serially, for sanity. Honors
# #force_deprovision for failed shutdowns.
#
def teardown(exceptions=[])
  stop

  doomed = vm.groups.keys.to_set - exceptions.to_set
  doomed.each do |group_name|
    deprovision_group(group_name) # clean this after everything finishes
  end
end
|
232
|
+
|
233
|
+
#
|
234
|
+
# Performs the deprovision of a group by replaying its provision strategy
|
235
|
+
# backwards and applying the #shutdown method instead of the #startup method.
|
236
|
+
# Removes it from the various state tables if true is set as the second
|
237
|
+
# argument, which is the default.
|
238
|
+
#
|
239
|
+
# While this is a part of the public API, you should probably use #teardown
|
240
|
+
# or #teardown_group instead of this method, as they have better error
|
241
|
+
# handling and semantics. This "just does it".
|
242
|
+
#
|
243
|
+
#
# Replay a group's provision strategy backwards via #shutdown; when
# +clean_state+ is true (the default), also erase the group from the
# scheduler's state tables. Prefer #teardown / #teardown_group, which wrap
# this with better error handling -- this "just does it".
#
def deprovision_group(group_name, clean_state=true)
  shutdown(group_name)

  return unless clean_state
  delete_group(group_name)
end
|
247
|
+
|
248
|
+
#--
|
249
|
+
#
|
250
|
+
# END OF PUBLIC API
|
251
|
+
#
|
252
|
+
#++
|
253
|
+
|
254
|
+
protected
|
255
|
+
|
256
|
+
#
|
257
|
+
# Helper method for scheduling. Wraps items in a timeout and immediately
|
258
|
+
# checks all running workers for exceptions, which are immediately bubbled up
|
259
|
+
# if there are any. If do_loop is true, it will retry the timeout.
|
260
|
+
#
|
261
|
+
#
# Helper method for scheduling. Runs the block under a 1-second timeout,
# first joining any worker threads that have died -- which immediately
# re-raises any exception a worker terminated with. If +do_loop+ is true,
# the timeout is retried indefinitely.
#
def with_timeout(do_loop=true)
  Timeout.timeout(1) do
    dead_working = @working_threads.values.reject(&:alive?)
    if dead_working.size > 0
      dead_working.map(&:join)
    end

    yield
  end
rescue Timeout::Error
  # rescue the canonical class: the top-level TimeoutError constant the
  # original used is a deprecated alias, removed in modern rubies.
  retry if do_loop
end
|
273
|
+
|
274
|
+
#
|
275
|
+
# Consume the queue. Runs until a nil enters the queue, unless in serial
|
276
|
+
# mode, where it will terminate when the queue is empty.
|
277
|
+
#
|
278
|
+
#
# Consume the queue of completed group names. Runs until a nil sentinel
# enters the queue; in serial mode it instead terminates as soon as the
# queue is empty.
#
def queue_loop
  run = true

  while run
    service_resolved_waiters

    ready = []

    if @queue.empty?
      if @serial
        # serial mode: a drained queue means there is nothing left to do
        return
      else
        with_timeout do
          # this is where most of the execution time is spent, so ensure
          # waiters get considered here.
          service_resolved_waiters
          ready << @queue.shift
        end
      end
    end

    # drain whatever else arrived without blocking
    while !@queue.empty?
      ready << @queue.shift
    end

    ready.each do |r|
      if r
        # group r finished provisioning: promote it from working to solved
        @solved_mutex.synchronize do
          vm.solved.add(r)
          @working_threads.delete(r)
          vm.working.delete(r)
        end
      else
        # nil is the sentinel #stop pushes to request termination
        run = false
      end
    end
  end
end
|
316
|
+
|
317
|
+
#
|
318
|
+
# Helper method to manage waiters based on solved dependencies.
|
319
|
+
#
|
320
|
+
#
# Prune the waiter set: drop any group that is already being worked on or
# has already been solved.
#
def resolve_waiters
  vm.sync_waiters do |waiters|
    busy_or_done = @working_threads.keys.to_set + vm.solved.to_set
    waiters.replace(waiters.to_set - busy_or_done)
  end
end
|
325
|
+
|
326
|
+
#
|
327
|
+
# Predicate to determine all of a groups dependencies are in the solved set.
|
328
|
+
#
|
329
|
+
#
# Predicate: true when every dependency of the named group appears in the
# solved set.
#
def dependencies_solved?(group_name)
  deps = vm.dependencies[group_name]
  (vm.solved.to_set & deps) == deps
end
|
332
|
+
|
333
|
+
#
|
334
|
+
# Fetch the ProvisionerGroup and start it.
|
335
|
+
#
|
336
|
+
#
# Fetch the ProvisionerGroup for +group_name+, run its provision strategy,
# and push the name onto the queue so the solver loop marks it solved.
#
# FIXME maybe a way to specify initial args?
#
def startup(group_name)
  group = vm.groups[group_name]

  # the original assigned an unused `args = nil` local here; removed.
  group.startup
  @queue << group_name
end
|
344
|
+
|
345
|
+
#
|
346
|
+
# Similar to #startup -- just a shim to talk to a specific ProvisionerGroup
|
347
|
+
#
|
348
|
+
#
# Counterpart to #startup: locate the ProvisionerGroup and run its
# shutdown strategy. Unknown groups are silently ignored -- teardown may
# legitimately ask us to clean up something that was never scheduled.
#
def shutdown(group_name)
  group = vm.groups[group_name]
  return unless group and can_deprovision?(group_name)

  group.shutdown(@force_deprovision)
end
|
357
|
+
|
358
|
+
#
|
359
|
+
# This method determines what 'waiters', or provisioners that cannot
|
360
|
+
# provision yet because of unresolved dependencies, can be executed.
|
361
|
+
#
|
362
|
+
#
# Scan the waiter set and start provisioning every group whose
# dependencies are all solved. In serial mode the provision runs inline on
# this thread; otherwise a worker thread is spawned per group.
#
def service_resolved_waiters
  resolve_waiters

  vm.sync_waiters do |waiters|
    waiters.each do |group_name|
      if dependencies_solved?(group_name)
        if_debug do
          puts "Provisioning #{group_name}"
        end

        # mark as in-flight before starting so the solver sees it working
        vm.working.add(group_name)

        if @serial
          # HACK: just give the working check something that will always work.
          # Probably should just mock it.
          @working_threads[group_name] = Thread.new { sleep }
          startup(group_name)
        else
          @working_threads[group_name] = Thread.new { startup(group_name) }
        end
      end
    end
  end
end
|
386
|
+
|
387
|
+
#
|
388
|
+
# Predicate to determine if a provisioner group can be shutdown.
|
389
|
+
#
|
390
|
+
#
# Predicate: a group may be shut down when it is solved or currently
# in-flight, or unconditionally when #force_deprovision is set.
#
def can_deprovision?(group_name)
  return true if @force_deprovision

  (vm.solved.to_set + vm.working.to_set).include?(group_name)
end
|
393
|
+
|
394
|
+
#
|
395
|
+
# Wipes the ProvisionerGroup out of the scheduler's state, and terminates
|
396
|
+
# any threads managing it.
|
397
|
+
#
|
398
|
+
#
# Wipes the ProvisionerGroup out of the scheduler's state tables, and
# terminates any worker thread managing it.
#
def delete_group(group_name)
  vm.solved.delete(group_name)

  vm.sync_waiters do |waiters|
    waiters.delete(group_name)
  end

  # explicit nil check instead of the original bare `rescue nil` modifier,
  # which also hid unrelated errors from Thread#kill; a missing worker
  # thread is the only expected case.
  thread = @working_threads.delete(group_name)
  thread.kill if thread

  vm.working.delete(group_name)
  vm.dependencies.delete(group_name)
  vm.groups.delete(group_name)
end
|
409
|
+
end
|
410
|
+
end
|
data/lib/furnish/vm.rb
ADDED
@@ -0,0 +1,39 @@
|
|
1
|
+
module Furnish
|
2
|
+
#
|
3
|
+
# This class mainly exists to track the run state of the Scheduler, and is kept
|
4
|
+
# simple. The attributes delegate to Palsy instances.
|
5
|
+
#
|
6
|
+
#
# Tracks the run state of the Scheduler, and is kept simple. Every
# attribute delegates to a Palsy collection, so all of this state is
# persisted to the database configured via Furnish.init.
#
class VM
  # the vm groups and their provisioning lists.
  attr_reader :groups
  # the dependencies that each vm group depends on
  attr_reader :dependencies
  # the set of provisioned (solved) groups
  attr_reader :solved
  # the set of provisioning (working) groups
  attr_reader :working
  # the set of groups waiting to be provisioned.
  attr_reader :waiters

  #
  # Create a new VM object. Should only happen in the Scheduler.
  #
  # NOTE(review): the string arguments appear to be Palsy storage
  # identifiers; changing them would orphan previously persisted state --
  # confirm against the Palsy documentation before touching.
  #
  def initialize
    @groups = Palsy::Map.new('vm_groups', 'provisioner_group')
    @dependencies = Palsy::Map.new('vm_groups', 'dependency_group')
    @solved = Palsy::Set.new('vm_scheduler', 'provisioned')
    @working = Palsy::Set.new('vm_scheduler', 'working')
    @waiters = Palsy::Set.new('vm_scheduler', 'waiters')
    # guards multi-step reads/writes of @waiters across scheduler threads
    @waiters_mutex = Mutex.new
  end

  #
  # Helper to deal with waiters in a synchronous way: yields the waiters
  # set to the block while holding the waiters mutex.
  #
  def sync_waiters
    @waiters_mutex.synchronize do
      yield @waiters
    end
  end
end
|
39
|
+
end
|
data/lib/furnish.rb
ADDED
@@ -0,0 +1,51 @@
|
|
1
|
+
require 'palsy'
|
2
|
+
require 'furnish/version'
|
3
|
+
require 'furnish/logger'
|
4
|
+
require 'furnish/scheduler'
|
5
|
+
|
6
|
+
#
|
7
|
+
# Furnish is a scheduling system that has a massive readme which explains what
|
8
|
+
# it does here:
|
9
|
+
#
|
10
|
+
# https://github.com/erikh/furnish
|
11
|
+
#
|
12
|
+
# You may also wish to read the Furnish::Scheduler, Furnish::Logger, and
|
13
|
+
# Furnish::ProvisionerGroup documentation to learn more about it.
|
14
|
+
#
|
15
|
+
module Furnish
|
16
|
+
#
|
17
|
+
# Initialize Furnish. The path given is to a SQLite 3 database file that it
|
18
|
+
# will create for you.
|
19
|
+
#
|
20
|
+
#
# Initialize Furnish by pointing Palsy at the SQLite 3 database file at
# the given path (created for you on demand).
#
def self.init(database_file)
  Palsy.change_db(database_file)
end
|
23
|
+
|
24
|
+
#
|
25
|
+
# Access the logger (Furnish::Logger) or override it if it does not already
|
26
|
+
# exist. In the latter case, Furnish#logger= might be more reliable.
|
27
|
+
#
|
28
|
+
# The default logger is pointed at standard error and has a debug level of 0.
|
29
|
+
#
|
30
|
+
#
# Access the logger (Furnish::Logger), lazily creating it if it does not
# already exist. The +io+ and +debug_level+ arguments only apply on first
# creation; use Furnish.logger= to replace an existing logger.
#
# The default logger writes to standard error with a debug level of 0.
#
def self.logger(io=$stderr, debug_level=0)
  # `||=` alone covers the early return the original duplicated with an
  # explicit `return @logger if @logger` guard.
  @logger ||= Furnish::Logger.new(io, debug_level)
end
|
34
|
+
|
35
|
+
#
|
36
|
+
# Set the logger. This is expected to have an interface akin to
|
37
|
+
# Furnish::Logger, it's not the same as ruby's Logger.
|
38
|
+
#
|
39
|
+
#
# Replace the logger. The object is expected to quack like
# Furnish::Logger -- this is not ruby's stdlib Logger interface.
#
def self.logger=(logger)
  @logger = logger
end
|
42
|
+
|
43
|
+
#
|
44
|
+
# Shutdown Furnish by closing its state file. Furnish::Scheduler objects and
|
45
|
+
# the threads it spawns must be stopped already, otherwise you're going to
|
46
|
+
# have a bad day.
|
47
|
+
#
|
48
|
+
#
# Shut Furnish down by closing its Palsy state file. Any
# Furnish::Scheduler objects (and the threads they spawned) must already
# be stopped, otherwise you're going to have a bad day.
#
def self.shutdown
  Palsy.instance.close
end
|
51
|
+
end
|
data/test/helper.rb
ADDED