nodule 0.0.34
Sign up to get free protection for your applications and to get access to all the features.
- data/.gitignore +6 -0
- data/.yardopts +1 -0
- data/Gemfile +5 -0
- data/README.md +100 -0
- data/Rakefile +15 -0
- data/ci_jobs/nodule-units/run.sh +25 -0
- data/examples/cat_test.rb +45 -0
- data/examples/wget.rb +36 -0
- data/lib/nodule/alarm.rb +25 -0
- data/lib/nodule/base.rb +258 -0
- data/lib/nodule/cassandra.rb +292 -0
- data/lib/nodule/console.rb +87 -0
- data/lib/nodule/line_io.rb +74 -0
- data/lib/nodule/monkeypatch.rb +8 -0
- data/lib/nodule/process.rb +386 -0
- data/lib/nodule/tempfile.rb +57 -0
- data/lib/nodule/topology.rb +195 -0
- data/lib/nodule/unixsocket.rb +54 -0
- data/lib/nodule/util.rb +56 -0
- data/lib/nodule/version.rb +3 -0
- data/lib/nodule/zeromq.rb +280 -0
- data/lib/nodule.rb +10 -0
- data/nodule.gemspec +28 -0
- data/test/helper.rb +1 -0
- data/test/nodule_cassandra_test.rb +31 -0
- data/test/nodule_console_test.rb +11 -0
- data/test/nodule_lineio_test.rb +32 -0
- data/test/nodule_process_test.rb +25 -0
- data/test/nodule_tempfile_test.rb +22 -0
- data/test/nodule_topology_test.rb +11 -0
- data/test/nodule_unixsocket_test.rb +11 -0
- data/test/nodule_util_test.rb +25 -0
- data/test/nodule_zeromq_test.rb +44 -0
- metadata +163 -0
@@ -0,0 +1,386 @@
|
|
1
|
+
require 'nodule/version'
|
2
|
+
require 'nodule/line_io'
|
3
|
+
|
4
|
+
module Nodule
  # Raised when an operation needs a child process that was never started.
  class ProcessNotRunningError < StandardError; end
  # Raised when .run is called while a child is still alive.
  class ProcessAlreadyRunningError < StandardError; end
  # Raised when an operation needs the child to have exited already.
  class ProcessStillRunningError < StandardError; end
  # Raised when an argv symbol cannot be resolved against the topology.
  class TopologyUnknownSymbolError < StandardError; end

  #
  # A subprocess in a Nodule topology. The argv may contain symbols that are
  # resolved against the topology at run time, and the child's stdout/stderr
  # are wired to Nodule::LineIO handlers for capture or redirection.
  #
  class Process < Base
    attr_reader :argv, :pid, :started, :ended
    attr_accessor :topology

    #
    # @param [Array] argv command and arguments; a leading Hash is used as the
    #   spawn environment and a trailing Hash as options
    # @param [Hash] opts passed through to Nodule::Base, except:
    # @option opts [Symbol,Proc,Array] :stdout stdout handler (default :capture)
    # @option opts [Symbol,Proc,Array] :stderr stderr handler (default :capture)
    #
    def initialize(*argv)
      @opts = argv[-1].is_a?(Hash) ? argv.pop : {}
      @env = argv[0].is_a?(Hash) ? argv.shift : {}
      @status = nil
      # integer sentinels keep .to_i/to_hash safe before the first run; they
      # are replaced by Time objects in run/waitpid
      @started = -1 # give started and ended default values
      @ended = -2
      @pid = nil
      @argv = argv
      @stdout_opts = @opts.delete(:stdout) || :capture
      @stderr_opts = @opts.delete(:stderr) || :capture

      super(@opts)
    end

    # convert symbol arguments to the to_s result of a topology item if it exists,
    # run procs, and flatten enumerables, so
    # :foobar will access the topology's entry for :foobar and call .to_s on it
    # proc { "abc" } will become "abc"
    # ['if=', :foobar] will resolve :foobar (this is recursive) and join all the results with no padding
    # anything left unmatched will be coerced into a string with .to_s
    def _apply_topology(arg)
      # only symbols are auto-translated to resource strings, String keys intentionally do not match
      if arg.kind_of? Symbol
        if @topology.has_key? arg
          @topology[arg].to_s
        else
          raise TopologyUnknownSymbolError.new "Unresolvable topology symbol, :#{arg}"
        end
      # sub-lists are recursed then joined with no padding, so:
      # ["if=", :foo] would become "if=value"
      elsif arg.respond_to? :map
        new = arg.map { |a| _apply_topology(a) }
        new.join('')
      else
        arg.to_s
      end
    end

    #
    # Resolve argv against the topology, spawn the child, and hook its
    # stdout/stderr up to LineIO handlers.
    # @raise [ProcessAlreadyRunningError] if a child was spawned and has not
    #   been reaped yet
    #
    def run
      # fixed: the old guard compared @started (a Time after the first run)
      # against the integer sentinel @ended, which raised ArgumentError
      # instead of the intended error. "Spawned but not yet reaped" is
      # precisely: a pid exists and no exit status has been collected.
      raise ProcessAlreadyRunningError.new if @pid && @status.nil?

      argv = @argv.map { |arg| _apply_topology(arg) }

      # Simply calling spawn with *argv isn't good enough, it really needs the command
      # to be a completely separate argument. This is likely due to a bug in spawn().
      command = argv.shift

      verbose "Spawning: #{command} #{argv.join(' ')}"

      @stdin_r, @stdin = IO.pipe
      @stdout, @stdout_w = IO.pipe
      @stderr, @stderr_w = IO.pipe

      @stdout_handler = Nodule::LineIO.new :io => @stdout, :reader => @stdout_opts, :topology => @topology, :run => true
      @stderr_handler = Nodule::LineIO.new :io => @stderr, :reader => @stderr_opts, :topology => @topology, :run => true

      @pid = spawn(@env, command, *argv,
        :in  => @stdin_r,
        :out => @stdout_w,
        :err => @stderr_w,
      )

      @started = Time.now

      # the parent must close its copies of the child's pipe ends or EOF
      # will never be delivered
      @stdin_r.close
      @stdout_w.close
      @stderr_w.close

      super
    end

    #
    # Clear all of the state and prepare to be able to .run again.
    # Raises ProcessStillRunningError if the child is still running.
    #
    def reset
      raise ProcessStillRunningError.new unless done?
      @stdout_handler.stop
      @stderr_handler.stop
      close
      @pid = nil
    end

    #
    # Send a signal to the child process.
    # @param [Integer] sig signal number, must be positive
    # @raise [ArgumentError] on non-positive signals
    # @raise [ProcessNotRunningError] if there is no pid to signal
    #
    def _kill(sig)
      # Do not use negative signals. You will _always_ get ESRCH for child processes, since they are
      # by definition not process group leaders, which is usually synonymous with the process group id
      # that "kill -9 $PID" relies on. See kill(2).
      raise ArgumentError.new "negative signals are wrong and unsupported" unless sig > 0
      raise ProcessNotRunningError.new unless @pid

      verbose "Sending signal #{sig} to process #{@pid}."
      ::Process.kill(sig, @pid)
      # do not catch ESRCH - ESRCH means we did something totally buggy, likewise, an exception
      # should fire if the process is not running since there's all kinds of code already checking
      # that it is running before getting this far.
    end

    #
    # Call Process.waitpid2, save the status (accessible with obj.status) and return just the pid value
    # returned by waitpid2.
    #
    def waitpid(flag=::Process::WNOHANG)
      raise ProcessNotRunningError.new "pid is not known" unless @pid
      raise ProcessNotRunningError.new "process seems to have exited #{@status.inspect}" if @status

      pid, @status = ::Process.waitpid2(@pid, flag)

      # this is as accurate as we can get, and it will generally be good enough for test work
      @ended = Time.now if pid == @pid

      pid
    end

    #
    # Call waitpid and block until the process exits or timeout is reached.
    # @param [Numeric,NilClass] timeout nil/0 blocks indefinitely
    #
    # keep a handle on the inherited wait before overriding it
    alias :iowait :wait
    def wait(timeout=nil)
      pid = nil # silence warning

      # block indefinitely on nil/0 timeout
      unless timeout
        return waitpid(0)
      end

      wait_with_backoff timeout do
        if @status
          true
        else
          pid = waitpid(::Process::WNOHANG)
          done?
        end
      end

      pid
    end

    #
    # Send SIGTERM (15) to the child process, sleep 1/20 of a second, then call waitpid. For well-behaving
    # processes, this should be enough to make it stop.
    # Returns true/false just like done?
    #
    def stop
      # fixed: returned nil before ("return if done?") and discarded the
      # waitpid comparison, contradicting the documented boolean return
      return true if done?
      _kill 15 # never negative!
      @stdout_handler.stop
      @stderr_handler.stop
      sleep 0.05
      exited = (waitpid == @pid)
      close
      exited
    end

    #
    # Send SIGKILL (9) to the child process, sleep 1/10 of a second, then call waitpid and return.
    # Returns true/false just like done?
    #
    def stop!
      raise ProcessNotRunningError.new unless @pid
      # fixed: returned nil before; see stop
      return true if done?

      _kill 9 # never negative!
      @stdout_handler.stop!
      @stderr_handler.stop!
      sleep 0.1
      exited = (waitpid == @pid)
      close
      exited
    end

    #
    # Return Process::Status as returned by Process::waitpid2.
    #
    def status
      raise ProcessNotRunningError.new "#@prefix called .status before .run." unless @pid
      waitpid unless @status
      @status
    end

    #
    # Check whether the process has exited or been killed and cleaned up.
    # Calls waitpid2 behind the scenes if necessary.
    # Throws ProcessNotRunningError if called before .run.
    #
    # keep a handle on the inherited done? before overriding it
    alias :iodone? :done?
    def done?
      raise ProcessNotRunningError.new "#@prefix called .done? before .run." unless @pid
      waitpid unless @status
      return true if @status
      waitpid == @pid
    end

    #
    # Return the elapsed time in seconds (Time - Time yields a Float of
    # seconds, not milliseconds as previously documented).
    #
    def elapsed
      raise ProcessNotRunningError.new unless @started
      raise ProcessStillRunningError.new unless @ended
      @ended - @started
    end

    #
    # Returns whether or not any stdout has been captured.
    # Will raise an exception if capture is not enabled.
    # proxies: Nodule::Base.output?
    # @return [TrueClass,FalseClass]
    #
    def stdout?
      @stdout_handler.output?
    end
    alias :output? :stdout?

    #
    # Get all currently captured stdout. Does not clear the buffer.
    # proxies: Nodule::Base.output
    # @return [Array{String}]
    #
    def stdout
      @stdout_handler.output
    end
    alias :output :stdout

    #
    # Get all currently captured stdout. Resets the buffer and counts.
    # proxies: Nodule::Base.output!
    # @return [Array{String}]
    #
    def stdout!
      @stdout_handler.output!
    end
    alias :output! :stdout!

    #
    # Clear the stdout buffer and reset the counter.
    # proxies: Nodule::Base.clear!
    #
    def clear_stdout!
      @stdout_handler.clear!
    end
    alias :clear! :clear_stdout!

    #
    # Proxies to stdout require_read_count.
    #
    def require_stdout_count(count, max_sleep=10)
      @stdout_handler.require_read_count count, max_sleep
    end
    alias :require_read_count :require_stdout_count

    #
    # Returns whether or not any stderr has been captured.
    # Will raise an exception if capture is not enabled.
    # proxies: Nodule::Base.output?
    # @return [TrueClass,FalseClass]
    #
    def stderr?
      @stderr_handler.output?
    end

    #
    # Get all currently captured stderr. Does not clear the buffer.
    # proxies: Nodule::Base.output
    # @return [Array{String}]
    #
    def stderr
      @stderr_handler.output
    end

    #
    # Get all currently captured stderr. Resets the buffer and counts.
    # proxies: Nodule::Base.output!
    # @return [Array{String}]
    #
    def stderr!
      @stderr_handler.output!
    end

    #
    # Clear the stderr buffer and reset the counter.
    # proxies: Nodule::Base.clear!
    #
    def clear_stderr!
      @stderr_handler.clear!
    end

    #
    # Proxies to stderr require_read_count.
    #
    def require_stderr_count(count, max_sleep=10)
      @stderr_handler.require_read_count count, max_sleep
    end

    #
    # Write to the child process's stdin using IO.print.
    # @param [String] see IO.print
    #
    def print(*args)
      @stdin.print(*args)
    end

    #
    # Write to the child process's stdin using IO.puts.
    # @param [String] see IO.puts
    #
    def puts(*args)
      @stdin.puts(*args)
    end

    #
    # Access the STDIN pipe IO object of the handle.
    # @return [IO]
    #
    def stdin_pipe
      @stdin
    end

    #
    # Access the STDOUT pipe IO object of the handle.
    # @return [IO]
    #
    def stdout_pipe
      @stdout
    end

    #
    # Access the STDERR pipe IO object of the handle.
    # @return [IO]
    #
    def stderr_pipe
      @stderr
    end

    #
    # Close all of the pipes. Best-effort: already-closed pipes are ignored.
    #
    def close
      @stdin.close rescue nil
      @stdout.close rescue nil
      @stderr.close rescue nil
    end

    #
    # Return most of the data about the process as a hash. This is safe to call at any point.
    #
    def to_hash
      {
        :argv => @argv,
        :started => @started.to_i,
        :ended => @ended.to_i,
        :elapsed => elapsed,
        :pid => @pid,
        # fixed: the old guard was (@status.nil? and @status.exited?), which
        # called exited? on nil (NoMethodError) and inverted the intent; only
        # report an exit status when one was actually collected.
        :retval => (@status && @status.exited?) ? @status.exitstatus : nil
      }
    end

    #
    # Returns the command as a string.
    #
    def to_s
      @argv.join(' ')
    end

    #
    # Returns to_hash.inspect
    #
    def inspect
      to_hash.inspect
    end
  end
end
|
@@ -0,0 +1,57 @@
|
|
1
|
+
require 'nodule/base'
|
2
|
+
require 'fileutils'
|
3
|
+
|
4
|
+
module Nodule
  #
  # A uniquely-named temporary file or directory in the current working
  # directory, cleaned up on stop (unless :cleanup => false).
  #
  class Tempfile < Base
    attr_reader :file

    #
    # @param [Hash] opts
    # @option opts [String] :suffix appended to the generated name
    # @option opts [String] :prefix defaults to 'nodule'
    # @option opts [String,TrueClass] :directory create a directory instead of
    #   a file; a String is used as the parent directory
    # @option opts [TrueClass] :touch create an (almost) empty file immediately
    # @option opts [TrueClass,FalseClass] :cleanup remove on stop (default true)
    #
    def initialize(opts={})
      suffix = opts[:suffix] || ''
      prefix = opts[:prefix] || 'nodule'
      # pid + per-process sequence keeps names unique across parallel runs
      @file = "#{prefix}-#{::Process.pid}-#{Nodule.next_seq}#{suffix}"

      if opts[:directory]
        @is_dir = true
        if opts[:directory].kind_of? String
          # NOTE(review): the directory is created under opts[:directory] but
          # @file keeps the bare name, so to_s/cleanup reference a different
          # path than the one created — confirm intended behavior with callers
          FileUtils.mkdir_p File.join(opts[:directory], @file)
        else
          FileUtils.mkdir @file
        end
      else
        @is_dir = false
        # require an explicit request to create an empty file
        if opts[:touch]
          File.open @file, "w" do |f| f.puts "" end
        end
      end

      @cleanup = opts.has_key?(:cleanup) ? opts[:cleanup] : true

      super(opts)
    end

    #
    # Create (or truncate) the file.
    # @param [Object] target unused, kept for interface compatibility
    # @return [String] the file path
    #
    def touch(target=nil)
      File.open(@file, "w+").close
      @file
    end

    #
    # Remove the file/directory if cleanup is enabled, then proceed with the
    # normal stop sequence.
    #
    def stop
      if @cleanup
        # Ruby caches stat_t somewhere and causes race conditions, but we don't really
        # care here as long as the file is gone.
        begin
          # fixed: the old code called File.unlink(@file) unconditionally
          # after rm_r for directories — it only "worked" because the rescued
          # ENOENT masked the second call, and would raise an unrescued
          # EPERM/EISDIR if the directory removal had failed.
          if @is_dir
            FileUtils.rm_r(@file)
          else
            File.unlink(@file)
          end
        rescue Errno::ENOENT
        end
      end

      super
    end

    # @return [String] the file path
    def to_s
      @file
    end
  end
end
|
57
|
+
|
@@ -0,0 +1,195 @@
|
|
1
|
+
#!/usr/bin/env ruby
|
2
|
+
|
3
|
+
# To test, you'll be creating a Topology, representing a cluster of
|
4
|
+
# interconnected processes. You'll also optionally declare a number
|
5
|
+
# of resources for the test framework to verify - files it can read,
|
6
|
+
# network connections it can snoop or spoof and so on. By declaring
|
7
|
+
# these resources, you gain the ability to make assertions against
|
8
|
+
# them.
|
9
|
+
|
10
|
+
# After creating the Topology and adding processes to it, you run it.
|
11
|
+
# When you do, the framework will allocate resources and rework the
|
12
|
+
# command line of every node to use the resources that the framework
|
13
|
+
# has allocated, faked or mocked. For instance, for a ZeroMQ socket
|
14
|
+
# the framework will create an identical forwarding socket that
|
15
|
+
# records traffic before resending to the application's actual socket.
|
16
|
+
|
17
|
+
# Since the test framework doesn't know the command line of every
|
18
|
+
# possible executable, you'll need to write your command lines in
|
19
|
+
# terms of those resources. Erb is used to let you do logic in the
|
20
|
+
# command-line declarations, and variables are passed in for the
|
21
|
+
# resources that the test framework has created.
|
22
|
+
|
23
|
+
#
|
24
|
+
# Module to help build a topology on a single machine. All pieces of the topology
|
25
|
+
# that run in subprocesses will be referenceable through this wrapper.
|
26
|
+
#
|
27
|
+
module Nodule
  class TopologyProcessStillRunningError < StandardError; end
  class TopologyIntegrationRequiredError < StandardError; end

  #
  # Coordinates a set of named resources (processes, sockets, files, ...)
  # forming a test topology on a single machine. Every resource must
  # implement join_topology! so it can resolve references to its peers.
  #
  class Topology
    #
    # @param [Hash{Symbol => Object}] opts name => resource pairs; each
    #   resource is joined to this topology immediately.
    #
    def initialize(opts={})
      @resources = {}
      @started = {}

      opts.each_pair do |label, resource|
        inject_topology(label, resource)
        @resources[label] = resource
      end

      @all_stopped = true
    end

    #
    # Hook a resource into this topology, failing loudly when it does not
    # implement the required integration method.
    #
    def inject_topology(name, value)
      return value.join_topology!(self, name) if value.respond_to?(:join_topology!)
      raise TopologyIntegrationRequiredError.new "#{name} => #{value} does not respond to :join_topology!"
    end

    # Fetch a resource by name.
    def [](key)
      @resources[key]
    end

    # Register (or replace) a resource, joining it to the topology first.
    def []=(key, value)
      inject_topology(key, value)
      @resources[key] = value
    end

    # @return [TrueClass,FalseClass] whether a resource exists under +key+
    def has_key?(key)
      @resources.has_key?(key)
    end

    # @return [Array] the names of all registered resources
    def keys
      @resources.keys
    end

    # Reverse lookup: the name under which +object+ is registered.
    def key(object)
      @resources.key(object)
    end

    # @return [Hash] the underlying name => resource map
    def to_hash
      @resources
    end

    #
    # Start every resource that is not already started.
    #
    def start_all
      keys.each { |name| start(name) unless @started[name] }

      # If we do many cycles, this will wind up getting called repeatedly.
      # The @all_stopped variable will make sure that's a really fast
      # operation.
      at_exit { stop_all }
    end

    #
    # Run each process in order, waiting for each one to complete & return before
    # running the next.
    #
    # Resources are all started up at once.
    #
    def run_serially
      @all_stopped = false

      @resources.each do |_, resource|
        resource.run
        resource.respond_to?(:wait) ? resource.wait : resource.stop
      end

      @all_stopped = true
    end

    #
    # Starts the node in the topology. Looks up the node's command
    # given that the topology hash is keyed off of the node's name.
    #
    def start(*names)
      @all_stopped = false
      names.flatten.each do |name|
        # spawn the resource unless it is already live, then mark it started
        @resources[name].run unless @started[name]

        @started[name] = true
      end
    end

    #
    # Immediately kills a node given its topology name
    #
    def stop(*names)
      names.flatten.each do |name|
        resource = @resources[name]
        # escalate: polite stop, short wait, hard stop, short wait, give up
        resource.stop
        resource.wait 1 unless resource.done?
        resource.stop! unless resource.done?
        resource.wait 1 unless resource.done?
        unless resource.done?
          raise "Could not stop resource: #{resource.class} #{resource.inspect}"
        end

        @started[name] = false
      end
    end

    #
    # Kills all of the nodes in the topology.
    #
    def stop_all
      return if @all_stopped
      @resources.each { |name, resource| stop name unless resource.done? }
    end

    # @return [TrueClass,FalseClass] whether the named resource was started
    def started?(key)
      @started[key.to_sym] == true
    end

    #
    # Start every resource except the named ones.
    #
    def start_all_but(*resources)
      excluded = resources.flatten.map(&:to_sym)
      @resources.keys.each do |name|
        start name if !@started[name] && !excluded.include?(name)
      end

      at_exit { stop_all_but resources }
    end

    #
    # Stop every running resource except the named ones.
    #
    def stop_all_but(*resources)
      return if @all_stopped
      keep = resources.flatten.map(&:to_sym)
      @resources.each do |name, resource|
        stop name if !keep.include?(name.to_sym) && !resource.done?
      end
    end

    # Best-effort stop of every resource, with no escalation or verification.
    def cleanup
      @resources.each { |_, resource| resource.stop }
    end

    # Wait up to +timeout+ seconds for the named resource.
    def wait(name, timeout=60)
      @resources[name].wait timeout
    end

    #
    # Wait for all resources to exit normally.
    #
    def wait_all
      @resources.each do |_, resource|
        resource.wait if resource.respond_to?(:wait)
      end
    end

    #
    # Reset all processes for restart.
    # @raise [TopologyProcessStillRunningError] unless everything is stopped
    #
    def reset_all
      raise TopologyProcessStillRunningError.new unless @all_stopped
      @resources.each { |_, resource| resource.reset }
    end

  end
end
|
@@ -0,0 +1,54 @@
|
|
1
|
+
require 'socket'
|
2
|
+
require 'nodule/tempfile'
|
3
|
+
|
4
|
+
module Nodule
  #
  # A client-side UNIX domain socket backed by a Tempfile-managed path.
  #
  class UnixSocket < Tempfile
    attr_reader :family, :address, :connected

    #
    # @param [Hash] opts passed through to Nodule::Tempfile, plus:
    # @option opts [Symbol] :family socket type, :DGRAM (default) or :STREAM
    #
    def initialize(opts={})
      super(opts)
      @family = opts[:family] || :DGRAM
      @socket = Socket.new(:UNIX, @family, 0)
      @address = Addrinfo.unix(@file)
      @connected = false
    end

    #
    # Send data over the socket, connecting lazily on first use.
    # NOTE(review): this shadows Object#send; kept for interface
    # compatibility — use __send__ for dynamic dispatch on these objects.
    #
    # sock1 = Nodule::UnixSocket.new
    #
    def send(data)
      @socket.connect(@address) unless @connected
      @connected = true

      # datagram sockets need sendmsg to preserve message boundaries
      if @family == :DGRAM
        @socket.sendmsg(data, 0)
      else
        @socket.send(data, 0)
      end
    end

    # Close the socket, then run the Tempfile cleanup.
    def stop
      @socket.close
      super
    end
  end

  #
  # A single-shot UNIX domain socket server: binds to the Tempfile-managed
  # path in a background thread and receives one message.
  #
  class UnixServer < Tempfile
    #
    # @param [Hash] opts passed through to Nodule::Tempfile, plus:
    # @option opts [Symbol] :family socket type, :DGRAM (default) or :STREAM
    #
    def initialize(opts={})
      super(opts)
      # fixed: @family was never initialized here, so Socket.new in run
      # received nil and failed; default matches UnixSocket
      @family = opts[:family] || :DGRAM
    end

    def run
      super
      @thread = Thread.new do
        Thread.current.abort_on_exception

        server = Socket.new(:UNIX, @family, 0)
        address = Addrinfo.unix(@file)
        server.bind(address)

        # fixed: the old "if sock" guard referenced an undefined local
        # variable and would NameError inside the thread
        message, = server.recvmsg(65536, 0)
      end
    end

    # @return [String] the socket path
    def to_s
      # fixed: previously returned @sockfile, which is never assigned
      # anywhere (always nil); the Tempfile-managed path lives in @file
      @file
    end
  end
end
|