trident 0.1.0
- checksums.yaml +15 -0
- data/.coveralls.yml +1 -0
- data/.gitignore +17 -0
- data/.travis.yml +8 -0
- data/Gemfile +9 -0
- data/LICENSE.txt +22 -0
- data/README.md +33 -0
- data/Rakefile +18 -0
- data/bin/trident +8 -0
- data/lib/trident.rb +8 -0
- data/lib/trident/cli.rb +130 -0
- data/lib/trident/pool.rb +108 -0
- data/lib/trident/pool_handler.rb +31 -0
- data/lib/trident/pool_manager.rb +79 -0
- data/lib/trident/signal_handler.rb +167 -0
- data/lib/trident/utils.rb +9 -0
- data/lib/trident/version.rb +3 -0
- data/test/fixtures/integration_project/config/trident.yml +51 -0
- data/test/integration/trident_test.rb +105 -0
- data/test/test_helper.rb +144 -0
- data/test/unit/trident/cli_test.rb +253 -0
- data/test/unit/trident/pool_handler_test.rb +70 -0
- data/test/unit/trident/pool_manager_test.rb +131 -0
- data/test/unit/trident/pool_test.rb +233 -0
- data/test/unit/trident/signal_handler_test.rb +262 -0
- data/test/unit/trident/utils_test.rb +20 -0
- data/trident.example.yml +49 -0
- data/trident.gemspec +29 -0
- metadata +180 -0
data/lib/trident/signal_handler.rb
ADDED
@@ -0,0 +1,167 @@
+module Trident
+  class SignalHandler
+    include GemLogger::LoggerSupport
+
+    CHUNK_SIZE = (16 * 1024)
+    SIGNAL_QUEUE_MAX_SIZE = 5
+    MSG_STOP = 'STOP'
+
+    class << self
+
+      attr_accessor :instance
+
+      def start(signal_mappings, target)
+        raise "Already started, call stop if restart needed" if instance
+        logger.info "Starting signal handler"
+        self.instance = new(signal_mappings, target)
+        instance.start
+      end
+
+      def stop
+        raise "No signal handler started" unless instance
+        logger.info "Stopping signal handler"
+        instance.stop
+        self.instance = nil
+      end
+
+      def reset_for_fork
+        raise "No signal handler started" unless instance
+        instance.reset_for_fork
+        self.instance = nil
+      end
+
+    end
+
+    attr_reader :target, :signal_mappings, :signal_queue, :self_pipe, :original_signal_handlers
+
+    def initialize(signal_mappings, target)
+      @target = target
+      @signal_queue = []
+      @self_pipe = []
+      @original_signal_handlers = {}
+      self.signal_mappings = signal_mappings
+    end
+
+    def start
+      setup_self_pipe
+      setup_signal_handlers
+
+      logger.info "Main loop started"
+      loop do
+        signal_result = handle_signal_queue
+        break if signal_result == :break
+        msg = snooze if signal_queue.empty?
+        logger.debug "Main loop awake"
+        break if msg == MSG_STOP
+      end
+      logger.info "Main loop exited"
+    end
+
+    def stop
+      reset_signal_handlers
+      wakeup(MSG_STOP)
+    end
+
+    def reset_for_fork
+      @self_pipe = []
+      reset_signal_handlers
+    end
+
+    def wakeup(msg='.')
+      begin
+        # mutexes (and thus logging) not allowed within a trap context
+        # puts "Waking main loop"
+        self_pipe.last.write_nonblock(msg) # wakeup master process from select
+      rescue Errno::EAGAIN, Errno::EINTR
+        # pipe is full, master should wake up anyways
+        retry
+      end
+    end
+
+    def snooze
+      msg = ""
+      begin
+        logger.info "Snoozing main loop"
+        ready = IO.select([self_pipe.first], nil, nil, 1) or return
+        ready.first && ready.first.first or return
+        loop { msg << self_pipe.first.read_nonblock(CHUNK_SIZE) }
+      rescue Errno::EAGAIN, Errno::EINTR
+      end
+      msg
+    end
+
+    private
+
+    def signal_mappings=(mappings)
+      @signal_mappings = {}
+      mappings.each do |k, v|
+        k = "SIG#{k}" unless k =~ /^SIG/i
+        k = k.upcase
+
+        raise ArgumentError,
+              "Duplicate signal handler: #{k}" if @signal_mappings.has_key?(k)
+
+        @signal_mappings[k] = Array(v)
+      end
+
+      # Should always handle CHLD signals as they wakeup/drive the main
+      # loop on status changes from child processes
+      @signal_mappings = {"SIGCHLD" => ["update"]}.merge(@signal_mappings)
+    end
+
+    def setup_self_pipe
+      self_pipe.each { |io| io.close rescue nil }
+      self_pipe.replace(IO.pipe)
+      self_pipe.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+    end
+
+    def setup_signal_handlers
+      logger.info "Installing signal handlers"
+      signal_mappings.each do |signal_name, actions|
+        raise ArgumentError,
+              "Target does not respond to action: #{actions}" unless actions.all? { |a| target.respond_to?(a) }
+
+        logger.info "Adding signal mapping: #{signal_name} -> #{actions.inspect}"
+        original_signal_handlers[signal_name] = trap_deferred(signal_name)
+      end
+    end
+
+    def reset_signal_handlers
+      original_signal_handlers.each do |signal_name, original_handler|
+        trap(signal_name, original_handler)
+      end
+      original_signal_handlers.clear
+    end
+
+    # defer a signal for later processing in #join (master process)
+    def trap_deferred(signal)
+      trap(signal) do |signal_number|
+        if signal_queue.size < SIGNAL_QUEUE_MAX_SIZE
+          # mutexes (and thus logging) not allowed within a trap context
+          # puts "Adding signal to queue: #{signal}"
+          signal_queue << signal
+          wakeup
+        else
+          $stderr.puts "Signal queue exceeded max size, ignoring #{signal}"
+        end
+      end
+    end
+
+    def handle_signal_queue
+      signal_result = nil
+      signal = signal_queue.shift
+      if signal
+        logger.info "Handling signal: #{signal}"
+        actions = signal_mappings[signal]
+        if actions
+          actions.each do |action|
+            logger.info "Sending to target: #{action}"
+            signal_result = target.send(action)
+          end
+        end
+      end
+      signal_result
+    end
+
+  end
+end
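
Note: the class above is driven entirely through its class-level interface: `start` installs deferred traps and then blocks in the main loop, and each trapped signal is turned into plain method calls on the target. A minimal sketch of that wiring, not taken from the gem itself — the `Manager` class here is hypothetical (in Trident the target is the PoolManager, per the example config later in this diff):

    require 'trident'

    # Hypothetical target: anything that responds to the mapped action names.
    # "update" must exist because SignalHandler always merges in SIGCHLD => ["update"].
    class Manager
      def update
        # reap children / respawn workers here
      end

      def stop_gracefully
        :break   # returning :break from an action ends SignalHandler's main loop
      end
    end

    # Keys may be given with or without the SIG prefix; signal_mappings= upcases
    # and normalizes them.
    Trident::SignalHandler.start({ "INT" => "stop_gracefully" }, Manager.new)
    # Blocks in the main loop until an action returns :break (as above) or a
    # MSG_STOP wakeup arrives via SignalHandler.stop.
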
data/test/fixtures/integration_project/config/trident.yml
ADDED
@@ -0,0 +1,51 @@
+# a name for labelling pool processes
+application: app_name
+
+# loads handlers in the master process before forking workers
+prefork: true
+
+# Configure the behavior of the pool manager in response to signals
+# actions are methods on PoolManager and can be sequenced
+signals:
+  INT: stop_gracefully
+  TERM: stop_forcefully
+  USR1: [stop_forcefully, wait]
+
+# define and setup all the PoolHandlers
+handlers:
+  myhandler:
+    environment: |+
+      class MyWorker
+        def initialize(o)
+          @o = o
+        end
+        def start
+          loop { sleep 1 }
+        end
+      end
+    class: MyWorker
+    options:
+      signals:
+        default: USR2
+        stop_gracefully: INT
+        stop_forcefully: TERM
+
+# Configure the worker pools
+pools:
+  mypool1:
+    # number of worker processes
+    size: 3
+    # chooses a handler defined above for running process
+    handler: myhandler
+    # options passed to each handler's initializer (merged into handler options above)
+    options:
+      name: one
+  mypool2:
+    # number of worker processes
+    size: 2
+    # chooses a handler defined above for running process
+    handler: myhandler
+    # options passed to each handler's initializer (merged into handler options above)
+    options:
+      name: two
+
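
Note: the `signals:` section above has exactly the shape that `SignalHandler#signal_mappings=` (earlier in this diff) normalizes: bare names get a `SIG` prefix and are upcased, string values become one-element arrays, and `SIGCHLD => ["update"]` is always merged in. Assuming the CLI passes this section through to the signal handler (that wiring is not shown in this excerpt), the fixture's mapping would normalize as follows — this sketch simply re-applies the normalization code from `signal_handler.rb`:

    # The fixture's `signals:` section, as a Ruby hash.
    raw = {
      "INT"  => "stop_gracefully",
      "TERM" => "stop_forcefully",
      "USR1" => ["stop_forcefully", "wait"],
    }

    # Same normalization steps as SignalHandler#signal_mappings=.
    normalized = {}
    raw.each do |k, v|
      k = "SIG#{k}" unless k =~ /^SIG/i
      normalized[k.upcase] = Array(v)
    end
    normalized = { "SIGCHLD" => ["update"] }.merge(normalized)

    # => {"SIGCHLD"=>["update"], "SIGINT"=>["stop_gracefully"],
    #     "SIGTERM"=>["stop_forcefully"], "SIGUSR1"=>["stop_forcefully", "wait"]}
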
data/test/integration/trident_test.rb
ADDED
@@ -0,0 +1,105 @@
+require_relative '../test_helper'
+
+class Trident::TridentTest < MiniTest::Should::TestCase
+
+  setup do
+    @project_root = File.expand_path('../../fixtures/integration_project', __FILE__)
+    @cli = "#{File.expand_path('../../..', __FILE__)}/bin/trident"
+  end
+
+  def wait_for(io, pattern)
+    timeout(5) do
+      loop do
+        line = io.readline
+        puts line if ENV['DEBUG']
+        break if line =~ pattern
+      end
+    end
+  end
+
+  def process_list
+    processes = {}
+    lines = `ps -e -opid,command`.lines.grep(/trident\[/)
+    lines.each do |line|
+      pieces = line.split
+      pid = pieces[0].to_i
+      next if pid == Process.pid
+      command = pieces[1..-1].join(' ')
+      processes[pid] = command
+    end
+    processes
+  end
+
+  def parse_manager(manager_str)
+    pools = {}
+    manager_str.scan(/(\w+)\[([0-9, ]+)\]/) do |pool, pids|
+      pids = pids.split(", ").collect(&:to_i)
+      pools[pool] = pids
+    end
+    pools
+  end
+
+  context "basic usage" do
+
+    should "start and stop pools" do
+      cmd = "#{@cli} --verbose --config #{@project_root}/config/trident.yml"
+      io = IO.popen(cmd, :err=>[:child, :out])
+
+      wait_for(io, /<pool-mypool1> Pool started with 3 workers/)
+      wait_for(io, /<pool-mypool2> Pool started with 2 workers/)
+
+      processes = process_list
+      assert_equal 6, processes.size
+      manager = processes[io.pid]
+      pools = parse_manager(manager)
+      pools.each do |pool, pids|
+        pids.each do |pid|
+          assert processes[pid], "no worker process"
+          assert_match /trident[pool-#{pool}-worker]/, processes[pid], "worker process not in right pool"
+        end
+      end
+
+      Process.kill("USR1", io.pid)
+
+      wait_for(io, /<pool-mypool1> Pool stopped/)
+      wait_for(io, /<pool-mypool2> Pool stopped/)
+      wait_for(io, /Main loop exited/)
+
+      Process.wait(io.pid)
+      assert_empty process_list
+    end
+
+  end
+
+  context "worker maintenance" do
+
+    should "restart failed workers" do
+      cmd = "#{@cli} --verbose --config #{@project_root}/config/trident.yml"
+      io = IO.popen(cmd, :err=>[:child, :out])
+
+      wait_for(io, /<pool-mypool1> Pool started with 3 workers/)
+      wait_for(io, /<pool-mypool2> Pool started with 2 workers/)
+
+      processes = process_list
+      assert_equal 6, processes.size
+      manager = processes[io.pid]
+      pools = parse_manager(manager)
+      children = pools['mypool1']
+      child = children.delete_at(1)
+      Process.kill("KILL", child)
+
+      wait_for(io, /<pool-mypool1> Spawned worker \d+, worker count now at 3/)
+      processes = process_list
+      assert_equal 6, processes.size
+      manager = processes[io.pid]
+      pools = parse_manager(manager)
+      assert_equal 3, pools['mypool1'].size
+      assert children.all? {|c| pools['mypool1'].include?(c) }
+
+      Process.kill("USR1", io.pid)
+      Process.wait(io.pid)
+      assert_empty process_list
+    end
+
+  end
+end
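
Note: both integration tests lean on `parse_manager`, which recovers the pool-to-worker-pid mapping from the manager's process title. A quick illustration of what that scan does on a title of the shape the tests expect (the sample string below is made up for illustration; the exact procline format is set elsewhere in the gem):

    # Same regexp as parse_manager in the test above, applied to a sample title.
    manager_str = "trident[app_name-manager]: mypool1[101, 102, 103] mypool2[201, 202]"

    pools = {}
    manager_str.scan(/(\w+)\[([0-9, ]+)\]/) do |pool, pids|
      pools[pool] = pids.split(", ").collect(&:to_i)
    end

    pools # => {"mypool1"=>[101, 102, 103], "mypool2"=>[201, 202]}
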
data/test/test_helper.rb
ADDED
@@ -0,0 +1,144 @@
+require 'rubygems'
+
+if ENV['CI']
+  require 'coveralls'
+  Coveralls.wear!
+end
+
+require 'bundler'
+begin
+  Bundler.setup(:default, :development)
+rescue Bundler::BundlerError => e
+  $stderr.puts e.message
+  $stderr.puts "Run `bundle install` to install missing gems"
+  exit e.status_code
+end
+
+require 'minitest/autorun'
+require 'minitest/should'
+require "minitest/reporters"
+require "mocha/setup"
+require 'timeout'
+require 'tempfile'
+
+reporter = ENV['REPORTER']
+reporter = case reporter
+  when 'none' then nil
+  when 'spec' then MiniTest::Reporters::SpecReporter.new
+  when 'progress' then MiniTest::Reporters::ProgressReporter.new
+  else MiniTest::Reporters::DefaultReporter.new
+end
+MiniTest::Reporters.use!(reporter) if reporter
+
+require 'trident'
+include Trident
+
+GemLogger.default_logger = Logger.new("/dev/null")
+
+class ForkChild
+
+  attr_reader :pid, :read_from_child
+
+  def initialize
+    @read_from_child, @write_from_child = IO.pipe
+
+    @pid = fork do
+      @read_from_child.close
+      result = yield
+      Marshal.dump(result, @write_from_child)
+      exit!(0) # skips exit handlers.
+    end
+
+    @write_from_child.close
+  end
+
+  def wait(time=5)
+    timeout(time) do
+      result = @read_from_child.read
+      begin
+        Process.wait(@pid)
+      rescue Errno::ECHILD
+      end
+      raise "child failed" if result.empty?
+      Marshal.load(result)
+    end
+  end
+
+end
+
+class FileCounter
+
+  def initialize(file=Tempfile.new('file_counter').path)
+    @file = file
+  end
+
+  def increment
+    File.open(@file, File::RDWR|File::CREAT, 0644) do |f|
+      f.flock(File::LOCK_EX)
+      value = f.read.to_i + 1
+      f.rewind
+      f.write("#{value}\n")
+      f.flush
+      f.truncate(f.pos)
+    end
+  end
+
+  def read
+    # read the counter using read lock
+    File.open(@file, "r") do |f|
+      f.flock(File::LOCK_SH)
+      f.read.to_i
+    end
+  end
+
+end
+
+class MiniTest::Should::TestCase
+  ORIGINAL_PROCLINE = $0
+
+  setup do
+    $0 = ORIGINAL_PROCLINE
+  end
+end
+
+# Allow triggering single tests when running from rubymine
+# reopen the installed runner so we don't step on runner customizations
+class << MiniTest::Unit.runner
+  # Rubymine sends --name=/\Atest\: <context> should <should>\./
+  # Minitest runs each context as a suite
+  # Minitest filters methods by matching against: <suite>#test_0001_<should>
+  # Nested contexts are separated by spaces in rubymine, but ::s in minitest
+
+  def _run_suites(suites, type)
+    if options[:filter]
+      if options[:filter] =~ /\/\\Atest\\: (.*) should (.*)\\\.\//
+        context_filter = $1
+        should_filter = $2
+        should_filter.strip!
+        should_filter.gsub!(" ", "_")
+        should_filter.gsub!(/\W/, "")
+        context_filter = context_filter.gsub(" ", "((::)| )")
+        options[:filter] = "/\\A#{context_filter}(Test)?#test(_\\d+)?_should_#{should_filter}\\Z/"
+      end
+    end
+
+    super
+  end
+
+  # Prevent "Empty test suite" verbosity when running in rubymine
+  def _run_suite(suite, type)
+
+    filter = options[:filter] || '/./'
+    filter = Regexp.new $1 if filter =~ /\/(.*)\//
+    all_test_methods = suite.send "#{type}_methods"
+    filtered_test_methods = all_test_methods.find_all { |m|
+      filter === m || filter === "#{suite}##{m}"
+    }
+
+    if filtered_test_methods.size > 0
+      super
+    else
+      [0, 0]
+    end
+  end
+end
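
Note: `ForkChild` and `FileCounter` are the helpers the unit tests use to observe what happens inside forked workers: the child's return value travels back to the parent over a pipe via `Marshal`, and the counter gives multiple processes a lock-protected file to record side effects in. A minimal sketch of how they combine with `test_helper.rb` loaded (the specific values asserted here are illustrative, not copied from the gem's test suite):

    # Illustrative use of the two helpers defined above.
    counter = FileCounter.new

    child = ForkChild.new do
      3.times { counter.increment }   # runs in the forked child
      :done                           # marshalled back to the parent
    end

    p child.wait     # => :done  (waits up to 5s and reaps the child)
    p counter.read   # => 3      (increments are visible via the shared file)
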