reqless 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Gemfile +8 -0
- data/README.md +648 -0
- data/Rakefile +117 -0
- data/bin/docker-build-and-test +22 -0
- data/exe/reqless-web +11 -0
- data/lib/reqless/config.rb +31 -0
- data/lib/reqless/failure_formatter.rb +43 -0
- data/lib/reqless/job.rb +496 -0
- data/lib/reqless/job_reservers/ordered.rb +29 -0
- data/lib/reqless/job_reservers/round_robin.rb +46 -0
- data/lib/reqless/job_reservers/shuffled_round_robin.rb +21 -0
- data/lib/reqless/lua/reqless-lib.lua +2965 -0
- data/lib/reqless/lua/reqless.lua +2545 -0
- data/lib/reqless/lua_script.rb +90 -0
- data/lib/reqless/middleware/requeue_exceptions.rb +94 -0
- data/lib/reqless/middleware/retry_exceptions.rb +72 -0
- data/lib/reqless/middleware/sentry.rb +66 -0
- data/lib/reqless/middleware/timeout.rb +63 -0
- data/lib/reqless/queue.rb +189 -0
- data/lib/reqless/queue_priority_pattern.rb +16 -0
- data/lib/reqless/server/static/css/bootstrap-responsive.css +686 -0
- data/lib/reqless/server/static/css/bootstrap-responsive.min.css +12 -0
- data/lib/reqless/server/static/css/bootstrap.css +3991 -0
- data/lib/reqless/server/static/css/bootstrap.min.css +689 -0
- data/lib/reqless/server/static/css/codemirror.css +112 -0
- data/lib/reqless/server/static/css/docs.css +839 -0
- data/lib/reqless/server/static/css/jquery.noty.css +105 -0
- data/lib/reqless/server/static/css/noty_theme_twitter.css +137 -0
- data/lib/reqless/server/static/css/style.css +200 -0
- data/lib/reqless/server/static/favicon.ico +0 -0
- data/lib/reqless/server/static/img/glyphicons-halflings-white.png +0 -0
- data/lib/reqless/server/static/img/glyphicons-halflings.png +0 -0
- data/lib/reqless/server/static/js/bootstrap-alert.js +94 -0
- data/lib/reqless/server/static/js/bootstrap-scrollspy.js +125 -0
- data/lib/reqless/server/static/js/bootstrap-tab.js +130 -0
- data/lib/reqless/server/static/js/bootstrap-tooltip.js +270 -0
- data/lib/reqless/server/static/js/bootstrap-typeahead.js +285 -0
- data/lib/reqless/server/static/js/bootstrap.js +1726 -0
- data/lib/reqless/server/static/js/bootstrap.min.js +6 -0
- data/lib/reqless/server/static/js/codemirror.js +2972 -0
- data/lib/reqless/server/static/js/jquery.noty.js +220 -0
- data/lib/reqless/server/static/js/mode/javascript.js +360 -0
- data/lib/reqless/server/static/js/theme/cobalt.css +18 -0
- data/lib/reqless/server/static/js/theme/eclipse.css +25 -0
- data/lib/reqless/server/static/js/theme/elegant.css +10 -0
- data/lib/reqless/server/static/js/theme/lesser-dark.css +45 -0
- data/lib/reqless/server/static/js/theme/monokai.css +28 -0
- data/lib/reqless/server/static/js/theme/neat.css +9 -0
- data/lib/reqless/server/static/js/theme/night.css +21 -0
- data/lib/reqless/server/static/js/theme/rubyblue.css +21 -0
- data/lib/reqless/server/static/js/theme/xq-dark.css +46 -0
- data/lib/reqless/server/views/_job.erb +259 -0
- data/lib/reqless/server/views/_job_list.erb +8 -0
- data/lib/reqless/server/views/_pagination.erb +7 -0
- data/lib/reqless/server/views/about.erb +130 -0
- data/lib/reqless/server/views/completed.erb +11 -0
- data/lib/reqless/server/views/config.erb +14 -0
- data/lib/reqless/server/views/failed.erb +48 -0
- data/lib/reqless/server/views/failed_type.erb +18 -0
- data/lib/reqless/server/views/job.erb +17 -0
- data/lib/reqless/server/views/layout.erb +451 -0
- data/lib/reqless/server/views/overview.erb +137 -0
- data/lib/reqless/server/views/queue.erb +125 -0
- data/lib/reqless/server/views/queues.erb +45 -0
- data/lib/reqless/server/views/tag.erb +6 -0
- data/lib/reqless/server/views/throttles.erb +38 -0
- data/lib/reqless/server/views/track.erb +75 -0
- data/lib/reqless/server/views/worker.erb +34 -0
- data/lib/reqless/server/views/workers.erb +14 -0
- data/lib/reqless/server.rb +549 -0
- data/lib/reqless/subscriber.rb +74 -0
- data/lib/reqless/test_helpers/worker_helpers.rb +55 -0
- data/lib/reqless/throttle.rb +57 -0
- data/lib/reqless/version.rb +5 -0
- data/lib/reqless/worker/base.rb +237 -0
- data/lib/reqless/worker/forking.rb +215 -0
- data/lib/reqless/worker/serial.rb +41 -0
- data/lib/reqless/worker.rb +5 -0
- data/lib/reqless.rb +309 -0
- metadata +399 -0
data/lib/reqless/throttle.rb
@@ -0,0 +1,57 @@
+# Encoding: utf-8
+
+require 'redis'
+require 'json'
+
+module Reqless
+  class Throttle
+    attr_reader :name, :client
+
+    def initialize(name, client)
+      @name = name
+      @client = client
+    end
+
+    def delete
+      @client.call('throttle.delete', @name)
+    end
+
+    def expiration=(expire_time_in_seconds)
+      update(nil, Integer(expire_time_in_seconds))
+    end
+
+    def id
+      @name
+    end
+
+    def locks
+      JSON.parse(@client.call('throttle.locks', @name))
+    end
+
+    def maximum
+      throttle_attrs['maximum'].to_i
+    end
+
+    def maximum=(max)
+      update(max)
+    end
+
+    def pending
+      JSON.parse(@client.call('throttle.pending', @name))
+    end
+
+    def ttl
+      throttle_attrs['ttl'].to_i
+    end
+
+    private
+    def throttle_attrs
+      throttle_json = @client.call('throttle.get', @name)
+      throttle_json ? JSON.parse(throttle_json) : {}
+    end
+
+    def update(max, expiration = 0)
+      @client.call('throttle.set', @name, max || maximum, expiration)
+    end
+  end
+end
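To make the API above concrete, here is a hedged usage sketch (not taken from the gem's documentation): it exercises only the methods defined in this file and assumes `client` is an already-constructed Reqless client, whose construction lives in data/lib/reqless.rb and is not shown in this hunk.

```ruby
# Hedged sketch: the throttle name and limits are illustrative.
require 'reqless'

throttle = Reqless::Throttle.new('my-throttle', client)
throttle.maximum = 5       # throttle.set with a new maximum
throttle.expiration = 60   # throttle.set keeping the current maximum, adding an expiry
throttle.maximum           # => 5, read back via throttle.get
throttle.locks             # JSON-decoded result of the throttle.locks command
throttle.delete            # throttle.delete
```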
data/lib/reqless/worker/base.rb
@@ -0,0 +1,237 @@
+# Encoding: utf-8
+
+# Standard stuff
+require 'time'
+require 'logger'
+require 'thread'
+
+require 'reqless'
+require 'reqless/subscriber'
+
+module Reqless
+  module Workers
+    JobLockLost = Class.new(StandardError)
+
+    class BaseWorker
+      attr_accessor :output, :reserver, :interval, :paused,
+                    :options, :sighup_handler
+
+      def initialize(reserver, options = {})
+        # Our job reserver and options
+        @reserver = reserver
+        @options = options
+
+        # SIGHUP handler
+        @sighup_handler = options.fetch(:sighup_handler) { lambda { } }
+
+        # Our logger
+        @log = options.fetch(:logger) do
+          @output = options.fetch(:output, $stdout)
+          Logger.new(output).tap do |logger|
+            logger.level = options.fetch(:log_level, Logger::WARN)
+            logger.formatter = options.fetch(:log_formatter) do
+              Proc.new { |severity, datetime, progname, msg| "#{datetime}: #{msg}\n" }
+            end
+          end
+        end
+
+        # The interval for checking for new jobs
+        @interval = options.fetch(:interval, 5.0)
+        @current_job_mutex = Mutex.new
+        @current_job = nil
+
+        # Default behavior when a lock is lost: stop after the current job.
+        on_current_job_lock_lost { shutdown }
+      end
+
+      def log_level
+        @log.level
+      end
+
+      def safe_trap(signal_name, &cblock)
+        begin
+          trap(signal_name, cblock)
+        rescue ArgumentError
+          warn "Signal #{signal_name} not supported."
+        end
+      end
+
+      # The meaning of these signals is meant to closely mirror resque
+      #
+      # TERM: Shutdown immediately, stop processing jobs.
+      # INT: Shutdown immediately, stop processing jobs.
+      # QUIT: Shutdown after the current job has finished processing.
+      # USR1: Kill the forked children immediately, continue processing jobs.
+      # USR2: Pause after this job
+      # CONT: Start processing jobs again after a USR2
+      # HUP: Print current stack to log and continue
+      def register_signal_handlers
+        # Otherwise, we want to take the appropriate action
+        trap('TERM') { exit! }
+        trap('INT') { exit! }
+        safe_trap('HUP') { sighup_handler.call }
+        safe_trap('QUIT') { shutdown }
+        begin
+          trap('CONT') { unpause }
+          trap('USR2') { pause }
+        rescue ArgumentError
+          warn 'Signals USR2, and/or CONT not supported.'
+        end
+      end
+
+      # Return an enumerator to each of the jobs provided by the reserver
+      def jobs
+        return Enumerator.new do |enum|
+          loop do
+            begin
+              job = reserver.reserve
+            rescue Exception => error
+              # We want workers to durably stay up, so we don't want errors
+              # during job reserving (e.g. network timeouts, etc) to kill the
+              # worker.
+              log(:error,
+                  "Error reserving job: #{error.class}: #{error.message}")
+            end
+
+            # If we ended up getting a job, yield it. Otherwise, we wait
+            if job.nil?
+              no_job_available
+            else
+              self.current_job = job
+              enum.yield(job)
+              self.current_job = nil
+            end
+
+            break if @shutdown
+          end
+        end
+      end
+
+      # Actually perform the job
+      def perform(job)
+        around_perform(job)
+      rescue JobLockLost
+        log(:warn, "Lost lock for job #{job.jid}")
+      rescue Exception => error
+        fail_job(job, error, caller)
+      else
+        try_complete(job)
+      end
+
+      # Allow middleware modules to be mixed in and override the
+      # definition of around_perform while providing a default
+      # implementation so our code can assume the method is present.
+      module SupportsMiddlewareModules
+        def around_perform(job)
+          job.perform
+        end
+
+        def after_fork
+        end
+      end
+
+      include SupportsMiddlewareModules
+
+      # Stop processing after this job
+      def shutdown
+        @shutdown = true
+      end
+      alias stop! shutdown # so we can call `stop!` regardless of the worker type
+
+      # Pause the worker -- take no more new jobs
+      def pause
+        @paused = true
+        procline "Paused -- #{reserver.description}"
+      end
+
+      # Continue taking new jobs
+      def unpause
+        @paused = false
+      end
+
+      # Set the procline. Not supported on all systems.
+      def procline(value)
+        $0 = "reQless-#{Reqless::VERSION}: #{value} at #{Time.now.iso8601}"
+        log(:debug, $PROGRAM_NAME)
+      end
+
+      # Complete the job unless the worker has already put it into another state
+      # by completing / failing / etc. the job
+      def try_complete(job)
+        job.complete unless job.state_changed?
+      rescue Job::CantCompleteError => e
+        # There's not much we can do here. Complete fails in a few cases:
+        # - The job is already failed (i.e. by another worker)
+        # - The job is being worked on by another worker
+        # - The job has been cancelled
+        #
+        # We don't want to (or aren't able to) fail the job with this error in
+        # any of these cases, so the best we can do is log the failure.
+        log(:error, "Failed to complete #{job.inspect}: #{e.message}")
+      end
+
+      def fail_job(job, error, worker_backtrace)
+        failure = Reqless.failure_formatter.format(job, error, worker_backtrace)
+        log(:error, "Got #{failure.group} failure from #{job.inspect}\n#{failure.message}")
+        job.fail(*failure)
+      rescue Job::CantFailError => e
+        # There's not much we can do here. Another worker may have cancelled it,
+        # or we might not own the job, etc. Logging is the best we can do.
+        log(:error, "Failed to fail #{job.inspect}: #{e.message}")
+      end
+
+      def deregister
+        uniq_clients.each do |client|
+          client.deregister_workers(client.worker_name)
+        end
+      end
+
+      def uniq_clients
+        @uniq_clients ||= reserver.queues.map(&:client).uniq
+      end
+
+      def on_current_job_lock_lost(&block)
+        @on_current_job_lock_lost = block
+      end
+
+      def listen_for_lost_lock(job)
+        # Ensure subscriber always has a value
+        subscriber = Subscriber.start(job.client, "ql:w:#{job.client.worker_name}", log: @log) do |_, message|
+          if message['event'] == 'lock_lost' && message['jid'] == job.jid
+            @on_current_job_lock_lost.call(job)
+          end
+        end
+
+        yield
+      ensure
+        subscriber && subscriber.stop
+      end
+
+      private
+
+      def log(type, msg)
+        @log.public_send(type, "#{Process.pid}: #{msg}")
+      end
+
+      def no_job_available
+        unless interval.zero?
+          procline "Waiting for #{reserver.description}"
+          log(:debug, "Sleeping for #{interval} seconds")
+          sleep interval
+        end
+      end
+
+      def with_current_job
+        @current_job_mutex.synchronize do
+          yield @current_job
+        end
+      end
+
+      def current_job=(job)
+        @current_job_mutex.synchronize do
+          @current_job = job
+        end
+      end
+    end
+  end
+end
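The `SupportsMiddlewareModules` hook above exists so that modules mixed into a worker instance can wrap `around_perform` and delegate with `super`. A hedged illustration of that mechanism (the module name and the timing logic are illustrative, not part of the gem):

```ruby
# Illustrative middleware: wraps the default around_perform (which calls
# job.perform) and reports how long the job took, using only the #jid
# accessor seen elsewhere in this diff.
module JobTimingMiddleware
  def around_perform(job)
    started_at = Time.now
    super
  ensure
    puts "#{job.jid} finished after #{Time.now - started_at}s"
  end
end

# worker.extend(JobTimingMiddleware)  # works for SerialWorker and ForkingWorker alike,
#                                     # since ForkingWorker#extend re-applies modules to children
```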
data/lib/reqless/worker/forking.rb
@@ -0,0 +1,215 @@
+# Encoding: utf-8
+
+require 'reqless'
+require 'reqless/worker/base'
+require 'reqless/worker/serial'
+require 'thread'
+
+module Reqless
+  module Workers
+    class ForkingWorker < BaseWorker
+      # The child startup interval
+      attr_accessor :max_startup_interval
+
+      def initialize(reserver, options = {})
+        super(reserver, options)
+        # The keys are the child PIDs, the values are information about the
+        # worker, including its sandbox directory. This directory currently
+        # isn't used, but this sets up for having that eventually.
+        @sandboxes = {}
+
+        # Save our options for starting children
+        @options = options
+
+        # The max interval between when children start (reduces thundering herd)
+        @max_startup_interval = options[:max_startup_interval] || 10.0
+
+        # TODO: facter to figure out how many cores we have
+        @num_workers = options[:num_workers] || 1
+
+        # All the modules that have been applied to this worker
+        @modules = []
+
+        @sandbox_mutex = Mutex.new
+      end
+
+      # Because we spawn a new worker, we need to apply all the modules that
+      # extend this one
+      def extend(mod)
+        @modules << mod
+        super(mod)
+      end
+
+      # Spawn a new child worker
+      def spawn
+        worker = SerialWorker.new(reserver, @options)
+        # We use 11 as the exit status so that it is something unique
+        # (rather than the common 1). Plus, 11 looks a little like
+        # ll (i.e. "Lock Lost").
+        worker.on_current_job_lock_lost { |job| exit!(11) }
+        @modules.each { |mod| worker.extend(mod) }
+        worker
+      end
+
+      # Register our handling of signals
+      def register_signal_handlers
+        # If we're the parent process, we mostly want to forward the signals on
+        # to the child processes. It's just that sometimes we want to wait for
+        # them and then exit
+        trap('TERM') do
+          stop!('TERM')
+          exit
+        end
+
+        trap('INT') do
+          stop!('TERM')
+          exit
+        end
+
+        safe_trap('HUP') { sighup_handler.call }
+        safe_trap('QUIT') do
+          stop!('QUIT')
+          exit
+        end
+        safe_trap('USR1') { stop!('KILL') }
+
+        begin
+          trap('CONT') { stop('CONT') }
+          trap('USR2') { stop('USR2') }
+        rescue ArgumentError
+          warn 'Signals USR2, and/or CONT not supported.'
+        end
+      end
+
+      # Run this worker
+      def run
+        startup_sandboxes
+
+        # Now keep an eye on our child processes, spawn replacements as needed
+        loop do
+          begin
+            # Don't wait on any processes if we're already in shutdown mode.
+            break if @shutdown
+
+            # Wait for any child to kick the bucket
+            pid, status = Process.wait2
+            code, sig = status.exitstatus, status.stopsig
+            log(:warn,
+                "Worker process #{pid} died with #{code} from signal (#{sig})")
+
+            # allow our shutdown logic (called from a separate thread) to take effect.
+            break if @shutdown
+
+            spawn_replacement_child(pid)
+          rescue SystemCallError => e
+            log(:error, "Failed to wait for child process: #{e.inspect}")
+            # If we're shutting down, the loop above will exit
+            exit! unless @shutdown
+          end
+        end
+      end
+
+      # Returns a list of each of the child pids
+      def children
+        @sandboxes.keys
+      end
+
+      # Signal all the children
+      def stop(signal = 'QUIT')
+        log(:warn, "Sending #{signal} to children")
+        children.each do |pid|
+          begin
+            Process.kill(signal, pid)
+          rescue Errno::ESRCH
+            # no such process -- means the process has already died.
+          end
+        end
+      end
+
+      # Signal all the children and wait for them to exit
+      def stop!(signal = 'QUIT')
+        shutdown
+        shutdown_sandboxes(signal)
+      end
+
+      private
+
+      def startup_sandboxes
+        # Make sure we respond to signals correctly
+        register_signal_handlers
+
+        log(:debug, "Starting to run with #{@num_workers} workers")
+        @num_workers.times do |i|
+          slot = {
+            worker_id: i,
+            sandbox: nil
+          }
+
+          cpid = fork_child_process do
+            # Wait for a bit to calm the thundering herd
+            sleep(rand(max_startup_interval)) if max_startup_interval > 0
+          end
+
+          # If we're the parent process, save information about the child
+          log(:info, "Spawned worker #{cpid}")
+          @sandboxes[cpid] = slot
+        end
+      end
+
+      def shutdown_sandboxes(signal)
+        @sandbox_mutex.synchronize do
+          # First, send the signal
+          stop(signal)
+
+          # Wait for each of our children
+          log(:warn, 'Waiting for child processes')
+
+          until @sandboxes.empty?
+            begin
+              pid, _ = Process.wait2
+              log(:warn, "Child #{pid} stopped")
+              @sandboxes.delete(pid)
+            rescue SystemCallError
+              break
+            end
+          end
+
+          log(:warn, 'All children have stopped')
+
+          # If there were any child processes we couldn't wait for, log it
+          @sandboxes.keys.each do |cpid|
+            log(:warn, "Could not wait for child #{cpid}")
+          end
+
+          @sandboxes.clear
+        end
+      end
+
+      private
+
+      def spawn_replacement_child(pid)
+        @sandbox_mutex.synchronize do
+          return if @shutdown
+
+          # And give its slot to a new worker process
+          slot = @sandboxes.delete(pid)
+          cpid = fork_child_process
+
+          # If we're the parent process, save information about the child
+          log(:warn, "Spawned worker #{cpid} to replace #{pid}")
+          @sandboxes[cpid] = slot
+        end
+      end
+
+      # returns child's pid.
+      def fork_child_process
+        fork do
+          yield if block_given?
+          after_fork
+          spawn.run
+        end
+      end
+
+    end
+  end
+end
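For orientation, a hedged sketch of how this forking worker might be launched. The reserver class name is inferred from data/lib/reqless/job_reservers/round_robin.rb, and client/queue construction is elided because those constructors live in files not shown in this hunk; the options passed below are exactly the ones read by `ForkingWorker#initialize` above.

```ruby
# Hedged launch sketch; only the worker options are verified against the code above.
require 'reqless'
require 'reqless/job_reservers/round_robin'
require 'reqless/worker/forking'

queues   = [] # Reqless queue objects obtained from a client (construction assumed, not shown)
reserver = Reqless::JobReservers::RoundRobin.new(queues) # class name inferred from the file path

worker = Reqless::Workers::ForkingWorker.new(
  reserver,
  num_workers: 4,             # how many SerialWorker children to fork
  max_startup_interval: 5.0,  # stagger child startup to soften the thundering herd
  interval: 5.0               # per-child sleep when no job is available
)
worker.run  # parent blocks, waits on children, and respawns them until stop!/QUIT
```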
data/lib/reqless/worker/serial.rb
@@ -0,0 +1,41 @@
+# Encoding: utf-8
+
+require 'reqless'
+require 'reqless/worker/base'
+
+module Reqless
+  module Workers
+    # A worker that keeps popping off jobs and processing them
+    class SerialWorker < BaseWorker
+      def initialize(reserver, options = {})
+        super(reserver, options)
+      end
+
+      def run
+        log(:info, "Starting #{reserver.description} in #{Process.pid}")
+        procline "Starting #{reserver.description}"
+        register_signal_handlers
+
+        reserver.prep_for_work!
+
+        procline "Running #{reserver.description}"
+
+        jobs.each do |job|
+          # Run the job we're working on
+          log(:debug, "Starting job #{job.klass_name} (#{job.jid} from #{job.queue_name})")
+          procline "Processing #{job.description}"
+          listen_for_lost_lock(job) do
+            perform(job)
+          end
+          log(:debug, "Finished job #{job.klass_name} (#{job.jid} from #{job.queue_name})")
+
+          # So long as we're paused, we should wait
+          while paused
+            log(:debug, 'Paused...')
+            sleep interval
+          end
+        end
+      end
+    end
+  end
+end
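Finally, a hedged sketch of driving the serial worker directly in-process (the kind of use that data/lib/reqless/test_helpers/worker_helpers.rb hints at). It assumes `reserver` is an already-constructed object responding to the `reserve`, `description`, `prep_for_work!` and `queues` methods the base class calls; `interval: 0` skips the sleep in `no_job_available`.

```ruby
# Hedged sketch: the reserver object is assumed to exist already.
require 'logger'
require 'reqless'
require 'reqless/worker/serial'

worker = Reqless::Workers::SerialWorker.new(
  reserver,
  interval: 0,             # poll without sleeping when no job is available
  log_level: Logger::INFO, # picked up by the default logger built in BaseWorker#initialize
  output: $stdout
)

Thread.new { sleep 60; worker.shutdown } # illustrative: stop after roughly a minute
worker.run
```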