backburner-allq 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +17 -0
- data/.travis.yml +29 -0
- data/CHANGELOG.md +133 -0
- data/CONTRIBUTING.md +37 -0
- data/Gemfile +4 -0
- data/HOOKS.md +99 -0
- data/LICENSE +22 -0
- data/README.md +658 -0
- data/Rakefile +17 -0
- data/TODO +4 -0
- data/backburner-allq.gemspec +26 -0
- data/bin/backburner +7 -0
- data/circle.yml +3 -0
- data/deploy.sh +3 -0
- data/examples/custom.rb +25 -0
- data/examples/demo.rb +60 -0
- data/examples/god.rb +46 -0
- data/examples/hooked.rb +87 -0
- data/examples/retried.rb +31 -0
- data/examples/simple.rb +43 -0
- data/examples/stress.rb +31 -0
- data/lib/backburner.rb +75 -0
- data/lib/backburner/allq_wrapper.rb +317 -0
- data/lib/backburner/async_proxy.rb +25 -0
- data/lib/backburner/cli.rb +53 -0
- data/lib/backburner/configuration.rb +48 -0
- data/lib/backburner/connection.rb +157 -0
- data/lib/backburner/helpers.rb +193 -0
- data/lib/backburner/hooks.rb +53 -0
- data/lib/backburner/job.rb +118 -0
- data/lib/backburner/logger.rb +53 -0
- data/lib/backburner/performable.rb +95 -0
- data/lib/backburner/queue.rb +145 -0
- data/lib/backburner/tasks.rb +54 -0
- data/lib/backburner/version.rb +3 -0
- data/lib/backburner/worker.rb +221 -0
- data/lib/backburner/workers/forking.rb +52 -0
- data/lib/backburner/workers/simple.rb +29 -0
- data/lib/backburner/workers/threading.rb +163 -0
- data/lib/backburner/workers/threads_on_fork.rb +263 -0
- data/test/async_proxy_test.rb +36 -0
- data/test/back_burner_test.rb +88 -0
- data/test/connection_test.rb +179 -0
- data/test/fixtures/hooked.rb +122 -0
- data/test/fixtures/test_fork_jobs.rb +72 -0
- data/test/fixtures/test_forking_jobs.rb +56 -0
- data/test/fixtures/test_jobs.rb +87 -0
- data/test/fixtures/test_queue_settings.rb +14 -0
- data/test/helpers/templogger.rb +22 -0
- data/test/helpers_test.rb +278 -0
- data/test/hooks_test.rb +112 -0
- data/test/job_test.rb +185 -0
- data/test/logger_test.rb +44 -0
- data/test/performable_test.rb +88 -0
- data/test/queue_test.rb +69 -0
- data/test/test_helper.rb +128 -0
- data/test/worker_test.rb +157 -0
- data/test/workers/forking_worker_test.rb +181 -0
- data/test/workers/simple_worker_test.rb +350 -0
- data/test/workers/threading_worker_test.rb +104 -0
- data/test/workers/threads_on_fork_worker_test.rb +484 -0
- metadata +217 -0
module Backburner
  # Mixin that gives a job class a small DSL for per-queue settings
  # (name, priority, TTR, retry behavior, and worker-specific limits).
  module Queue
    # Ruby callback fired when this module is mixed into a job class.
    # Seeds every queue-level setting to nil on the including class,
    # installs the ClassMethods DSL, and registers the class so workers
    # can discover its queue later.
    def self.included(base)
      %i[
        @queue_name
        @queue_priority
        @queue_respond_timeout
        @queue_max_job_retries
        @queue_retry_delay
        @queue_retry_delay_proc
        @queue_jobs_limit
        @queue_garbage_limit
        @queue_retry_limit
      ].each { |ivar| base.instance_variable_set(ivar, nil) }
      base.extend ClassMethods
      Backburner::Worker.known_queue_classes << base
    end

    module ClassMethods
      # Returns or assigns the queue name for this job.
      # When reading, a Proc value is called with the class; a nil value
      # falls back to Backburner.configuration.primary_queue.
      #
      # @example
      #   queue "some.task.name"
      #   @klass.queue # => "some.task.name"
      #
      def queue(name = nil)
        return @queue_name = name if name
        resolved = @queue_name.is_a?(Proc) ? @queue_name.call(self) : @queue_name
        resolved || Backburner.configuration.primary_queue
      end

      # Returns or assigns the queue priority for this job.
      #
      # @example
      #   queue_priority 120
      #   @klass.queue_priority # => 120
      #
      def queue_priority(pri = nil)
        return @queue_priority = pri if pri
        @queue_priority
      end

      # Returns or assigns the queue respond_timeout (TTR) for this job.
      #
      # @example
      #   queue_respond_timeout 120
      #   @klass.queue_respond_timeout # => 120
      #
      def queue_respond_timeout(ttr = nil)
        return @queue_respond_timeout = ttr if ttr
        @queue_respond_timeout
      end

      # Returns or assigns the maximum number of job retries for this job.
      #
      # @example
      #   queue_max_job_retries 120
      #   @klass.queue_max_job_retries # => 120
      #
      def queue_max_job_retries(delay = nil)
        return @queue_max_job_retries = delay if delay
        @queue_max_job_retries
      end

      # Returns or assigns the retry delay (in seconds) for this job.
      #
      # @example
      #   queue_retry_delay 120
      #   @klass.queue_retry_delay # => 120
      #
      def queue_retry_delay(delay = nil)
        return @queue_retry_delay = delay if delay
        @queue_retry_delay
      end

      # Returns or assigns the retry-delay proc for this job; the proc
      # receives (min_retry_delay, num_retries) and returns a delay.
      #
      # @example
      #   queue_retry_delay_proc lambda { |min_retry_delay, num_retries| min_retry_delay + (num_retries ** 2) }
      #   @klass.queue_retry_delay_proc # => lambda { |min_retry_delay, num_retries| min_retry_delay + (num_retries ** 2) }
      #
      def queue_retry_delay_proc(proc = nil)
        return @queue_retry_delay_proc = proc if proc
        @queue_retry_delay_proc
      end

      # Returns or assigns the parallel active-jobs limit for this queue
      # (honored only by the ThreadsOnFork and Threading workers).
      #
      # @example
      #   queue_jobs_limit 5
      #   @klass.queue_jobs_limit # => 5
      #
      def queue_jobs_limit(limit = nil)
        return @queue_jobs_limit = limit if limit
        @queue_jobs_limit
      end

      # Returns or assigns the jobs garbage limit for this queue
      # (honored only by the ThreadsOnFork worker).
      #
      # @example
      #   queue_garbage_limit 1000
      #   @klass.queue_garbage_limit # => 1000
      #
      def queue_garbage_limit(limit = nil)
        return @queue_garbage_limit = limit if limit
        @queue_garbage_limit
      end

      # Returns or assigns the retry limit for this queue
      # (honored only by the ThreadsOnFork worker).
      #
      # @example
      #   queue_retry_limit 6
      #   @klass.queue_retry_limit # => 6
      #
      def queue_retry_limit(limit = nil)
        return @queue_retry_limit = limit if limit
        @queue_retry_limit
      end
    end # ClassMethods
  end # Queue
end # Backburner
# require 'backburner/tasks'
# will give you the backburner tasks

namespace :backburner do
  # QUEUE=foo,bar,baz rake backburner:work
  desc "Start backburner worker using default worker"
  task :work => :environment do
    Backburner.work get_queues
  end

  namespace :simple do
    # QUEUE=foo,bar,baz rake backburner:simple:work
    desc "Starts backburner worker using simple processing"
    task :work => :environment do
      Backburner.work get_queues, :worker => Backburner::Workers::Simple
    end
  end # simple

  namespace :forking do
    # QUEUE=foo,bar,baz rake backburner:forking:work
    desc "Starts backburner worker using fork processing"
    task :work => :environment do
      Backburner.work get_queues, :worker => Backburner::Workers::Forking
    end
  end # forking

  namespace :threads_on_fork do
    # QUEUE=twitter:10:5000:5,parse_page,send_mail,verify_birthday THREADS=2 GARBAGE=1000 rake backburner:threads_on_fork:work
    # twitter tube will have 10 threads, garbage after 5k executions and retry 5 times.
    desc "Starts backburner worker using threads_on_fork processing"
    task :work => :environment do
      threads = ENV['THREADS'].to_i
      garbage = ENV['GARBAGE'].to_i
      # Only override the worker defaults when a positive value was supplied.
      Backburner::Workers::ThreadsOnFork.threads_number = threads if threads > 0
      Backburner::Workers::ThreadsOnFork.garbage_after = garbage if garbage > 0
      Backburner.work get_queues, :worker => Backburner::Workers::ThreadsOnFork
    end
  end # threads_on_fork

  namespace :threading do
    # QUEUE=twitter:10,parse_page,send_mail,verify_birthday THREADS=2 rake backburner:threading:work
    # twitter tube will have 10 threads
    desc "Starts backburner worker using threading processing"
    task :work => :environment do
      threads = ENV['THREADS'].to_i
      Backburner::Workers::Threading.threads_number = threads if threads > 0
      Backburner.work get_queues, :worker => Backburner::Workers::Threading
    end
  end # threading

  # Returns the queue names from the QUEUE env var as an array of strings,
  # or nil when QUEUE is unset (letting Backburner fall back to defaults).
  def get_queues
    ENV["QUEUE"]&.split(',')
  end
end
require 'backburner/job'

module Backburner
  #
  # @abstract Subclass and override {#process_tube_names}, {#prepare} and {#start} to implement
  # a custom Worker class.
  #
  class Worker
    include Backburner::Helpers
    include Backburner::Logger

    # Backburner::Worker.known_queue_classes
    # List of known_queue_classes
    class << self
      attr_writer :known_queue_classes
      def known_queue_classes; @known_queue_classes ||= []; end
    end

    # Enqueues a job to be processed later by a worker.
    # Options: `pri` (priority), `delay` (delay in secs), `ttr` (time to respond), `queue` (queue name)
    #
    # Runs the :before_enqueue hook first (returning nil if it vetoes), then
    # serializes the payload with the configured serializer proc, puts it on
    # the connection inside `retryable`, and runs :after_enqueue.
    # The connection is always closed in the ensure block.
    #
    # @raise [Beaneater::NotConnected] If beanstalk fails to connect.
    # @example
    #   Backburner::Worker.enqueue NewsletterSender, [self.id, user.id], :ttr => 1000
    #
    def self.enqueue(job_class, args=[], opts={})
      pri = resolve_priority(opts[:pri] || job_class)
      delay = [0, opts[:delay].to_i].max
      ttr = resolve_respond_timeout(opts[:ttr] || job_class)
      res = Backburner::Hooks.invoke_hook_events(job_class, :before_enqueue, *args)

      return nil unless res # stop if hook is false

      data = { :class => job_class.name, :args => args }
      # :queue may be a Proc receiving the job class, or a literal name.
      queue = opts[:queue] && (Proc === opts[:queue] ? opts[:queue].call(job_class) : opts[:queue])

      begin
        response = nil
        connection = Backburner::Connection.new(Backburner.configuration.allq_url)
        connection.retryable do
          # NOTE(review): tube_name is computed but never passed to
          # connection.put below — confirm whether the allq connection
          # routes by tube internally or whether this is dead code.
          tube_name = expand_tube_name(queue || job_class)
          serialized_data = Backburner.configuration.job_serializer_proc.call(data)
          response = connection.put(serialized_data, :pri => pri, :delay => delay, :ttr => ttr)
        end
        return nil unless Backburner::Hooks.invoke_hook_events(job_class, :after_enqueue, *args)
      ensure
        connection.close if connection
      end

      response
    end

    # Starts processing jobs with the specified tube_names.
    # Swallows SystemExit so a worker-triggered shutdown exits quietly.
    #
    # @example
    #   Backburner::Worker.start(["foo.tube.name"])
    #
    def self.start(tube_names=nil)
      begin
        self.new(tube_names).start
      rescue SystemExit
        # do nothing
      end
    end

    # List of tube names to be watched and processed
    attr_accessor :tube_names, :connection

    # Constructs a new worker for processing jobs within specified tubes.
    # Opens a connection, normalizes the tube list, and installs
    # TERM/INT signal handlers.
    #
    # @example
    #   Worker.new(['test.job'])
    def initialize(tube_names=nil)
      @connection = new_connection
      @tube_names = self.process_tube_names(tube_names)
      register_signal_handlers!
    end

    # Starts processing ready jobs indefinitely.
    # Primary way to consume and process jobs in specified tubes.
    #
    # @example
    #   @worker.start
    #
    # @abstract Subclasses must implement this.
    def start
      raise NotImplementedError
    end

    # Used to prepare the job queues before job processing is initiated.
    #
    # @raise [Beaneater::NotConnected] If beanstalk fails to connect.
    # @example
    #   @worker.prepare
    #
    # @abstract Define this in your worker subclass
    # to be run once before processing. Recommended to watch tubes
    # or print a message to the logs with 'log_info'
    #
    def prepare
      raise NotImplementedError
    end

    # Triggers this worker to shutdown.
    # The log call runs in a separate thread because this is invoked from
    # a signal trap context (see register_signal_handlers!), where taking
    # the logger's lock directly could deadlock.
    def shutdown
      Thread.new do
        log_info 'Worker exiting...'
      end
      Kernel.exit
    end

    # Processes tube_names given tube_names array.
    # Should return normalized tube_names as an array of strings.
    #
    # @example
    #   process_tube_names([['foo'], ['bar']])
    #   => ['foo', 'bar', 'baz']
    #
    # @note This method can be overridden in inherited workers
    # to add more complex tube name processing.
    def process_tube_names(tube_names)
      compact_tube_names(tube_names)
    end

    # Performs a job by reserving a job from beanstalk and processing it.
    # On failure, either releases the job for retry (with a delay from the
    # configured retry-delay proc) or buries it once retries are exhausted,
    # then invokes the configured error handler.
    #
    # @example
    #   @worker.work_one_job
    # @raise [Beaneater::NotConnected] If beanstalk fails to connect multiple times.
    def work_one_job(conn = connection)
      begin
        job = reserve_job(conn)
      rescue Beaneater::TimedOutError => e
        # No job became ready within the reserve timeout; nothing to do.
        return
      end

      self.log_job_begin(job.name, job.args)
      job.process
      self.log_job_end(job.name)

    rescue Backburner::Job::JobFormatInvalid => e
      self.log_error self.exception_message(e)
    rescue => e # Error occurred processing job
      self.log_error self.exception_message(e) unless e.is_a?(Backburner::Job::RetryJob)

      unless job
        self.log_error "Error occurred before we were able to assign a job. Giving up without retrying!"
        return
      end

      # NB: There's a slight chance here that the connection to beanstalkd has
      # gone down between the time we reserved / processed the job and here.
      num_retries = job.stats.releases
      max_job_retries = resolve_max_job_retries(job.job_class)
      retry_status = "failed: attempt #{num_retries+1} of #{max_job_retries+1}"
      if num_retries < max_job_retries # retry again
        retry_delay = resolve_retry_delay(job.job_class)
        # Fall back to the plain delay if the custom delay proc raises.
        delay = resolve_retry_delay_proc(job.job_class).call(retry_delay, num_retries) rescue retry_delay
        job.retry(num_retries + 1, delay)
        self.log_job_end(job.name, "#{retry_status}, retrying in #{delay}s") if job_started_at
      else # retries failed, bury
        job.bury
        self.log_job_end(job.name, "#{retry_status}, burying") if job_started_at
      end

      handle_error(e, job.name, job.args, job)
    end


    protected

    # Return a new connection instance, wiring the :on_reconnect hook.
    # NOTE(review): this reads Backburner.configuration.beanstalk_url while
    # self.enqueue above uses configuration.allq_url — confirm which setting
    # this allq fork intends workers to use.
    def new_connection
      Connection.new(Backburner.configuration.beanstalk_url) { |conn| Backburner::Hooks.invoke_hook_events(self, :on_reconnect, conn) }
    end

    # Reserve a job from the watched queues by polling one tube chosen at
    # random from @tube_names.
    # NOTE(review): the reserve_timeout parameter is accepted but not passed
    # to conn.get — confirm whether the allq connection applies a timeout
    # internally.
    def reserve_job(conn, reserve_timeout = Backburner.configuration.reserve_timeout)
      Backburner::Job.new(conn.get(@tube_names.sample))
    end

    # Returns a list of all tubes known within the system
    # Filtered for tubes that match the known prefix
    def all_existing_queues
      known_queues = Backburner::Worker.known_queue_classes.map(&:queue)
      existing_tubes = self.connection.tubes.all.map(&:name).select { |tube| tube =~ /^#{queue_config.tube_namespace}/ }
      existing_tubes + known_queues + [queue_config.primary_queue]
    end


    # Handles an error according to custom definition
    # Used when processing a job that errors out
    # Dispatches on the handler's arity: 1 => (e), 3 => (e, name, args),
    # otherwise (e, name, args, job).
    def handle_error(e, name, args, job)
      if error_handler = Backburner.configuration.on_error
        if error_handler.arity == 1
          error_handler.call(e)
        elsif error_handler.arity == 3
          error_handler.call(e, name, args)
        else
          error_handler.call(e, name, args, job)
        end
      end
    end

    # Normalizes tube names given array of tube_names
    # Compacts nil items, flattens arrays, sets tubes to nil if no valid names
    # Loads default tubes when no tubes given.
    def compact_tube_names(tube_names)
      # Unwrap a single nested array, e.g. [['foo', 'bar']] => ['foo', 'bar']
      tube_names = tube_names.first if tube_names && tube_names.size == 1 && tube_names.first.is_a?(Array)
      tube_names = Array(tube_names).compact if tube_names && Array(tube_names).compact.size > 0
      tube_names = nil if tube_names && tube_names.compact.empty?
      # Fall back to the configured default queues, or every known queue.
      tube_names ||= Backburner.default_queues.any? ? Backburner.default_queues : all_existing_queues
      Array(tube_names).uniq
    end

    # Registers signal handlers TERM and INT to trigger
    def register_signal_handlers!
      trap('TERM') { shutdown }
      trap('INT') { shutdown }
    end
  end # Worker
end # Backburner
module Backburner
  module Workers
    # Worker that forks a fresh child process for every job, isolating
    # each job's execution from the long-lived parent.
    class Forking < Worker
      # Prepares the job queues before processing begins: expands each
      # configured tube name, de-duplicates the list, logs it, and watches
      # all of the resulting tubes on the current connection.
      #
      # @raise [Beaneater::NotConnected] If beanstalk fails to connect.
      # @example
      #   @worker.prepare
      #
      def prepare
        tube_names.map! { |tube| expand_tube_name(tube) }
        tube_names.uniq!
        log_info "Working #{tube_names.size} queues: [ #{tube_names.join(', ')} ]"
        connection.tubes.watch!(*tube_names)
      end

      # Consumes jobs forever: prepares the tubes once, then forks one
      # child per job in an endless loop.
      #
      # @example
      #   @worker.start
      #
      def start
        prepare
        loop { fork_one_job }
      end

      # Forks a child that waits for a single job, works it, and exits;
      # the parent blocks until the child finishes. The child re-establishes
      # its own server connection as a side effect of running in a new process.
      def fork_one_job
        child_pid = Process.fork do
          work_one_job
          coolest_exit
        end
        Process.wait(child_pid)
      end

      # Reconnect callback: adopt the fresh connection and re-watch tubes.
      def on_reconnect(conn)
        @connection = conn
        prepare
      end

      # Exit with Kernel.exit! so the child skips at_exit callbacks that
      # belong to the parent process. Exit code 99 signals that the fork
      # reached the garbage number.
      def coolest_exit
        Kernel.exit! 99
      end

    end
  end
end