dat-worker-pool 0.5.0 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/Gemfile +1 -1
- data/bench/report.rb +130 -0
- data/bench/report.txt +7 -0
- data/dat-worker-pool.gemspec +1 -1
- data/lib/dat-worker-pool.rb +38 -187
- data/lib/dat-worker-pool/default_queue.rb +60 -0
- data/lib/dat-worker-pool/locked_object.rb +60 -0
- data/lib/dat-worker-pool/queue.rb +71 -48
- data/lib/dat-worker-pool/runner.rb +196 -0
- data/lib/dat-worker-pool/version.rb +1 -1
- data/lib/dat-worker-pool/worker.rb +251 -72
- data/lib/dat-worker-pool/worker_pool_spy.rb +39 -53
- data/test/helper.rb +13 -0
- data/test/support/factory.rb +15 -0
- data/test/support/thread_spies.rb +83 -0
- data/test/system/dat-worker-pool_tests.rb +399 -0
- data/test/unit/dat-worker-pool_tests.rb +132 -255
- data/test/unit/default_queue_tests.rb +217 -0
- data/test/unit/locked_object_tests.rb +260 -0
- data/test/unit/queue_tests.rb +95 -72
- data/test/unit/runner_tests.rb +365 -0
- data/test/unit/worker_pool_spy_tests.rb +95 -102
- data/test/unit/worker_tests.rb +819 -153
- metadata +27 -12
- data/test/system/use_worker_pool_tests.rb +0 -34
data/lib/dat-worker-pool/locked_object.rb
@@ -0,0 +1,60 @@
+require 'thread'
+
+class DatWorkerPool
+
+  class LockedObject
+    attr_reader :mutex
+
+    def initialize(object = nil)
+      @object = object
+      @mutex = Mutex.new
+    end
+
+    def value
+      @mutex.synchronize{ @object }
+    end
+
+    def set(new_object)
+      @mutex.synchronize{ @object = new_object }
+    end
+
+    def with_lock(&block)
+      @mutex.synchronize{ block.call(@mutex, @object) }
+    end
+
+  end
+
+  class LockedArray < LockedObject
+    def initialize(array = nil)
+      super(array || [])
+    end
+
+    alias :values :value
+
+    def first; @mutex.synchronize{ @object.first }; end
+    def last; @mutex.synchronize{ @object.last }; end
+    def size; @mutex.synchronize{ @object.size }; end
+    def empty?; @mutex.synchronize{ @object.empty? }; end
+
+    def push(new_item); @mutex.synchronize{ @object.push(new_item) }; end
+    def pop; @mutex.synchronize{ @object.pop }; end
+
+    def shift; @mutex.synchronize{ @object.shift }; end
+    def unshift(new_item); @mutex.synchronize{ @object.unshift(new_item) }; end
+
+    def delete(item); @mutex.synchronize{ @object.delete(item) }; end
+  end
+
+  class LockedSet < LockedObject
+    def initialize; super(Set.new); end
+
+    alias :values :value
+
+    def size; @mutex.synchronize{ @object.size }; end
+    def empty?; @mutex.synchronize{ @object.empty? }; end
+
+    def add(item); @mutex.synchronize{ @object.add(item) }; end
+    def remove(item); @mutex.synchronize{ @object.delete(item) }; end
+  end
+
+end
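The locked wrappers above are thin mutex guards: every reader and writer synchronizes on the one shared `Mutex`, and `with_lock` hands the block both the mutex and the raw object for compound operations. A minimal usage sketch (the thread counts and item names are illustrative, and `require 'set'` is needed for `LockedSet` since locked_object.rb itself only requires 'thread'):

require 'set'
require 'dat-worker-pool/locked_object'

items = DatWorkerPool::LockedArray.new
# concurrent pushes are safe because each push runs inside the shared mutex
threads = 4.times.map do |n|
  Thread.new{ 10.times{ |i| items.push("item-#{n}-#{i}") } }
end
threads.each(&:join)
items.size    # => 40

seen = DatWorkerPool::LockedSet.new
seen.add(:a)
seen.add(:a)
seen.size     # => 1

# with_lock yields the mutex and the raw array for multi-step operations
items.with_lock{ |mutex, array| array.concat(seen.values.to_a) }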
data/lib/dat-worker-pool/queue.rb
@@ -1,65 +1,88 @@
-require 'thread'
-
 class DatWorkerPool
 
-
+  module Queue
 
-
+    def self.included(klass)
+      klass.class_eval do
+        include InstanceMethods
+      end
+    end
 
-
-      @work_items = []
-      @shutdown = false
-      @mutex = Mutex.new
-      @condition_variable = ConditionVariable.new
+    module InstanceMethods
 
-
-
-
+      # overwrite this method to add custom logic for reading the current work
+      # items on the queue
+      def work_items
+        raise NotImplementedError
+      end
 
-
-
-
+      def dwp_start
+        @dwp_running = true
+        start!
+      end
 
-
-
-
-      @shutdown = true
-      @mutex.synchronize{ @condition_variable.broadcast }
-    end
+      def dwp_signal_shutdown
+        @dwp_running = false
+      end
 
-
-
-
-      raise "Unable to add work while shutting down" if @shutdown
-      @mutex.synchronize do
-        @work_items << work_item
-        @condition_variable.signal
+      def dwp_shutdown
+        self.dwp_signal_shutdown
+        shutdown!
       end
-      @on_push_callbacks.each(&:call)
-    end
 
-
-
-    def pop
-      return if @shutdown
-      item = @mutex.synchronize do
-        @condition_variable.wait(@mutex) while !@shutdown && @work_items.empty?
-        @work_items.shift
+      def running?
+        !!@dwp_running
       end
-      @on_pop_callbacks.each(&:call)
-      item
-    end
 
-
-
-
+      def shutdown?
+        !self.running?
+      end
 
-
-
-
+      def dwp_push(*args)
+        raise "Unable to add work when shut down" if self.shutdown?
+        push!(*args)
+      end
+
+      def dwp_pop
+        return if self.shutdown?
+        pop!
+      end
+
+      private
+
+      # overwrite this method to add custom start logic; this is a no-op by
+      # default because we don't require a queue to have custom start logic
+      def start!; end
+
+      # overwrite this method to add custom shutdown logic; this is a no-op by
+      # default because we don't require a queue to have custom shutdown logic;
+      # more than likely you will want to use this to "wakeup" worker threads
+      # that are sleeping waiting to pop work from the queue (see the default
+      # queue for an example using mutexes and condition variables)
+      def shutdown!; end
+
+      # overwrite this method to add custom push logic; this doesn't have to be
+      # overwritten but if it isn't, you will not be able to add work items using
+      # the queue (and the `add_work` method on `DatWorkerPool` will not work);
+      # more than likely this should add work to the queue and "signal" the
+      # workers so they know to process it (see the default queue for an example
+      # using mutexes and condition variables)
+      def push!(*args)
+        raise NotImplementedError
+      end
+
+      # overwrite this method to add custom pop logic; this has to be overwritten
+      # or the workers will not be able to get work that needs to be processed;
+      # this is intended to sleep the worker threads (see the default queue for an
+      # example using mutexes and condition variables); if this returns `nil` the
+      # workers will ignore it and go back to sleep, `nil` is not a valid work
+      # item to process; also check if the queue is shutdown when waking up
+      # workers, you probably don't want to hand-off work while everything is
+      # shutting down
+      def pop!
+        raise NotImplementedError
+      end
 
-    def shutdown?
-      @shutdown
     end
 
   end
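The new `DatWorkerPool::Queue` mixin turns the old concrete queue into a template: the public `dwp_start`/`dwp_shutdown`/`dwp_push`/`dwp_pop` methods manage the running flag and delegate to the private `start!`/`shutdown!`/`push!`/`pop!` hooks. A hedged sketch of a custom queue built on that contract follows; the class name and its internals are illustrative, only the hook methods come from the diff above (the bundled DefaultQueue plays the same role in the gem):

require 'thread'
require 'dat-worker-pool/queue'

class SignallingQueue
  include DatWorkerPool::Queue

  def initialize
    @items = []
    @mutex = Mutex.new
    @cond  = ConditionVariable.new
  end

  # exposes pending work, as the mixin's `work_items` contract asks for
  def work_items
    @mutex.synchronize{ @items.dup }
  end

  private

  # wake sleeping workers so they can notice the shutdown flag
  def shutdown!
    @mutex.synchronize{ @cond.broadcast }
  end

  # add work and signal one sleeping worker
  def push!(work_item)
    @mutex.synchronize do
      @items << work_item
      @cond.signal
    end
  end

  # sleep until work arrives or the queue shuts down; returning nil just
  # makes the popping worker loop around again
  def pop!
    @mutex.synchronize do
      @cond.wait(@mutex) while running? && @items.empty?
      @items.shift
    end
  end
end

Workers only ever call the public wrappers: `queue.dwp_push(:job)` raises once the queue has been signalled to shut down, and `queue.dwp_pop` returns nil immediately after shutdown instead of blocking.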
data/lib/dat-worker-pool/runner.rb
@@ -0,0 +1,196 @@
+require 'set'
+require 'system_timer'
+require 'dat-worker-pool'
+require 'dat-worker-pool/locked_object'
+
+class DatWorkerPool
+
+  class Runner
+
+    attr_reader :num_workers, :worker_class, :worker_params
+    attr_reader :logger_proxy, :queue
+
+    def initialize(args)
+      @num_workers = args[:num_workers]
+      @queue = args[:queue]
+      @worker_class = args[:worker_class]
+      @worker_params = args[:worker_params]
+
+      @logger_proxy = if args[:logger]
+        LoggerProxy.new(args[:logger])
+      else
+        NullLoggerProxy.new
+      end
+
+      @workers = LockedArray.new
+      @available_workers = LockedSet.new
+    end
+
+    def workers
+      @workers.values
+    end
+
+    def start
+      log{ "Starting worker pool with #{@num_workers} worker(s)" }
+      @queue.dwp_start
+      @num_workers.times.each{ |n| build_worker(n + 1) }
+    end
+
+    # the workers should be told to shutdown before the queue because the queue
+    # shutdown will wake them up; a worker popping on a shutdown queue will
+    # always get `nil` back and will loop as fast as allowed until its shutdown
+    # flag is flipped, so shutting down the workers then the queue keeps them
+    # from looping as fast as possible; if any kind of standard error or the
+    # expected timeout error (assuming the workers take too long to shutdown) is
+    # raised, force a shutdown; this ensures we shutdown as best as possible
+    # instead of letting ruby kill the threads when the process exits;
+    # non-timeout errors will be re-raised so they can be caught and handled (or
+    # shown when the process exits)
+    def shutdown(timeout = nil, backtrace = nil)
+      log do
+        timeout_message = timeout ? "#{timeout} second(s)" : "none"
+        "Shutting down worker pool (timeout: #{timeout_message})"
+      end
+      begin
+        @workers.with_lock{ |m, ws| ws.each(&:dwp_signal_shutdown) }
+        @queue.dwp_signal_shutdown
+        OptionalTimeout.new(timeout) do
+          @queue.dwp_shutdown
+          wait_for_workers_to_shutdown
+        end
+      rescue StandardError => exception
+        force_workers_to_shutdown(exception, timeout, backtrace)
+        raise exception
+      rescue TimeoutInterruptError => exception
+        force_workers_to_shutdown(exception, timeout, backtrace)
+      end
+      log{ "Finished shutting down" }
+    end
+
+    def available_worker_count
+      @available_workers.size
+    end
+
+    def worker_available?
+      self.available_worker_count > 0
+    end
+
+    def make_worker_available(worker)
+      @available_workers.add(worker.object_id)
+    end
+
+    def make_worker_unavailable(worker)
+      @available_workers.remove(worker.object_id)
+    end
+
+    def log(&message_block)
+      @logger_proxy.runner_log(&message_block)
+    end
+
+    def worker_log(worker, &message_block)
+      @logger_proxy.worker_log(worker, &message_block)
+    end
+
+    private
+
+    def build_worker(number)
+      @workers.push(@worker_class.new(self, @queue, number).tap(&:dwp_start))
+    end
+
+    # use an until loop instead of each to join all the workers, while we are
+    # joining a worker a different worker can shutdown and remove itself from
+    # the `@workers` array; rescue when joining the workers, ruby will raise any
+    # exceptions that aren't handled by a thread when its joined, this allows
+    # all the workers to be joined
+    def wait_for_workers_to_shutdown
+      log{ "Waiting for #{@workers.size} workers to shutdown" }
+      while !(worker = @workers.first).nil?
+        begin
+          worker.dwp_join
+        rescue StandardError => exception
+          log{ "An error occurred while waiting for worker " \
+               "to shutdown ##{worker.dwp_number}" }
+        end
+        remove_worker(worker)
+        log{ "Worker ##{worker.dwp_number} shutdown" }
+      end
+    end
+
+    # use an until loop instead of each to join all the workers, while we are
+    # joining a worker a different worker can shutdown and remove itself from
+    # the `@workers` array; rescue when joining the workers, ruby will raise any
+    # exceptions that aren't handled by a thread when its joined, this ensures
+    # if the hard shutdown is raised and not rescued (for example, in the
+    # workers ensure), then it won't cause the forced shutdown to end
+    # prematurely
+    def force_workers_to_shutdown(orig_exception, timeout, backtrace)
+      log{ "Forcing #{@workers.size} workers to shutdown" }
+      error = build_forced_shutdown_error(orig_exception, timeout, backtrace)
+      while !(worker = @workers.first).nil?
+        worker.dwp_raise(error)
+        begin
+          worker.dwp_join
+        rescue StandardError => exception
+          log{ "An error occurred while waiting for worker " \
+               "to shutdown ##{worker.dwp_number} (forced)" }
+        rescue ShutdownError
+          # these are expected (because we raised them in the thread) so they
+          # don't need to be logged
+        end
+        remove_worker(worker)
+        log{ "Worker ##{worker.dwp_number} shutdown (forced)" }
+      end
+    end
+
+    # make sure the worker has been removed from the available workers, in case
+    # it errored before it was able to make itself unavailable
+    def remove_worker(worker)
+      self.make_worker_unavailable(worker)
+      @workers.delete(worker)
+    end
+
+    def build_forced_shutdown_error(orig_exception, timeout, backtrace)
+      if orig_exception.kind_of?(TimeoutInterruptError)
+        ShutdownError.new("Timed out shutting down (#{timeout} seconds).").tap do |e|
+          e.set_backtrace(backtrace) if backtrace
+        end
+      else
+        ShutdownError.new("Errored while shutting down: #{orig_exception.inspect}").tap do |e|
+          e.set_backtrace(orig_exception.backtrace)
+        end
+      end
+    end
+
+    # this needs to be an `Interrupt` to be sure we don't accidentally catch it
+    # when rescueing exceptions; in the shutdown methods we rescue any errors
+    # from `worker.join`, this will also rescue the timeout error if its a
+    # standard error and will keep it from doing a forced shutdown
+    TimeoutInterruptError = Class.new(Interrupt)
+
+    module OptionalTimeout
+      def self.new(seconds, &block)
+        if seconds
+          SystemTimer.timeout(seconds, TimeoutInterruptError, &block)
+        else
+          block.call
+        end
+      end
+    end
+
+    class LoggerProxy < Struct.new(:logger)
+      def runner_log(&message_block)
+        self.logger.debug("[DWP] #{message_block.call}")
+      end
+      def worker_log(worker, &message_block)
+        self.logger.debug("[DWP-#{worker.dwp_number}] #{message_block.call}")
+      end
+    end
+
+    class NullLoggerProxy
+      def runner_log(&block); end
+      def worker_log(worker, &block); end
+    end
+
+  end
+
+end
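The comments above spell out the shutdown ordering: signal the workers first, then shut down the queue, all inside an optional timeout, and fall back to a forced shutdown on any error. A small sketch of the `OptionalTimeout` contract the Runner relies on, assuming the `system_timer` gem that runner.rb requires is installed:

require 'dat-worker-pool/runner'

# with a numeric timeout the block runs under SystemTimer.timeout and raises
# TimeoutInterruptError (an Interrupt, so a bare `rescue` won't swallow it),
# which is what pushes Runner#shutdown onto the forced-shutdown path
begin
  DatWorkerPool::Runner::OptionalTimeout.new(1){ sleep 2 }
rescue DatWorkerPool::Runner::TimeoutInterruptError
  puts "took too long, forcing workers to shut down"
end

# with nil there is no timeout; the block simply runs to completion
DatWorkerPool::Runner::OptionalTimeout.new(nil){ sleep 0.1 }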
data/lib/dat-worker-pool/worker.rb
@@ -1,91 +1,270 @@
 require 'thread'
-require 'dat-worker-pool'
+require 'dat-worker-pool/runner'
 
 class DatWorkerPool
 
-
-
-    attr_accessor :on_work, :on_error_callbacks
-    attr_accessor :on_start_callbacks, :on_shutdown_callbacks
-    attr_accessor :on_sleep_callbacks, :on_wakeup_callbacks
-    attr_accessor :before_work_callbacks, :after_work_callbacks
-
-    def initialize(queue)
-      @queue = queue
-      @on_work = proc{ |worker, work_item| }
-      @on_error_callbacks = []
-      @on_start_callbacks = []
-      @on_shutdown_callbacks = []
-      @on_sleep_callbacks = []
-      @on_wakeup_callbacks = []
-      @before_work_callbacks = []
-      @after_work_callbacks = []
-
-      @shutdown = false
-      @thread = nil
-    end
+  module Worker
 
-    def
-
+    def self.included(klass)
+      klass.class_eval do
+        extend ClassMethods
+        include InstanceMethods
+      end
     end
 
-
-      @shutdown = true
-    end
+    module ClassMethods
 
-
-
-
+      def on_start_callbacks; @on_start_callbacks ||= []; end
+      def on_shutdown_callbacks; @on_shutdown_callbacks ||= []; end
+      def on_available_callbacks; @on_available_callbacks ||= []; end
+      def on_unavailable_callbacks; @on_unavailable_callbacks ||= []; end
+      def on_error_callbacks; @on_error_callbacks ||= []; end
+      def before_work_callbacks; @before_work_callbacks ||= []; end
+      def after_work_callbacks; @after_work_callbacks ||= []; end
 
-
-
-
+      def on_start(&block); self.on_start_callbacks << block; end
+      def on_shutdown(&block); self.on_shutdown_callbacks << block; end
+      def on_available(&block); self.on_available_callbacks << block; end
+      def on_unavailable(&block); self.on_unavailable_callbacks << block; end
+      def on_error(&block); self.on_error_callbacks << block; end
+      def before_work(&block); self.before_work_callbacks << block; end
+      def after_work(&block); self.after_work_callbacks << block; end
 
-
-
-
+      def prepend_on_start(&block); self.on_start_callbacks.unshift(block); end
+      def prepend_on_shutdown(&block); self.on_shutdown_callbacks.unshift(block); end
+      def prepend_on_available(&block); self.on_available_callbacks.unshift(block); end
+      def prepend_on_unavailable(&block); self.on_unavailable_callbacks.unshift(block); end
+      def prepend_on_error(&block); self.on_error_callbacks.unshift(block); end
+      def prepend_before_work(&block); self.before_work_callbacks.unshift(block); end
+      def prepend_after_work(&block); self.after_work_callbacks.unshift(block); end
 
-    private
-
-    # * Rescue `ShutdownError` but don't do anything with it. We want to handle
-    # the error but we just want it to cause the worker to exit its work loop.
-    # If the `ShutdownError` isn't rescued, it will be raised when the worker
-    # is joined.
-    def work_loop
-      @on_start_callbacks.each{ |p| p.call(self) }
-      loop do
-        break if @shutdown
-        fetch_and_do_work
-      end
-    rescue ShutdownError
-    ensure
-      @on_shutdown_callbacks.each{ |p| p.call(self) }
-      @thread = nil
     end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    module InstanceMethods
+
+      attr_reader :dwp_number
+
+      def initialize(runner, queue, number)
+        @dwp_runner, @dwp_queue, @dwp_number = runner, queue, number
+        @dwp_running = false
+        @dwp_thread = nil
+      end
+
+      def dwp_start
+        @dwp_running = true
+        @dwp_thread ||= Thread.new{ dwp_work_loop }
+      end
+
+      def dwp_signal_shutdown
+        @dwp_running = false
+      end
+
+      def dwp_running?
+        !!@dwp_running
+      end
+
+      def dwp_shutdown?
+        !self.dwp_running?
+      end
+
+      # this is needed because even if the running flag has been set to false
+      # (meaning the worker has been shutdown) the thread may still be alive
+      # because its `work` is taking a long time or its still trying to shut
+      # down
+      def dwp_thread_alive?
+        !!(@dwp_thread && @dwp_thread.alive?)
+      end
+
+      def dwp_join(*args)
+        @dwp_thread.join(*args) if self.dwp_thread_alive?
+      end
+
+      def dwp_raise(*args)
+        @dwp_thread.raise(*args) if self.dwp_thread_alive?
+      end
+
+      private
+
+      # Helpers
+      def number; @dwp_number; end
+      def params; @dwp_runner.worker_params; end
+      def queue; @dwp_runner.queue; end
+
+      # overwrite this method to add custom work logic; this has to be
+      # overwritten or the workers will not know how to handle a work item
+      def work!(work_item)
+        raise NotImplementedError
+      end
+
+      # rescue `ShutdownError` but re-raise it after calling the on-error
+      # callbacks, this ensures it causes the loop to exit
+      def dwp_work_loop
+        dwp_setup
+        while self.dwp_running?
+          begin
+            if !(work_item = queue.dwp_pop).nil?
+              begin
+                dwp_make_unavailable
+                dwp_work(work_item)
+              rescue ShutdownError => exception
+                dwp_handle_exception(exception, work_item)
+                Thread.current.raise exception
+              rescue StandardError => exception
+                dwp_handle_exception(exception, work_item)
+              ensure
+                dwp_make_available
+              end
+            end
+          rescue StandardError => exception
+            dwp_handle_exception(exception, work_item)
+          end
+        end
+      ensure
+        dwp_teardown
+      end
+
+      def dwp_setup
+        dwp_log{ "Starting" }
+        begin
+          dwp_run_callback 'on_start'
+          dwp_make_available
+        rescue StandardError => exception
+          dwp_handle_exception(exception)
+          Thread.current.raise exception
+        end
+      end
+
+      # this is a separate method so the test runner can call it individually
+      def dwp_make_unavailable
+        @dwp_runner.make_worker_unavailable(self)
+        dwp_run_callback 'on_unavailable'
+        dwp_log{ "Unavailable" }
+      end
+
+      # this is a separate method so the test runner can call it individually
+      def dwp_make_available
+        @dwp_runner.make_worker_available(self)
+        dwp_run_callback 'on_available'
+        dwp_log{ "Available" }
+      end
+
+      # this is a separate method so the test runner can call it individually
+      def dwp_work(work_item)
+        dwp_log{ "Working, item: #{work_item.inspect}" }
+        dwp_run_callback('before_work', work_item)
+        work!(work_item)
+        dwp_run_callback('after_work', work_item)
+      end
+
+      def dwp_teardown
+        begin
+          dwp_make_unavailable
+          dwp_run_callback 'on_shutdown'
+        rescue StandardError => exception
+          dwp_handle_exception(exception)
+        end
+        dwp_log{ "Shutdown" }
+        @dwp_running = false
+        @dwp_thread = nil
+      end
+
+      def dwp_handle_exception(exception, work_item = nil)
+        begin
+          dwp_log_exception(exception)
+          dwp_run_callback('on_error', exception, work_item)
+        rescue StandardError => on_error_exception
+          # errors while running on-error callbacks are logged but otherwise
+          # ignored to keep the worker from crashing, ideally these should be
+          # caught by the on-error callbacks themselves and never get here
+          dwp_log_exception(on_error_exception)
+        end
+      end
+
+      def dwp_run_callback(callback, *args)
+        (self.class.send("#{callback}_callbacks") || []).each do |callback|
+          self.instance_exec(*args, &callback)
+        end
+      end
+
+      def dwp_log(&message_block)
+        @dwp_runner.worker_log(self, &message_block)
+      end
+
+      def dwp_log_exception(exception)
+        dwp_log{ "#{exception.class}: #{exception.message}" }
+        (exception.backtrace || []).each{ |l| dwp_log{ l } }
+      end
 
-    def do_work(work_item)
-      @before_work_callbacks.each{ |p| p.call(self, work_item) }
-      @on_work.call(self, work_item)
-      @after_work_callbacks.each{ |p| p.call(self, work_item) }
     end
 
-
-
+    module TestHelpers
+
+      def test_runner(worker_class, options = nil)
+        TestRunner.new(worker_class, options)
+      end
+
+      class TestRunner
+        attr_reader :worker_class, :worker
+        attr_reader :queue, :dwp_runner
+
+        def initialize(worker_class, options = nil)
+          @worker_class = worker_class
+
+          @queue = options[:queue] || begin
+            require 'dat-worker-pool/default_queue'
+            DatWorkerPool::DefaultQueue.new
+          end
+
+          @dwp_runner = DatWorkerPool::Runner.new({
+            :num_workers => MIN_WORKERS,
+            :logger => options[:logger],
+            :queue => @queue,
+            :worker_class => @worker_class,
+            :worker_params => options[:params]
+          })
+
+          @worker = worker_class.new(@dwp_runner, @queue, 1)
+        end
+
+        def run(work_item)
+          self.start
+          self.make_unavailable
+          self.work(work_item)
+          self.make_available
+          self.shutdown
+        end
+
+        def work(work_item)
+          self.worker.instance_eval{ dwp_work(work_item) }
+        end
+
+        def error(exception, work_item = nil)
+          run_callback('on_error', self.worker, exception, work_item)
+        end
+
+        def start
+          run_callback('on_start', self.worker)
+        end
+
+        def shutdown
+          run_callback('on_shutdown', self.worker)
+        end
+
+        def make_unavailable
+          self.worker.instance_eval{ dwp_make_unavailable }
+        end
+
+        def make_available
+          self.worker.instance_eval{ dwp_make_available }
+        end
+
+        private
+
+        def run_callback(callback, worker, *args)
+          self.worker.instance_eval{ dwp_run_callback(callback, *args) }
+        end
+      end
+
+    end
 
   end
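Putting the pieces together, a worker is now a plain class that mixes in `DatWorkerPool::Worker`, implements `work!`, and registers callbacks at the class level; the `TestHelpers::TestRunner` added at the bottom of the diff drives one such worker through its lifecycle without touching real threads. A hedged end-to-end sketch (the `UpcaseWorker` class and its params hash are illustrative, not from the gem):

require 'dat-worker-pool/worker'
require 'dat-worker-pool/locked_object'

class UpcaseWorker
  include DatWorkerPool::Worker

  # callbacks are instance_exec'd on the worker, so private helpers such as
  # `params` and `dwp_log` are available inside the blocks
  on_error    { |exception, work_item| params[:errors].push([exception, work_item]) }
  before_work { |work_item| dwp_log{ "about to work on #{work_item.inspect}" } }

  def work!(work_item)
    params[:results].push(work_item.to_s.upcase)
  end
end

results = DatWorkerPool::LockedArray.new
errors  = DatWorkerPool::LockedArray.new

test_runner = DatWorkerPool::Worker::TestHelpers::TestRunner.new(UpcaseWorker, {
  :params => { :results => results, :errors => errors }
})
test_runner.run("hello")  # on_start -> unavailable -> work! -> available -> on_shutdown
results.values            # => ["HELLO"]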