qs 0.5.0 → 0.6.0

data/Gemfile CHANGED
@@ -2,8 +2,8 @@ source 'https://rubygems.org'
 
 gemspec
 
-gem 'rake'
-gem 'pry', "~> 0.9.0"
+gem 'rake', "~> 10.4.0"
+gem 'pry', "~> 0.9.0"
 
 platform :ruby_18 do
   gem 'json', '~> 1.8'
data/bench/report.rb CHANGED
@@ -4,7 +4,10 @@ require 'bench/setup'
 
 class BenchRunner
 
-  TIME_MODIFIER = 10 ** 4 # 4 decimal places
+  BUNDLE_EXEC = "bundle exec --keep-file-descriptors".freeze
+  RUN_QS_BENCH_QUEUE = "#{BUNDLE_EXEC} ./bin/qs bench/config.qs".freeze
+  RUN_QS_DISPATCHER = "#{BUNDLE_EXEC} ./bin/qs bench/dispatcher.qs".freeze
+  TIME_MODIFIER = 10 ** 4 # 4 decimal places
 
   def initialize
     output_file_path = if ENV['OUTPUT_FILE']
@@ -24,6 +27,13 @@ class BenchRunner
     @event_params = { 'size' => 100_000 }
 
     @progress_reader, @progress_writer = IO.pipe
+    @run_qs_scmd_opts = {
+      :env => {
+        'BENCH_REPORT' => 'yes',
+        'BENCH_PROGRESS_IO' => @progress_writer.fileno.to_s
+      },
+      :options => { @progress_writer => @progress_writer }
+    }
 
     @results = {}
   end
@@ -43,12 +53,12 @@ class BenchRunner
 
     size = @results.values.map(&:size).max
     output "\n", false
-    output "Enqueueing #{@number_of_jobs} Jobs Time: #{@results[:enqueueing_jobs].rjust(size)}"
-    output "Running #{@number_of_jobs} Jobs Time: #{@results[:running_jobs].rjust(size)}"
+    output "Enqueueing #{@number_of_jobs} Jobs Time: #{@results[:enqueueing_jobs].rjust(size)}s"
+    output "Running #{@number_of_jobs} Jobs Time: #{@results[:running_jobs].rjust(size)}s"
 
     output "\n", false
-    output "Publishing #{@number_of_events} Events Time: #{@results[:publishing_events].rjust(size)}"
-    output "Running #{@number_of_events} Events Time: #{@results[:running_events].rjust(size)}"
+    output "Publishing #{@number_of_events} Events Time: #{@results[:publishing_events].rjust(size)}s"
+    output "Running #{@number_of_events} Events Time: #{@results[:running_events].rjust(size)}s"
 
     output "\n"
     output "Done running benchmark report"
@@ -69,11 +79,7 @@ class BenchRunner
   end
 
   def benchmark_running_jobs
-    cmd_str = "bundle exec ./bin/qs bench/config.qs"
-    cmd = Scmd.new(cmd_str, {
-      'BENCH_REPORT' => 'yes',
-      'BENCH_PROGRESS_IO' => @progress_writer.fileno
-    })
+    cmd = Scmd.new(RUN_QS_BENCH_QUEUE, @run_qs_scmd_opts)
 
     output "Running jobs"
     begin
@@ -113,17 +119,8 @@ class BenchRunner
   end
 
   def benchmark_running_events
-    bench_queue_cmd_str = "bundle exec ./bin/qs bench/config.qs"
-    bench_queue_cmd = Scmd.new(bench_queue_cmd_str, {
-      'BENCH_REPORT' => 'yes',
-      'BENCH_PROGRESS_IO' => @progress_writer.fileno
-    })
-
-    dispatcher_queue_cmd_str = "bundle exec ./bin/qs bench/dispatcher.qs"
-    dispatcher_queue_cmd = Scmd.new(dispatcher_queue_cmd_str, {
-      'BENCH_REPORT' => 'yes',
-      'BENCH_PROGRESS_IO' => @progress_writer.fileno
-    })
+    bench_queue_cmd = Scmd.new(RUN_QS_BENCH_QUEUE, @run_qs_scmd_opts)
+    dispatcher_queue_cmd = Scmd.new(RUN_QS_DISPATCHER, @run_qs_scmd_opts)
 
     output "Running events"
     begin
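
The report.rb changes above center on how the child `qs` processes get the progress pipe: the writer's descriptor number is passed as a string through the `BENCH_PROGRESS_IO` env var (env values must be strings, hence the new `.to_s`), and the `:options => { @progress_writer => @progress_writer }` entry is a spawn-style redirection that keeps that descriptor open in the child, which is also why the commands now run under `bundle exec --keep-file-descriptors` so bundler doesn't close it either. A minimal sketch of the same pattern with plain `Process.spawn` (assuming Scmd forwards `:env` and `:options` to spawn; the child script name is hypothetical):

    # sketch: hand one end of an IO.pipe to a child process by fd number
    reader, writer = IO.pipe

    pid = Process.spawn(
      { 'BENCH_PROGRESS_IO' => writer.fileno.to_s }, # env values must be strings
      'ruby child.rb',                               # hypothetical child script
      writer => writer                               # map the fd to itself so the child inherits it
    )
    writer.close # drop the parent's copy; the child still holds its end

    # the child would reopen the fd and write progress dots to it, e.g.:
    #   progress = IO.for_fd(ENV['BENCH_PROGRESS_IO'].to_i)
    #   progress.write('.')
    puts reader.read
    Process.wait(pid)
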
data/bench/report.txt CHANGED
@@ -9,10 +9,10 @@ Publishing events
 Running events
 ....................................................................................................
 
-Enqueueing 10000 Jobs Time: 1.6858
-Running 10000 Jobs Time: 11.3011
+Enqueueing 10000 Jobs Time: 1.7498s
+Running 10000 Jobs Time: 13.0530s
 
-Publishing 10000 Events Time: 2.4596
-Running 10000 Events Time: 16.0522
+Publishing 10000 Events Time: 2.6319s
+Running 10000 Events Time: 17.8165s
 
 Done running benchmark report
data/lib/qs/daemon.rb CHANGED
@@ -1,4 +1,5 @@
 require 'dat-worker-pool'
+require 'much-plugin'
 require 'ns-options'
 require 'pathname'
 require 'system_timer'
@@ -6,24 +7,22 @@ require 'thread'
 
 require 'qs'
 require 'qs/client'
 require 'qs/daemon_data'
-require 'qs/io_pipe'
 require 'qs/logger'
-require 'qs/payload_handler'
 require 'qs/queue_item'
+require 'qs/worker'
 
 module Qs
 
   module Daemon
+    include MuchPlugin
 
     InvalidError = Class.new(ArgumentError)
 
     SIGNAL = '.'.freeze
 
-    def self.included(klass)
-      klass.class_eval do
-        extend ClassMethods
-        include InstanceMethods
-      end
+    plugin_included do
+      extend ClassMethods
+      include InstanceMethods
     end
 
     module InstanceMethods
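
`Qs::Daemon` (and `Qs::EventHandler` further down) now build their mixin hooks with the new much-plugin dependency instead of a hand-rolled `self.included`. `MuchPlugin`'s `plugin_included` block is evaluated on the including class, and the gem guards against running the hooks twice if the plugin is included more than once. From the app side nothing changes (the daemon class name here is hypothetical):

    require 'qs'

    class AppDaemon
      # including the plugin runs the plugin_included block above, so the
      # class still gets `extend ClassMethods` / `include InstanceMethods`,
      # the same net effect as the old self.included hook
      include Qs::Daemon
    end
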
@@ -31,29 +30,39 @@ module Qs
       attr_reader :daemon_data, :logger
       attr_reader :signals_redis_key, :queue_redis_keys
 
-      # * Set the size of the client to the max workers + 1. This ensures we
-      #   have 1 connection for fetching work from redis and at least 1
-      #   connection for each worker to requeue its message when hard-shutdown.
+      # set the size of the client to the num workers + 1, this ensures we have
+      # 1 connection for fetching work from redis and at least 1 connection for
+      # each worker to requeue its message when hard-shutdown
       def initialize
         self.class.configuration.validate!
         Qs.init
         @daemon_data = DaemonData.new(self.class.configuration.to_hash)
-        @logger = @daemon_data.logger
+        @logger      = @daemon_data.logger
 
         @client = QsClient.new(Qs.redis_config.merge({
           :timeout => 1,
-          :size => self.daemon_data.max_workers + 1
+          :size => self.daemon_data.num_workers + 1
         }))
         @queue_redis_keys = self.daemon_data.queue_redis_keys
 
-        @work_loop_thread = nil
-        @worker_pool = nil
-
         @signals_redis_key = "signals:#{@daemon_data.name}-" \
                              "#{Socket.gethostname}-#{::Process.pid}"
 
-        @worker_available_io = IOPipe.new
-        @signal = Signal.new(:stop)
+        @worker_available = WorkerAvailable.new
+
+        @worker_pool = DatWorkerPool.new(self.daemon_data.worker_class, {
+          :num_workers => self.daemon_data.num_workers,
+          :logger => self.daemon_data.dwp_logger,
+          :worker_params => self.daemon_data.worker_params.merge({
+            :qs_daemon_data => self.daemon_data,
+            :qs_client => @client,
+            :qs_worker_available => @worker_available,
+            :qs_logger => @logger
+          })
+        })
+
+        @thread = nil
+        @state = State.new(:stop)
       rescue InvalidError => exception
         exception.set_backtrace(caller)
         raise exception
@@ -72,158 +81,98 @@ module Qs
       end
 
       def running?
-        !!(@work_loop_thread && @work_loop_thread.alive?)
+        !!(@thread && @thread.alive?)
       end
 
-      # * Ping redis to check that it can communicate with redis before running,
-      #   this is friendlier than starting and continously erroring because it
-      #   can't dequeue.
+      # ping to check that it can communicate with redis before running, this is
+      # friendlier than starting and continously erroring because it can't
+      # dequeue
       def start
         @client.ping
-        @signal.set :start
-        @work_loop_thread ||= Thread.new{ work_loop }
+        @state.set :run
+        @thread ||= Thread.new{ work_loop }
       end
 
       def stop(wait = false)
         return unless self.running?
-        @signal.set :stop
-        wakeup_work_loop_thread
+        @state.set :stop
+        wakeup_thread
         wait_for_shutdown if wait
       end
 
       def halt(wait = false)
         return unless self.running?
-        @signal.set :halt
-        wakeup_work_loop_thread
+        @state.set :halt
+        wakeup_thread
         wait_for_shutdown if wait
       end
 
       private
 
-      def process(queue_item)
-        Qs::PayloadHandler.new(self.daemon_data, queue_item).run
-      end
-
       def work_loop
-        log "Starting work loop", :debug
-        setup_redis_and_ios
-        @worker_pool = build_worker_pool
-        process_inputs while @signal.start?
-        log "Stopping work loop", :debug
+        setup
+        fetch_messages while @state.run?
       rescue StandardError => exception
-        @signal.set :stop
+        @state.set :stop
         log "Error occurred while running the daemon, exiting", :error
         log "#{exception.class}: #{exception.message}", :error
-        log exception.backtrace.join("\n"), :error
+        (exception.backtrace || []).each{ |l| log(l, :error) }
       ensure
-        shutdown_worker_pool
-        @worker_available_io.teardown
-        @work_loop_thread = nil
-        log "Stopped work loop", :debug
+        teardown
       end
 
-      def setup_redis_and_ios
-        # clear any signals that are already on the signals redis list
+      # clear any signals that are already on the signals list in redis
+      def setup
         @client.clear(self.signals_redis_key)
-        @worker_available_io.setup
-      end
-
-      def build_worker_pool
-        wp = DatWorkerPool.new(
-          self.daemon_data.min_workers,
-          self.daemon_data.max_workers
-        ){ |queue_item| process(queue_item) }
-
-        # add internal callbacks
-        wp.on_worker_error do |worker, exception, queue_item|
-          handle_worker_exception(exception, queue_item)
+        @worker_pool.start
+      end
+
+      # shuffle the queue redis keys to avoid queue starvation, redis will pull
+      # messages off queues in the order they are passed to the command, by
+      # shuffling we ensure they are randomly ordered so every queue should get
+      # a chance; use 0 for the brpop timeout which means block indefinitely;
+      # rescue runtime errors so the daemon thread doesn't fail if redis is
+      # temporarily down, sleep for a second to keep the thread from thrashing
+      # by repeatedly erroring if redis is down
+      def fetch_messages
+        if !@worker_pool.worker_available? && @state.run?
+          @worker_available.wait
         end
-        wp.on_worker_sleep{ @worker_available_io.write(SIGNAL) }
-
-        # add any configured callbacks
-        self.daemon_data.worker_start_procs.each{ |cb| wp.on_worker_start(&cb) }
-        self.daemon_data.worker_shutdown_procs.each{ |cb| wp.on_worker_shutdown(&cb) }
-        self.daemon_data.worker_sleep_procs.each{ |cb| wp.on_worker_sleep(&cb) }
-        self.daemon_data.worker_wakeup_procs.each{ |cb| wp.on_worker_wakeup(&cb) }
-
-        wp.start
-        wp
-      end
-
-      # * Shuffle the queue redis keys to avoid queue starvation. Redis will
-      #   pull messages off queues in the order they are passed to the command,
-      #   by shuffling we ensure they are randomly ordered so every queue should
-      #   get a chance.
-      # * Use 0 for the brpop timeout which means block indefinitely.
-      # * Rescue runtime errors so the daemon thread doesn't fail if redis is
-      #   temporarily down. Sleep for a second to keep the thread from thrashing
-      #   by repeatedly erroring if redis is down.
-      def process_inputs
-        wait_for_available_worker
-        return unless @worker_pool.worker_available? && @signal.start?
+        return unless @worker_pool.worker_available? && @state.run?
 
         begin
           args = [self.signals_redis_key, self.queue_redis_keys.shuffle, 0].flatten
           redis_key, encoded_payload = @client.block_dequeue(*args)
           if redis_key != @signals_redis_key
-            @worker_pool.add_work(QueueItem.new(redis_key, encoded_payload))
+            @worker_pool.push(QueueItem.new(redis_key, encoded_payload))
          end
        rescue RuntimeError => exception
-          log "Error dequeueing #{exception.message.inspect}", :error
-          log exception.backtrace.join("\n"), :error
+          log "Error occurred while dequeueing", :error
+          log "#{exception.class}: #{exception.message}", :error
+          (exception.backtrace || []).each{ |l| log(l, :error) }
          sleep 1
        end
      end
 
-      def wait_for_available_worker
-        if !@worker_pool.worker_available? && @signal.start?
-          @worker_available_io.wait
-          @worker_available_io.read
-        end
-      end
-
-      def shutdown_worker_pool
-        return unless @worker_pool
-        timeout = @signal.stop? ? self.daemon_data.shutdown_timeout : 0
-        if timeout
-          log "Shutting down, waiting up to #{timeout} seconds for work to finish"
-        else
-          log "Shutting down, waiting for work to finish"
-        end
+      def teardown
+        timeout = @state.halt? ? 0 : self.daemon_data.shutdown_timeout
         @worker_pool.shutdown(timeout)
+
         log "Requeueing #{@worker_pool.work_items.size} message(s)"
-        @worker_pool.work_items.each do |ri|
-          @client.prepend(ri.queue_redis_key, ri.encoded_payload)
+        @worker_pool.work_items.each do |qi|
+          @client.prepend(qi.queue_redis_key, qi.encoded_payload)
         end
+      ensure
+        @thread = nil
       end
 
-      def wait_for_shutdown
-        @work_loop_thread.join if @work_loop_thread
+      def wakeup_thread
+        @client.append(self.signals_redis_key, SIGNAL)
+        @worker_available.signal
       end
 
-      def wakeup_work_loop_thread
-        @client.append(self.signals_redis_key, SIGNAL)
-        @worker_available_io.write(SIGNAL)
-      end
-
-      # * This only catches errors that happen outside of running the payload
-      #   handler. The only known use-case for this is dat worker pools
-      #   hard-shutdown errors.
-      # * If there isn't a queue item (this can happen when an idle worker is
-      #   being forced to exit) then we don't need to do anything.
-      # * If we never started processing the queue item, its safe to requeue it.
-      #   Otherwise it happened while processing so the payload handler caught
-      #   it or it happened after the payload handler which we don't care about.
-      def handle_worker_exception(exception, queue_item)
-        return if queue_item.nil?
-        if !queue_item.started
-          log "Worker error, requeueing message because it hasn't started", :error
-          @client.prepend(queue_item.queue_redis_key, queue_item.encoded_payload)
-        else
-          log "Worker error after message was processed, ignoring", :error
-        end
-        log "#{exception.class}: #{exception.message}", :error
-        log exception.backtrace.join("\n"), :error
+      def wait_for_shutdown
+        @thread.join if @thread
       end
 
       def log(message, level = :info)
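
Note that `fetch_messages` (formerly `process_inputs`) still blocks on a single `block_dequeue` (a redis `brpop`) spanning the daemon's private signals key plus the shuffled queue keys, which is how `wakeup_thread` can unblock a daemon that is parked on empty queues: it just appends the one-byte `SIGNAL` to the signals key. A standalone sketch of that wake-up trick with the redis gem (key names are hypothetical):

    require 'redis'

    redis = Redis.new

    # from another thread: push anything onto the signals key to wake the
    # daemon without enqueueing real work (qs appends SIGNAL, a '.')
    Thread.new do
      sleep 1
      Redis.new.lpush('signals:app-daemon', '.')
    end

    # the work loop blocks across the signals key and every queue key;
    # :timeout => 0 means block indefinitely (any pushed element unblocks
    # the call, whichever end of the list it lands on)
    key, payload = redis.brpop('signals:app-daemon', 'queues:app-low', :timeout => 0)
    key #=> "signals:app-daemon" when woken by the signal
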
@@ -246,34 +195,20 @@ module Qs
         self.configuration.pid_file(*args)
       end
 
-      def min_workers(*args)
-        self.configuration.min_workers(*args)
-      end
-
-      def max_workers(*args)
-        self.configuration.max_workers(*args)
-      end
-
-      def workers(*args)
-        self.min_workers(*args)
-        self.max_workers(*args)
-      end
-
-      def on_worker_start(&block)
-        self.configuration.worker_start_procs << block
-      end
-
-      def on_worker_shutdown(&block)
-        self.configuration.worker_shutdown_procs << block
+      def worker_class(new_worker_class = nil)
+        self.configuration.worker_class = new_worker_class if new_worker_class
+        self.configuration.worker_class
       end
 
-      def on_worker_sleep(&block)
-        self.configuration.worker_sleep_procs << block
+      def worker_params(new_worker_params = nil)
+        self.configuration.worker_params = new_worker_params if new_worker_params
+        self.configuration.worker_params
       end
 
-      def on_worker_wakeup(&block)
-        self.configuration.worker_wakeup_procs << block
+      def num_workers(*args)
+        self.configuration.num_workers(*args)
       end
+      alias :workers :num_workers
 
       def verbose_logging(*args)
         self.configuration.verbose_logging(*args)
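
The whole `min_workers`/`max_workers`/`on_worker_*` DSL collapses into three methods: a single `num_workers` count (`workers` survives as an alias), plus `worker_class` and `worker_params` for behavior that used to live in callback procs. A hedged sketch of the new class-level configuration (class names are hypothetical; per the `validate!` change below, the worker class must include `Qs::Worker`):

    class AppWorker
      include Qs::Worker # required by validate!; DefaultWorker is used when unset
    end

    class AppDaemon
      include Qs::Daemon

      num_workers 2                    # `workers 2` is an alias
      worker_class  AppWorker
      worker_params :tag => 'reports'  # merged with the :qs_* params at initialize
    end
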
@@ -307,28 +242,24 @@ module Qs
       option :name, String, :required => true
       option :pid_file, Pathname
 
-      option :min_workers, Integer, :default => 1
-      option :max_workers, Integer, :default => 4
+      option :num_workers, Integer, :default => 4
 
       option :verbose_logging, :default => true
       option :logger, :default => proc{ Qs::NullLogger.new }
 
       option :shutdown_timeout
 
-      attr_accessor :process_label
       attr_accessor :init_procs, :error_procs
+      attr_accessor :worker_class, :worker_params
       attr_accessor :queues
-      attr_reader :worker_start_procs, :worker_shutdown_procs
-      attr_reader :worker_sleep_procs, :worker_wakeup_procs
 
       def initialize(values = nil)
         super(values)
-        @process_label = !(v = ENV['QS_PROCESS_LABEL'].to_s).empty? ? v : self.name
         @init_procs, @error_procs = [], []
-        @worker_start_procs, @worker_shutdown_procs = [], []
-        @worker_sleep_procs, @worker_wakeup_procs = [], []
+        @worker_class  = DefaultWorker
+        @worker_params = nil
         @queues = []
-        @valid = nil
+        @valid  = nil
       end
 
       def routes
@@ -337,14 +268,11 @@ module Qs
 
       def to_hash
         super.merge({
-          :process_label         => self.process_label,
-          :error_procs           => self.error_procs,
-          :worker_start_procs    => self.worker_start_procs,
-          :worker_shutdown_procs => self.worker_shutdown_procs,
-          :worker_sleep_procs    => self.worker_sleep_procs,
-          :worker_wakeup_procs   => self.worker_wakeup_procs,
-          :routes                => self.routes,
-          :queue_redis_keys      => self.queues.map(&:redis_key)
+          :error_procs      => self.error_procs,
+          :worker_class     => self.worker_class,
+          :worker_params    => self.worker_params,
+          :routes           => self.routes,
+          :queue_redis_keys => self.queues.map(&:redis_key)
         })
       end
 
@@ -358,32 +286,30 @@ module Qs
         if self.queues.empty? || !self.required_set?
           raise InvalidError, "a name and queue must be configured"
         end
+        if !self.worker_class.kind_of?(Class) || !self.worker_class.include?(Qs::Worker)
+          raise InvalidError, "worker class must include `#{Qs::Worker}`"
+        end
         self.routes.each(&:validate!)
         @valid = true
       end
     end
 
-    class Signal
-      def initialize(value)
-        @value = value
-        @mutex = Mutex.new
-      end
-
-      def set(value)
-        @mutex.synchronize{ @value = value }
-      end
+    DefaultWorker = Class.new{ include Qs::Worker }
 
-      def start?
-        @mutex.synchronize{ @value == :start }
+    class WorkerAvailable
+      def initialize
+        @mutex = Mutex.new
+        @cond_var = ConditionVariable.new
       end
 
-      def stop?
-        @mutex.synchronize{ @value == :stop }
-      end
+      def wait; @mutex.synchronize{ @cond_var.wait(@mutex) }; end
+      def signal; @mutex.synchronize{ @cond_var.signal }; end
+    end
 
-      def halt?
-        @mutex.synchronize{ @value == :halt }
-      end
+    class State < DatWorkerPool::LockedObject
+      def run?; self.value == :run; end
+      def stop?; self.value == :stop; end
+      def halt?; self.value == :halt; end
     end
 
   end
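
`WorkerAvailable` is what replaces the old `IOPipe` self-pipe: instead of workers writing a byte that the daemon selects on and reads back, the daemon thread parks on a `ConditionVariable` in `fetch_messages` and each worker signals it as it frees up. The handshake in isolation:

    require 'qs/daemon'

    worker_available = Qs::Daemon::WorkerAvailable.new

    daemon_thread = Thread.new do
      worker_available.wait # parks until some worker signals
      puts "a worker is free, fetch the next message"
    end

    sleep 0.1               # let the thread park first; a signal sent
                            # before the wait would be lost
    worker_available.signal # what a worker does when it becomes available
    daemon_thread.join
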
data/lib/qs/daemon_data.rb CHANGED
@@ -7,33 +7,32 @@ module Qs
     # options one time here and memoize their values. This way, we don't pay the
     # NsOptions overhead when reading them while handling a message.
 
-    attr_reader :name, :process_label
-    attr_reader :pid_file
-    attr_reader :min_workers, :max_workers
-    attr_reader :worker_start_procs, :worker_shutdown_procs
-    attr_reader :worker_sleep_procs, :worker_wakeup_procs
-    attr_reader :logger, :verbose_logging
+    attr_reader :name, :process_label, :pid_file
+    attr_reader :worker_class, :worker_params, :num_workers
+    attr_reader :debug, :logger, :dwp_logger, :verbose_logging
     attr_reader :shutdown_timeout
-    attr_reader :error_procs
-    attr_reader :queue_redis_keys, :routes
+    attr_reader :error_procs, :queue_redis_keys, :routes
 
     def initialize(args = nil)
       args ||= {}
-      @name                  = args[:name]
-      @process_label         = args[:process_label]
-      @pid_file              = args[:pid_file]
-      @min_workers           = args[:min_workers]
-      @max_workers           = args[:max_workers]
-      @worker_start_procs    = args[:worker_start_procs]
-      @worker_shutdown_procs = args[:worker_shutdown_procs]
-      @worker_sleep_procs    = args[:worker_sleep_procs]
-      @worker_wakeup_procs   = args[:worker_wakeup_procs]
-      @logger                = args[:logger]
-      @verbose_logging       = !!args[:verbose_logging]
-      @shutdown_timeout      = args[:shutdown_timeout]
-      @error_procs           = args[:error_procs] || []
-      @queue_redis_keys      = args[:queue_redis_keys] || []
-      @routes                = build_routes(args[:routes] || [])
+      @name          = args[:name]
+      @process_label = !(v = ENV['QS_PROCESS_LABEL'].to_s).empty? ? v : args[:name]
+      @pid_file      = args[:pid_file]
+
+      @worker_class  = args[:worker_class]
+      @worker_params = args[:worker_params] || {}
+      @num_workers   = args[:num_workers]
+
+      @debug           = !ENV['QS_DEBUG'].to_s.empty?
+      @logger          = args[:logger]
+      @dwp_logger      = @logger if @debug
+      @verbose_logging = !!args[:verbose_logging]
+
+      @shutdown_timeout = args[:shutdown_timeout]
+
+      @error_procs      = args[:error_procs] || []
+      @queue_redis_keys = args[:queue_redis_keys] || []
+      @routes           = build_routes(args[:routes] || [])
     end
 
     def route_for(route_id)
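
Two behavior notes fall out of this rework: `process_label` now falls back from `QS_PROCESS_LABEL` to the daemon name here rather than in `Configuration#initialize`, and the dat-worker-pool logger (`dwp_logger`) is only wired up when `QS_DEBUG` is set, so the pool's internal logging stays off in normal runs. Roughly (a sketch, assuming a plain stdlib logger):

    require 'logger'
    require 'qs/daemon_data'

    data = Qs::DaemonData.new(:name => 'app-daemon', :logger => Logger.new($stdout))
    data.dwp_logger #=> nil (QS_DEBUG unset, so DatWorkerPool gets no logger)

    ENV['QS_DEBUG'] = '1' # any non-empty value; checked when the data is built
    data = Qs::DaemonData.new(:name => 'app-daemon', :logger => Logger.new($stdout))
    data.dwp_logger #=> the qs logger, passed through to DatWorkerPool
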
data/lib/qs/event_handler.rb CHANGED
@@ -1,14 +1,14 @@
+require 'much-plugin'
 require 'qs/message_handler'
 
 module Qs
 
   module EventHandler
+    include MuchPlugin
 
-    def self.included(klass)
-      klass.class_eval do
-        include Qs::MessageHandler
-        include InstanceMethods
-      end
+    plugin_included do
+      include Qs::MessageHandler
+      include InstanceMethods
     end
 
     module InstanceMethods
@@ -29,6 +29,22 @@ module Qs
 
     end
 
+    module TestHelpers
+
+      def self.included(klass)
+        require 'qs/test_runner'
+      end
+
+      def test_runner(handler_class, args = nil)
+        Qs::EventTestRunner.new(handler_class, args)
+      end
+
+      def test_handler(handler_class, args = nil)
+        test_runner(handler_class, args).handler
+      end
+
+    end
+
   end
 
 end
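
The new `TestHelpers` mirror qs's message-handler test helpers: mixing the module into a test case lazily requires `qs/test_runner` and exposes `test_runner`/`test_handler` for exercising an event handler without a running daemon. A usage sketch (the test framework, handler class, and runner args are assumptions, not from this diff):

    require 'minitest/autorun'
    require 'qs/event_handler'

    class AppEventHandler
      include Qs::EventHandler # hypothetical handler under test
    end

    class AppEventHandlerTest < Minitest::Test
      include Qs::EventHandler::TestHelpers

      def test_builds_a_runner_and_a_handler
        runner  = test_runner(AppEventHandler)  # a Qs::EventTestRunner
        handler = test_handler(AppEventHandler) # shorthand for runner.handler
        refute_nil handler
      end
    end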