chore-core 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +15 -0
- data/LICENSE.txt +20 -0
- data/README.md +260 -0
- data/Rakefile +32 -0
- data/bin/chore +34 -0
- data/chore-core.gemspec +46 -0
- data/lib/chore/cli.rb +232 -0
- data/lib/chore/configuration.rb +13 -0
- data/lib/chore/consumer.rb +52 -0
- data/lib/chore/duplicate_detector.rb +56 -0
- data/lib/chore/fetcher.rb +31 -0
- data/lib/chore/hooks.rb +25 -0
- data/lib/chore/job.rb +103 -0
- data/lib/chore/json_encoder.rb +18 -0
- data/lib/chore/manager.rb +47 -0
- data/lib/chore/publisher.rb +29 -0
- data/lib/chore/queues/filesystem/consumer.rb +128 -0
- data/lib/chore/queues/filesystem/filesystem_queue.rb +49 -0
- data/lib/chore/queues/filesystem/publisher.rb +45 -0
- data/lib/chore/queues/sqs/consumer.rb +121 -0
- data/lib/chore/queues/sqs/publisher.rb +55 -0
- data/lib/chore/queues/sqs.rb +38 -0
- data/lib/chore/railtie.rb +18 -0
- data/lib/chore/signal.rb +175 -0
- data/lib/chore/strategies/consumer/batcher.rb +76 -0
- data/lib/chore/strategies/consumer/single_consumer_strategy.rb +34 -0
- data/lib/chore/strategies/consumer/threaded_consumer_strategy.rb +81 -0
- data/lib/chore/strategies/worker/forked_worker_strategy.rb +221 -0
- data/lib/chore/strategies/worker/single_worker_strategy.rb +39 -0
- data/lib/chore/tasks/queues.task +11 -0
- data/lib/chore/unit_of_work.rb +17 -0
- data/lib/chore/util.rb +18 -0
- data/lib/chore/version.rb +9 -0
- data/lib/chore/worker.rb +117 -0
- data/lib/chore-core.rb +1 -0
- data/lib/chore.rb +218 -0
- data/spec/chore/cli_spec.rb +182 -0
- data/spec/chore/consumer_spec.rb +36 -0
- data/spec/chore/duplicate_detector_spec.rb +62 -0
- data/spec/chore/fetcher_spec.rb +38 -0
- data/spec/chore/hooks_spec.rb +44 -0
- data/spec/chore/job_spec.rb +80 -0
- data/spec/chore/json_encoder_spec.rb +11 -0
- data/spec/chore/manager_spec.rb +39 -0
- data/spec/chore/queues/filesystem/filesystem_consumer_spec.rb +71 -0
- data/spec/chore/queues/sqs/consumer_spec.rb +136 -0
- data/spec/chore/queues/sqs/publisher_spec.rb +74 -0
- data/spec/chore/queues/sqs_spec.rb +37 -0
- data/spec/chore/signal_spec.rb +244 -0
- data/spec/chore/strategies/consumer/batcher_spec.rb +93 -0
- data/spec/chore/strategies/consumer/single_consumer_strategy_spec.rb +23 -0
- data/spec/chore/strategies/consumer/threaded_consumer_strategy_spec.rb +105 -0
- data/spec/chore/strategies/worker/forked_worker_strategy_spec.rb +281 -0
- data/spec/chore/strategies/worker/single_worker_strategy_spec.rb +36 -0
- data/spec/chore/worker_spec.rb +134 -0
- data/spec/chore_spec.rb +108 -0
- data/spec/spec_helper.rb +58 -0
- data/spec/test_job.rb +7 -0
- metadata +194 -0
data/lib/chore/strategies/worker/forked_worker_strategy.rb
ADDED
@@ -0,0 +1,221 @@
+require 'chore/signal'
+
+module Chore
+  module Strategy
+    class ForkedWorkerStrategy #:nodoc:
+      attr_accessor :workers
+
+      def initialize(manager)
+        @manager = manager
+        @stopped = false
+        @workers = {}
+        @queue = Queue.new
+        Chore.config.num_workers.times { @queue << :worker }
+
+        trap_master_signals
+        monitor_workers
+
+        Chore.run_hooks_for(:before_first_fork)
+      end
+
+      # Start up the worker strategy. In this particular case, what we're doing
+      # is starting up a WorkerListener, so we can talk to the children.
+      def start
+        Chore.logger.debug "Starting up worker strategy: #{self.class.name}"
+      end
+
+      # Stop the workers. The particulars of the implementation here are that we
+      # send a QUIT signal to each child and wait up to the configured shutdown timeout
+      # for it to finish the last job it was working on. If it times out, then we send KILL.
+      # In an ideal world this means that <tt>stop!</tt> is non-destructive in that it
+      # allows each worker to complete its current job before dying.
+      def stop!
+        return if @stopped
+
+        @stopped = true
+        Chore.logger.info { "Manager #{Process.pid} stopping" }
+
+        # Instead of using Process.waitall (which is a blocking operation that can
+        # cause the master process to hang), use a Unicorn style non-blocking
+        # shutdown process.
+        limit = Time.now + Chore.config.shutdown_timeout
+        until workers.empty? || Time.now > limit
+          signal_children("QUIT")
+          sleep(0.1)
+          reap_terminated_workers!
+        end
+
+        if !workers.empty?
+          Chore.logger.error "Timed out waiting for children to terminate. Terminating with prejudice."
+          signal_children("KILL")
+        end
+      end
+
+      # Take a UnitOfWork (or an Array of UnitOfWork) and assign it to a Worker. We only
+      # assign work if there are <tt>workers_available?</tt>.
+      def assign(work)
+        return unless acquire_worker
+
+        begin
+          w = Worker.new(work)
+          Chore.run_hooks_for(:before_fork,w)
+          pid = nil
+          Chore.run_hooks_for(:around_fork,w) do
+            pid = fork do
+              after_fork(w)
+              Chore.run_hooks_for(:within_fork,w) do
+                Chore.run_hooks_for(:after_fork,w)
+                begin
+                  Chore.logger.info("Started worker:#{Time.now}")
+                  w.start
+                  Chore.logger.info("Finished worker:#{Time.now}")
+                ensure
+                  Chore.run_hooks_for(:before_fork_shutdown)
+                  exit!(true)
+                end
+              end #within_fork
+            end #around_fork
+          end
+
+          Chore.logger.debug { "Forked worker #{pid}"}
+          workers[pid] = w
+        rescue => ex
+          Chore.logger.error { "Failed to fork worker: #{ex.message} #{ex.backtrace * "\n"}"}
+          release_worker
+        end
+      end
+
+      private
+
+      def trap_master_signals
+        Signal.trap('CHLD') { reap_terminated_workers! }
+      end
+
+      def trap_child_signals(worker)
+        # Register a new QUIT handler to make the current worker
+        # finish this job, and not complete another one.
+        Signal.trap("INT") { worker.stop! }
+        Signal.trap("QUIT") { worker.stop! }
+        # By design, we do nothing in children on USR1, so we are not re-defining this like we do INT and QUIT
+      end
+
+      def clear_child_signals
+        # Remove handlers from the parent process
+        Signal.reset
+      end
+
+      # Attempts to essentially acquire a lock on a worker. If no workers are
+      # available, then this will block until one is.
+      def acquire_worker
+        result = @queue.pop
+
+        if @stopped
+          # Strategy has stopped since the worker was acquired. If workers are
+          # allowed to run even though the strategy is stopped, this could result
+          # in forks occurring while the CLI is calling +Kernel#exit+ -- which can
+          # cause chore to hang.
+          release_worker
+          nil
+        else
+          result
+        end
+      end
+
+      # Releases the lock on a worker so that another thread can pick it up.
+      def release_worker
+        @queue << :worker
+      end
+
+      # Only call this in the forked child. It resets some things that need fixing up
+      # in the child.
+      def after_fork(worker)
+        # Immediately swap out the process name so that it doesn't look like
+        # the master process
+        procline("Started:#{Time.now}")
+
+        clear_child_signals
+        trap_child_signals(worker)
+
+        # We need to reset the logger after fork. This fixes a longstanding bug
+        # where workers would hang around and never die
+        Chore.logger = nil
+
+        # When we fork, the consumers / publishers need their connections reset. The specifics of this
+        # are queue dependent, and may result in a noop.
+        Chore.config.consumer.reset_connection!
+        Chore.config.publisher.reset_connection! if Chore.config.publisher # It is possible for this to be nil due to configuration woes with chore
+      end
+
+      # Reaps any in-flight workers that have completed. This only relies on
+      # known process ids instead of discovering all child processes from the
+      # OS. By doing this, we avoid running into a tight loop reaping
+      # short-lived forks.
+      def reap_terminated_workers!
+        pids.each do |pid|
+          reaped = false
+          begin
+            reaped = Process.wait(pid, Process::WNOHANG)
+          rescue Errno::ECHILD => ex
+            # Child process has already terminated
+            reaped = true
+          end
+
+          # Clean up / release worker
+          if reaped && workers.delete(pid)
+            release_worker
+            Chore.logger.debug { "Removed finished worker #{pid}"}
+          end
+        end
+      end
+
+      # Ensures that workers don't live past their expiration date. When this
+      # is detected, the workers are kill -9'd.
+      def monitor_workers
+        Thread.new do
+          while !@stopped
+            pids.each do |pid|
+              worker = workers[pid]
+              if worker && worker.expired?
+                messages = worker.work.map(&:message)
+                Chore.logger.error { "Failed to run jobs -- #{messages} -- by #{worker.expires_at}, started at #{worker.started_at}"}
+                Chore.run_hooks_for(:on_failure, {:body => 'Worker fork timed out.', :messages => messages}, TimeoutError.new)
+
+                signal_children('KILL', [pid])
+              end
+            end
+
+            sleep 1
+          end
+        end
+      end
+
+      # Take a snapshot in time of what workers are in flight. Iterating over
+      # the workers collection itself can lead to a hard loop if new items are
+      # constantly added.
+      def pids
+        workers.keys
+      end
+
+      # Wrapper around fork for specs.
+      def fork(&block)
+        Kernel.fork(&block)
+      end
+
+      def procline(str)
+        Chore.logger.info str
+        $0 = "chore-#{Chore::VERSION}:#{str}"
+      end
+
+      def signal_children(sig, pids_to_signal = pids)
+        pids_to_signal.each do |pid|
+          begin
+            Chore.logger.info { "Sending #{sig} to: #{pid}" }
+            Process.kill(sig, pid)
+          rescue Errno::ESRCH
+          end
+        end
+      end
+
+    end
+  end
+end
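Not part of the diff above, but a rough sketch of how this strategy is typically wired up: the config keys and hook names come from chore.rb and from the lifecycle hooks invoked in assign/after_fork above, while MyApp.database is a hypothetical placeholder for application state that must be re-established in the child.

    # Hypothetical initializer -- selects the forked strategy and registers fork hooks.
    Chore.configure do |c|
      c.worker_strategy  = Chore::Strategy::ForkedWorkerStrategy
      c.num_workers      = 4      # seeds the internal :worker queue in initialize
      c.shutdown_timeout = 2 * 60 # how long stop! keeps sending QUIT before KILL
    end

    # Runs once in the master, from initialize, before any child is forked.
    Chore.add_hook(:before_first_fork) { Chore.logger.info "master ready" }

    # Runs inside each child (see after_fork / within_fork above); reconnect per-process state here.
    Chore.add_hook(:after_fork) { |worker| MyApp.database.reconnect! } # MyApp is hypothetical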
data/lib/chore/strategies/worker/single_worker_strategy.rb
ADDED
@@ -0,0 +1,39 @@
+module Chore
+  module Strategy
+
+    # Worker strategy for performing batches of work in a linear fashion. Ideally used for running
+    # Chore jobs locally in a development environment where performance or throughput may not matter.
+    class SingleWorkerStrategy
+
+      attr_reader :worker
+
+      def initialize(manager)
+        @manager = manager
+        @worker = nil
+      end
+
+      # Starts the <tt>SingleWorkerStrategy</tt>. Currently a noop
+      def start;end
+
+      # Stops the <tt>SingleWorkerStrategy</tt> if there is a worker to stop
+      def stop!
+        worker.stop! if worker
+      end
+
+      # Assigns work if there isn't already a worker in progress. Otherwise, is a noop
+      def assign(work)
+        if workers_available?
+          @worker = Worker.new(work)
+          @worker.start
+          @worker = nil
+          true
+        end
+      end
+
+      # Returns true if there is currently no worker
+      def workers_available?
+        @worker.nil?
+      end
+    end
+  end
+end
data/lib/chore/unit_of_work.rb
ADDED
@@ -0,0 +1,17 @@
+module Chore
+  # Simple class to hold job processing information.
+  # Has six attributes:
+  # * +:id+ The queue implementation specific identifier for this message.
+  # * +:queue_name+ The name of the queue the job came from
+  # * +:queue_timeout+ The time (in seconds) before the job will get re-enqueued if not processed
+  # * +:message+ The actual data of the message.
+  # * +:previous_attempts+ The number of times the work has been attempted previously.
+  # * +:consumer+ The consumer instance used to fetch this message. Most queue implementations won't need access to this, but some (RabbitMQ) will. So we
+  #   make sure to pass it along with each message. This instance will be used by the Worker for things like <tt>complete</tt> and <tt>reject</tt>.
+  class UnitOfWork < Struct.new(:id,:queue_name,:queue_timeout,:message,:previous_attempts,:consumer)
+    # The current attempt number for the worker processing this message.
+    def current_attempt
+      previous_attempts + 1
+    end
+  end
+end
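As a small, hypothetical sketch of the struct above (values invented; a real consumer instance would normally occupy the last slot, and MyJob is a made-up class name):

    work = Chore::UnitOfWork.new(
      'msg-123',                      # id, as assigned by the queue backend
      'default',                      # queue_name
      300,                            # queue_timeout in seconds
      '{"class":"MyJob","args":[]}',  # message payload (MyJob is hypothetical)
      0,                              # previous_attempts
      nil                             # consumer, stubbed out for illustration
    )
    work.current_attempt # => 1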
data/lib/chore/util.rb
ADDED
@@ -0,0 +1,18 @@
+module Chore
+
+  # Collection of utilities and helpers used by Chore internally
+  module Util
+
+    # To avoid bringing in all of active_support, we implemented constantize here
+    def constantize(camel_cased_word)
+      names = camel_cased_word.split('::')
+      names.shift if names.empty? || names.first.empty?
+
+      constant = Object
+      names.each do |name|
+        constant = constant.const_defined?(name) ? constant.const_get(name) : constant.const_missing(name)
+      end
+      constant
+    end
+  end
+end
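A minimal sketch of the helper above; Resolver is a made-up class that simply mixes the module in:

    class Resolver
      include Chore::Util
    end

    # Resolves a fully qualified constant name without pulling in ActiveSupport.
    Resolver.new.constantize('Chore::Worker') # => Chore::Worker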
data/lib/chore/worker.rb
ADDED
@@ -0,0 +1,117 @@
+require 'chore/util'
+require 'chore/json_encoder'
+
+module Chore
+  class TimeoutError < StandardError
+  end
+
+  # Worker is one of the core classes in Chore. It's responsible for most of the logic
+  # relating to actually processing a job. A given worker will take an amount of +work+
+  # and then process it all until either the worker is told to stop, or the work is
+  # completed. Worker is completely agnostic to the WorkerStrategy that it was called from.
+  class Worker
+    include Util
+
+    DEFAULT_OPTIONS = { :encoder => JsonEncoder }
+    attr_accessor :options
+    attr_reader :work
+    attr_reader :started_at
+
+    def self.start(work) #:nodoc:
+      self.new(work).start
+    end
+
+    # Create a Worker. Give it an array of work (or single item), and +opts+.
+    # Currently, the only option supported by Worker is +:encoder+ which should match
+    # the +:encoder+ used by the Publisher who created the message.
+    def initialize(work=[],opts={})
+      @stopping = false
+      @started_at = Time.now
+      @work = work
+      @work = [work] unless work.kind_of?(Array)
+      self.options = DEFAULT_OPTIONS.merge(opts)
+    end
+
+    # Whether this worker has existed for longer than it's allowed to
+    def expired?
+      Time.now > expires_at
+    end
+
+    # The time at which this worker expires
+    def expires_at
+      total_timeout = @work.inject(0) {|sum, item| sum += item.queue_timeout}
+      @started_at + total_timeout
+    end
+
+    # The workhorse. Do the work, all of it. This will block for an entirely unspecified amount
+    # of time based on the work to be performed. This will:
+    # * Decode each message.
+    # * Re-ify the messages into actual Job classes.
+    # * Call Job#perform on each job.
+    # * If successful it will call Consumer#complete (using the consumer in the UnitOfWork).
+    # * If unsuccessful it will call the appropriate Hooks based on the type of failure.
+    # * If unsuccessful *and* the maximum number of attempts for the job has been surpassed, it will call
+    #   the permanent failure hooks and Consumer#complete.
+    # * Log the results via the Chore.logger
+    def start
+      @work.each do |item|
+        return if @stopping
+        Chore.logger.debug { "Doing: #{item.queue_name} with #{item.message}" }
+        begin
+          start_item(item)
+        rescue => e
+          Chore.logger.error { "Failed to run job for #{item.message} with error: #{e.message} #{e.backtrace * "\n"}" }
+          if item.current_attempt >= Chore.config.max_attempts
+            Chore.run_hooks_for(:on_permanent_failure,item.queue_name,item.message,e)
+            item.consumer.complete(item.id)
+          else
+            Chore.run_hooks_for(:on_failure,item.message,e)
+          end
+        end
+      end
+    end
+
+    # Tell the worker to stop after it completes the current job.
+    def stop!
+      @stopping = true
+    end
+
+    protected
+    def payload_class(message)
+      constantize(message['class'])
+    end
+
+    private
+    def start_item(item)
+      message = decode_job(item.message)
+      klass = payload_class(message)
+      return unless klass.run_hooks_for(:before_perform,message)
+
+      begin
+        perform_job(klass,message)
+        item.consumer.complete(item.id)
+        klass.run_hooks_for(:after_perform,message)
+      rescue Job::RejectMessageException
+        item.consumer.reject(item.id)
+        Chore.logger.error { "Failed to run job for #{item.message} with error: Job raised a RejectMessageException" }
+        klass.run_hooks_for(:on_rejected, message)
+      rescue => e
+        Chore.logger.error { "Failed to run job #{item.message} with error: #{e.message} at #{e.backtrace * "\n"}" }
+        if item.current_attempt >= klass.options[:max_attempts]
+          klass.run_hooks_for(:on_permanent_failure,item.queue_name,message,e)
+          item.consumer.complete(item.id)
+        else
+          klass.run_hooks_for(:on_failure,message,e)
+        end
+      end
+    end
+
+    def perform_job(klass, message)
+      klass.perform(*message['args'])
+    end
+
+    def decode_job(data)
+      options[:encoder].decode(data)
+    end
+  end
+end
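A brief sketch of the expiration arithmetic above, assuming two units of work with stubbed consumers; start is not called here, so no real job classes are needed:

    items = [
      Chore::UnitOfWork.new('a', 'q1', 300, '{}', 0, nil),
      Chore::UnitOfWork.new('b', 'q2', 600, '{}', 0, nil)
    ]
    worker = Chore::Worker.new(items)

    # expires_at sums the queue timeouts on top of started_at.
    worker.expires_at # => worker.started_at + 900
    worker.expired?   # => false until that moment has passed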
data/lib/chore-core.rb
ADDED
@@ -0,0 +1 @@
+require 'chore'
data/lib/chore.rb
ADDED
@@ -0,0 +1,218 @@
+require 'ostruct'
+require 'logger'
+# Require chore files
+require 'chore/version'
+
+require 'chore/unit_of_work'
+require 'chore/configuration'
+require 'chore/cli'
+require 'chore/consumer'
+require 'chore/job'
+require 'chore/json_encoder'
+require 'chore/manager'
+require 'chore/publisher'
+require 'chore/util'
+require 'chore/worker'
+require 'chore/publisher'
+
+# We have a number of things that can live here. I don't want to track
+['queues/**','strategies/**'].each do |p|
+  Dir[File.join(File.dirname(__FILE__),'chore',p,'*.rb')].each {|f| require f}
+end
+
+module Chore #:nodoc:
+  VERSION = Chore::Version::STRING #:nodoc:
+
+  # The default configuration options for Chore.
+  DEFAULT_OPTIONS = {
+    :require => "./",
+    :num_workers => 4,
+    :threads_per_queue => 1,
+    :worker_strategy => Strategy::ForkedWorkerStrategy,
+    :consumer => Queues::SQS::Consumer,
+    :fetcher => Fetcher,
+    :consumer_strategy => Strategy::ThreadedConsumerStrategy,
+    :batch_size => 50,
+    :log_level => Logger::WARN,
+    :log_path => STDOUT,
+    :default_queue_timeout => (12 * 60 * 60), # 12 hours
+    :shutdown_timeout => (2 * 60),
+    :max_attempts => 1.0 / 0.0, # Infinity
+    :dupe_on_cache_failure => false
+  }
+
+  class << self
+    attr_accessor :logger
+  end
+
+  # Access Chore's logger in a memoized fashion. Will create an instance of the logger if
+  # one doesn't already exist.
+  def self.logger
+    @logger ||= Logger.new(config.log_path).tap do |l|
+      l.level = config.log_level
+      l.formatter = lambda do |severity, datetime, progname, msg|
+        "[#{datetime} (#{Process.pid})] #{severity} : #{msg}\n"
+      end
+    end
+  end
+
+  # Reopens any open files. This will match any logfile that was opened by Chore,
+  # Rails, or any other library.
+  def self.reopen_logs
+    # Find any open file in the process
+    files = []
+    ObjectSpace.each_object(File) {|file| files << file unless file.closed?}
+
+    files.each do |file|
+      begin
+        file.reopen(file.path, 'a+')
+        file.sync = true
+      rescue
+        # Can't reopen -- ignore / skip the file
+      end
+    end
+  end
+
+  # Add a global hook for +name+. Will run +&blk+ when the hook is executed.
+  # Global hooks are any hooks that don't have access to an instance of a job.
+  # See the docs on Hooks for a full list of global hooks.
+  #
+  # === Examples
+  #   Chore.add_hook(:after_fork) do
+  #     SomeDB.reset_connection!
+  #   end
+  def self.add_hook(name,&blk)
+    @@hooks ||= {}
+    (@@hooks[name.to_sym] ||= []) << blk
+  end
+
+  # A helper to get a list of all the hooks for a given +name+
+  def self.hooks_for(name)
+    @@hooks ||= {}
+    @@hooks[name.to_sym] || []
+  end
+
+  def self.clear_hooks! #:nodoc:
+    @@hooks = {}
+  end
+
+  # Run the global hooks associated with a particular +name+ passing all +args+ to the registered block.
+  #
+  # == Before / After hooks
+  #
+  # If this is invoked for before / after hooks (i.e. no block is passed), then the
+  # hooks will be invoked in the order in which they're defined.
+  #
+  # For example:
+  #
+  #   add_hook(:before_fork) {|worker| puts 1 }
+  #   add_hook(:before_fork) {|worker| puts 2 }
+  #   add_hook(:before_fork) {|worker| puts 3 }
+  #
+  #   run_hooks_for(:before_fork, worker)
+  #
+  #   # ...will produce the following output
+  #   # => 1
+  #   # => 2
+  #   # => 3
+  #
+  # == Around hooks
+  #
+  # If this is invoked for around hooks (i.e. a block is passed), then the hooks
+  # will be invoked in the order in which they're defined, with the passed block
+  # being invoked last after the hooks yield.
+  #
+  # For example:
+  #
+  #   add_hook(:around_fork) {|worker, &block| puts 'before 1'; block.call; puts 'after 1'}
+  #   add_hook(:around_fork) {|worker, &block| puts 'before 2'; block.call; puts 'after 2'}
+  #   add_hook(:around_fork) {|worker, &block| puts 'before 3'; block.call; puts 'after 3'}
+  #
+  #   run_hooks_for(:around_fork, worker) { puts 'block' }
+  #
+  #   # ...will produce the following output
+  #   # => before 1
+  #   # => before 2
+  #   # => before 3
+  #   # => block
+  #   # => after 3
+  #   # => after 2
+  #   # => after 1
+  #
+  # You can imagine the callback order to be U shaped where logic *prior* to yielding
+  # is called in the order it's defined and logic *after* yielding is called in
+  # reverse order. At the bottom of the U is when the block passed into +run_hooks_for+
+  # gets invoked.
+  def self.run_hooks_for(name,*args,&block)
+    if block
+      run_around_hooks_for(name, args, &block)
+    else
+      hooks = self.hooks_for(name)
+      hooks.each {|h| h.call(*args, &block)} unless hooks.nil? || hooks.empty?
+    end
+  end
+
+  class << self
+    private
+    # Runs the global *around* hooks. This is similar to +run_hooks_for+ except it
+    # passes a block into each hook.
+    def run_around_hooks_for(name, args, index = 0, &block)
+      hooks = self.hooks_for(name)
+
+      if hook = hooks[index]
+        hook.call(*args) do
+          # Once the hook yields, call the next one
+          run_around_hooks_for(name, args, index + 1, &block)
+        end
+      else
+        # There are no more hooks: call the block passed into +run_hooks_for+.
+        # After this is called, the hooks will then execute their logic after the
+        # yield in reverse order.
+        block.call
+      end
+    end
+  end
+
+  # Configure global chore options. Takes a hash for +opts+.
+  # This includes things like the current Worker Strategy (+:worker_strategy+), the default Consumer (+:consumer+), and the default Consumer Strategy (+:consumer_strategy+).
+  # It's safe to call multiple times (will merge the new config into the old).
+  # This is used by the command line parsing code to set up Chore.
+  # If a +block+ is given, <tt>configure</tt> will yield the config object, so you can set options directly.
+  # === Examples
+  #   Chore.configure({:worker_strategy => Chore::Strategy::ForkedWorkerStrategy})
+  #
+  #   Chore.configure do |c|
+  #     c.consumer = Chore::Queues::SQS::Consumer
+  #     c.batch_size = 50
+  #   end
+  def self.configure(opts={})
+    @config = (@config ? @config.merge_hash(opts) : Chore::Configuration.new(DEFAULT_OPTIONS.merge(opts)))
+    yield @config if block_given?
+    @config
+  end
+
+  # Return the current Chore configuration as specified by <tt>configure</tt>. You can chain config options off of this to
+  # get access to current config data.
+  # === Examples
+  #   puts Chore.config.num_workers
+  def self.config
+    @config ||= self.configure
+  end
+
+  # Helper flag for rails/web app chore initializers to use so that chore does not re-load itself during requirement loading
+  def self.configuring?
+    @configuring ||= false
+  end
+
+  # Setter for chore to indicate that it's in the middle of configuring itself
+  def self.configuring=(value)
+    @configuring = value
+  end
+
+  # List of queue_names as configured via Chore::Job including their prefix, if set.
+  def self.prefixed_queue_names
+    Chore::Job.job_classes.collect {|klass| c = klass.constantize; c.prefixed_queue_name}
+  end
+end
+
+require 'chore/railtie' if defined?(Rails)