rescheduler 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/lib/rescheduler.rb +177 -27
- metadata +4 -4
data/lib/rescheduler.rb
CHANGED
@@ -1,4 +1,5 @@
-require '
+require 'time' # Needed for Time.parse
+require 'multi_json'
 require 'redis'
 
 module Rescheduler
@@ -8,6 +9,10 @@ module Rescheduler
   attr_accessor :config
   self.config = {}
 
+  # Debugging only (since initializers won't reload)
+  self.config[:prefix] = REDIS_PREFIX # This ensures cross DB deployment persistence
+  self.config[:redis] = RedisHelper.redis
+
   #==========================
   # Management routines
   def prefix
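The two added config entries reference REDIS_PREFIX and RedisHelper, which are host-application constants rather than anything this gem defines (the comment marks them as debugging leftovers). Combined with the change to the redis helper further down (@config[:redis] takes precedence over :redis_connection), a host application can inject its own connection. A minimal sketch, assuming the module-level Rescheduler.config accessor shown above; the prefix value and REDIS_URL are illustrative:

require 'redis'
require 'rescheduler'

# Hypothetical host-app initializer: reuse an existing connection instead of
# letting the gem build one from :redis_connection.
Rescheduler.config[:redis]  = Redis.new(:url => ENV['REDIS_URL'])
Rescheduler.config[:prefix] = 'myapp:' # illustrative; the actual prefix format is up to the app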
@@ -22,7 +27,8 @@ module Rescheduler
     redis.del(keys)
   end
 
-  # Return a hash of statistics
+  # Return a hash of statistics, in this format
+  #
   def stats
     loop do
       redis.watch(rk_args) do
@@ -76,19 +82,36 @@ module Rescheduler
     end
   end
 
+  # NOTE: Use this with care. Some lost jobs can be moved to immediate queue instead of deleted
+  # Pass '*' to delete everything.
+  def purge_bad_jobs(queue = '*')
+    pending, running, deferred = redis.multi do
+      redis.hkeys(rk_args)
+      redis.hkeys(rk_running)
+      redis.zrange(rk_deferred, 0, -1)
+    end
+
+    bad = pending - running - deferred
+    bad.each do |qnid|
+      next if queue != '*' && !qnid.start_with?(queue + ':')
+      idelete(qnid)
+    end
+  end
+
   #==========================
   # Task producer routines
   # Add an immediate task to the queue
   def enqueue(options=nil)
     options ||= {}
-    now = Time.now
+    now = Time.now.to_i
 
     # Error check
     validate_queue_name(options[:queue]) if options.include?(:queue)
+    validate_recurrance(options)
 
     # Convert due_in to due_at
     if options.include?(:due_in)
-      raise ArgumentError, ':
+      raise ArgumentError, ':due_in and :due_at can not be both specified' if options.include?(:due_at)
       options[:due_at] = now + options[:due_in]
     end
 
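With this hunk, enqueue works entirely in integer epoch seconds (now = Time.now.to_i), validates the new recurrence options up front, and rejects :due_in combined with :due_at. A hedged usage sketch; the queue name and extra payload key are illustrative, and the module-level calls assume the public API matches the methods shown in this diff:

Rescheduler.enqueue(:queue => 'mailer', :user_id => 42)              # immediate job
Rescheduler.enqueue(:queue => 'mailer', :due_in => 600)              # roughly 10 minutes from now
Rescheduler.enqueue(:queue => 'mailer', :due_at => Time.now + 3600)  # stored as an integer timestamp
# Passing both :due_in and :due_at raises ArgumentError.

# Maintenance: delete args entries that have no matching running or deferred job.
Rescheduler.purge_bad_jobs('mailer')   # '*' (the default) sweeps every queue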
@@ -98,7 +121,9 @@ module Rescheduler
       options[:id] = redis.incr(rk_counter)
     end
 
-    ts = options[:due_at].to_i
+    ts = options[:due_at].to_i
+    ts = now if ts == 0 # 0 means immediate
+    options[:due_at] = ts # Convert :due_at to integer timestamp to be reused in recurrance
     qnid = get_qnid(options)
 
     # Encode and save args
@@ -109,18 +134,18 @@ module Rescheduler
      end
 
      # Save options
-     redis.hset(rk_args, qnid, options
+     redis.hset(rk_args, qnid, MultiJson.dump(options))
 
      # Determine the due time
-     if ts > now
+     if ts > now # Future job
        redis.zadd(rk_deferred, ts, qnid)
      else
        redis.lpush(rk_queue(options[:queue]), qnid)
      end
    end
 
-    # Now decide if we need to
-    if (ts > now
+    # Now decide if we need to wake up the workers (outside of the transaction)
+    if (ts > now)
      dt = redis.zrange(rk_deferred, 0, 0)[0]
      # Wake up workers if our job is the first one in deferred queue, so they can reset timeout
      if dt && dt == qnid
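Job options are now persisted with MultiJson instead of the previous encoding; the worker side (later in this diff) reads them back with :symbolize_keys => true, so option keys survive the round trip as symbols while values follow normal JSON rules. A small sketch of that round trip with illustrative values:

require 'multi_json'

opts = { :queue => 'mailer', :id => 7, :due_at => 1398158400 }
json = MultiJson.dump(opts)                          # => '{"queue":"mailer","id":7,"due_at":1398158400}' (adapter-dependent formatting)
back = MultiJson.load(json, :symbolize_keys => true)
back[:queue]                                         # => "mailer" (string value, symbol key)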
@@ -142,14 +167,58 @@ module Rescheduler
 
   def delete(options)
     qnid = get_qnid(options)
-
-
-
-
-
+    idelete(qnid)
+  end
+
+  # Make a job immediate if it is not already. Erase the wait
+  def make_immediate(options)
+    dtn = rk_deferred # Make a copy in case prefix changes
+    qnid = get_qnid(options)
+    ntry = 0
+    loop do
+      redis.watch(dtn) do
+        if redis.zcard(dtn, qnid) == 0
+          redis.unwatch(dtn)
+          return # Not a deferred job
+        else
+          redis.multi
+          redis.zrem(dtn, qnid)
+          q = qnid_to_queue(qnid)
+          redis.lpush(rk_queue(q), qnid)
+          if !redis.exec
+            # Contention happens, retrying
+            log_debug("make_immediate contention for #{qnid}")
+            Kernel.sleep (rand(ntry * 1000) / 1000.0) if ntry > 0
+          else
+            return # Done
+          end
+        end
+      end
+      ntry += 1
     end
   end
 
+  #=================
+  # Serialization (in case it is needed to transfer all Rescheduler across to another redis instance)
+
+  # Atomically save the state to file and stop all workers (state in redis is not destroyed)
+  # This function can take a while as it will wait for running jobs to finish first.
+  def serialize_and_stop(filename)
+    # TODO
+  end
+
+  # Load state from a file. Will merge into existing jobs if there are any (make sure it is done only once)
+  # This can be done before any worker starts, or after.
+  # Workers still need to be manually started
+  def deserialize(filename)
+    # TODO
+  end
+
+  # Clear redis states and delete all jobs (useful before deserialize)
+  def erase_all
+    # TODO
+  end
+
   #=================
   # Job definition
   # Task consumer routines
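make_immediate uses the usual redis optimistic-locking loop: WATCH the deferred zset, issue ZREM/LPUSH between MULTI and EXEC, and retry with a small randomized sleep when EXEC reports contention. Calling it mirrors enqueue and delete in that the job is addressed by its options; a hedged sketch, assuming the qnid is derived from :queue and :id as the rest of this diff suggests:

# Promote a previously deferred job so a worker picks it up now (illustrative values).
Rescheduler.make_immediate(:queue => 'mailer', :id => 42)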
@@ -159,6 +228,16 @@ module Rescheduler
     return nil
   end
 
+  #=================
+  # Error handling
+  def on_error(tube=nil, &block)
+    if tube != nil
+      @error_handlers ||= {}
+      @error_handlers[tube] = block
+    else
+      @global_error_handler = block;
+    end
+  end
   #=================
   # Runner/Maintenance routines
   def start(*tubes)
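on_error registers exception hooks for the worker loop: a block passed with a tube name is stored per tube, while a bare block becomes the global fallback. Both receive the raised exception and the job's options hash (see handle_error later in this diff, which looks the per-tube entry up by the job's qnid, so the global handler is the one most reliably exercised). A hedged registration sketch with an illustrative handler body:

Rescheduler.on_error do |e, sopt|
  # Global fallback for any job that raises
  STDERR.puts "job #{sopt[:id]} on #{sopt[:queue]} failed: #{e.message}"
end

Rescheduler.on_error('mailer') do |e, sopt|
  # Per-tube handler, stored under the tube name
  STDERR.puts "mailer job failed: #{e.message}"
end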
@@ -217,9 +296,19 @@ module Rescheduler
 
   private
 
+  # Internal routines operating out of qnid
+  def idelete(qnid)
+    queue = qnid.split(':').first
+    redis.multi do
+      redis.hdel(rk_args, qnid)
+      redis.zrem(rk_deferred, qnid)
+      redis.lrem(rk_queue(queue), 0, qnid)
+    end
+  end
+
   # Runner routines
   def run_job(qnid)
-    #
+    # 1. load job parameters for running
     optstr = nil
     begin
      res = nil
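idelete clears a job out of every structure it can occupy, inside one MULTI: the args hash (HDEL), the deferred zset (ZREM), and its immediate queue list (LREM). The queue name is recovered from the qnid itself, which — judging from split(':') here and start_with?(queue + ':') in purge_bad_jobs — has the assumed form "<queue>:<id>":

qnid = 'mailer:42'          # assumed shape: "<queue>:<id>" (illustrative values)
qnid.split(':').first       # => "mailer", the list key idelete trims with LREM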
@@ -237,36 +326,64 @@ module Rescheduler
        end
        if !res
          # Contention, try read again
-          log_debug("Job read contention")
+          log_debug("Job read contention: (#{qnid})")
        end
      end
    end until res
 
    # Parse and run
-
-
-
-
+    sopt = MultiJson.load(optstr, :symbolize_keys => true)
+
+    # Handle parameters
+    if (sopt.include?(:recur_every))
+      newopt = sopt.dup
+      newopt[:due_at] = (sopt[:due_at] || Time.now).to_i + sopt[:recur_every].to_i
+      newopt.delete(:due_in) # In case the first job was specified by :due_in
+      enqueue(newopt)
+    end
+
+    if (sopt.include?(:recur_daily))
+      newopt = sopt.dup
+      newopt[:due_at] = time_from_recur_daily(sopt[:recur_daily])
+      newopt.delete(:due_in) # In case the first job was specified by :due_in
+      enqueue(newopt)
+    end
 
    # 2. Find runner and invoke it
    begin
      log_debug(">>---- Starting #{qnid}")
      runner = @runners[qnid_to_queue(qnid)]
-      runner.
-
-
-
+      if runner.is_a?(Proc)
+        runner.call(sopt)
+        log_debug("----<< Finished #{qnid}")
+      else
+        log_debug("----<< Failed #{qnid}: Unknown queue name, handler not defined")
+      end
+    rescue Exception => e
+      log_debug("----<< Failed #{qnid}: -------------\n #{$!}")
+      log_debug(e.backtrace[0..4].join("\n"))
+      handle_error(e, qnid, sopt)
+      log_debug("------------------------------------\n")
    end
 
    # 3. Remove job from running list (Done)
    redis.hdel(rk_running, qnid)
  end
 
+  def handle_error(e, qnid, sopt)
+    error_handler = @error_handlers && @error_handlers[qnid]
+    if error_handler
+      error_handler.call(e, sopt)
+    elsif @global_error_handler
+      @global_error_handler.call(e, sopt)
+    end
+  end
+
  # Helper routines
 
  # Find all the "due" deferred jobs and move them into respective queues
  def service_deferred_jobs
-    dtn = rk_deferred
+    dtn = rk_deferred # Make a copy in case prefix changes
    ntry = 0
    loop do
      curtime = Time.now.to_i
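Recurring jobs re-enqueue themselves at the top of run_job: :recur_every schedules the next run a fixed number of seconds after the current due time, while :recur_daily targets the same time of day on the next day (or later today, if that time has not passed yet). A hedged sketch of both flavors; the queue name is illustrative:

Rescheduler.enqueue(:queue => 'reports', :recur_every => 3600)    # roughly hourly, re-enqueued on each run
Rescheduler.enqueue(:queue => 'reports', :recur_daily => '03:30') # next 03:30 local time, then daily
# Only one recurrence parameter may be given per job (validate_recurrance raises otherwise).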
@@ -333,16 +450,49 @@ module Rescheduler
   end
 
   def redis
-    @redis ||= Redis.new(@config[:redis_connection] || {})
+    @redis ||= @config[:redis] || Redis.new(@config[:redis_connection] || {})
   end
 
   def validate_queue_name(queue)
     raise ArgumentError, 'Queue name can not contain special characters' if queue.include?(':')
   end
 
+  def parse_seconds_of_day(recur_daily)
+    return recur_daily if recur_daily.is_a?(Fixnum)
+    time = Time.parse(recur_daily)
+    return time.to_i - Time.new(time.year, time.month, time.day).to_i
+  end
+
+  # Find the next recur time
+  def time_from_recur_daily(recur_daily, now = Time.now)
+    recur = parse_seconds_of_day(recur_daily)
+    t = Time.new(now.year, now.month, now.day).to_i + recur
+    t += 86400 if t < now.to_i
+    return Time.at(t)
+  end
+
+  def validate_recurrance(options)
+    rcnt = 0
+    if (options.include?(:recur_every))
+      rcnt += 1
+      raise 'Expect integer for :recur_every parameter' unless options[:recur_every].is_a?(Fixnum)
+    end
+
+    if (options.include?(:recur_daily))
+      rcnt += 1
+      time = time_from_recur_daily(options[:recur_daily]) # Try parse and make sure we can
+      unless options.include?(:due_at) || options.include?(:due_in)
+        options[:due_at] = time # Setup the first run
+      end
+    end
+    raise 'Can only specify one recurrance parameter' if rcnt > 1
+  end
+
   # Logging facility
   def log_debug(msg)
-
+    return if @config[:silent]
+    print("#{Time.now.iso8601} #{msg}\n")
+    STDOUT.flush
   end
 
 end
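The daily-recurrence helpers work in local time: parse_seconds_of_day turns a clock time into seconds past local midnight, and time_from_recur_daily adds a day if that moment has already passed. A worked example of the arithmetic, assuming a '14:30' recurrence:

require 'time'

t = Time.parse('14:30')                                    # today at 14:30, local zone
seconds = t.to_i - Time.new(t.year, t.month, t.day).to_i   # seconds since local midnight
seconds   # => 52200 (14*3600 + 30*60)
# If 14:30 has already passed, time_from_recur_daily adds 86400 so the
# next run lands on tomorrow's 14:30.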
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: rescheduler
 version: !ruby/object:Gem::Version
-  version: 0.3.0
+  version: 0.4.0
 prerelease:
 platform: ruby
 authors:
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2014-04-22 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: redis
@@ -28,7 +28,7 @@ dependencies:
      - !ruby/object:Gem::Version
        version: '0'
 - !ruby/object:Gem::Dependency
-  name:
+  name: multi_json
   requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
@@ -72,7 +72,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
        version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 1.8.
+rubygems_version: 1.8.25
 signing_key:
 specification_version: 3
 summary: A job queue for immediate and delayed jobs using Redis