qless 0.11.0 → 0.12.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +9 -0
- data/exe/qless-stats +1 -1
- data/lib/qless/lua/qless-lib.lua +116 -106
- data/lib/qless/lua/qless.lua +91 -81
- data/lib/qless/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-metadata.gz:
-data.tar.gz:
+metadata.gz: 3904eda7b2060bae62e4859fa594826f950c8119
+data.tar.gz: 471a898995adbdb1de64be7b5fbf0f8dc86629b4
 SHA512:
-metadata.gz:
-data.tar.gz:
+metadata.gz: edf869cddce2b2e31c0bb01664c706409c49c673eaa99eba0f804fa387c9a6d74af546116ea863b5a1945d18d17f59e99f296fe9d83ca0be389f6e8210902d20
+data.tar.gz: 3efaa022a3687c87f6e2db7cf5abc60afa2eb94141861f21d7d57d1f8d26ca9a8806341c52de9f347700d7503bae84e6ee1433ace899e08eeddbea35baa11821
data/README.md
CHANGED
@@ -663,3 +663,12 @@ Mailing List
 
 For questions and general Qless discussion, please join the [Qless
 Mailing list](https://groups.google.com/forum/?fromgroups#!forum/qless).
+
+Release Notes
+=============
+
+0.12.0
+------
+
+The metric `failures` provided by `qless-stats` has been replaced by `failed` for
+compatibility with users of `graphite`. See [#275](https://github.com/seomoz/qless/pull/275)
+for more details.
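Only the name under which `qless-stats` reports the failed-job count changes here (the one-line edit to `data/exe/qless-stats` below); the counts themselves and the data qless keeps in Redis are untouched. As a hypothetical illustration, a Graphite series that previously appeared under a path ending in `.failures` would now appear under `.failed`; the exact prefix depends on how the reporter is configured in your deployment.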
data/exe/qless-stats
CHANGED
data/lib/qless/lua/qless-lib.lua
CHANGED
@@ -1,4 +1,4 @@
--- Current SHA:
+-- Current SHA: 9d2cca3846a96fee53000085e36638e74ed392ed
 -- This is a generated file
 -------------------------------------------------------------------------------
 -- Forward declarations to make everything happy
@@ -68,14 +68,14 @@ end
 -- If no group is provided, this returns a JSON blob of the counts of the
 -- various groups of failures known. If a group is provided, it will report up
 -- to `limit` from `start` of the jobs affected by that issue.
---
+--
 -- # If no group, then...
 -- {
 -- 'group1': 1,
 -- 'group2': 5,
 -- ...
 -- }
---
+--
 -- # If a group is provided, then...
 -- {
 -- 'total': 20,
@@ -121,9 +121,9 @@ end
 -------------------------------------------------------------------------------
 -- Return all the job ids currently considered to be in the provided state
 -- in a particular queue. The response is a list of job ids:
---
+--
 -- [
--- jid1,
+-- jid1,
 -- jid2,
 -- ...
 -- ]
@@ -154,7 +154,7 @@ function Qless.jobs(now, state, ...)
 elseif state == 'depends' then
 return queue.depends.peek(now, offset, count)
 elseif state == 'recurring' then
-return queue.recurring.peek(
+return queue.recurring.peek('+inf', offset, count)
 else
 error('Jobs(): Unknown type "' .. state .. '"')
 end
@@ -169,7 +169,7 @@ end
 -- associated with that id, and 'untrack' stops tracking it. In this context,
 -- tracking is nothing more than saving the job to a list of jobs that are
 -- considered special.
---
+--
 -- {
 -- 'jobs': [
 -- {
@@ -254,18 +254,18 @@ function Qless.tag(now, command, ...)
 tags = cjson.decode(tags)
 local _tags = {}
 for i,v in ipairs(tags) do _tags[v] = true end
-
+
 -- Otherwise, add the job to the sorted set with that tags
 for i=2,#arg do
 local tag = arg[i]
-if _tags[tag] == nil then
+if _tags[tag] == nil or _tags[tag] == false then
 _tags[tag] = true
 table.insert(tags, tag)
 end
 redis.call('zadd', 'ql:t:' .. tag, now, jid)
 redis.call('zincrby', 'ql:tags', 1, tag)
 end
-
+
 redis.call('hset', QlessJob.ns .. jid, 'tags', cjson.encode(tags))
 return tags
 else
@@ -280,7 +280,7 @@ function Qless.tag(now, command, ...)
 tags = cjson.decode(tags)
 local _tags = {}
 for i,v in ipairs(tags) do _tags[v] = true end
-
+
 -- Otherwise, add the job to the sorted set with that tags
 for i=2,#arg do
 local tag = arg[i]
@@ -288,10 +288,10 @@ function Qless.tag(now, command, ...)
 redis.call('zrem', 'ql:t:' .. tag, jid)
 redis.call('zincrby', 'ql:tags', -1, tag)
 end
-
+
 local results = {}
 for i,tag in ipairs(tags) do if _tags[tag] then table.insert(results, tag) end end
-
+
 redis.call('hset', QlessJob.ns .. jid, 'tags', cjson.encode(results))
 return results
 else
@@ -333,7 +333,7 @@ function Qless.cancel(...)
 -- make sure that this operation will be ok
 for i, jid in ipairs(arg) do
 for j, dep in ipairs(dependents[jid]) do
-if dependents[dep] == nil then
+if dependents[dep] == nil or dependents[dep] == false then
 error('Cancel(): ' .. jid .. ' is a dependency of ' .. dep ..
 ' but is not mentioned to be canceled')
 end
@@ -418,7 +418,7 @@ function Qless.cancel(...)
 redis.call('del', QlessJob.ns .. jid .. '-history')
 end
 end
-
+
 return arg
 end
 
@@ -535,26 +535,26 @@ end
 
 -- Complete a job and optionally put it in another queue, either scheduled or
 -- to be considered waiting immediately. It can also optionally accept other
--- jids on which this job will be considered dependent before it's considered
+-- jids on which this job will be considered dependent before it's considered
 -- valid.
 --
 -- The variable-length arguments may be pairs of the form:
---
+--
 -- ('next' , queue) : The queue to advance it to next
 -- ('delay' , delay) : The delay for the next queue
 -- ('depends', : Json of jobs it depends on in the new queue
 -- '["jid1", "jid2", ...]')
 ---
-function QlessJob:complete(now, worker, queue,
+function QlessJob:complete(now, worker, queue, raw_data, ...)
 assert(worker, 'Complete(): Arg "worker" missing')
 assert(queue , 'Complete(): Arg "queue" missing')
-data = assert(cjson.decode(
-'Complete(): Arg "data" missing or not JSON: ' .. tostring(
+local data = assert(cjson.decode(raw_data),
+'Complete(): Arg "data" missing or not JSON: ' .. tostring(raw_data))
 
 -- Read in all the optional parameters
 local options = {}
 for i = 1, #arg, 2 do options[arg[i]] = arg[i + 1] end
-
+
 -- Sanity check on optional args
 local nextq = options['next']
 local delay = assert(tonumber(options['delay'] or 0))
@@ -581,14 +581,15 @@ function QlessJob:complete(now, worker, queue, data, ...)
 'priority', 'retries', 'queue'))
 
 if lastworker == false then
-error('Complete(): Job does not exist')
+error('Complete(): Job ' .. self.jid .. ' does not exist')
 elseif (state ~= 'running') then
-error('Complete(): Job is not currently running: ' ..
+error('Complete(): Job ' .. self.jid .. ' is not currently running: ' ..
+state)
 elseif lastworker ~= worker then
-error('Complete(): Job
-tostring(lastworker))
+error('Complete(): Job ' .. self.jid ..
+' has been handed out to another worker: ' .. tostring(lastworker))
 elseif queue ~= current_queue then
-error('Complete(): Job running in another queue: ' ..
+error('Complete(): Job ' .. self.jid .. ' running in another queue: ' ..
 tostring(current_queue))
 end
 
@@ -600,8 +601,8 @@ function QlessJob:complete(now, worker, queue, data, ...)
 -- update history
 self:history(now, 'done')
 
-if
-redis.call('hset', QlessJob.ns .. self.jid, 'data',
+if raw_data then
+redis.call('hset', QlessJob.ns .. self.jid, 'data', raw_data)
 end
 
 -- Remove the job from the previous queue
@@ -647,7 +648,7 @@ function QlessJob:complete(now, worker, queue, data, ...)
 if redis.call('zscore', 'ql:queues', nextq) == false then
 redis.call('zadd', 'ql:queues', now, nextq)
 end
-
+
 redis.call('hmset', QlessJob.ns .. self.jid,
 'state', 'waiting',
 'worker', '',
@@ -655,7 +656,7 @@ function QlessJob:complete(now, worker, queue, data, ...)
 'queue', nextq,
 'expires', 0,
 'remaining', tonumber(retries))
-
+
 if (delay > 0) and (#depends == 0) then
 queue_obj.scheduled.add(now + delay, self.jid)
 return 'scheduled'
@@ -703,18 +704,18 @@ function QlessJob:complete(now, worker, queue, data, ...)
 'queue', '',
 'expires', 0,
 'remaining', tonumber(retries))
-
+
 -- Do the completion dance
 local count = Qless.config.get('jobs-history-count')
 local time = Qless.config.get('jobs-history')
-
+
 -- These are the default values
 count = tonumber(count or 50000)
 time = tonumber(time or 7 * 24 * 60 * 60)
-
+
 -- Schedule this job for destructination eventually
 redis.call('zadd', 'ql:completed', now, self.jid)
-
+
 -- Now look at the expired job data. First, based on the current time
 local jids = redis.call('zrangebyscore', 'ql:completed', 0, now - time)
 -- Any jobs that need to be expired... delete
@@ -730,7 +731,7 @@ function QlessJob:complete(now, worker, queue, data, ...)
 end
 -- And now remove those from the queued-for-cleanup queue
 redis.call('zremrangebyscore', 'ql:completed', 0, now - time)
-
+
 -- Now take the all by the most recent 'count' ids
 jids = redis.call('zrange', 'ql:completed', 0, (-1-count))
 for index, jid in ipairs(jids) do
@@ -744,7 +745,7 @@ function QlessJob:complete(now, worker, queue, data, ...)
 redis.call('del', QlessJob.ns .. jid .. '-history')
 end
 redis.call('zremrangebyrank', 'ql:completed', 0, (-1-count))
-
+
 -- Alright, if this has any dependents, then we should go ahead
 -- and unstick those guys.
 for i, j in ipairs(redis.call(
@@ -768,10 +769,10 @@ function QlessJob:complete(now, worker, queue, data, ...)
 end
 end
 end
-
+
 -- Delete our dependents key
 redis.call('del', QlessJob.ns .. self.jid .. '-dependents')
-
+
 return 'complete'
 end
 end
@@ -782,14 +783,14 @@ end
 -- specific message. By `group`, we mean some phrase that might be one of
 -- several categorical modes of failure. The `message` is something more
 -- job-specific, like perhaps a traceback.
---
+--
 -- This method should __not__ be used to note that a job has been dropped or
 -- has failed in a transient way. This method __should__ be used to note that
 -- a job has something really wrong with it that must be remedied.
---
+--
 -- The motivation behind the `group` is so that similar errors can be grouped
 -- together. Optionally, updated data can be provided for the job. A job in
--- any state can be marked as failed. If it has been given to a worker as a
+-- any state can be marked as failed. If it has been given to a worker as a
 -- job, then its subsequent requests to heartbeat or complete that job will
 -- fail. Failed jobs are kept until they are canceled or completed.
 --
@@ -821,11 +822,12 @@ function QlessJob:fail(now, worker, group, message, data)
 
 -- If the job has been completed, we cannot fail it
 if not state then
-error('Fail(): Job does not exist')
+error('Fail(): Job ' .. self.jid .. 'does not exist')
 elseif state ~= 'running' then
-error('Fail(): Job not currently running: ' .. state)
+error('Fail(): Job ' .. self.jid .. 'not currently running: ' .. state)
 elseif worker ~= oldworker then
-error('Fail(): Job running with another worker: ' ..
+error('Fail(): Job ' .. self.jid .. ' running with another worker: ' ..
+oldworker)
 end
 
 -- Send out a log message
@@ -860,7 +862,7 @@ function QlessJob:fail(now, worker, group, message, data)
 queue_obj.locks.remove(self.jid)
 queue_obj.scheduled.remove(self.jid)
 
--- The reason that this appears here is that the above will fail if the
+-- The reason that this appears here is that the above will fail if the
 -- job doesn't exist
 if data then
 redis.call('hset', QlessJob.ns .. self.jid, 'data', cjson.encode(data))
@@ -897,7 +899,7 @@ end
 -- Throws an exception if:
 -- - the worker is not the worker with a lock on the job
 -- - the job is not actually running
---
+--
 -- Otherwise, it returns the number of retries remaining. If the allowed
 -- retries have been exhausted, then it is automatically failed, and a negative
 -- number is returned.
@@ -910,7 +912,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message)
 assert(worker, 'Retry(): Arg "worker" missing')
 delay = assert(tonumber(delay or 0),
 'Retry(): Arg "delay" not a number: ' .. tostring(delay))
-
+
 -- Let's see what the old priority, and tags were
 local oldqueue, state, retries, oldworker, priority, failure = unpack(
 redis.call('hmget', QlessJob.ns .. self.jid, 'queue', 'state',
@@ -918,11 +920,13 @@ function QlessJob:retry(now, queue, worker, delay, group, message)
 
 -- If this isn't the worker that owns
 if oldworker == false then
-error('Retry(): Job does not exist')
+error('Retry(): Job ' .. self.jid .. ' does not exist')
 elseif state ~= 'running' then
-error('Retry(): Job is not currently running: ' ..
+error('Retry(): Job ' .. self.jid .. ' is not currently running: ' ..
+state)
 elseif oldworker ~= worker then
-error('Retry(): Job
+error('Retry(): Job ' .. self.jid ..
+' has been given to another worker: ' .. oldworker)
 end
 
 -- For each of these, decrement their retries. If any of them
@@ -943,7 +947,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message)
 -- queue it's in
 local group = group or 'failed-retries-' .. queue
 self:history(now, 'failed', {['group'] = group})
-
+
 redis.call('hmset', QlessJob.ns .. self.jid, 'state', 'failed',
 'worker', '',
 'expires', '')
@@ -967,7 +971,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message)
 ['worker'] = unpack(self:data('worker'))
 }))
 end
-
+
 -- Add this type of failure to the list of failures
 redis.call('sadd', 'ql:failures', group)
 -- And add this particular instance to the failed types
@@ -1103,11 +1107,14 @@ function QlessJob:heartbeat(now, worker, data)
 redis.call('hmget', QlessJob.ns .. self.jid, 'worker', 'state'))
 if job_worker == false then
 -- This means the job doesn't exist
-error('Heartbeat(): Job does not exist')
+error('Heartbeat(): Job ' .. self.jid .. ' does not exist')
 elseif state ~= 'running' then
-error(
+error(
+'Heartbeat(): Job ' .. self.jid .. ' not currently running: ' .. state)
 elseif job_worker ~= worker or #job_worker == 0 then
-error(
+error(
+'Heartbeat(): Job ' .. self.jid ..
+' given out to another worker: ' .. job_worker)
 else
 -- Otherwise, optionally update the user data, and the heartbeat
 if data then
@@ -1119,11 +1126,11 @@ function QlessJob:heartbeat(now, worker, data)
 redis.call('hmset', QlessJob.ns .. self.jid,
 'expires', expires, 'worker', worker)
 end
-
+
 -- Update hwen this job was last updated on that worker
 -- Add this job to the list of jobs handled by this worker
 redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, self.jid)
-
+
 -- And now we should just update the locks
 local queue = Qless.queue(
 redis.call('hget', QlessJob.ns .. self.jid, 'queue'))
@@ -1144,7 +1151,7 @@ function QlessJob:priority(priority)
 -- Get the queue the job is currently in, if any
 local queue = redis.call('hget', QlessJob.ns .. self.jid, 'queue')
 
-if queue == nil then
+if queue == nil or queue == false then
 -- If the job doesn't exist, throw an error
 error('Priority(): Job ' .. self.jid .. ' does not exist')
 elseif queue == '' then
@@ -1177,8 +1184,8 @@ end
 function QlessJob:timeout(now)
 local queue_name, state, worker = unpack(redis.call('hmget',
 QlessJob.ns .. self.jid, 'queue', 'state', 'worker'))
-if queue_name == nil then
-error('Timeout(): Job does not exist')
+if queue_name == nil or queue_name == false then
+error('Timeout(): Job ' .. self.jid .. ' does not exist')
 elseif state ~= 'running' then
 error('Timeout(): Job ' .. self.jid .. ' not running')
 else
@@ -1186,7 +1193,7 @@ function QlessJob:timeout(now)
 self:history(now, 'timed-out')
 local queue = Qless.queue(queue_name)
 queue.locks.remove(self.jid)
-queue.work.add(now,
+queue.work.add(now, '+inf', self.jid)
 redis.call('hmset', QlessJob.ns .. self.jid,
 'state', 'stalled', 'expires', 0)
 local encoded = cjson.encode({
@@ -1261,7 +1268,7 @@ function QlessJob:history(now, what, item)
 -- We'll always keep the first item around
 local obj = redis.call('lpop', QlessJob.ns .. self.jid .. '-history')
 redis.call('ltrim', QlessJob.ns .. self.jid .. '-history', -count + 2, -1)
-if obj ~= nil then
+if obj ~= nil and obj ~= false then
 redis.call('lpush', QlessJob.ns .. self.jid .. '-history', obj)
 end
 end
@@ -1296,8 +1303,11 @@ function Qless.queue(name)
 return redis.call('zrem', queue:prefix('work'), unpack(arg))
 end
 end, add = function(now, priority, jid)
+if priority ~= '+inf' then
+priority = priority - (now / 10000000000)
+end
 return redis.call('zadd',
-queue:prefix('work'), priority
+queue:prefix('work'), priority, jid)
 end, score = function(jid)
 return redis.call('zscore', queue:prefix('work'), jid)
 end, length = function()
@@ -1309,10 +1319,10 @@ function Qless.queue(name)
 queue.locks = {
 expired = function(now, offset, count)
 return redis.call('zrangebyscore',
-queue:prefix('locks'), -
+queue:prefix('locks'), '-inf', now, 'LIMIT', offset, count)
 end, peek = function(now, offset, count)
 return redis.call('zrangebyscore', queue:prefix('locks'),
-now,
+now, '+inf', 'LIMIT', offset, count)
 end, add = function(expires, jid)
 redis.call('zadd', queue:prefix('locks'), expires, jid)
 end, remove = function(...)
@@ -1320,7 +1330,7 @@ function Qless.queue(name)
 return redis.call('zrem', queue:prefix('locks'), unpack(arg))
 end
 end, running = function(now)
-return redis.call('zcount', queue:prefix('locks'), now,
+return redis.call('zcount', queue:prefix('locks'), now, '+inf')
 end, length = function(now)
 -- If a 'now' is provided, we're interested in how many are before
 -- that time
@@ -1453,11 +1463,11 @@ function QlessQueue:stats(now, date)
 
 local key = 'ql:s:' .. name .. ':' .. bin .. ':' .. queue
 local count, mean, vk = unpack(redis.call('hmget', key, 'total', 'mean', 'vk'))
-
+
 count = tonumber(count) or 0
 mean = tonumber(mean) or 0
 vk = tonumber(vk)
-
+
 results.count = count or 0
 results.mean = mean or 0
 results.histogram = {}
@@ -1507,8 +1517,8 @@ function QlessQueue:peek(now, count)
 
 -- Now we've checked __all__ the locks for this queue the could
 -- have expired, and are no more than the number requested. If
--- we still need values in order to meet the demand, then we
--- should check if any scheduled items, and if so, we should
+-- we still need values in order to meet the demand, then we
+-- should check if any scheduled items, and if so, we should
 -- insert them to ensure correctness when pulling off the next
 -- unit of work.
 self:check_scheduled(now, count - #jids)
@@ -1582,8 +1592,8 @@ function QlessQueue:pop(now, worker, count)
 -- look for all the recurring jobs that need jobs run
 self:check_recurring(now, count - #jids)
 
--- If we still need values in order to meet the demand, then we
--- should check if any scheduled items, and if so, we should
+-- If we still need values in order to meet the demand, then we
+-- should check if any scheduled items, and if so, we should
 -- insert them to ensure correctness when pulling off the next
 -- unit of work.
 self:check_scheduled(now, count - #jids)
@@ -1605,19 +1615,19 @@ function QlessQueue:pop(now, worker, count)
 self:stat(now, 'wait', waiting)
 redis.call('hset', QlessJob.ns .. jid,
 'time', string.format("%.20f", now))
-
+
 -- Add this job to the list of jobs handled by this worker
 redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid)
-
+
 -- Update the jobs data, and add its locks, and return the job
 job:update({
 worker = worker,
 expires = expires,
 state = 'running'
 })
-
+
 self.locks.add(expires, jid)
-
+
 local tracked = redis.call('zscore', 'ql:tracked', jid) ~= false
 if tracked then
 Qless.publish('popped', jid)
@@ -1668,7 +1678,7 @@ function QlessQueue:stat(now, stat, val)
 redis.call('hincrby', key, 'h' .. math.floor(val / 3600), 1)
 else -- days
 redis.call('hincrby', key, 'd' .. math.floor(val / 86400), 1)
-end
+end
 redis.call('hmset', key, 'total', count, 'mean', mean, 'vk', vk)
 end
 
@@ -1728,8 +1738,8 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...)
 -- Now find what's in the original, but not the new
 local original = redis.call(
 'smembers', QlessJob.ns .. jid .. '-dependencies')
-for _, dep in pairs(original) do
-if new[dep] == nil then
+for _, dep in pairs(original) do
+if new[dep] == nil or new[dep] == false then
 -- Remove k as a dependency
 redis.call('srem', QlessJob.ns .. dep .. '-dependents' , jid)
 redis.call('srem', QlessJob.ns .. jid .. '-dependencies', dep)
@@ -1851,7 +1861,7 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...)
 end
 
 -- Lastly, we're going to make sure that this item is in the
--- set of known queues. We should keep this sorted by the
+-- set of known queues. We should keep this sorted by the
 -- order in which we saw each of these queues
 if redis.call('zscore', 'ql:queues', self.name) == false then
 redis.call('zadd', 'ql:queues', now, self.name)
@@ -1921,7 +1931,7 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...)
 if #arg % 2 == 1 then
 error('Odd number of additional args: ' .. tostring(arg))
 end
-
+
 -- Read in all the optional parameters
 local options = {}
 for i = 3, #arg, 2 do options[arg[i]] = arg[i + 1] end
@@ -1941,12 +1951,12 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...)
 local count, old_queue = unpack(redis.call('hmget', 'ql:r:' .. jid, 'count', 'queue'))
 count = count or 0
 
--- If it has previously been in another queue, then we should remove
+-- If it has previously been in another queue, then we should remove
 -- some information about it
 if old_queue then
 Qless.queue(old_queue).recurring.remove(jid)
 end
-
+
 -- Do some insertions
 redis.call('hmset', 'ql:r:' .. jid,
 'jid' , jid,
@@ -1964,14 +1974,14 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...)
 'backlog' , options.backlog)
 -- Now, we should schedule the next run of the job
 self.recurring.add(now + offset, jid)
-
+
 -- Lastly, we're going to make sure that this item is in the
--- set of known queues. We should keep this sorted by the
+-- set of known queues. We should keep this sorted by the
 -- order in which we saw each of these queues
 if redis.call('zscore', 'ql:queues', self.name) == false then
 redis.call('zadd', 'ql:queues', now, self.name)
 end
-
+
 return jid
 else
 error('Recur(): schedule type "' .. tostring(spec) .. '" unknown')
@@ -2017,22 +2027,22 @@ function QlessQueue:check_recurring(now, count)
 )
 end
 end
-
--- We're saving this value so that in the history, we can accurately
+
+-- We're saving this value so that in the history, we can accurately
 -- reflect when the job would normally have been scheduled
 while (score <= now) and (moved < count) do
 local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1)
 moved = moved + 1
 
 local child_jid = jid .. '-' .. count
-
+
 -- Add this job to the list of jobs tagged with whatever tags were
 -- supplied
 for i, tag in ipairs(_tags) do
 redis.call('zadd', 'ql:t:' .. tag, now, child_jid)
 redis.call('zincrby', 'ql:tags', 1, tag)
 end
-
+
 -- First, let's save its data
 redis.call('hmset', QlessJob.ns .. child_jid,
 'jid' , child_jid,
@@ -2049,12 +2059,12 @@ function QlessQueue:check_recurring(now, count)
 'time' , string.format("%.20f", score),
 'spawned_from_jid', jid)
 Qless.job(child_jid):history(score, 'put', {q = self.name})
-
+
 -- Now, if a delay was provided, and if it's in the future,
 -- then we'll have to schedule it. Otherwise, we're just
 -- going to add it to the work queue.
 self.work.add(score, priority, child_jid)
-
+
 score = score + interval
 self.recurring.add(score, jid)
 end
@@ -2069,7 +2079,7 @@ function QlessQueue:check_scheduled(now, count)
 -- insert into the work queue
 local scheduled = self.scheduled.ready(now, 0, count)
 for index, jid in ipairs(scheduled) do
--- With these in hand, we'll have to go out and find the
+-- With these in hand, we'll have to go out and find the
 -- priorities of these jobs, and then we'll insert them
 -- into the work queue and then when that's complete, we'll
 -- remove them from the scheduled queue
@@ -2154,7 +2164,7 @@ function QlessQueue:invalidate_locks(now, count)
 -- See how many remaining retries the job has
 local remaining = tonumber(redis.call(
 'hincrby', QlessJob.ns .. jid, 'remaining', -1))
-
+
 -- This is where we actually have to time out the work
 if remaining < 0 then
 -- Now remove the instance from the schedule, and work queues
@@ -2162,7 +2172,7 @@ function QlessQueue:invalidate_locks(now, count)
 self.work.remove(jid)
 self.locks.remove(jid)
 self.scheduled.remove(jid)
-
+
 local group = 'failed-retries-' .. Qless.job(jid):data()['queue']
 local job = Qless.job(jid)
 job:history(now, 'failed', {group = group})
@@ -2178,12 +2188,12 @@ function QlessQueue:invalidate_locks(now, count)
 ['when'] = now,
 ['worker'] = unpack(job:data('worker'))
 }))
-
+
 -- Add this type of failure to the list of failures
 redis.call('sadd', 'ql:failures', group)
 -- And add this particular instance to the failed types
 redis.call('lpush', 'ql:f:' .. group, jid)
-
+
 if redis.call('zscore', 'ql:tracked', jid) ~= false then
 Qless.publish('failed', jid)
 end
@@ -2260,11 +2270,11 @@ function QlessRecurringJob:data()
 local job = redis.call(
 'hmget', 'ql:r:' .. self.jid, 'jid', 'klass', 'state', 'queue',
 'priority', 'interval', 'retries', 'count', 'data', 'tags', 'backlog')
-
+
 if not job[1] then
 return nil
 end
-
+
 return {
 jid = job[1],
 klass = job[2],
@@ -2287,7 +2297,7 @@ end
 -- - data
 -- - klass
 -- - queue
--- - backlog
+-- - backlog
 function QlessRecurringJob:update(now, ...)
 local options = {}
 -- Make sure that the job exists
@@ -2345,10 +2355,10 @@ function QlessRecurringJob:tag(...)
 tags = cjson.decode(tags)
 local _tags = {}
 for i,v in ipairs(tags) do _tags[v] = true end
-
+
 -- Otherwise, add the job to the sorted set with that tags
-for i=1,#arg do if _tags[arg[i]] == nil then table.insert(tags, arg[i]) end end
-
+for i=1,#arg do if _tags[arg[i]] == nil or _tags[arg[i]] == false then table.insert(tags, arg[i]) end end
+
 tags = cjson.encode(tags)
 redis.call('hset', 'ql:r:' .. self.jid, 'tags', tags)
 return tags
@@ -2404,7 +2414,7 @@ end
 -- Provide data about all the workers, or if a specific worker is provided,
 -- then which jobs that worker is responsible for. If no worker is provided,
 -- expect a response of the form:
---
+--
 -- [
 -- # This is sorted by the recency of activity from that worker
 -- {
@@ -2415,9 +2425,9 @@ end
 -- ...
 -- }
 -- ]
---
+--
 -- If a worker id is provided, then expect a response of the form:
---
+--
 -- {
 -- 'jobs': [
 -- jid1,
data/lib/qless/lua/qless.lua
CHANGED
@@ -1,4 +1,4 @@
--- Current SHA:
+-- Current SHA: 9d2cca3846a96fee53000085e36638e74ed392ed
 -- This is a generated file
 local Qless = {
 ns = 'ql:'
@@ -98,7 +98,7 @@ function Qless.jobs(now, state, ...)
 elseif state == 'depends' then
 return queue.depends.peek(now, offset, count)
 elseif state == 'recurring' then
-return queue.recurring.peek(
+return queue.recurring.peek('+inf', offset, count)
 else
 error('Jobs(): Unknown type "' .. state .. '"')
 end
@@ -147,17 +147,17 @@ function Qless.tag(now, command, ...)
 tags = cjson.decode(tags)
 local _tags = {}
 for i,v in ipairs(tags) do _tags[v] = true end
-
+
 for i=2,#arg do
 local tag = arg[i]
-if _tags[tag] == nil then
+if _tags[tag] == nil or _tags[tag] == false then
 _tags[tag] = true
 table.insert(tags, tag)
 end
 redis.call('zadd', 'ql:t:' .. tag, now, jid)
 redis.call('zincrby', 'ql:tags', 1, tag)
 end
-
+
 redis.call('hset', QlessJob.ns .. jid, 'tags', cjson.encode(tags))
 return tags
 else
@@ -170,17 +170,17 @@ function Qless.tag(now, command, ...)
 tags = cjson.decode(tags)
 local _tags = {}
 for i,v in ipairs(tags) do _tags[v] = true end
-
+
 for i=2,#arg do
 local tag = arg[i]
 _tags[tag] = nil
 redis.call('zrem', 'ql:t:' .. tag, jid)
 redis.call('zincrby', 'ql:tags', -1, tag)
 end
-
+
 local results = {}
 for i,tag in ipairs(tags) do if _tags[tag] then table.insert(results, tag) end end
-
+
 redis.call('hset', QlessJob.ns .. jid, 'tags', cjson.encode(results))
 return results
 else
@@ -214,7 +214,7 @@ function Qless.cancel(...)
 
 for i, jid in ipairs(arg) do
 for j, dep in ipairs(dependents[jid]) do
-if dependents[dep] == nil then
+if dependents[dep] == nil or dependents[dep] == false then
 error('Cancel(): ' .. jid .. ' is a dependency of ' .. dep ..
 ' but is not mentioned to be canceled')
 end
@@ -282,7 +282,7 @@ function Qless.cancel(...)
 redis.call('del', QlessJob.ns .. jid .. '-history')
 end
 end
-
+
 return arg
 end
 
@@ -376,15 +376,15 @@ function QlessJob:data(...)
 end
 end
 
-function QlessJob:complete(now, worker, queue,
+function QlessJob:complete(now, worker, queue, raw_data, ...)
 assert(worker, 'Complete(): Arg "worker" missing')
 assert(queue , 'Complete(): Arg "queue" missing')
-data = assert(cjson.decode(
-'Complete(): Arg "data" missing or not JSON: ' .. tostring(
+local data = assert(cjson.decode(raw_data),
+'Complete(): Arg "data" missing or not JSON: ' .. tostring(raw_data))
 
 local options = {}
 for i = 1, #arg, 2 do options[arg[i]] = arg[i + 1] end
-
+
 local nextq = options['next']
 local delay = assert(tonumber(options['delay'] or 0))
 local depends = assert(cjson.decode(options['depends'] or '[]'),
@@ -405,21 +405,22 @@ function QlessJob:complete(now, worker, queue, data, ...)
 'priority', 'retries', 'queue'))
 
 if lastworker == false then
-error('Complete(): Job does not exist')
+error('Complete(): Job ' .. self.jid .. ' does not exist')
 elseif (state ~= 'running') then
-error('Complete(): Job is not currently running: ' ..
+error('Complete(): Job ' .. self.jid .. ' is not currently running: ' ..
+state)
 elseif lastworker ~= worker then
-error('Complete(): Job
-tostring(lastworker))
+error('Complete(): Job ' .. self.jid ..
+' has been handed out to another worker: ' .. tostring(lastworker))
 elseif queue ~= current_queue then
-error('Complete(): Job running in another queue: ' ..
+error('Complete(): Job ' .. self.jid .. ' running in another queue: ' ..
 tostring(current_queue))
 end
 
 self:history(now, 'done')
 
-if
-redis.call('hset', QlessJob.ns .. self.jid, 'data',
+if raw_data then
+redis.call('hset', QlessJob.ns .. self.jid, 'data', raw_data)
 end
 
 local queue_obj = Qless.queue(queue)
@@ -454,7 +455,7 @@ function QlessJob:complete(now, worker, queue, data, ...)
 if redis.call('zscore', 'ql:queues', nextq) == false then
 redis.call('zadd', 'ql:queues', now, nextq)
 end
-
+
 redis.call('hmset', QlessJob.ns .. self.jid,
 'state', 'waiting',
 'worker', '',
@@ -462,7 +463,7 @@ function QlessJob:complete(now, worker, queue, data, ...)
 'queue', nextq,
 'expires', 0,
 'remaining', tonumber(retries))
-
+
 if (delay > 0) and (#depends == 0) then
 queue_obj.scheduled.add(now + delay, self.jid)
 return 'scheduled'
@@ -505,15 +506,15 @@ function QlessJob:complete(now, worker, queue, data, ...)
 'queue', '',
 'expires', 0,
 'remaining', tonumber(retries))
-
+
 local count = Qless.config.get('jobs-history-count')
 local time = Qless.config.get('jobs-history')
-
+
 count = tonumber(count or 50000)
 time = tonumber(time or 7 * 24 * 60 * 60)
-
+
 redis.call('zadd', 'ql:completed', now, self.jid)
-
+
 local jids = redis.call('zrangebyscore', 'ql:completed', 0, now - time)
 for index, jid in ipairs(jids) do
 local tags = cjson.decode(
@@ -526,7 +527,7 @@ function QlessJob:complete(now, worker, queue, data, ...)
 redis.call('del', QlessJob.ns .. jid .. '-history')
 end
 redis.call('zremrangebyscore', 'ql:completed', 0, now - time)
-
+
 jids = redis.call('zrange', 'ql:completed', 0, (-1-count))
 for index, jid in ipairs(jids) do
 local tags = cjson.decode(
@@ -539,7 +540,7 @@ function QlessJob:complete(now, worker, queue, data, ...)
 redis.call('del', QlessJob.ns .. jid .. '-history')
 end
 redis.call('zremrangebyrank', 'ql:completed', 0, (-1-count))
-
+
 for i, j in ipairs(redis.call(
 'smembers', QlessJob.ns .. self.jid .. '-dependents')) do
 redis.call('srem', QlessJob.ns .. j .. '-dependencies', self.jid)
@@ -561,9 +562,9 @@ function QlessJob:complete(now, worker, queue, data, ...)
 end
 end
 end
-
+
 redis.call('del', QlessJob.ns .. self.jid .. '-dependents')
-
+
 return 'complete'
 end
 end
@@ -583,11 +584,12 @@ function QlessJob:fail(now, worker, group, message, data)
 'hmget', QlessJob.ns .. self.jid, 'queue', 'state', 'worker'))
 
 if not state then
-error('Fail(): Job does not exist')
+error('Fail(): Job ' .. self.jid .. 'does not exist')
 elseif state ~= 'running' then
-error('Fail(): Job not currently running: ' .. state)
+error('Fail(): Job ' .. self.jid .. 'not currently running: ' .. state)
 elseif worker ~= oldworker then
-error('Fail(): Job running with another worker: ' ..
+error('Fail(): Job ' .. self.jid .. ' running with another worker: ' ..
+oldworker)
 end
 
 Qless.publish('log', cjson.encode({
@@ -641,17 +643,19 @@ function QlessJob:retry(now, queue, worker, delay, group, message)
 assert(worker, 'Retry(): Arg "worker" missing')
 delay = assert(tonumber(delay or 0),
 'Retry(): Arg "delay" not a number: ' .. tostring(delay))
-
+
 local oldqueue, state, retries, oldworker, priority, failure = unpack(
 redis.call('hmget', QlessJob.ns .. self.jid, 'queue', 'state',
 'retries', 'worker', 'priority', 'failure'))
 
 if oldworker == false then
-error('Retry(): Job does not exist')
+error('Retry(): Job ' .. self.jid .. ' does not exist')
 elseif state ~= 'running' then
-error('Retry(): Job is not currently running: ' ..
+error('Retry(): Job ' .. self.jid .. ' is not currently running: ' ..
+state)
 elseif oldworker ~= worker then
-error('Retry(): Job
+error('Retry(): Job ' .. self.jid ..
+' has been given to another worker: ' .. oldworker)
 end
 
 local remaining = tonumber(redis.call(
@@ -665,7 +669,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message)
 if remaining < 0 then
 local group = group or 'failed-retries-' .. queue
 self:history(now, 'failed', {['group'] = group})
-
+
 redis.call('hmset', QlessJob.ns .. self.jid, 'state', 'failed',
 'worker', '',
 'expires', '')
@@ -688,7 +692,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message)
 ['worker'] = unpack(self:data('worker'))
 }))
 end
-
+
 redis.call('sadd', 'ql:failures', group)
 redis.call('lpush', 'ql:f:' .. group, self.jid)
 local bin = now - (now % 86400)
@@ -793,11 +797,14 @@ function QlessJob:heartbeat(now, worker, data)
 local job_worker, state = unpack(
 redis.call('hmget', QlessJob.ns .. self.jid, 'worker', 'state'))
 if job_worker == false then
-error('Heartbeat(): Job does not exist')
+error('Heartbeat(): Job ' .. self.jid .. ' does not exist')
 elseif state ~= 'running' then
-error(
+error(
+'Heartbeat(): Job ' .. self.jid .. ' not currently running: ' .. state)
 elseif job_worker ~= worker or #job_worker == 0 then
-error(
+error(
+'Heartbeat(): Job ' .. self.jid ..
+' given out to another worker: ' .. job_worker)
 else
 if data then
 redis.call('hmset', QlessJob.ns .. self.jid, 'expires',
@@ -806,9 +813,9 @@ function QlessJob:heartbeat(now, worker, data)
 redis.call('hmset', QlessJob.ns .. self.jid,
 'expires', expires, 'worker', worker)
 end
-
+
 redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, self.jid)
-
+
 local queue = Qless.queue(
 redis.call('hget', QlessJob.ns .. self.jid, 'queue'))
 queue.locks.add(expires, self.jid)
@@ -823,7 +830,7 @@ function QlessJob:priority(priority)
 
 local queue = redis.call('hget', QlessJob.ns .. self.jid, 'queue')
 
-if queue == nil then
+if queue == nil or queue == false then
 error('Priority(): Job ' .. self.jid .. ' does not exist')
 elseif queue == '' then
 redis.call('hset', QlessJob.ns .. self.jid, 'priority', priority)
@@ -850,15 +857,15 @@ end
 function QlessJob:timeout(now)
 local queue_name, state, worker = unpack(redis.call('hmget',
 QlessJob.ns .. self.jid, 'queue', 'state', 'worker'))
-if queue_name == nil then
-error('Timeout(): Job does not exist')
+if queue_name == nil or queue_name == false then
+error('Timeout(): Job ' .. self.jid .. ' does not exist')
 elseif state ~= 'running' then
 error('Timeout(): Job ' .. self.jid .. ' not running')
 else
 self:history(now, 'timed-out')
 local queue = Qless.queue(queue_name)
 queue.locks.remove(self.jid)
-queue.work.add(now,
+queue.work.add(now, '+inf', self.jid)
 redis.call('hmset', QlessJob.ns .. self.jid,
 'state', 'stalled', 'expires', 0)
 local encoded = cjson.encode({
@@ -921,7 +928,7 @@ function QlessJob:history(now, what, item)
 if count > 0 then
 local obj = redis.call('lpop', QlessJob.ns .. self.jid .. '-history')
 redis.call('ltrim', QlessJob.ns .. self.jid .. '-history', -count + 2, -1)
-if obj ~= nil then
+if obj ~= nil and obj ~= false then
 redis.call('lpush', QlessJob.ns .. self.jid .. '-history', obj)
 end
 end
@@ -951,8 +958,11 @@ function Qless.queue(name)
 return redis.call('zrem', queue:prefix('work'), unpack(arg))
 end
 end, add = function(now, priority, jid)
+if priority ~= '+inf' then
+priority = priority - (now / 10000000000)
+end
 return redis.call('zadd',
-queue:prefix('work'), priority
+queue:prefix('work'), priority, jid)
 end, score = function(jid)
 return redis.call('zscore', queue:prefix('work'), jid)
 end, length = function()
@@ -963,10 +973,10 @@ function Qless.queue(name)
 queue.locks = {
 expired = function(now, offset, count)
 return redis.call('zrangebyscore',
-queue:prefix('locks'), -
+queue:prefix('locks'), '-inf', now, 'LIMIT', offset, count)
 end, peek = function(now, offset, count)
 return redis.call('zrangebyscore', queue:prefix('locks'),
-now,
+now, '+inf', 'LIMIT', offset, count)
 end, add = function(expires, jid)
 redis.call('zadd', queue:prefix('locks'), expires, jid)
 end, remove = function(...)
@@ -974,7 +984,7 @@ function Qless.queue(name)
 return redis.call('zrem', queue:prefix('locks'), unpack(arg))
 end
 end, running = function(now)
-return redis.call('zcount', queue:prefix('locks'), now,
+return redis.call('zcount', queue:prefix('locks'), now, '+inf')
 end, length = function(now)
 if now then
 return redis.call('zcount', queue:prefix('locks'), 0, now)
@@ -1065,11 +1075,11 @@ function QlessQueue:stats(now, date)
 
 local key = 'ql:s:' .. name .. ':' .. bin .. ':' .. queue
 local count, mean, vk = unpack(redis.call('hmget', key, 'total', 'mean', 'vk'))
-
+
 count = tonumber(count) or 0
 mean = tonumber(mean) or 0
 vk = tonumber(vk)
-
+
 results.count = count or 0
 results.mean = mean or 0
 results.histogram = {}
@@ -1174,17 +1184,17 @@ function QlessQueue:pop(now, worker, count)
 self:stat(now, 'wait', waiting)
 redis.call('hset', QlessJob.ns .. jid,
 'time', string.format("%.20f", now))
-
+
 redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid)
-
+
 job:update({
 worker = worker,
 expires = expires,
 state = 'running'
 })
-
+
 self.locks.add(expires, jid)
-
+
 local tracked = redis.call('zscore', 'ql:tracked', jid) ~= false
 if tracked then
 Qless.publish('popped', jid)
@@ -1224,7 +1234,7 @@ function QlessQueue:stat(now, stat, val)
 redis.call('hincrby', key, 'h' .. math.floor(val / 3600), 1)
 else -- days
 redis.call('hincrby', key, 'd' .. math.floor(val / 86400), 1)
-end
+end
 redis.call('hmset', key, 'total', count, 'mean', mean, 'vk', vk)
 end
 
@@ -1266,8 +1276,8 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...)
 
 local original = redis.call(
 'smembers', QlessJob.ns .. jid .. '-dependencies')
-for _, dep in pairs(original) do
-if new[dep] == nil then
+for _, dep in pairs(original) do
+if new[dep] == nil or new[dep] == false then
 redis.call('srem', QlessJob.ns .. dep .. '-dependents' , jid)
 redis.call('srem', QlessJob.ns .. jid .. '-dependencies', dep)
 end
@@ -1421,7 +1431,7 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...)
 if #arg % 2 == 1 then
 error('Odd number of additional args: ' .. tostring(arg))
 end
-
+
 local options = {}
 for i = 3, #arg, 2 do options[arg[i]] = arg[i + 1] end
 options.tags = assert(cjson.decode(options.tags or '{}'),
@@ -1443,7 +1453,7 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...)
 if old_queue then
 Qless.queue(old_queue).recurring.remove(jid)
 end
-
+
 redis.call('hmset', 'ql:r:' .. jid,
 'jid' , jid,
 'klass' , klass,
@@ -1458,11 +1468,11 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...)
 'retries' , options.retries,
 'backlog' , options.backlog)
 self.recurring.add(now + offset, jid)
-
+
 if redis.call('zscore', 'ql:queues', self.name) == false then
 redis.call('zadd', 'ql:queues', now, self.name)
 end
-
+
 return jid
 else
 error('Recur(): schedule type "' .. tostring(spec) .. '" unknown')
@@ -1493,18 +1503,18 @@ function QlessQueue:check_recurring(now, count)
 )
 end
 end
-
+
 while (score <= now) and (moved < count) do
 local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1)
 moved = moved + 1
 
 local child_jid = jid .. '-' .. count
-
+
 for i, tag in ipairs(_tags) do
 redis.call('zadd', 'ql:t:' .. tag, now, child_jid)
 redis.call('zincrby', 'ql:tags', 1, tag)
 end
-
+
 redis.call('hmset', QlessJob.ns .. child_jid,
 'jid' , child_jid,
 'klass' , klass,
@@ -1520,9 +1530,9 @@ function QlessQueue:check_recurring(now, count)
 'time' , string.format("%.20f", score),
 'spawned_from_jid', jid)
 Qless.job(child_jid):history(score, 'put', {q = self.name})
-
+
 self.work.add(score, priority, child_jid)
-
+
 score = score + interval
 self.recurring.add(score, jid)
 end
@@ -1587,12 +1597,12 @@ function QlessQueue:invalidate_locks(now, count)
 
 local remaining = tonumber(redis.call(
 'hincrby', QlessJob.ns .. jid, 'remaining', -1))
-
+
 if remaining < 0 then
 self.work.remove(jid)
 self.locks.remove(jid)
 self.scheduled.remove(jid)
-
+
 local group = 'failed-retries-' .. Qless.job(jid):data()['queue']
 local job = Qless.job(jid)
 job:history(now, 'failed', {group = group})
@@ -1607,10 +1617,10 @@ function QlessQueue:invalidate_locks(now, count)
 ['when'] = now,
 ['worker'] = unpack(job:data('worker'))
 }))
-
+
 redis.call('sadd', 'ql:failures', group)
 redis.call('lpush', 'ql:f:' .. group, jid)
-
+
 if redis.call('zscore', 'ql:tracked', jid) ~= false then
 Qless.publish('failed', jid)
 end
@@ -1669,11 +1679,11 @@ function QlessRecurringJob:data()
 local job = redis.call(
 'hmget', 'ql:r:' .. self.jid, 'jid', 'klass', 'state', 'queue',
 'priority', 'interval', 'retries', 'count', 'data', 'tags', 'backlog')
-
+
 if not job[1] then
 return nil
 end
-
+
 return {
 jid = job[1],
 klass = job[2],
@@ -1739,9 +1749,9 @@ function QlessRecurringJob:tag(...)
 tags = cjson.decode(tags)
 local _tags = {}
 for i,v in ipairs(tags) do _tags[v] = true end
-
-for i=1,#arg do if _tags[arg[i]] == nil then table.insert(tags, arg[i]) end end
-
+
+for i=1,#arg do if _tags[arg[i]] == nil or _tags[arg[i]] == false then table.insert(tags, arg[i]) end end
+
 tags = cjson.encode(tags)
 redis.call('hset', 'ql:r:' .. self.jid, 'tags', tags)
 return tags
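The other change repeated in both Lua files is in the work queue's `add` function: a job's sorted-set score is now `priority - (now / 10000000000)` unless the caller passes the string `'+inf'` (as `timeout` now does), which Redis accepts as an infinite score. A rough sketch of the intent, under the assumption that qless pops the highest-scoring job first:

```lua
-- Sketch only; mirrors the arithmetic added to queue.work.add.
local function work_score(now, priority)
  if priority ~= '+inf' then
    -- Earlier submissions (smaller `now`) get a slightly higher score,
    -- so jobs of equal priority keep roughly FIFO order.
    return priority - (now / 10000000000)
  end
  -- '+inf' sorts above every finite score, putting timed-out jobs first.
  return priority
end

print(work_score(1516000000, 5)) --> 4.8484
print(work_score(1516000100, 5)) --> 4.84839999 (marginally smaller)
```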
data/lib/qless/version.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: qless
 version: !ruby/object:Gem::Version
-version: 0.
+version: 0.12.0
 platform: ruby
 authors:
 - Dan Lecocq
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: exe
 cert_chain: []
-date:
+date: 2018-01-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
 name: metriks
|