qless 0.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/Gemfile +8 -0
- data/HISTORY.md +168 -0
- data/README.md +571 -0
- data/Rakefile +28 -0
- data/bin/qless-campfire +106 -0
- data/bin/qless-growl +99 -0
- data/bin/qless-web +23 -0
- data/lib/qless.rb +185 -0
- data/lib/qless/config.rb +31 -0
- data/lib/qless/job.rb +259 -0
- data/lib/qless/job_reservers/ordered.rb +23 -0
- data/lib/qless/job_reservers/round_robin.rb +34 -0
- data/lib/qless/lua.rb +25 -0
- data/lib/qless/qless-core/cancel.lua +71 -0
- data/lib/qless/qless-core/complete.lua +218 -0
- data/lib/qless/qless-core/config.lua +44 -0
- data/lib/qless/qless-core/depends.lua +65 -0
- data/lib/qless/qless-core/fail.lua +107 -0
- data/lib/qless/qless-core/failed.lua +83 -0
- data/lib/qless/qless-core/get.lua +37 -0
- data/lib/qless/qless-core/heartbeat.lua +50 -0
- data/lib/qless/qless-core/jobs.lua +41 -0
- data/lib/qless/qless-core/peek.lua +155 -0
- data/lib/qless/qless-core/pop.lua +278 -0
- data/lib/qless/qless-core/priority.lua +32 -0
- data/lib/qless/qless-core/put.lua +156 -0
- data/lib/qless/qless-core/queues.lua +58 -0
- data/lib/qless/qless-core/recur.lua +181 -0
- data/lib/qless/qless-core/retry.lua +73 -0
- data/lib/qless/qless-core/ruby/lib/qless-core.rb +1 -0
- data/lib/qless/qless-core/ruby/lib/qless/core.rb +13 -0
- data/lib/qless/qless-core/ruby/lib/qless/core/version.rb +5 -0
- data/lib/qless/qless-core/ruby/spec/qless_core_spec.rb +13 -0
- data/lib/qless/qless-core/stats.lua +92 -0
- data/lib/qless/qless-core/tag.lua +100 -0
- data/lib/qless/qless-core/track.lua +79 -0
- data/lib/qless/qless-core/workers.lua +69 -0
- data/lib/qless/queue.rb +141 -0
- data/lib/qless/server.rb +411 -0
- data/lib/qless/tasks.rb +10 -0
- data/lib/qless/version.rb +3 -0
- data/lib/qless/worker.rb +195 -0
- metadata +239 -0
data/lib/qless/qless-core/config.lua
@@ -0,0 +1,44 @@
+-- config(0, 'get', [option])
+-- config(0, 'set', option, value)
+-- config(0, 'unset', option)
+-- -------------------------------------------------------------------------------------------------------
+-- This script provides an interface to get, set, and unset configuration
+-- options.
+--
+-- Args:
+--    1) [option]
+
+if #KEYS > 0 then error('Config(): No keys should be provided') end
+
+local command = ARGV[1]
+
+local defaults = {
+    ['application']        = 'qless',
+    ['heartbeat']          = 60,
+    ['stats-history']      = 30,
+    ['histogram-history']  = 7,
+    ['jobs-history-count'] = 50000,
+    ['jobs-history']       = 604800
+}
+
+if command == 'get' then
+    if ARGV[2] then
+        return redis.call('hget', 'ql:config', ARGV[2]) or defaults[ARGV[2]]
+    else
+        -- Inspired by redis-lua https://github.com/nrk/redis-lua/blob/version-2.0/src/redis.lua
+        local reply = redis.call('hgetall', 'ql:config')
+        for i = 1, #reply, 2 do defaults[reply[i]] = reply[i + 1] end
+        return cjson.encode(defaults)
+    end
+elseif command == 'set' then
+    local option = assert(ARGV[2], 'Config(): Arg "option" missing')
+    local value  = assert(ARGV[3], 'Config(): Arg "value" missing')
+    redis.call('hset', 'ql:config', option, value)
+elseif command == 'unset' then
+    local option = assert(ARGV[2], 'Config(): Arg "option" missing')
+    redis.call('hdel', 'ql:config', option)
+else
+    error('Config(): Unrecognized command ' .. command)
+end
+
+
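The header comments above double as the calling convention: no KEYS, and ARGV carries the subcommand plus its arguments. Inside the gem these scripts appear to be loaded through lib/qless/lua.rb, but as a rough sketch they can also be driven directly with the redis-rb gem; the script path and option values below are purely illustrative:

```ruby
require 'redis'
require 'json'

redis  = Redis.new
script = File.read('config.lua')   # illustrative path to the script above

# config(0, 'set', option, value) -- override the default 60s heartbeat
redis.eval(script, keys: [], argv: ['set', 'heartbeat', '120'])

# config(0, 'get', option) -- read a single option back
puts redis.eval(script, keys: [], argv: ['get', 'heartbeat'])      # => "120"

# config(0, 'get') -- every option (defaults merged with overrides) as JSON
all = JSON.parse(redis.eval(script, keys: [], argv: ['get']))
puts all['jobs-history-count']                                      # => 50000

# config(0, 'unset', option) -- fall back to the default again
redis.eval(script, keys: [], argv: ['unset', 'heartbeat'])
```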
data/lib/qless/qless-core/depends.lua
@@ -0,0 +1,65 @@
+-- Depends(0, jid, ('on', [jid, [jid, [...]]]) | ('off', ('all' | [jid, [jid, [...]]]))
+-- ------------------------------------------------------------------------------------
+-- Add or remove dependencies a job has. If 'on' is provided, the provided jids are
+-- added as dependencies. If 'off' and 'all' are provided, then all the current dependencies
+-- are removed. If 'off' is provided and the next argument is not 'all', then those
+-- jids are removed as dependencies.
+--
+-- If a job is not already in the 'depends' state, then this call will return false.
+-- Otherwise, it will return true
+--
+-- Args:
+--    1) jid
+
+if #KEYS > 0 then error('Depends(): No Keys should be provided') end
+
+local jid     = assert(ARGV[1], 'Depends(): Arg "jid" missing.')
+local command = assert(ARGV[2], 'Depends(): Arg 2 missing')
+
+if redis.call('hget', 'ql:j:' .. jid, 'state') ~= 'depends' then
+    return false
+end
+
+if ARGV[2] == 'on' then
+    -- These are the jids we legitimately have to wait on
+    for i=3,#ARGV do
+        local j = ARGV[i]
+        -- Make sure it's something other than 'nil' or complete.
+        local state = redis.call('hget', 'ql:j:' .. j, 'state')
+        if (state and state ~= 'complete') then
+            redis.call('sadd', 'ql:j:' .. j .. '-dependents', jid)
+            redis.call('sadd', 'ql:j:' .. jid .. '-dependencies', j)
+        end
+    end
+    return true
+elseif ARGV[2] == 'off' then
+    if ARGV[3] == 'all' then
+        for i, j in ipairs(redis.call('smembers', 'ql:j:' .. jid .. '-dependencies')) do
+            redis.call('srem', 'ql:j:' .. j .. '-dependents', jid)
+        end
+        redis.call('del', 'ql:j:' .. jid .. '-dependencies')
+        local q, p = unpack(redis.call('hmget', 'ql:j:' .. jid, 'queue', 'priority'))
+        if q then
+            redis.call('zrem', 'ql:q:' .. q .. '-depends', jid)
+            redis.call('zadd', 'ql:q:' .. q .. '-work', p, jid)
+            redis.call('hset', 'ql:j:' .. jid, 'state', 'waiting')
+        end
+    else
+        for i=3,#ARGV do
+            local j = ARGV[i]
+            redis.call('srem', 'ql:j:' .. j .. '-dependents', jid)
+            redis.call('srem', 'ql:j:' .. jid .. '-dependencies', j)
+            if redis.call('scard', 'ql:j:' .. jid .. '-dependencies') == 0 then
+                local q, p = unpack(redis.call('hmget', 'ql:j:' .. jid, 'queue', 'priority'))
+                if q then
+                    redis.call('zrem', 'ql:q:' .. q .. '-depends', jid)
+                    redis.call('zadd', 'ql:q:' .. q .. '-work', p, jid)
+                    redis.call('hset', 'ql:j:' .. jid, 'state', 'waiting')
+                end
+            end
+        end
+    end
+    return true
+else
+    error('Depends(): Second arg must be "on" or "off"')
+end
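A sketch of exercising the dependency commands documented above with redis-rb; the job ids are made up, and the call only succeeds for a job already in the 'depends' state (otherwise it returns false, surfaced as nil here):

```ruby
require 'redis'

redis  = Redis.new
script = File.read('depends.lua')  # illustrative path

# Depends(0, jid, 'on', jid, ...) -- make 'child-jid' wait on two other jobs
redis.eval(script, keys: [], argv: ['child-jid', 'on', 'parent-a', 'parent-b'])

# Depends(0, jid, 'off', jid) -- drop a single dependency; the job moves back
# onto its queue's work set once no dependencies remain
redis.eval(script, keys: [], argv: ['child-jid', 'off', 'parent-a'])

# Depends(0, jid, 'off', 'all') -- clear everything it is still waiting on
redis.eval(script, keys: [], argv: ['child-jid', 'off', 'all'])
```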
data/lib/qless/qless-core/fail.lua
@@ -0,0 +1,107 @@
+-- Fail(0, jid, worker, group, message, now, [data])
+-- -------------------------------------------------
+-- Mark the particular job as failed, with the provided group, and a more specific
+-- message. By `group`, we mean some phrase that might be one of several categorical
+-- modes of failure. The `message` is something more job-specific, like perhaps
+-- a traceback.
+--
+-- This method should __not__ be used to note that a job has been dropped or has
+-- failed in a transient way. This method __should__ be used to note that a job has
+-- something really wrong with it that must be remedied.
+--
+-- The motivation behind the `group` is so that similar errors can be grouped together.
+-- Optionally, updated data can be provided for the job. A job in any state can be
+-- marked as failed. If it has been given to a worker as a job, then its subsequent
+-- requests to heartbeat or complete that job will fail. Failed jobs are kept until
+-- they are canceled or completed. __Returns__ the id of the failed job if successful,
+-- or `False` on failure.
+--
+-- Args:
+--    1) jid
+--    2) worker
+--    3) group
+--    4) message
+--    5) the current time
+--    6) [data]
+
+if #KEYS > 0 then error('Fail(): No Keys should be provided') end
+
+local jid     = assert(ARGV[1]          , 'Fail(): Arg "jid" missing')
+local worker  = assert(ARGV[2]          , 'Fail(): Arg "worker" missing')
+local group   = assert(ARGV[3]          , 'Fail(): Arg "group" missing')
+local message = assert(ARGV[4]          , 'Fail(): Arg "message" missing')
+local now     = assert(tonumber(ARGV[5]), 'Fail(): Arg "now" missing or malformed: ' .. (ARGV[5] or 'nil'))
+local data    = ARGV[6]
+
+-- The bin is midnight of the provided day
+-- 24 * 60 * 60 = 86400
+local bin = now - (now % 86400)
+
+if data then
+    data = cjson.decode(data)
+end
+
+-- First things first, we should get the history
+local history, queue, state = unpack(redis.call('hmget', 'ql:j:' .. jid, 'history', 'queue', 'state'))
+
+-- If the job has been completed, we cannot fail it
+if state ~= 'running' then
+    return false
+end
+
+if redis.call('zscore', 'ql:tracked', jid) ~= false then
+    redis.call('publish', 'failed', jid)
+end
+
+-- Remove this job from the jobs that the worker that was running it has
+redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)
+
+-- Now, take the element of the history for which our provided worker is the worker, and update 'failed'
+history = cjson.decode(history or '[]')
+if #history > 0 then
+    for i=#history,1,-1 do
+        if history[i]['worker'] == worker then
+            history[i]['failed'] = math.floor(now)
+        end
+    end
+else
+    history = {
+        {
+            worker = worker,
+            failed = math.floor(now)
+        }
+    }
+end
+
+-- Increment the number of failures for that queue for the
+-- given day.
+redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'failures', 1)
+redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'failed'  , 1)
+
+-- Now remove the instance from the schedule, and work queues for the queue it's in
+redis.call('zrem', 'ql:q:' .. queue .. '-work', jid)
+redis.call('zrem', 'ql:q:' .. queue .. '-locks', jid)
+redis.call('zrem', 'ql:q:' .. queue .. '-scheduled', jid)
+
+-- The reason that this appears here is that the above will fail if the job doesn't exist
+if data then
+    redis.call('hset', 'ql:j:' .. jid, 'data', cjson.encode(data))
+end
+
+redis.call('hmset', 'ql:j:' .. jid, 'state', 'failed', 'worker', '',
+    'expires', '', 'history', cjson.encode(history), 'failure', cjson.encode({
+        ['group']   = group,
+        ['message'] = message,
+        ['when']    = math.floor(now),
+        ['worker']  = worker
+    }))
+
+-- Add this group of failure to the list of failures
+redis.call('sadd', 'ql:failures', group)
+-- And add this particular instance to the failed groups
+redis.call('lpush', 'ql:f:' .. group, jid)
+
+-- Here is where we'd increment stats about the particular stage
+-- and possibly the workers
+
+return jid
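Fail takes no KEYS and six positional ARGV entries as listed in its header. A hedged redis-rb sketch; the jid, worker name, and failure group are illustrative, and per the script only a job currently in the 'running' state can be failed (anything else returns false):

```ruby
require 'redis'
require 'json'

redis  = Redis.new
script = File.read('fail.lua')     # illustrative path

# Fail(0, jid, worker, group, message, now, [data])
jid = redis.eval(script, keys: [], argv: [
  'some-jid',                                   # job to fail
  'worker-1.example.com',                       # worker that currently holds it
  'ruby-exception',                             # group, used to bucket similar errors
  "RuntimeError: boom\n  from app/job.rb:12",   # job-specific message
  Time.now.to_f.to_s,                           # current time
  JSON.generate('attempt' => 3)                 # optional replacement job data
])
puts jid ? "failed #{jid}" : 'job was not running'
```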
data/lib/qless/qless-core/failed.lua
@@ -0,0 +1,83 @@
+-- Failed(0, [group, [start, [limit]]])
+-- ------------------------------------
+-- If no group is provided, this returns a JSON blob of the counts of the various
+-- groups of failures known. If a group is provided, it will report up to `limit`
+-- from `start` of the jobs affected by that issue. __Returns__ a JSON blob.
+--
+-- # If no group, then...
+-- {
+--     'group1': 1,
+--     'group2': 5,
+--     ...
+-- }
+--
+-- # If a group is provided, then...
+-- {
+--     'total': 20,
+--     'jobs': [
+--         {
+--             # All the normal keys for a job
+--             'jid': ...,
+--             'data': ...
+--             # The message for this particular instance
+--             'message': ...,
+--             'group': ...,
+--         }, ...
+--     ]
+-- }
+--
+-- Args:
+--    1) [group]
+--    2) [start]
+--    3) [limit]
+
+if #KEYS > 0 then error('Failed(): No Keys should be provided') end
+
+local group = ARGV[1]
+local start = assert(tonumber(ARGV[2] or  0), 'Failed(): Arg "start" is not a number: ' .. (ARGV[2] or 'nil'))
+local limit = assert(tonumber(ARGV[3] or 25), 'Failed(): Arg "limit" is not a number: ' .. (ARGV[3] or 'nil'))
+
+if group then
+    -- If a group was provided, then we should do paginated lookup into that
+    local response = {
+        total = redis.call('llen', 'ql:f:' .. group),
+        jobs  = {}
+    }
+    local jids = redis.call('lrange', 'ql:f:' .. group, start, limit)
+    for index, jid in ipairs(jids) do
+        local job = redis.call(
+            'hmget', 'ql:j:' .. jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority',
+            'expires', 'retries', 'remaining', 'data', 'tags', 'history', 'failure')
+
+        table.insert(response.jobs, {
+            jid          = job[1],
+            klass        = job[2],
+            state        = job[3],
+            queue        = job[4],
+            worker       = job[5] or '',
+            tracked      = redis.call('zscore', 'ql:tracked', jid) ~= false,
+            priority     = tonumber(job[6]),
+            expires      = tonumber(job[7]) or 0,
+            retries      = tonumber(job[8]),
+            remaining    = tonumber(job[9]),
+            data         = cjson.decode(job[10]),
+            tags         = cjson.decode(job[11]),
+            history      = cjson.decode(job[12]),
+            failure      = cjson.decode(job[13] or '{}'),
+            dependents   = redis.call('smembers', 'ql:j:' .. jid .. '-dependents'),
+            -- A job in the failed state can not have dependencies
+            -- because it has been popped off of a queue, which
+            -- means all of its dependencies have been satisfied
+            dependencies = {}
+        })
+    end
+    return cjson.encode(response)
+else
+    -- Otherwise, we should just list all the known failure groups we have
+    local response = {}
+    local groups = redis.call('smembers', 'ql:failures')
+    for index, group in ipairs(groups) do
+        response[group] = redis.call('llen', 'ql:f:' .. group)
+    end
+    return cjson.encode(response)
+end
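Both forms return JSON, so a caller decodes the reply. A minimal redis-rb sketch, with the group name illustrative:

```ruby
require 'redis'
require 'json'

redis  = Redis.new
script = File.read('failed.lua')   # illustrative path

# Failed(0) -- counts per failure group
counts = JSON.parse(redis.eval(script, keys: [], argv: []))
counts.each { |group, n| puts "#{group}: #{n}" }

# Failed(0, group, start, limit) -- page through one group's failed jobs
page = JSON.parse(redis.eval(script, keys: [], argv: ['ruby-exception', '0', '25']))
page['jobs'].each { |job| puts "#{job['jid']}: #{job['failure']['message']}" }
```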
data/lib/qless/qless-core/get.lua
@@ -0,0 +1,37 @@
+-- This gets all the data associated with the job with the
+-- provided id.
+--
+-- Args:
+--    1) jid
+
+if #KEYS > 0 then error('Get(): No Keys should be provided') end
+
+local jid = assert(ARGV[1], 'Get(): Arg "jid" missing')
+
+-- Let's get all the data we can
+local job = redis.call(
+    'hmget', 'ql:j:' .. jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority',
+    'expires', 'retries', 'remaining', 'data', 'tags', 'history', 'failure')
+
+if not job[1] then
+    return false
+end
+
+return cjson.encode({
+    jid          = job[1],
+    klass        = job[2],
+    state        = job[3],
+    queue        = job[4],
+    worker       = job[5] or '',
+    tracked      = redis.call('zscore', 'ql:tracked', jid) ~= false,
+    priority     = tonumber(job[6]),
+    expires      = tonumber(job[7]) or 0,
+    retries      = tonumber(job[8]),
+    remaining    = tonumber(job[9]),
+    data         = cjson.decode(job[10]),
+    tags         = cjson.decode(job[11]),
+    history      = cjson.decode(job[12]),
+    failure      = cjson.decode(job[13] or '{}'),
+    dependents   = redis.call('smembers', 'ql:j:' .. jid .. '-dependents'),
+    dependencies = redis.call('smembers', 'ql:j:' .. jid .. '-dependencies')
+})
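Get returns either a JSON blob describing the job or false (nil through redis-rb) when the jid is unknown; a small sketch with an illustrative jid:

```ruby
require 'redis'
require 'json'

redis  = Redis.new
script = File.read('get.lua')      # illustrative path

raw = redis.eval(script, keys: [], argv: ['some-jid'])
if raw
  job = JSON.parse(raw)
  puts "#{job['jid']} is #{job['state']} in #{job['queue']} " \
       "(depends on #{job['dependencies'].length} jobs)"
else
  puts 'no such job'
end
```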
data/lib/qless/qless-core/heartbeat.lua
@@ -0,0 +1,50 @@
+-- This script conducts a heartbeat for a job, and returns
+-- either the new expiration or False if the lock has been
+-- given to another node
+--
+-- Args:
+--    1) jid
+--    2) worker
+--    3) now
+--    4) [data]
+
+if #KEYS > 0 then error('Heartbeat(): No Keys should be provided') end
+
+local jid    = assert(ARGV[1]          , 'Heartbeat(): Arg "jid" missing')
+local worker = assert(ARGV[2]          , 'Heartbeat(): Arg "worker" missing')
+local now    = assert(tonumber(ARGV[3]), 'Heartbeat(): Arg "now" missing')
+local data   = ARGV[4]
+
+-- We should find the heartbeat interval for this queue.
+-- First, though, we need to find the queue
+-- this particular job is in
+local queue = redis.call('hget', 'ql:j:' .. jid, 'queue') or ''
+local _hb, _qhb = unpack(redis.call('hmget', 'ql:config', 'heartbeat', queue .. '-heartbeat'))
+local expires = now + tonumber(_qhb or _hb or 60)
+
+if data then
+    data = cjson.decode(data)
+end
+
+-- First, let's see if the worker still owns this job, and there is a worker
+if redis.call('hget', 'ql:j:' .. jid, 'worker') ~= worker or #worker == 0 then
+    return false
+else
+    -- Otherwise, optionally update the user data, and the heartbeat
+    if data then
+        -- I don't know if this is wise, but I'm decoding and encoding
+        -- the user data to hopefully ensure its sanity
+        redis.call('hmset', 'ql:j:' .. jid, 'expires', expires, 'worker', worker, 'data', cjson.encode(data))
+    else
+        redis.call('hmset', 'ql:j:' .. jid, 'expires', expires, 'worker', worker)
+    end
+
+    -- Update when this job was last updated on that worker
+    -- Add this job to the list of jobs handled by this worker
+    redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid)
+
+    -- And now we should just update the locks
+    local queue = redis.call('hget', 'ql:j:' .. jid, 'queue')
+    redis.call('zadd', 'ql:q:' .. queue .. '-locks', expires, jid)
+    return expires
+end
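A sketch of renewing a lock with redis-rb; the jid and worker name are illustrative. The reply is the new expiration time on success, and false (nil here) when the worker no longer owns the job:

```ruby
require 'redis'

redis  = Redis.new
script = File.read('heartbeat.lua')  # illustrative path

# Heartbeat(0, jid, worker, now, [data])
expires = redis.eval(script, keys: [],
                     argv: ['some-jid', 'worker-1.example.com', Time.now.to_f.to_s])
puts expires ? "lock renewed until #{expires}" : 'lock lost'
```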
data/lib/qless/qless-core/jobs.lua
@@ -0,0 +1,41 @@
+-- Jobs(0, 'complete' | (('stalled' | 'running' | 'scheduled' | 'depends' | 'recurring'), now, queue) [offset, [count]])
+-- -------------------------------------------------------------------------------------------------------
+--
+-- Return all the job ids currently considered to be in the provided state
+-- in a particular queue. The response is a list of job ids:
+--
+-- [
+--     jid1,
+--     jid2,
+--     ...
+-- ]
+
+if #KEYS > 0 then
+    error('Jobs(): Got ' .. #KEYS .. ' expected 0 KEYS arguments')
+end
+
+local t = assert(ARGV[1], 'Jobs(): Arg "type" missing')
+if t == 'complete' then
+    local offset = assert(tonumber(ARGV[2] or 0) , 'Jobs(): Arg "offset" not a number: ' .. tostring(ARGV[2]))
+    local count  = assert(tonumber(ARGV[3] or 25), 'Jobs(): Arg "count" not a number: ' .. tostring(ARGV[3]))
+    return redis.call('zrevrange', 'ql:completed', offset, offset + count - 1)
+else
+    local now    = assert(tonumber(ARGV[2])      , 'Jobs(): Arg "now" missing or not a number: ' .. tostring(ARGV[2]))
+    local queue  = assert(ARGV[3]                , 'Jobs(): Arg "queue" missing')
+    local offset = assert(tonumber(ARGV[4] or 0) , 'Jobs(): Arg "offset" not a number: ' .. tostring(ARGV[4]))
+    local count  = assert(tonumber(ARGV[5] or 25), 'Jobs(): Arg "count" not a number: ' .. tostring(ARGV[5]))
+
+    if t == 'running' then
+        return redis.call('zrangebyscore', 'ql:q:' .. queue .. '-locks', now, 133389432700, 'limit', offset, count)
+    elseif t == 'stalled' then
+        return redis.call('zrangebyscore', 'ql:q:' .. queue .. '-locks', 0, now, 'limit', offset, count)
+    elseif t == 'scheduled' then
+        return redis.call('zrange', 'ql:q:' .. queue .. '-scheduled', offset, offset + count - 1)
+    elseif t == 'depends' then
+        return redis.call('zrange', 'ql:q:' .. queue .. '-depends', offset, offset + count - 1)
+    elseif t == 'recurring' then
+        return redis.call('zrange', 'ql:q:' .. queue .. '-recur', offset, offset + count - 1)
+    else
+        error('Jobs(): Unknown type "' .. t .. '"')
+    end
+end
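The 'complete' type is global, while the queue-scoped types also require the current time and a queue name; a redis-rb sketch with an illustrative queue:

```ruby
require 'redis'

redis  = Redis.new
script = File.read('jobs.lua')     # illustrative path
now    = Time.now.to_f.to_s

# Jobs(0, 'complete', [offset, [count]]) -- most recently completed jids
completed = redis.eval(script, keys: [], argv: ['complete', '0', '10'])

# Jobs(0, 'stalled', now, queue, [offset, [count]]) -- jids in 'testing'
# whose locks have already expired
stalled = redis.eval(script, keys: [], argv: ['stalled', now, 'testing', '0', '25'])
puts "#{completed.length} complete, #{stalled.length} stalled"
```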
data/lib/qless/qless-core/peek.lua
@@ -0,0 +1,155 @@
+-- This script takes the name of the queue and then checks
+-- for any expired locks, then inserts any scheduled items
+-- that are now valid, and lastly returns any work items
+-- that can be handed over.
+--
+-- Keys:
+--    1) queue name
+-- Args:
+--    1) the number of items to return
+--    2) the current time
+
+if #KEYS ~= 1 then
+    if #KEYS < 1 then
+        error('Peek(): Expected 1 KEYS argument')
+    else
+        error('Peek(): Got ' .. #KEYS .. ', expected 1 KEYS argument')
+    end
+end
+
+local queue = assert(KEYS[1]           , 'Peek(): Key "queue" missing')
+local key   = 'ql:q:' .. queue
+local count = assert(tonumber(ARGV[1]) , 'Peek(): Arg "count" missing or not a number: ' .. (ARGV[2] or 'nil'))
+local now   = assert(tonumber(ARGV[2]) , 'Peek(): Arg "now" missing or not a number: ' .. (ARGV[3] or 'nil'))
+
+-- These are the ids that we're going to return
+local keys = {}
+
+-- Iterate through all the expired locks and add them to the list
+-- of keys that we'll return
+for index, jid in ipairs(redis.call('zrangebyscore', key .. '-locks', 0, now, 'LIMIT', 0, count)) do
+    table.insert(keys, jid)
+end
+
+-- If we still need jobs in order to meet demand, then we should
+-- look for all the recurring jobs that need jobs run
+if #keys < count then
+    local r = redis.call('zrangebyscore', key .. '-recur', 0, now)
+    for index, jid in ipairs(r) do
+        -- For each of the jids that need jobs scheduled, first
+        -- get the last time each of them was run, and then increment
+        -- it by its interval. While this time is less than now,
+        -- we need to keep putting jobs on the queue
+        local klass, data, priority, tags, retries, interval = unpack(redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', 'tags', 'retries', 'interval'))
+        local _tags = cjson.decode(tags)
+
+        while math.floor(tonumber(redis.call('zscore', key .. '-recur', jid))) < now do
+            local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1)
+
+            -- Add this job to the list of jobs tagged with whatever tags were supplied
+            for i, tag in ipairs(_tags) do
+                redis.call('zadd', 'ql:t:' .. tag, now, jid .. '-' .. count)
+                redis.call('zincrby', 'ql:tags', 1, tag)
+            end
+
+            -- First, let's save its data
+            redis.call('hmset', 'ql:j:' .. jid .. '-' .. count,
+                'jid'      , jid .. '-' .. count,
+                'klass'    , klass,
+                'data'     , data,
+                'priority' , priority,
+                'tags'     , tags,
+                'state'    , 'waiting',
+                'worker'   , '',
+                'expires'  , 0,
+                'queue'    , queue,
+                'retries'  , retries,
+                'remaining', retries,
+                'history'  , cjson.encode({{
+                    q   = queue,
+                    put = math.floor(now)
+                }}))
+
+            -- Now, if a delay was provided, and if it's in the future,
+            -- then we'll have to schedule it. Otherwise, we're just
+            -- going to add it to the work queue.
+            redis.call('zadd', key .. '-work', priority - (now / 10000000000), jid .. '-' .. count)
+
+            redis.call('zincrby', key .. '-recur', interval, jid)
+        end
+    end
+end
+
+-- Now we've checked __all__ the locks for this queue that could
+-- have expired, and are no more than the number requested. If
+-- we still need values in order to meet the demand, then we
+-- should check for any scheduled items, and if so, we should
+-- insert them to ensure correctness when pulling off the next
+-- unit of work.
+if #keys < count then
+    -- zadd is a list of arguments that we'll be able to use to
+    -- insert into the work queue
+    local zadd = {}
+    local r = redis.call('zrangebyscore', key .. '-scheduled', 0, now, 'LIMIT', 0, (count - #keys))
+    for index, jid in ipairs(r) do
+        -- With these in hand, we'll have to go out and find the
+        -- priorities of these jobs, and then we'll insert them
+        -- into the work queue and then when that's complete, we'll
+        -- remove them from the scheduled queue
+        table.insert(zadd, tonumber(redis.call('hget', 'ql:j:' .. jid, 'priority') or 0))
+        table.insert(zadd, jid)
+        -- We should also update them to have the state 'waiting'
+        -- instead of 'scheduled'
+        redis.call('hset', 'ql:j:' .. jid, 'state', 'waiting')
+    end
+
+    if #zadd > 0 then
+        -- Now add these to the work list, and then remove them
+        -- from the scheduled list
+        redis.call('zadd', key .. '-work', unpack(zadd))
+        redis.call('zrem', key .. '-scheduled', unpack(r))
+    end
+
+    -- And now we should get up to the maximum number of requested
+    -- work items from the work queue.
+    for index, jid in ipairs(redis.call('zrevrange', key .. '-work', 0, (count - #keys) - 1)) do
+        table.insert(keys, jid)
+    end
+end
+
+-- Alright, now the `keys` table is filled with all the job
+-- ids which we'll be returning. Now we need to get the
+-- metadata about each of these, update their metadata to
+-- reflect which worker they're on, when the lock expires,
+-- etc., add them to the locks queue and then we have to
+-- finally return a list of json blobs
+
+local response = {}
+for index, jid in ipairs(keys) do
+    local job = redis.call(
+        'hmget', 'ql:j:' .. jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority',
+        'expires', 'retries', 'remaining', 'data', 'tags', 'history', 'failure')
+
+    table.insert(response, cjson.encode({
+        jid          = job[1],
+        klass        = job[2],
+        state        = job[3],
+        queue        = job[4],
+        worker       = job[5] or '',
+        tracked      = redis.call('zscore', 'ql:tracked', jid) ~= false,
+        priority     = tonumber(job[6]),
+        expires      = tonumber(job[7]) or 0,
+        retries      = tonumber(job[8]),
+        remaining    = tonumber(job[9]),
+        data         = cjson.decode(job[10]),
+        tags         = cjson.decode(job[11]),
+        history      = cjson.decode(job[12]),
+        failure      = cjson.decode(job[13] or '{}'),
+        dependents   = redis.call('smembers', 'ql:j:' .. jid .. '-dependents'),
+        -- A job in the waiting state can not have dependencies
+        dependencies = {}
+
+    }))
+end
+
+return response
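Unlike the other scripts above, peek takes the queue name as its single KEYS entry, with the count and current time in ARGV, and returns one JSON blob per job without assigning any of them to a worker. A redis-rb sketch, queue name illustrative:

```ruby
require 'redis'
require 'json'

redis  = Redis.new
script = File.read('peek.lua')     # illustrative path

jobs = redis.eval(script, keys: ['testing'], argv: ['5', Time.now.to_f.to_s])
jobs.map { |blob| JSON.parse(blob) }.each do |job|
  puts "#{job['jid']} (#{job['klass']}, priority #{job['priority']})"
end
```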