qless 0.9.3 → 0.10.0
Sign up to get free protection for your applications and to get access to all the features.
- data/Gemfile +9 -3
- data/README.md +70 -25
- data/Rakefile +125 -9
- data/exe/install_phantomjs +21 -0
- data/lib/qless.rb +115 -76
- data/lib/qless/config.rb +11 -9
- data/lib/qless/failure_formatter.rb +43 -0
- data/lib/qless/job.rb +201 -102
- data/lib/qless/job_reservers/ordered.rb +7 -1
- data/lib/qless/job_reservers/round_robin.rb +16 -6
- data/lib/qless/job_reservers/shuffled_round_robin.rb +9 -2
- data/lib/qless/lua/qless-lib.lua +2463 -0
- data/lib/qless/lua/qless.lua +2012 -0
- data/lib/qless/lua_script.rb +63 -12
- data/lib/qless/middleware/memory_usage_monitor.rb +62 -0
- data/lib/qless/middleware/metriks.rb +45 -0
- data/lib/qless/middleware/redis_reconnect.rb +6 -3
- data/lib/qless/middleware/requeue_exceptions.rb +94 -0
- data/lib/qless/middleware/retry_exceptions.rb +38 -9
- data/lib/qless/middleware/sentry.rb +3 -7
- data/lib/qless/middleware/timeout.rb +64 -0
- data/lib/qless/queue.rb +90 -55
- data/lib/qless/server.rb +177 -130
- data/lib/qless/server/views/_job.erb +33 -15
- data/lib/qless/server/views/completed.erb +11 -0
- data/lib/qless/server/views/layout.erb +70 -11
- data/lib/qless/server/views/overview.erb +93 -53
- data/lib/qless/server/views/queue.erb +9 -8
- data/lib/qless/server/views/queues.erb +18 -1
- data/lib/qless/subscriber.rb +37 -22
- data/lib/qless/tasks.rb +5 -10
- data/lib/qless/test_helpers/worker_helpers.rb +55 -0
- data/lib/qless/version.rb +3 -1
- data/lib/qless/worker.rb +4 -413
- data/lib/qless/worker/base.rb +247 -0
- data/lib/qless/worker/forking.rb +245 -0
- data/lib/qless/worker/serial.rb +41 -0
- metadata +135 -52
- data/lib/qless/qless-core/cancel.lua +0 -101
- data/lib/qless/qless-core/complete.lua +0 -233
- data/lib/qless/qless-core/config.lua +0 -56
- data/lib/qless/qless-core/depends.lua +0 -65
- data/lib/qless/qless-core/deregister_workers.lua +0 -12
- data/lib/qless/qless-core/fail.lua +0 -117
- data/lib/qless/qless-core/failed.lua +0 -83
- data/lib/qless/qless-core/get.lua +0 -37
- data/lib/qless/qless-core/heartbeat.lua +0 -51
- data/lib/qless/qless-core/jobs.lua +0 -41
- data/lib/qless/qless-core/pause.lua +0 -18
- data/lib/qless/qless-core/peek.lua +0 -165
- data/lib/qless/qless-core/pop.lua +0 -314
- data/lib/qless/qless-core/priority.lua +0 -32
- data/lib/qless/qless-core/put.lua +0 -169
- data/lib/qless/qless-core/qless-lib.lua +0 -2354
- data/lib/qless/qless-core/qless.lua +0 -1862
- data/lib/qless/qless-core/queues.lua +0 -58
- data/lib/qless/qless-core/recur.lua +0 -190
- data/lib/qless/qless-core/retry.lua +0 -73
- data/lib/qless/qless-core/stats.lua +0 -92
- data/lib/qless/qless-core/tag.lua +0 -100
- data/lib/qless/qless-core/track.lua +0 -79
- data/lib/qless/qless-core/unfail.lua +0 -54
- data/lib/qless/qless-core/unpause.lua +0 -12
- data/lib/qless/qless-core/workers.lua +0 -69
- data/lib/qless/wait_until.rb +0 -19
@@ -1,56 +0,0 @@
|
|
1
|
-
-- config(0, 'get', [option])
|
2
|
-
-- config(0, 'set', option, value)
|
3
|
-
-- config(0, 'unset', option)
|
4
|
-
-- -------------------------------------------------------------------------------------------------------
|
5
|
-
-- This script provides an interface to get, set, and unset configuration
|
6
|
-
-- options.
|
7
|
-
--
|
8
|
-
-- Args:
|
9
|
-
-- 1) [option]
|
10
|
-
|
11
|
-
if #KEYS > 0 then error('Config(): No keys should be provided') end
|
12
|
-
|
13
|
-
local command = ARGV[1]
|
14
|
-
|
15
|
-
local defaults = {
|
16
|
-
['application'] = 'qless',
|
17
|
-
['heartbeat'] = 60,
|
18
|
-
['stats-history'] = 30,
|
19
|
-
['histogram-history'] = 7,
|
20
|
-
['jobs-history-count'] = 50000,
|
21
|
-
['jobs-history'] = 604800
|
22
|
-
}
|
23
|
-
|
24
|
-
if command == 'get' then
|
25
|
-
if ARGV[2] then
|
26
|
-
return redis.call('hget', 'ql:config', ARGV[2]) or defaults[ARGV[2]]
|
27
|
-
else
|
28
|
-
-- Inspired by redis-lua https://github.com/nrk/redis-lua/blob/version-2.0/src/redis.lua
|
29
|
-
local reply = redis.call('hgetall', 'ql:config')
|
30
|
-
for i = 1, #reply, 2 do defaults[reply[i]] = reply[i + 1] end
|
31
|
-
return cjson.encode(defaults)
|
32
|
-
end
|
33
|
-
elseif command == 'set' then
|
34
|
-
local option = assert(ARGV[2], 'Config(): Arg "option" missing')
|
35
|
-
local value = assert(ARGV[3], 'Config(): Arg "value" missing')
|
36
|
-
-- Send out a log message
|
37
|
-
redis.call('publish', 'ql:log', cjson.encode({
|
38
|
-
event = 'config_set',
|
39
|
-
option = option
|
40
|
-
}))
|
41
|
-
|
42
|
-
redis.call('hset', 'ql:config', option, value)
|
43
|
-
elseif command == 'unset' then
|
44
|
-
local option = assert(ARGV[2], 'Config(): Arg "option" missing')
|
45
|
-
-- Send out a log message
|
46
|
-
redis.call('publish', 'ql:log', cjson.encode({
|
47
|
-
event = 'config_unset',
|
48
|
-
option = option
|
49
|
-
}))
|
50
|
-
|
51
|
-
redis.call('hdel', 'ql:config', option)
|
52
|
-
else
|
53
|
-
error('Config(): Unrecognized command ' .. command)
|
54
|
-
end
|
55
|
-
|
56
|
-
|
@@ -1,65 +0,0 @@
|
|
1
|
-
-- Depends(0, jid, ('on', [jid, [jid, [...]]]) | ('off', ('all' | [jid, [jid, [...]]]))
|
2
|
-
-- ------------------------------------------------------------------------------------
|
3
|
-
-- Add or remove dependencies a job has. If 'on' is provided, the provided jids are
|
4
|
-
-- added as dependencies. If 'off' and 'all' are provided, then all the current dependencies
|
5
|
-
-- are removed. If 'off' is provided and the next argument is not 'all', then those
|
6
|
-
-- jids are removed as dependencies.
|
7
|
-
--
|
8
|
-
-- If a job is not already in the 'depends' state, then this call will return false.
|
9
|
-
-- Otherwise, it will return true
|
10
|
-
--
|
11
|
-
-- Args:
|
12
|
-
-- 1) jid
|
13
|
-
|
14
|
-
if #KEYS > 0 then error('Depends(): No Keys should be provided') end
|
15
|
-
|
16
|
-
local jid = assert(ARGV[1], 'Depends(): Arg "jid" missing.')
|
17
|
-
local command = assert(ARGV[2], 'Depends(): Arg 2 missing')
|
18
|
-
|
19
|
-
if redis.call('hget', 'ql:j:' .. jid, 'state') ~= 'depends' then
|
20
|
-
return false
|
21
|
-
end
|
22
|
-
|
23
|
-
if ARGV[2] == 'on' then
|
24
|
-
-- These are the jids we legitimately have to wait on
|
25
|
-
for i=3,#ARGV do
|
26
|
-
local j = ARGV[i]
|
27
|
-
-- Make sure it's something other than 'nil' or complete.
|
28
|
-
local state = redis.call('hget', 'ql:j:' .. j, 'state')
|
29
|
-
if (state and state ~= 'complete') then
|
30
|
-
redis.call('sadd', 'ql:j:' .. j .. '-dependents' , jid)
|
31
|
-
redis.call('sadd', 'ql:j:' .. jid .. '-dependencies', j)
|
32
|
-
end
|
33
|
-
end
|
34
|
-
return true
|
35
|
-
elseif ARGV[2] == 'off' then
|
36
|
-
if ARGV[3] == 'all' then
|
37
|
-
for i, j in ipairs(redis.call('smembers', 'ql:j:' .. jid .. '-dependencies')) do
|
38
|
-
redis.call('srem', 'ql:j:' .. j .. '-dependents', jid)
|
39
|
-
end
|
40
|
-
redis.call('del', 'ql:j:' .. jid .. '-dependencies')
|
41
|
-
local q, p = unpack(redis.call('hmget', 'ql:j:' .. jid, 'queue', 'priority'))
|
42
|
-
if q then
|
43
|
-
redis.call('zrem', 'ql:q:' .. q .. '-depends', jid)
|
44
|
-
redis.call('zadd', 'ql:q:' .. q .. '-work', p, jid)
|
45
|
-
redis.call('hset', 'ql:j:' .. jid, 'state', 'waiting')
|
46
|
-
end
|
47
|
-
else
|
48
|
-
for i=3,#ARGV do
|
49
|
-
local j = ARGV[i]
|
50
|
-
redis.call('srem', 'ql:j:' .. j .. '-dependents', jid)
|
51
|
-
redis.call('srem', 'ql:j:' .. jid .. '-dependencies', j)
|
52
|
-
if redis.call('scard', 'ql:j:' .. jid .. '-dependencies') == 0 then
|
53
|
-
local q, p = unpack(redis.call('hmget', 'ql:j:' .. jid, 'queue', 'priority'))
|
54
|
-
if q then
|
55
|
-
redis.call('zrem', 'ql:q:' .. q .. '-depends', jid)
|
56
|
-
redis.call('zadd', 'ql:q:' .. q .. '-work', p, jid)
|
57
|
-
redis.call('hset', 'ql:j:' .. jid, 'state', 'waiting')
|
58
|
-
end
|
59
|
-
end
|
60
|
-
end
|
61
|
-
end
|
62
|
-
return true
|
63
|
-
else
|
64
|
-
error('Depends(): Second arg must be "on" or "off"')
|
65
|
-
end
|
@@ -1,12 +0,0 @@
|
|
1
|
-
-- DeregisterWorkers(0, worker)
|
2
|
-
-- This script takes the name of a worker(s) and removes it/them
|
3
|
-
-- from the ql:workers set.
|
4
|
-
--
|
5
|
-
-- Args: The list of workers to deregister.
|
6
|
-
|
7
|
-
if #KEYS > 0 then error('DeregisterWorkers(): No Keys should be provided') end
|
8
|
-
if #ARGV < 1 then error('DeregisterWorkers(): Must provide at least one worker to deregister') end
|
9
|
-
|
10
|
-
local key = 'ql:workers'
|
11
|
-
|
12
|
-
redis.call('zrem', key, unpack(ARGV))
|
@@ -1,117 +0,0 @@
|
|
1
|
-
-- Fail(0, jid, worker, group, message, now, [data])
|
2
|
-
-- -------------------------------------------------
|
3
|
-
-- Mark the particular job as failed, with the provided group, and a more
|
4
|
-
-- specific message. By `group`, we mean some phrase that might be one of
|
5
|
-
-- several categorical modes of failure. The `message` is something more
|
6
|
-
-- job-specific, like perhaps a traceback.
|
7
|
-
--
|
8
|
-
-- This method should __not__ be used to note that a job has been dropped or
|
9
|
-
-- has failed in a transient way. This method __should__ be used to note that
|
10
|
-
-- a job has something really wrong with it that must be remedied.
|
11
|
-
--
|
12
|
-
-- The motivation behind the `group` is so that similar errors can be grouped
|
13
|
-
-- together. Optionally, updated data can be provided for the job. A job in
|
14
|
-
-- any state can be marked as failed. If it has been given to a worker as a
|
15
|
-
-- job, then its subsequent requests to heartbeat or complete that job will
|
16
|
-
-- fail. Failed jobs are kept until they are canceled or completed.
|
17
|
-
--
|
18
|
-
-- __Returns__ the id of the failed job if successful, or `False` on failure.
|
19
|
-
--
|
20
|
-
-- Args:
|
21
|
-
-- 1) jid
|
22
|
-
-- 2) worker
|
23
|
-
-- 3) group
|
24
|
-
-- 4) message
|
25
|
-
-- 5) the current time
|
26
|
-
-- 6) [data]
|
27
|
-
|
28
|
-
if #KEYS > 0 then error('Fail(): No Keys should be provided') end
|
29
|
-
|
30
|
-
local jid = assert(ARGV[1] , 'Fail(): Arg "jid" missing')
|
31
|
-
local worker = assert(ARGV[2] , 'Fail(): Arg "worker" missing')
|
32
|
-
local group = assert(ARGV[3] , 'Fail(): Arg "group" missing')
|
33
|
-
local message = assert(ARGV[4] , 'Fail(): Arg "message" missing')
|
34
|
-
local now = assert(tonumber(ARGV[5]), 'Fail(): Arg "now" missing or malformed: ' .. (ARGV[5] or 'nil'))
|
35
|
-
local data = ARGV[6]
|
36
|
-
|
37
|
-
-- The bin is midnight of the provided day
|
38
|
-
-- 24 * 60 * 60 = 86400
|
39
|
-
local bin = now - (now % 86400)
|
40
|
-
|
41
|
-
if data then
|
42
|
-
data = cjson.decode(data)
|
43
|
-
end
|
44
|
-
|
45
|
-
-- First things first, we should get the history
|
46
|
-
local history, queue, state = unpack(redis.call('hmget', 'ql:j:' .. jid, 'history', 'queue', 'state'))
|
47
|
-
|
48
|
-
-- If the job has been completed, we cannot fail it
|
49
|
-
if state ~= 'running' then
|
50
|
-
return false
|
51
|
-
end
|
52
|
-
|
53
|
-
-- Send out a log message
|
54
|
-
redis.call('publish', 'ql:log', cjson.encode({
|
55
|
-
jid = jid,
|
56
|
-
event = 'failed',
|
57
|
-
worker = worker,
|
58
|
-
group = group,
|
59
|
-
message = message
|
60
|
-
}))
|
61
|
-
|
62
|
-
if redis.call('zscore', 'ql:tracked', jid) ~= false then
|
63
|
-
redis.call('publish', 'failed', jid)
|
64
|
-
end
|
65
|
-
|
66
|
-
-- Remove this job from the jobs that the worker that was running it has
|
67
|
-
redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)
|
68
|
-
|
69
|
-
-- Now, take the element of the history for which our provided worker is the worker, and update 'failed'
|
70
|
-
history = cjson.decode(history or '[]')
|
71
|
-
if #history > 0 then
|
72
|
-
for i=#history,1,-1 do
|
73
|
-
if history[i]['worker'] == worker then
|
74
|
-
history[i]['failed'] = math.floor(now)
|
75
|
-
end
|
76
|
-
end
|
77
|
-
else
|
78
|
-
history = {
|
79
|
-
{
|
80
|
-
worker = worker,
|
81
|
-
failed = math.floor(now)
|
82
|
-
}
|
83
|
-
}
|
84
|
-
end
|
85
|
-
|
86
|
-
-- Increment the number of failures for that queue for the
|
87
|
-
-- given day.
|
88
|
-
redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'failures', 1)
|
89
|
-
redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'failed' , 1)
|
90
|
-
|
91
|
-
-- Now remove the instance from the schedule, and work queues for the queue it's in
|
92
|
-
redis.call('zrem', 'ql:q:' .. queue .. '-work', jid)
|
93
|
-
redis.call('zrem', 'ql:q:' .. queue .. '-locks', jid)
|
94
|
-
redis.call('zrem', 'ql:q:' .. queue .. '-scheduled', jid)
|
95
|
-
|
96
|
-
-- The reason that this appears here is that the above will fail if the job doesn't exist
|
97
|
-
if data then
|
98
|
-
redis.call('hset', 'ql:j:' .. jid, 'data', cjson.encode(data))
|
99
|
-
end
|
100
|
-
|
101
|
-
redis.call('hmset', 'ql:j:' .. jid, 'state', 'failed', 'worker', '',
|
102
|
-
'expires', '', 'history', cjson.encode(history), 'failure', cjson.encode({
|
103
|
-
['group'] = group,
|
104
|
-
['message'] = message,
|
105
|
-
['when'] = math.floor(now),
|
106
|
-
['worker'] = worker
|
107
|
-
}))
|
108
|
-
|
109
|
-
-- Add this group of failure to the list of failures
|
110
|
-
redis.call('sadd', 'ql:failures', group)
|
111
|
-
-- And add this particular instance to the failed groups
|
112
|
-
redis.call('lpush', 'ql:f:' .. group, jid)
|
113
|
-
|
114
|
-
-- Here is where we'd increment stats about the particular stage
|
115
|
-
-- and possibly the workers
|
116
|
-
|
117
|
-
return jid
|
@@ -1,83 +0,0 @@
|
|
1
|
-
-- Failed(0, [group, [start, [limit]]])
|
2
|
-
-- ------------------------------------
|
3
|
-
-- If no group is provided, this returns a JSON blob of the counts of the various
|
4
|
-
-- groups of failures known. If a group is provided, it will report up to `limit`
|
5
|
-
-- from `start` of the jobs affected by that issue. __Returns__ a JSON blob.
|
6
|
-
--
|
7
|
-
-- # If no group, then...
|
8
|
-
-- {
|
9
|
-
-- 'group1': 1,
|
10
|
-
-- 'group2': 5,
|
11
|
-
-- ...
|
12
|
-
-- }
|
13
|
-
--
|
14
|
-
-- # If a group is provided, then...
|
15
|
-
-- {
|
16
|
-
-- 'total': 20,
|
17
|
-
-- 'jobs': [
|
18
|
-
-- {
|
19
|
-
-- # All the normal keys for a job
|
20
|
-
-- 'jid': ...,
|
21
|
-
-- 'data': ...
|
22
|
-
-- # The message for this particular instance
|
23
|
-
-- 'message': ...,
|
24
|
-
-- 'group': ...,
|
25
|
-
-- }, ...
|
26
|
-
-- ]
|
27
|
-
-- }
|
28
|
-
--
|
29
|
-
-- Args:
|
30
|
-
-- 1) [group]
|
31
|
-
-- 2) [start]
|
32
|
-
-- 3) [limit]
|
33
|
-
|
34
|
-
if #KEYS > 0 then error('Failed(): No Keys should be provided') end
|
35
|
-
|
36
|
-
local group = ARGV[1]
|
37
|
-
local start = assert(tonumber(ARGV[2] or 0), 'Failed(): Arg "start" is not a number: ' .. (ARGV[2] or 'nil'))
|
38
|
-
local limit = assert(tonumber(ARGV[3] or 25), 'Failed(): Arg "limit" is not a number: ' .. (ARGV[3] or 'nil'))
|
39
|
-
|
40
|
-
if group then
|
41
|
-
-- If a group was provided, then we should do paginated lookup into that
|
42
|
-
local response = {
|
43
|
-
total = redis.call('llen', 'ql:f:' .. group),
|
44
|
-
jobs = {}
|
45
|
-
}
|
46
|
-
local jids = redis.call('lrange', 'ql:f:' .. group, start, limit - 1)
|
47
|
-
for index, jid in ipairs(jids) do
|
48
|
-
local job = redis.call(
|
49
|
-
'hmget', 'ql:j:' .. jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority',
|
50
|
-
'expires', 'retries', 'remaining', 'data', 'tags', 'history', 'failure')
|
51
|
-
|
52
|
-
table.insert(response.jobs, {
|
53
|
-
jid = job[1],
|
54
|
-
klass = job[2],
|
55
|
-
state = job[3],
|
56
|
-
queue = job[4],
|
57
|
-
worker = job[5] or '',
|
58
|
-
tracked = redis.call('zscore', 'ql:tracked', jid) ~= false,
|
59
|
-
priority = tonumber(job[6]),
|
60
|
-
expires = tonumber(job[7]) or 0,
|
61
|
-
retries = tonumber(job[8]),
|
62
|
-
remaining = tonumber(job[9]),
|
63
|
-
data = cjson.decode(job[10]),
|
64
|
-
tags = cjson.decode(job[11]),
|
65
|
-
history = cjson.decode(job[12]),
|
66
|
-
failure = cjson.decode(job[13] or '{}'),
|
67
|
-
dependents = redis.call('smembers', 'ql:j:' .. jid .. '-dependents'),
|
68
|
-
-- A job in the failed state can not have dependencies
|
69
|
-
-- because it has been popped off of a queue, which
|
70
|
-
-- means all of its dependencies have been satisfied
|
71
|
-
dependencies = {}
|
72
|
-
})
|
73
|
-
end
|
74
|
-
return cjson.encode(response)
|
75
|
-
else
|
76
|
-
-- Otherwise, we should just list all the known failure groups we have
|
77
|
-
local response = {}
|
78
|
-
local groups = redis.call('smembers', 'ql:failures')
|
79
|
-
for index, group in ipairs(groups) do
|
80
|
-
response[group] = redis.call('llen', 'ql:f:' .. group)
|
81
|
-
end
|
82
|
-
return cjson.encode(response)
|
83
|
-
end
|
@@ -1,37 +0,0 @@
|
|
1
|
-
-- This gets all the data associated with the job with the
|
2
|
-
-- provided id.
|
3
|
-
--
|
4
|
-
-- Args:
|
5
|
-
-- 1) jid
|
6
|
-
|
7
|
-
if #KEYS > 0 then error('Get(): No Keys should be provided') end
|
8
|
-
|
9
|
-
local jid = assert(ARGV[1], 'Get(): Arg "jid" missing')
|
10
|
-
|
11
|
-
-- Let's get all the data we can
|
12
|
-
local job = redis.call(
|
13
|
-
'hmget', 'ql:j:' .. jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority',
|
14
|
-
'expires', 'retries', 'remaining', 'data', 'tags', 'history', 'failure')
|
15
|
-
|
16
|
-
if not job[1] then
|
17
|
-
return false
|
18
|
-
end
|
19
|
-
|
20
|
-
return cjson.encode({
|
21
|
-
jid = job[1],
|
22
|
-
klass = job[2],
|
23
|
-
state = job[3],
|
24
|
-
queue = job[4],
|
25
|
-
worker = job[5] or '',
|
26
|
-
tracked = redis.call('zscore', 'ql:tracked', jid) ~= false,
|
27
|
-
priority = tonumber(job[6]),
|
28
|
-
expires = tonumber(job[7]) or 0,
|
29
|
-
retries = tonumber(job[8]),
|
30
|
-
remaining = tonumber(job[9]),
|
31
|
-
data = cjson.decode(job[10]),
|
32
|
-
tags = cjson.decode(job[11]),
|
33
|
-
history = cjson.decode(job[12]),
|
34
|
-
failure = cjson.decode(job[13] or '{}'),
|
35
|
-
dependents = redis.call('smembers', 'ql:j:' .. jid .. '-dependents'),
|
36
|
-
dependencies = redis.call('smembers', 'ql:j:' .. jid .. '-dependencies')
|
37
|
-
})
|
@@ -1,51 +0,0 @@
|
|
1
|
-
-- This scripts conducts a heartbeat for a job, and returns
|
2
|
-
-- either the new expiration or False if the lock has been
|
3
|
-
-- given to another node
|
4
|
-
--
|
5
|
-
-- Args:
|
6
|
-
-- 1) jid
|
7
|
-
-- 2) worker
|
8
|
-
-- 3) now
|
9
|
-
-- 4) [data]
|
10
|
-
|
11
|
-
if #KEYS > 0 then error('Heartbeat(): No Keys should be provided') end
|
12
|
-
|
13
|
-
local jid = assert(ARGV[1] , 'Heartbeat(): Arg "jid" missing')
|
14
|
-
local worker = assert(ARGV[2] , 'Heartbeat(): Arg "worker" missing')
|
15
|
-
local now = assert(tonumber(ARGV[3]), 'Heartbeat(): Arg "now" missing')
|
16
|
-
local data = ARGV[4]
|
17
|
-
|
18
|
-
-- We should find the heartbeat interval for this queue
|
19
|
-
-- heartbeat. First, though, we need to find the queue
|
20
|
-
-- this particular job is in
|
21
|
-
local queue = redis.call('hget', 'ql:j:' .. jid, 'queue') or ''
|
22
|
-
local _hb, _qhb = unpack(redis.call('hmget', 'ql:config', 'heartbeat', queue .. '-heartbeat'))
|
23
|
-
local expires = now + tonumber(_qhb or _hb or 60)
|
24
|
-
|
25
|
-
if data then
|
26
|
-
data = cjson.decode(data)
|
27
|
-
end
|
28
|
-
|
29
|
-
-- First, let's see if the worker still owns this job, and there is a worker
|
30
|
-
local job_worker = redis.call('hget', 'ql:j:' .. jid, 'worker')
|
31
|
-
if job_worker ~= worker or #job_worker == 0 then
|
32
|
-
return false
|
33
|
-
else
|
34
|
-
-- Otherwise, optionally update the user data, and the heartbeat
|
35
|
-
if data then
|
36
|
-
-- I don't know if this is wise, but I'm decoding and encoding
|
37
|
-
-- the user data to hopefully ensure its sanity
|
38
|
-
redis.call('hmset', 'ql:j:' .. jid, 'expires', expires, 'worker', worker, 'data', cjson.encode(data))
|
39
|
-
else
|
40
|
-
redis.call('hmset', 'ql:j:' .. jid, 'expires', expires, 'worker', worker)
|
41
|
-
end
|
42
|
-
|
43
|
-
-- Update when this job was last updated on that worker
|
44
|
-
-- Add this job to the list of jobs handled by this worker
|
45
|
-
redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid)
|
46
|
-
|
47
|
-
-- And now we should just update the locks
|
48
|
-
local queue = redis.call('hget', 'ql:j:' .. jid, 'queue')
|
49
|
-
redis.call('zadd', 'ql:q:'.. queue .. '-locks', expires, jid)
|
50
|
-
return expires
|
51
|
-
end
|
@@ -1,41 +0,0 @@
|
|
1
|
-
-- Jobs(0, 'complete' | (('stalled' | 'running' | 'scheduled' | 'depends', 'recurring'), now, queue) [offset, [count]])
|
2
|
-
-- -------------------------------------------------------------------------------------------------------
|
3
|
-
--
|
4
|
-
-- Return all the job ids currently considered to be in the provided state
|
5
|
-
-- in a particular queue. The response is a list of job ids:
|
6
|
-
--
|
7
|
-
-- [
|
8
|
-
-- jid1,
|
9
|
-
-- jid2,
|
10
|
-
-- ...
|
11
|
-
-- ]
|
12
|
-
|
13
|
-
if #KEYS > 0 then
|
14
|
-
error('Jobs(): Got '.. #KEYS .. ' expected 0 KEYS arguments')
|
15
|
-
end
|
16
|
-
|
17
|
-
local t = assert(ARGV[1] , 'Jobs(): Arg "type" missing')
|
18
|
-
if t == 'complete' then
|
19
|
-
local offset = assert(tonumber(ARGV[2] or 0) , 'Jobs(): Arg "offset" not a number: ' .. tostring(ARGV[2]))
|
20
|
-
local count = assert(tonumber(ARGV[3] or 25), 'Jobs(): Arg "count" not a number: ' .. tostring(ARGV[3]))
|
21
|
-
return redis.call('zrevrange', 'ql:completed', offset, offset + count - 1)
|
22
|
-
else
|
23
|
-
local now = assert(tonumber(ARGV[2]) , 'Jobs(): Arg "now" missing or not a number: ' .. tostring(ARGV[2]))
|
24
|
-
local queue = assert(ARGV[3] , 'Jobs(): Arg "queue" missing')
|
25
|
-
local offset = assert(tonumber(ARGV[4] or 0) , 'Jobs(): Arg "offset" not a number: ' .. tostring(ARGV[4]))
|
26
|
-
local count = assert(tonumber(ARGV[5] or 25), 'Jobs(): Arg "count" not a number: ' .. tostring(ARGV[5]))
|
27
|
-
|
28
|
-
if t == 'running' then
|
29
|
-
return redis.call('zrangebyscore', 'ql:q:' .. queue .. '-locks', now, 133389432700, 'limit', offset, count)
|
30
|
-
elseif t == 'stalled' then
|
31
|
-
return redis.call('zrangebyscore', 'ql:q:' .. queue .. '-locks', 0, now, 'limit', offset, count)
|
32
|
-
elseif t == 'scheduled' then
|
33
|
-
return redis.call('zrange', 'ql:q:' .. queue .. '-scheduled', offset, offset + count - 1)
|
34
|
-
elseif t == 'depends' then
|
35
|
-
return redis.call('zrange', 'ql:q:' .. queue .. '-depends', offset, offset + count - 1)
|
36
|
-
elseif t == 'recurring' then
|
37
|
-
return redis.call('zrange', 'ql:q:' .. queue .. '-recur', offset, offset + count - 1)
|
38
|
-
else
|
39
|
-
error('Jobs(): Unknown type "' .. t .. '"')
|
40
|
-
end
|
41
|
-
end
|