qless 0.9.3 → 0.10.0

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (65)
  1. data/Gemfile +9 -3
  2. data/README.md +70 -25
  3. data/Rakefile +125 -9
  4. data/exe/install_phantomjs +21 -0
  5. data/lib/qless.rb +115 -76
  6. data/lib/qless/config.rb +11 -9
  7. data/lib/qless/failure_formatter.rb +43 -0
  8. data/lib/qless/job.rb +201 -102
  9. data/lib/qless/job_reservers/ordered.rb +7 -1
  10. data/lib/qless/job_reservers/round_robin.rb +16 -6
  11. data/lib/qless/job_reservers/shuffled_round_robin.rb +9 -2
  12. data/lib/qless/lua/qless-lib.lua +2463 -0
  13. data/lib/qless/lua/qless.lua +2012 -0
  14. data/lib/qless/lua_script.rb +63 -12
  15. data/lib/qless/middleware/memory_usage_monitor.rb +62 -0
  16. data/lib/qless/middleware/metriks.rb +45 -0
  17. data/lib/qless/middleware/redis_reconnect.rb +6 -3
  18. data/lib/qless/middleware/requeue_exceptions.rb +94 -0
  19. data/lib/qless/middleware/retry_exceptions.rb +38 -9
  20. data/lib/qless/middleware/sentry.rb +3 -7
  21. data/lib/qless/middleware/timeout.rb +64 -0
  22. data/lib/qless/queue.rb +90 -55
  23. data/lib/qless/server.rb +177 -130
  24. data/lib/qless/server/views/_job.erb +33 -15
  25. data/lib/qless/server/views/completed.erb +11 -0
  26. data/lib/qless/server/views/layout.erb +70 -11
  27. data/lib/qless/server/views/overview.erb +93 -53
  28. data/lib/qless/server/views/queue.erb +9 -8
  29. data/lib/qless/server/views/queues.erb +18 -1
  30. data/lib/qless/subscriber.rb +37 -22
  31. data/lib/qless/tasks.rb +5 -10
  32. data/lib/qless/test_helpers/worker_helpers.rb +55 -0
  33. data/lib/qless/version.rb +3 -1
  34. data/lib/qless/worker.rb +4 -413
  35. data/lib/qless/worker/base.rb +247 -0
  36. data/lib/qless/worker/forking.rb +245 -0
  37. data/lib/qless/worker/serial.rb +41 -0
  38. metadata +135 -52
  39. data/lib/qless/qless-core/cancel.lua +0 -101
  40. data/lib/qless/qless-core/complete.lua +0 -233
  41. data/lib/qless/qless-core/config.lua +0 -56
  42. data/lib/qless/qless-core/depends.lua +0 -65
  43. data/lib/qless/qless-core/deregister_workers.lua +0 -12
  44. data/lib/qless/qless-core/fail.lua +0 -117
  45. data/lib/qless/qless-core/failed.lua +0 -83
  46. data/lib/qless/qless-core/get.lua +0 -37
  47. data/lib/qless/qless-core/heartbeat.lua +0 -51
  48. data/lib/qless/qless-core/jobs.lua +0 -41
  49. data/lib/qless/qless-core/pause.lua +0 -18
  50. data/lib/qless/qless-core/peek.lua +0 -165
  51. data/lib/qless/qless-core/pop.lua +0 -314
  52. data/lib/qless/qless-core/priority.lua +0 -32
  53. data/lib/qless/qless-core/put.lua +0 -169
  54. data/lib/qless/qless-core/qless-lib.lua +0 -2354
  55. data/lib/qless/qless-core/qless.lua +0 -1862
  56. data/lib/qless/qless-core/queues.lua +0 -58
  57. data/lib/qless/qless-core/recur.lua +0 -190
  58. data/lib/qless/qless-core/retry.lua +0 -73
  59. data/lib/qless/qless-core/stats.lua +0 -92
  60. data/lib/qless/qless-core/tag.lua +0 -100
  61. data/lib/qless/qless-core/track.lua +0 -79
  62. data/lib/qless/qless-core/unfail.lua +0 -54
  63. data/lib/qless/qless-core/unpause.lua +0 -12
  64. data/lib/qless/qless-core/workers.lua +0 -69
  65. data/lib/qless/wait_until.rb +0 -19
data/lib/qless/qless-core/pause.lua
@@ -1,18 +0,0 @@
- -- This script takes the name of the queue(s) and adds it
- -- to the ql:paused_queues set.
- --
- -- Args: The list of queues to pause.
- --
- -- Note: long term, we have discussed adding a rate-limiting
- -- feature to qless-core, which would be more flexible and
- -- could be used for pausing (i.e. pause = set the rate to 0).
- -- For now, this is far simpler, but we should rewrite this
- -- in terms of the rate limiting feature if/when that is added.
-
- if #KEYS > 0 then error('Pause(): No Keys should be provided') end
- if #ARGV < 1 then error('Pause(): Must provide at least one queue to pause') end
-
- local key = 'ql:paused_queues'
-
- redis.call('sadd', key, unpack(ARGV))
-
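The removed pause.lua above takes no KEYS and the queue names as ARGV, adding each name to the ql:paused_queues set. Below is a minimal sketch of how a client could drive a script with that contract from Ruby via redis-rb; the connection settings, file path, and queue name are illustrative assumptions, not part of this diff:

    require 'redis'

    redis = Redis.new  # assumes a Redis server on localhost:6379

    # Load the script body shown in the hunk above (path is assumed, relative to the gem)
    pause_script = File.read('lib/qless/qless-core/pause.lua')

    # Pause(): no KEYS allowed, queue names are passed as ARGV
    redis.eval(pause_script, keys: [], argv: ['example-queue'])

    # The queue name should now be a member of ql:paused_queues
    puts redis.smembers('ql:paused_queues').inspect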
data/lib/qless/qless-core/peek.lua
@@ -1,165 +0,0 @@
- -- This script takes the name of the queue and then checks
- -- for any expired locks, then inserts any scheduled items
- -- that are now valid, and lastly returns any work items
- -- that can be handed over.
- --
- -- Keys:
- -- 1) queue name
- -- Args:
- -- 1) the number of items to return
- -- 2) the current time
-
- if #KEYS ~= 1 then
- if #KEYS < 1 then
- error('Peek(): Expected 1 KEYS argument')
- else
- error('Peek(): Got ' .. #KEYS .. ', expected 1 KEYS argument')
- end
- end
-
- local queue = assert(KEYS[1] , 'Peek(): Key "queue" missing')
- local key = 'ql:q:' .. queue
- local count = assert(tonumber(ARGV[1]) , 'Peek(): Arg "count" missing or not a number: ' .. (ARGV[2] or 'nil'))
- local now = assert(tonumber(ARGV[2]) , 'Peek(): Arg "now" missing or not a number: ' .. (ARGV[3] or 'nil'))
-
- -- These are the ids that we're going to return
- local keys = {}
-
- -- Iterate through all the expired locks and add them to the list
- -- of keys that we'll return
- for index, jid in ipairs(redis.call('zrangebyscore', key .. '-locks', 0, now, 'LIMIT', 0, count)) do
- table.insert(keys, jid)
- end
-
- -- If we still need jobs in order to meet demand, then we should
- -- look for all the recurring jobs that need jobs run
- if #keys < count then
- -- This is how many jobs we've moved so far
- local moved = 0
- -- These are the recurring jobs that need work
- local r = redis.call('zrangebyscore', key .. '-recur', 0, now, 'LIMIT', 0, count)
- for index, jid in ipairs(r) do
- -- For each of the jids that need jobs scheduled, first
- -- get the last time each of them was run, and then increment
- -- it by its interval. While this time is less than now,
- -- we need to keep putting jobs on the queue
- local klass, data, priority, tags, retries, interval = unpack(redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', 'tags', 'retries', 'interval'))
- local _tags = cjson.decode(tags)
-
- -- We're saving this value so that in the history, we can accurately
- -- reflect when the job would normally have been scheduled
- local score = math.floor(tonumber(redis.call('zscore', key .. '-recur', jid)))
- while (score <= now) and (moved < (count - #keys)) do
- local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1)
- moved = moved + 1
-
- -- Add this job to the list of jobs tagged with whatever tags were supplied
- for i, tag in ipairs(_tags) do
- redis.call('zadd', 'ql:t:' .. tag, now, jid .. '-' .. count)
- redis.call('zincrby', 'ql:tags', 1, tag)
- end
-
- -- First, let's save its data
- redis.call('hmset', 'ql:j:' .. jid .. '-' .. count,
- 'jid' , jid .. '-' .. count,
- 'klass' , klass,
- 'data' , data,
- 'priority' , priority,
- 'tags' , tags,
- 'state' , 'waiting',
- 'worker' , '',
- 'expires' , 0,
- 'queue' , queue,
- 'retries' , retries,
- 'remaining', retries,
- 'history' , cjson.encode({{
- -- The job was essentially put in this queue at this time,
- -- and not the current time
- q = queue,
- put = math.floor(score)
- }}))
-
- -- Now, if a delay was provided, and if it's in the future,
- -- then we'll have to schedule it. Otherwise, we're just
- -- going to add it to the work queue.
- redis.call('zadd', key .. '-work', priority - (score / 10000000000), jid .. '-' .. count)
-
- redis.call('zincrby', key .. '-recur', interval, jid)
- score = score + interval
- end
- end
- end
-
- -- Now we've checked __all__ the locks for this queue the could
- -- have expired, and are no more than the number requested. If
- -- we still need values in order to meet the demand, then we
- -- should check if any scheduled items, and if so, we should
- -- insert them to ensure correctness when pulling off the next
- -- unit of work.
- if #keys < count then
- -- zadd is a list of arguments that we'll be able to use to
- -- insert into the work queue
- local zadd = {}
- local r = redis.call('zrangebyscore', key .. '-scheduled', 0, now, 'LIMIT', 0, (count - #keys))
- for index, jid in ipairs(r) do
- -- With these in hand, we'll have to go out and find the
- -- priorities of these jobs, and then we'll insert them
- -- into the work queue and then when that's complete, we'll
- -- remove them from the scheduled queue
- table.insert(zadd, tonumber(redis.call('hget', 'ql:j:' .. jid, 'priority') or 0))
- table.insert(zadd, jid)
- -- We should also update them to have the state 'waiting'
- -- instead of 'scheduled'
- redis.call('hset', 'ql:j:' .. jid, 'state', 'waiting')
- end
-
- if #zadd > 0 then
- -- Now add these to the work list, and then remove them
- -- from the scheduled list
- redis.call('zadd', key .. '-work', unpack(zadd))
- redis.call('zrem', key .. '-scheduled', unpack(r))
- end
-
- -- And now we should get up to the maximum number of requested
- -- work items from the work queue.
- for index, jid in ipairs(redis.call('zrevrange', key .. '-work', 0, (count - #keys) - 1)) do
- table.insert(keys, jid)
- end
- end
-
- -- Alright, now the `keys` table is filled with all the job
- -- ids which we'll be returning. Now we need to get the
- -- metadeata about each of these, update their metadata to
- -- reflect which worker they're on, when the lock expires,
- -- etc., add them to the locks queue and then we have to
- -- finally return a list of json blobs
-
- local response = {}
- for index, jid in ipairs(keys) do
- local job = redis.call(
- 'hmget', 'ql:j:' .. jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority',
- 'expires', 'retries', 'remaining', 'data', 'tags', 'history', 'failure')
-
- table.insert(response, cjson.encode({
- jid = job[1],
- klass = job[2],
- state = job[3],
- queue = job[4],
- worker = job[5] or '',
- tracked = redis.call('zscore', 'ql:tracked', jid) ~= false,
- priority = tonumber(job[6]),
- expires = tonumber(job[7]) or 0,
- retries = tonumber(job[8]),
- remaining = tonumber(job[9]),
- data = cjson.decode(job[10]),
- tags = cjson.decode(job[11]),
- history = cjson.decode(job[12]),
- failure = cjson.decode(job[13] or '{}'),
- dependents = redis.call('smembers', 'ql:j:' .. jid .. '-dependents'),
- -- A job in the waiting state can not have dependencies
- dependencies = {}
-
- }))
- end
-
- return response
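One detail of the removed peek.lua worth noting: when recurring jobs are materialized, they are added to the queue's -work sorted set with the score priority - (score / 10000000000), and workers later read that set with zrevrange, so a higher priority always sorts first and, within the same priority, the job with the earlier scheduled time wins. A small standalone Ruby sketch of that ordering; the job ids, priorities, and timestamps are made up for illustration:

    # Effective work-queue score as used by the removed peek.lua/pop.lua:
    # the put time is scaled down so it only breaks ties between equal priorities.
    def work_score(priority, put_time)
      priority - (put_time / 10_000_000_000.0)
    end

    # Hypothetical jobs: [jid, priority, put time (epoch seconds)]
    jobs = [
      ['low-old',  0, 1_300_000_000],
      ['low-new',  0, 1_300_000_600],
      ['high-new', 5, 1_300_000_600]
    ]

    # zrevrange returns the highest scores first
    ordered = jobs.sort_by { |_, pri, put| -work_score(pri, put) }
    puts ordered.map(&:first).inspect   # => ["high-new", "low-old", "low-new"]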
data/lib/qless/qless-core/pop.lua
@@ -1,314 +0,0 @@
- -- This script takes the name of the queue and then checks
- -- for any expired locks, then inserts any scheduled items
- -- that are now valid, and lastly returns any work items
- -- that can be handed over.
- --
- -- Keys:
- -- 1) queue name
- -- Args:
- -- 1) worker name
- -- 2) the number of items to return
- -- 3) the current time
-
- if #KEYS ~= 1 then
- if #KEYS < 1 then
- error('Pop(): Expected 1 KEYS argument')
- else
- error('Pop(): Got ' .. #KEYS .. ', expected 1 KEYS argument')
- end
- end
-
- local queue = assert(KEYS[1] , 'Pop(): Key "queue" missing')
- local key = 'ql:q:' .. queue
- local worker = assert(ARGV[1] , 'Pop(): Arg "worker" missing')
- local count = assert(tonumber(ARGV[2]) , 'Pop(): Arg "count" missing or not a number: ' .. (ARGV[2] or 'nil'))
- local now = assert(tonumber(ARGV[3]) , 'Pop(): Arg "now" missing or not a number: ' .. (ARGV[3] or 'nil'))
-
- -- We should find the heartbeat interval for this queue
- -- heartbeat
- local _hb, _qhb, _mc = unpack(redis.call('hmget', 'ql:config', 'heartbeat', queue .. '-heartbeat', queue .. '-max-concurrency'))
- local expires = now + tonumber(_qhb or _hb or 60)
- local max_concurrency = tonumber(_mc or 0)
-
- if max_concurrency > 0 then
- -- We need to find out how many locks are still valid.
- local num_still_locked = redis.call('zcount', key .. '-locks', now, '+inf')
- -- Only allow the minimum of the two through
- count = math.min(max_concurrency - num_still_locked, count)
- end
-
- -- The bin is midnight of the provided day
- -- 24 * 60 * 60 = 86400
- local bin = now - (now % 86400)
-
- -- These are the ids that we're going to return
- local keys = {}
-
- -- Make sure we this worker to the list of seen workers
- redis.call('zadd', 'ql:workers', now, worker)
-
- if redis.call('sismember', 'ql:paused_queues', queue) == 1 then
- return {}
- end
-
- -- Iterate through all the expired locks and add them to the list
- -- of keys that we'll return
- for index, jid in ipairs(redis.call('zrangebyscore', key .. '-locks', 0, now, 'LIMIT', 0, count)) do
- -- Remove this job from the jobs that the worker that was running it has
- local w = redis.call('hget', 'ql:j:' .. jid, 'worker')
- redis.call('zrem', 'ql:w:' .. w .. ':jobs', jid)
-
- -- Send a message to let the worker know that its lost its lock on the job
- local encoded = cjson.encode({
- jid = jid,
- event = 'lock_lost',
- worker = w
- })
- redis.call('publish', 'ql:w:' .. w, encoded)
- redis.call('publish', 'ql:log', encoded)
-
- -- For each of these, decrement their retries. If any of them
- -- have exhausted their retries, then we should mark them as
- -- failed.
- if redis.call('hincrby', 'ql:j:' .. jid, 'remaining', -1) < 0 then
- -- Now remove the instance from the schedule, and work queues for the queue it's in
- redis.call('zrem', 'ql:q:' .. queue .. '-work', jid)
- redis.call('zrem', 'ql:q:' .. queue .. '-locks', jid)
- redis.call('zrem', 'ql:q:' .. queue .. '-scheduled', jid)
-
- local group = 'failed-retries-' .. queue
- -- First things first, we should get the history
- local history = redis.call('hget', 'ql:j:' .. jid, 'history')
-
- -- Now, take the element of the history for which our provided worker is the worker, and update 'failed'
- history = cjson.decode(history or '[]')
- history[#history]['failed'] = now
-
- redis.call('hmset', 'ql:j:' .. jid, 'state', 'failed', 'worker', '',
- 'expires', '', 'history', cjson.encode(history), 'failure', cjson.encode({
- ['group'] = group,
- ['message'] = 'Job exhausted retries in queue "' .. queue .. '"',
- ['when'] = now,
- ['worker'] = history[#history]['worker']
- }))
-
- -- Add this type of failure to the list of failures
- redis.call('sadd', 'ql:failures', group)
- -- And add this particular instance to the failed types
- redis.call('lpush', 'ql:f:' .. group, jid)
-
- if redis.call('zscore', 'ql:tracked', jid) ~= false then
- redis.call('publish', 'failed', jid)
- end
- else
- table.insert(keys, jid)
-
- if redis.call('zscore', 'ql:tracked', jid) ~= false then
- redis.call('publish', 'stalled', jid)
- end
- end
- end
- -- Now we've checked __all__ the locks for this queue the could
- -- have expired, and are no more than the number requested.
-
- -- If we got any expired locks, then we should increment the
- -- number of retries for this stage for this bin
- redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'retries', #keys)
-
- -- If we still need jobs in order to meet demand, then we should
- -- look for all the recurring jobs that need jobs run
- if #keys < count then
- -- This is how many jobs we've moved so far
- local moved = 0
- -- These are the recurring jobs that need work
- local r = redis.call('zrangebyscore', key .. '-recur', 0, now, 'LIMIT', 0, (count - #keys))
- for index, jid in ipairs(r) do
- -- For each of the jids that need jobs scheduled, first
- -- get the last time each of them was run, and then increment
- -- it by its interval. While this time is less than now,
- -- we need to keep putting jobs on the queue
- local klass, data, priority, tags, retries, interval = unpack(redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', 'tags', 'retries', 'interval'))
- local _tags = cjson.decode(tags)
-
- -- We're saving this value so that in the history, we can accurately
- -- reflect when the job would normally have been scheduled
- local score = math.floor(tonumber(redis.call('zscore', key .. '-recur', jid)))
-
- while (score <= now) and (moved < (count - #keys)) do
- -- Increment the count of how many jobs we've moved from recurring
- -- to 'work'
- moved = moved + 1
-
- -- the count'th job that we've moved from this recurring job
- local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1)
-
- -- Add this job to the list of jobs tagged with whatever tags were supplied
- for i, tag in ipairs(_tags) do
- redis.call('zadd', 'ql:t:' .. tag, now, jid .. '-' .. count)
- redis.call('zincrby', 'ql:tags', 1, tag)
- end
-
- -- First, let's save its data
- redis.call('hmset', 'ql:j:' .. jid .. '-' .. count,
- 'jid' , jid .. '-' .. count,
- 'klass' , klass,
- 'data' , data,
- 'priority' , priority,
- 'tags' , tags,
- 'state' , 'waiting',
- 'worker' , '',
- 'expires' , 0,
- 'queue' , queue,
- 'retries' , retries,
- 'remaining', retries,
- 'history' , cjson.encode({{
- -- The job was essentially put in this queue at this time,
- -- and not the current time
- q = queue,
- put = math.floor(score)
- }}))
-
- -- Now, if a delay was provided, and if it's in the future,
- -- then we'll have to schedule it. Otherwise, we're just
- -- going to add it to the work queue.
- redis.call('zadd', key .. '-work', priority - (score / 10000000000), jid .. '-' .. count)
-
- redis.call('zincrby', key .. '-recur', interval, jid)
- score = score + interval
- end
- end
- end
-
- -- If we still need values in order to meet the demand, then we
- -- should check if any scheduled items, and if so, we should
- -- insert them to ensure correctness when pulling off the next
- -- unit of work.
- if #keys < count then
- -- zadd is a list of arguments that we'll be able to use to
- -- insert into the work queue
- local zadd = {}
- local r = redis.call('zrangebyscore', key .. '-scheduled', 0, now, 'LIMIT', 0, (count - #keys))
- for index, jid in ipairs(r) do
- -- With these in hand, we'll have to go out and find the
- -- priorities of these jobs, and then we'll insert them
- -- into the work queue and then when that's complete, we'll
- -- remove them from the scheduled queue
- table.insert(zadd, tonumber(redis.call('hget', 'ql:j:' .. jid, 'priority') or 0))
- table.insert(zadd, jid)
- end
-
- -- Now add these to the work list, and then remove them
- -- from the scheduled list
- if #zadd > 0 then
- redis.call('zadd', key .. '-work', unpack(zadd))
- redis.call('zrem', key .. '-scheduled', unpack(r))
- end
-
- -- And now we should get up to the maximum number of requested
- -- work items from the work queue.
- for index, jid in ipairs(redis.call('zrevrange', key .. '-work', 0, (count - #keys) - 1)) do
- table.insert(keys, jid)
- end
- end
-
- -- Alright, now the `keys` table is filled with all the job
- -- ids which we'll be returning. Now we need to get the
- -- metadeata about each of these, update their metadata to
- -- reflect which worker they're on, when the lock expires,
- -- etc., add them to the locks queue and then we have to
- -- finally return a list of json blobs
-
- local response = {}
- local state
- local history
- for index, jid in ipairs(keys) do
- -- First, we should get the state and history of the item
- state, history = unpack(redis.call('hmget', 'ql:j:' .. jid, 'state', 'history'))
-
- history = cjson.decode(history or '{}')
- history[#history]['worker'] = worker
- history[#history]['popped'] = math.floor(now)
-
- ----------------------------------------------------------
- -- This is the massive stats update that we have to do
- ----------------------------------------------------------
- -- This is how long we've been waiting to get popped
- local waiting = math.floor(now) - history[#history]['put']
- -- Now we'll go through the apparently long and arduous process of update
- local count, mean, vk = unpack(redis.call('hmget', 'ql:s:wait:' .. bin .. ':' .. queue, 'total', 'mean', 'vk'))
- count = count or 0
- if count == 0 then
- mean = waiting
- vk = 0
- count = 1
- else
- count = count + 1
- local oldmean = mean
- mean = mean + (waiting - mean) / count
- vk = vk + (waiting - mean) * (waiting - oldmean)
- end
- -- Now, update the histogram
- -- - `s1`, `s2`, ..., -- second-resolution histogram counts
- -- - `m1`, `m2`, ..., -- minute-resolution
- -- - `h1`, `h2`, ..., -- hour-resolution
- -- - `d1`, `d2`, ..., -- day-resolution
- waiting = math.floor(waiting)
- if waiting < 60 then -- seconds
- redis.call('hincrby', 'ql:s:wait:' .. bin .. ':' .. queue, 's' .. waiting, 1)
- elseif waiting < 3600 then -- minutes
- redis.call('hincrby', 'ql:s:wait:' .. bin .. ':' .. queue, 'm' .. math.floor(waiting / 60), 1)
- elseif waiting < 86400 then -- hours
- redis.call('hincrby', 'ql:s:wait:' .. bin .. ':' .. queue, 'h' .. math.floor(waiting / 3600), 1)
- else -- days
- redis.call('hincrby', 'ql:s:wait:' .. bin .. ':' .. queue, 'd' .. math.floor(waiting / 86400), 1)
- end
- redis.call('hmset', 'ql:s:wait:' .. bin .. ':' .. queue, 'total', count, 'mean', mean, 'vk', vk)
- ----------------------------------------------------------
-
- -- Add this job to the list of jobs handled by this worker
- redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid)
-
- -- Update the jobs data, and add its locks, and return the job
- redis.call(
- 'hmset', 'ql:j:' .. jid, 'worker', worker, 'expires', expires,
- 'state', 'running', 'history', cjson.encode(history))
-
- redis.call('zadd', key .. '-locks', expires, jid)
- local job = redis.call(
- 'hmget', 'ql:j:' .. jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority',
- 'expires', 'retries', 'remaining', 'data', 'tags', 'history', 'failure')
-
- local tracked = redis.call('zscore', 'ql:tracked', jid) ~= false
- if tracked then
- redis.call('publish', 'popped', jid)
- end
-
- table.insert(response, cjson.encode({
- jid = job[1],
- klass = job[2],
- state = job[3],
- queue = job[4],
- worker = job[5] or '',
- tracked = tracked,
- priority = tonumber(job[6]),
- expires = tonumber(job[7]) or 0,
- retries = tonumber(job[8]),
- remaining = tonumber(job[9]),
- data = cjson.decode(job[10]),
- tags = cjson.decode(job[11]),
- history = cjson.decode(job[12]),
- failure = cjson.decode(job[13] or '{}'),
- dependents = redis.call('smembers', 'ql:j:' .. jid .. '-dependents'),
- -- A job in the waiting state can not have dependencies
- -- because it has been popped off of a queue, which
- -- means all of its dependencies have been satisfied
- dependencies = {}
-
- }))
- end
-
- if #keys > 0 then
- redis.call('zrem', key .. '-work', unpack(keys))
- end
-
- return response
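Finally, the wait-time statistics block in the removed pop.lua keeps a running total, mean, and vk (sum of squared deviations) per day/queue bin, the standard online (Welford-style) update, so a standard deviation can be derived later without storing individual samples. A small standalone Ruby sketch of the same recurrence, with invented sample wait times for illustration:

    # Online mean/variance update mirroring the stats block in the removed pop.lua:
    #   count += 1; mean += (x - mean) / count; vk += (x - new_mean) * (x - old_mean)
    def update_stats(count, mean, vk, waiting)
      return [1, waiting.to_f, 0.0] if count.zero?
      count += 1
      old_mean = mean
      mean += (waiting - mean) / count
      vk += (waiting - mean) * (waiting - old_mean)
      [count, mean, vk]
    end

    count, mean, vk = 0, 0.0, 0.0
    [3, 7, 4, 10].each { |w| count, mean, vk = update_stats(count, mean, vk, w) }

    puts mean                    # => 6.0 (mean wait)
    puts Math.sqrt(vk / count)   # => ~2.74 (population standard deviation)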