reqless 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. checksums.yaml +7 -0
  2. data/Gemfile +8 -0
  3. data/README.md +648 -0
  4. data/Rakefile +117 -0
  5. data/bin/docker-build-and-test +22 -0
  6. data/exe/reqless-web +11 -0
  7. data/lib/reqless/config.rb +31 -0
  8. data/lib/reqless/failure_formatter.rb +43 -0
  9. data/lib/reqless/job.rb +496 -0
  10. data/lib/reqless/job_reservers/ordered.rb +29 -0
  11. data/lib/reqless/job_reservers/round_robin.rb +46 -0
  12. data/lib/reqless/job_reservers/shuffled_round_robin.rb +21 -0
  13. data/lib/reqless/lua/reqless-lib.lua +2965 -0
  14. data/lib/reqless/lua/reqless.lua +2545 -0
  15. data/lib/reqless/lua_script.rb +90 -0
  16. data/lib/reqless/middleware/requeue_exceptions.rb +94 -0
  17. data/lib/reqless/middleware/retry_exceptions.rb +72 -0
  18. data/lib/reqless/middleware/sentry.rb +66 -0
  19. data/lib/reqless/middleware/timeout.rb +63 -0
  20. data/lib/reqless/queue.rb +189 -0
  21. data/lib/reqless/queue_priority_pattern.rb +16 -0
  22. data/lib/reqless/server/static/css/bootstrap-responsive.css +686 -0
  23. data/lib/reqless/server/static/css/bootstrap-responsive.min.css +12 -0
  24. data/lib/reqless/server/static/css/bootstrap.css +3991 -0
  25. data/lib/reqless/server/static/css/bootstrap.min.css +689 -0
  26. data/lib/reqless/server/static/css/codemirror.css +112 -0
  27. data/lib/reqless/server/static/css/docs.css +839 -0
  28. data/lib/reqless/server/static/css/jquery.noty.css +105 -0
  29. data/lib/reqless/server/static/css/noty_theme_twitter.css +137 -0
  30. data/lib/reqless/server/static/css/style.css +200 -0
  31. data/lib/reqless/server/static/favicon.ico +0 -0
  32. data/lib/reqless/server/static/img/glyphicons-halflings-white.png +0 -0
  33. data/lib/reqless/server/static/img/glyphicons-halflings.png +0 -0
  34. data/lib/reqless/server/static/js/bootstrap-alert.js +94 -0
  35. data/lib/reqless/server/static/js/bootstrap-scrollspy.js +125 -0
  36. data/lib/reqless/server/static/js/bootstrap-tab.js +130 -0
  37. data/lib/reqless/server/static/js/bootstrap-tooltip.js +270 -0
  38. data/lib/reqless/server/static/js/bootstrap-typeahead.js +285 -0
  39. data/lib/reqless/server/static/js/bootstrap.js +1726 -0
  40. data/lib/reqless/server/static/js/bootstrap.min.js +6 -0
  41. data/lib/reqless/server/static/js/codemirror.js +2972 -0
  42. data/lib/reqless/server/static/js/jquery.noty.js +220 -0
  43. data/lib/reqless/server/static/js/mode/javascript.js +360 -0
  44. data/lib/reqless/server/static/js/theme/cobalt.css +18 -0
  45. data/lib/reqless/server/static/js/theme/eclipse.css +25 -0
  46. data/lib/reqless/server/static/js/theme/elegant.css +10 -0
  47. data/lib/reqless/server/static/js/theme/lesser-dark.css +45 -0
  48. data/lib/reqless/server/static/js/theme/monokai.css +28 -0
  49. data/lib/reqless/server/static/js/theme/neat.css +9 -0
  50. data/lib/reqless/server/static/js/theme/night.css +21 -0
  51. data/lib/reqless/server/static/js/theme/rubyblue.css +21 -0
  52. data/lib/reqless/server/static/js/theme/xq-dark.css +46 -0
  53. data/lib/reqless/server/views/_job.erb +259 -0
  54. data/lib/reqless/server/views/_job_list.erb +8 -0
  55. data/lib/reqless/server/views/_pagination.erb +7 -0
  56. data/lib/reqless/server/views/about.erb +130 -0
  57. data/lib/reqless/server/views/completed.erb +11 -0
  58. data/lib/reqless/server/views/config.erb +14 -0
  59. data/lib/reqless/server/views/failed.erb +48 -0
  60. data/lib/reqless/server/views/failed_type.erb +18 -0
  61. data/lib/reqless/server/views/job.erb +17 -0
  62. data/lib/reqless/server/views/layout.erb +451 -0
  63. data/lib/reqless/server/views/overview.erb +137 -0
  64. data/lib/reqless/server/views/queue.erb +125 -0
  65. data/lib/reqless/server/views/queues.erb +45 -0
  66. data/lib/reqless/server/views/tag.erb +6 -0
  67. data/lib/reqless/server/views/throttles.erb +38 -0
  68. data/lib/reqless/server/views/track.erb +75 -0
  69. data/lib/reqless/server/views/worker.erb +34 -0
  70. data/lib/reqless/server/views/workers.erb +14 -0
  71. data/lib/reqless/server.rb +549 -0
  72. data/lib/reqless/subscriber.rb +74 -0
  73. data/lib/reqless/test_helpers/worker_helpers.rb +55 -0
  74. data/lib/reqless/throttle.rb +57 -0
  75. data/lib/reqless/version.rb +5 -0
  76. data/lib/reqless/worker/base.rb +237 -0
  77. data/lib/reqless/worker/forking.rb +215 -0
  78. data/lib/reqless/worker/serial.rb +41 -0
  79. data/lib/reqless/worker.rb +5 -0
  80. data/lib/reqless.rb +309 -0
  81. metadata +399 -0
data/lib/reqless/lua/reqless-lib.lua
@@ -0,0 +1,2965 @@
1
+ -- Current SHA: 8b6600adb988e7f4922f606798b6ad64c06a245d
2
+ -- This is a generated file
3
+ -- cjson can't tell an empty array from an empty object, so empty arrays end up
4
+ -- encoded as objects. This function forces empty arrays to encode as '[]'.
5
+ local function cjsonArrayDegenerationWorkaround(array)
6
+ if #array == 0 then
7
+ return "[]"
8
+ end
9
+ return cjson.encode(array)
10
+ end
11
+ -------------------------------------------------------------------------------
12
+ -- Forward declarations to make everything happy
13
+ -------------------------------------------------------------------------------
14
+ local Reqless = {
15
+ ns = 'ql:'
16
+ }
17
+
18
+ -- Queue forward declaration
19
+ local ReqlessQueue = {
20
+ ns = Reqless.ns .. 'q:'
21
+ }
22
+ ReqlessQueue.__index = ReqlessQueue
23
+
24
+ -- Worker forward declaration
25
+ local ReqlessWorker = {
26
+ ns = Reqless.ns .. 'w:'
27
+ }
28
+ ReqlessWorker.__index = ReqlessWorker
29
+
30
+ -- Job forward declaration
31
+ local ReqlessJob = {
32
+ ns = Reqless.ns .. 'j:'
33
+ }
34
+ ReqlessJob.__index = ReqlessJob
35
+
36
+ -- throttle forward declaration
37
+ local ReqlessThrottle = {
38
+ ns = Reqless.ns .. 'th:'
39
+ }
40
+ ReqlessThrottle.__index = ReqlessThrottle
41
+
42
+ -- RecurringJob forward declaration
43
+ local ReqlessRecurringJob = {}
44
+ ReqlessRecurringJob.__index = ReqlessRecurringJob
45
+
46
+ -- Config forward declaration
47
+ Reqless.config = {}
48
+
49
+ -- Extend a table. This comes up quite frequently
50
+ local function table_extend(self, other)
51
+ for _, v in ipairs(other) do
52
+ table.insert(self, v)
53
+ end
54
+ end
55
+
56
+ -- This is essentially the same as redis' publish, but it prefixes the channel
57
+ -- with the Reqless namespace
58
+ function Reqless.publish(channel, message)
59
+ redis.call('publish', Reqless.ns .. channel, message)
60
+ end
61
+
62
+ -- Return a job object given its job id
63
+ function Reqless.job(jid)
64
+ assert(jid, 'Job(): no jid provided')
65
+ local job = {}
66
+ setmetatable(job, ReqlessJob)
67
+ job.jid = jid
68
+ return job
69
+ end
70
+
71
+ -- Return a recurring job object
72
+ function Reqless.recurring(jid)
73
+ assert(jid, 'Recurring(): no jid provided')
74
+ local job = {}
75
+ setmetatable(job, ReqlessRecurringJob)
76
+ job.jid = jid
77
+ return job
78
+ end
79
+
80
+ -- Return a throttle object
81
+ -- throttle objects are used for arbitrary throttling of jobs.
82
+ function Reqless.throttle(tid)
83
+ assert(tid, 'Throttle(): no tid provided')
84
+ local throttle = ReqlessThrottle.data({id = tid})
85
+ setmetatable(throttle, ReqlessThrottle)
86
+
87
+ -- set of jids which have acquired a lock on this throttle.
88
+ throttle.locks = {
89
+ length = function()
90
+ return (redis.call('zcard', ReqlessThrottle.ns .. tid .. '-locks') or 0)
91
+ end, members = function()
92
+ return redis.call('zrange', ReqlessThrottle.ns .. tid .. '-locks', 0, -1)
93
+ end, add = function(...)
94
+ if #arg > 0 then
95
+ redis.call('zadd', ReqlessThrottle.ns .. tid .. '-locks', unpack(arg))
96
+ end
97
+ end, remove = function(...)
98
+ if #arg > 0 then
99
+ return redis.call('zrem', ReqlessThrottle.ns .. tid .. '-locks', unpack(arg))
100
+ end
101
+ end, pop = function(min, max)
102
+ return redis.call('zremrangebyrank', ReqlessThrottle.ns .. tid .. '-locks', min, max)
103
+ end, peek = function(min, max)
104
+ return redis.call('zrange', ReqlessThrottle.ns .. tid .. '-locks', min, max)
105
+ end
106
+ }
107
+
108
+ -- set of jids which are waiting for the throttle to become available.
109
+ throttle.pending = {
110
+ length = function()
111
+ return (redis.call('zcard', ReqlessThrottle.ns .. tid .. '-pending') or 0)
112
+ end, members = function()
113
+ return redis.call('zrange', ReqlessThrottle.ns .. tid .. '-pending', 0, -1)
114
+ end, add = function(now, jid)
115
+ redis.call('zadd', ReqlessThrottle.ns .. tid .. '-pending', now, jid)
116
+ end, remove = function(...)
117
+ if #arg > 0 then
118
+ return redis.call('zrem', ReqlessThrottle.ns .. tid .. '-pending', unpack(arg))
119
+ end
120
+ end, pop = function(min, max)
121
+ return redis.call('zremrangebyrank', ReqlessThrottle.ns .. tid .. '-pending', min, max)
122
+ end, peek = function(min, max)
123
+ return redis.call('zrange', ReqlessThrottle.ns .. tid .. '-pending', min, max)
124
+ end
125
+ }
126
+
127
+ return throttle
128
+ end
129
+
130
+ -- Failed([group, [start, [limit]]])
131
+ -- ------------------------------------
132
+ -- If no group is provided, this returns a JSON blob of the counts of the
133
+ -- various groups of failures known. If a group is provided, it will report up
134
+ -- to `limit` from `start` of the jobs affected by that issue.
135
+ --
136
+ -- # If no group, then...
137
+ -- {
138
+ -- 'group1': 1,
139
+ -- 'group2': 5,
140
+ -- ...
141
+ -- }
142
+ --
143
+ -- # If a group is provided, then...
144
+ -- {
145
+ -- 'total': 20,
146
+ -- 'jobs': [
147
+ -- {
148
+ -- # All the normal keys for a job
149
+ -- 'jid': ...,
150
+ -- 'data': ...
151
+ -- # The message for this particular instance
152
+ -- 'message': ...,
153
+ -- 'group': ...,
154
+ -- }, ...
155
+ -- ]
156
+ -- }
157
+ --
158
+ function Reqless.failed(group, start, limit)
159
+ start = assert(tonumber(start or 0),
160
+ 'Failed(): Arg "start" is not a number: ' .. (start or 'nil'))
161
+ limit = assert(tonumber(limit or 25),
162
+ 'Failed(): Arg "limit" is not a number: ' .. (limit or 'nil'))
163
+
164
+ if group then
165
+ -- If a group was provided, then we should do paginated lookup
166
+ return {
167
+ total = redis.call('llen', 'ql:f:' .. group),
168
+ jobs = redis.call('lrange', 'ql:f:' .. group, start, start + limit - 1)
169
+ }
170
+ end
171
+
172
+ -- Otherwise, we should just list all the known failure groups we have
173
+ local response = {}
174
+ local groups = redis.call('smembers', 'ql:failures')
175
+ for _, group in ipairs(groups) do
176
+ response[group] = redis.call('llen', 'ql:f:' .. group)
177
+ end
178
+ return response
179
+ end
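+
+ -- A usage sketch for the reader, defined but never invoked; the group name
+ -- 'account-errors' is hypothetical.
+ local function example_failed_usage()
+   local counts = Reqless.failed()                       -- {group = count, ...}
+   local page = Reqless.failed('account-errors', 0, 25)  -- {total = n, jobs = {...}}
+   return counts, page.total, page.jobs
+ end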
180
+
181
+ -- Jobs(now, 'complete', [offset, [limit]])
182
+ -- Jobs(now, (
183
+ -- 'stalled' | 'running' | 'throttled' | 'scheduled' | 'depends' | 'recurring'
184
+ -- ), queue, [offset, [limit]])
185
+ -------------------------------------------------------------------------------
186
+ -- Return all the job ids currently considered to be in the provided state
187
+ -- in a particular queue. The response is a list of job ids:
188
+ --
189
+ -- [
190
+ -- jid1,
191
+ -- jid2,
192
+ -- ...
193
+ -- ]
194
+ function Reqless.jobs(now, state, ...)
195
+ assert(state, 'Jobs(): Arg "state" missing')
196
+ if state == 'complete' then
197
+ local offset = assert(tonumber(arg[1] or 0),
198
+ 'Jobs(): Arg "offset" not a number: ' .. tostring(arg[1]))
199
+ local limit = assert(tonumber(arg[2] or 25),
200
+ 'Jobs(): Arg "limit" not a number: ' .. tostring(arg[2]))
201
+ return redis.call('zrevrange', 'ql:completed', offset,
202
+ offset + limit - 1)
203
+ end
204
+
205
+ local queue_name = assert(arg[1], 'Jobs(): Arg "queue" missing')
206
+ local offset = assert(tonumber(arg[2] or 0),
207
+ 'Jobs(): Arg "offset" not a number: ' .. tostring(arg[2]))
208
+ local limit = assert(tonumber(arg[3] or 25),
209
+ 'Jobs(): Arg "limit" not a number: ' .. tostring(arg[3]))
210
+
211
+ local queue = Reqless.queue(queue_name)
212
+ if state == 'running' then
213
+ return queue.locks.peek(now, offset, limit)
214
+ elseif state == 'stalled' then
215
+ return queue.locks.expired(now, offset, limit)
216
+ elseif state == 'throttled' then
217
+ return queue.throttled.peek(now, offset, limit)
218
+ elseif state == 'scheduled' then
219
+ queue:check_scheduled(now, queue.scheduled.length())
220
+ return queue.scheduled.peek(now, offset, limit)
221
+ elseif state == 'depends' then
222
+ return queue.depends.peek(now, offset, limit)
223
+ elseif state == 'recurring' then
224
+ return queue.recurring.peek(math.huge, offset, limit)
225
+ end
226
+
227
+ error('Jobs(): Unknown type "' .. state .. '"')
228
+ end
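+
+ -- Illustration only (never called): listing jids by state. The queue name
+ -- 'example-queue' is hypothetical; 'now' is the caller-supplied timestamp.
+ local function example_jobs_usage(now)
+   local running = Reqless.jobs(now, 'running', 'example-queue', 0, 25)
+   local completed = Reqless.jobs(now, 'complete', 0, 25)
+   return running, completed
+ end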
229
+
230
+ -- Track()
231
+ -- Track(now, ('track' | 'untrack'), jid)
232
+ -- ------------------------------------------
233
+ -- If no arguments are provided, it returns details of all currently-tracked
234
+ -- jobs. If the first argument is 'track', then it will start tracking the job
235
+ -- associated with that id, and 'untrack' stops tracking it. In this context,
236
+ -- tracking is nothing more than saving the job to a list of jobs that are
237
+ -- considered special.
238
+ --
239
+ -- {
240
+ -- 'jobs': [
241
+ -- {
242
+ -- 'jid': ...,
243
+ -- # All the other details you'd get from 'job.get'
244
+ -- }, {
245
+ -- ...
246
+ -- }
247
+ -- ], 'expired': [
248
+ -- # These are all the jids that are completed and whose data expired
249
+ -- 'deadbeef',
250
+ -- ...,
251
+ -- ...,
252
+ -- ]
253
+ -- }
254
+ --
255
+ function Reqless.track(now, command, jid)
256
+ if command ~= nil then
257
+ assert(jid, 'Track(): Arg "jid" missing')
258
+ -- Verify that job exists
259
+ assert(Reqless.job(jid):exists(), 'Track(): Job does not exist')
260
+ if string.lower(command) == 'track' then
261
+ Reqless.publish('track', jid)
262
+ return redis.call('zadd', 'ql:tracked', now, jid)
263
+ elseif string.lower(command) == 'untrack' then
264
+ Reqless.publish('untrack', jid)
265
+ return redis.call('zrem', 'ql:tracked', jid)
266
+ end
267
+ error('Track(): Unknown action "' .. command .. '"')
268
+ end
269
+
270
+ local response = {
271
+ jobs = {},
272
+ expired = {},
273
+ }
274
+ local jids = redis.call('zrange', 'ql:tracked', 0, -1)
275
+ for _, jid in ipairs(jids) do
276
+ local data = Reqless.job(jid):data()
277
+ if data then
278
+ table.insert(response.jobs, data)
279
+ else
280
+ table.insert(response.expired, jid)
281
+ end
282
+ end
283
+ return response
284
+ end
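+
+ -- Illustration only (never called): track a hypothetical jid, read back all
+ -- tracked jobs, then untrack it.
+ local function example_track_usage(now, jid)
+   Reqless.track(now, 'track', jid)
+   local tracked = Reqless.track(now)   -- {jobs = {...}, expired = {...}}
+   Reqless.track(now, 'untrack', jid)
+   return tracked
+ end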
285
+
286
+ -- tag(now, ('add' | 'remove'), jid, tag, [tag, ...])
287
+ -- tag(now, 'get', tag, [offset, [limit]])
288
+ -- tag(now, 'top', [offset, [limit]])
289
+ -- -----------------------------------------------------------------------------
290
+ -- Accepts a jid, 'add' or 'remove', and then a list of tags
291
+ -- to either add or remove from the job. Alternatively, 'get',
292
+ -- a tag to get jobs associated with that tag, and offset and
293
+ -- limit
294
+ --
295
+ -- If 'add' or 'remove', the response is a list of the job's
296
+ -- current tags, or False if the job doesn't exist. If 'get',
297
+ -- the response is of the form:
298
+ --
299
+ -- {
300
+ -- total: ...,
301
+ -- jobs: [
302
+ -- jid,
303
+ -- ...
304
+ -- ]
305
+ -- }
306
+ --
307
+ -- If 'top' is supplied, it returns the most commonly-used tags
308
+ -- in a paginated fashion.
309
+ function Reqless.tag(now, command, ...)
310
+ assert(command,
311
+ 'Tag(): Arg "command" must be "add", "remove", "get" or "top"')
312
+
313
+ if command == 'get' then
314
+ local tag = assert(arg[1], 'Tag(): Arg "tag" missing')
315
+ local offset = assert(tonumber(arg[2] or 0),
316
+ 'Tag(): Arg "offset" not a number: ' .. tostring(arg[2]))
317
+ local limit = assert(tonumber(arg[3] or 25),
318
+ 'Tag(): Arg "limit" not a number: ' .. tostring(arg[3]))
319
+ return {
320
+ total = redis.call('zcard', 'ql:t:' .. tag),
321
+ jobs = redis.call('zrange', 'ql:t:' .. tag, offset, offset + limit - 1)
322
+ }
323
+ elseif command == 'top' then
324
+ local offset = assert(tonumber(arg[1] or 0) , 'Tag(): Arg "offset" not a number: ' .. tostring(arg[1]))
325
+ local limit = assert(tonumber(arg[2] or 25), 'Tag(): Arg "limit" not a number: ' .. tostring(arg[2]))
326
+ return redis.call('zrevrangebyscore', 'ql:tags', '+inf', 2, 'limit', offset, limit)
327
+ elseif command ~= 'add' and command ~= 'remove' then
328
+ error('Tag(): First argument must be "add", "remove", "get", or "top"')
329
+ end
330
+
331
+ local jid = assert(arg[1], 'Tag(): Arg "jid" missing')
332
+ local tags = redis.call('hget', ReqlessJob.ns .. jid, 'tags')
333
+ -- If the job has been canceled / deleted, raise an error
334
+ if not tags then
335
+ error('Tag(): Job ' .. jid .. ' does not exist')
336
+ end
337
+
338
+ -- Decode the json blob, convert to dictionary
339
+ tags = cjson.decode(tags)
340
+ local _tags = {}
341
+ for _, v in ipairs(tags) do _tags[v] = true end
342
+
343
+ if command == 'add' then
344
+ -- Add the job to the sorted set for each provided tag
345
+ for i=2, #arg do
346
+ local tag = arg[i]
347
+ if _tags[tag] == nil then
348
+ _tags[tag] = true
349
+ table.insert(tags, tag)
350
+ end
351
+ Reqless.job(jid):insert_tag(now, tag)
352
+ end
353
+
354
+ redis.call('hset', ReqlessJob.ns .. jid, 'tags', cjson.encode(tags))
355
+ return tags
356
+ end
357
+
358
+ -- Remove the job from the sorted set for each provided tag
359
+ for i=2, #arg do
360
+ local tag = arg[i]
361
+ _tags[tag] = nil
362
+ Reqless.job(jid):remove_tag(tag)
363
+ end
364
+
365
+ local results = {}
366
+ for _, tag in ipairs(tags) do
367
+ if _tags[tag] then
368
+ table.insert(results, tag)
369
+ end
370
+ end
371
+
372
+ redis.call('hset', ReqlessJob.ns .. jid, 'tags', cjson.encode(results))
373
+ return results
374
+ end
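+
+ -- Illustration only (never called): add and remove tags on a hypothetical jid
+ -- and page through jobs carrying a tag.
+ local function example_tag_usage(now, jid)
+   local tags = Reqless.tag(now, 'add', jid, 'reports', 'urgent')
+   local page = Reqless.tag(now, 'get', 'reports', 0, 25)  -- {total = n, jobs = {...}}
+   Reqless.tag(now, 'remove', jid, 'urgent')
+   return tags, page
+ end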
375
+
376
+ -- Cancel(...)
377
+ -- --------------
378
+ -- Cancel a job so that it does not run. It will be deleted from the system; any
379
+ -- attempts to renew a heartbeat will fail, and any attempts to complete it
380
+ -- will fail. If you try to get the data on the object, you will get nothing.
381
+ function Reqless.cancel(now, ...)
382
+ -- Dependents is a mapping of a job to its dependent jids
383
+ local dependents = {}
384
+ for _, jid in ipairs(arg) do
385
+ dependents[jid] = redis.call(
386
+ 'smembers', ReqlessJob.ns .. jid .. '-dependents') or {}
387
+ end
388
+
389
+ -- Now, we'll loop through every jid we intend to cancel, and we'll go
390
+ -- make sure that this operation will be ok
391
+ for _, jid in ipairs(arg) do
392
+ for j, dep in ipairs(dependents[jid]) do
393
+ if dependents[dep] == nil then
394
+ error('Cancel(): ' .. jid .. ' is a dependency of ' .. dep ..
395
+ ' but is not mentioned to be canceled')
396
+ end
397
+ end
398
+ end
399
+
400
+ -- If we've made it this far, then we are good to go. We can now just
401
+ -- remove any trace of all these jobs, as they form a dependent clique
402
+ for _, jid in ipairs(arg) do
403
+ -- Find any stage it's associated with and remove it from that stage
404
+ local state, queue, failure, worker = unpack(redis.call(
405
+ 'hmget', ReqlessJob.ns .. jid, 'state', 'queue', 'failure', 'worker'))
406
+
407
+ if state ~= 'complete' then
408
+ -- Send a message out on the appropriate channels
409
+ local encoded = cjson.encode({
410
+ jid = jid,
411
+ worker = worker,
412
+ event = 'canceled',
413
+ queue = queue
414
+ })
415
+ Reqless.publish('log', encoded)
416
+
417
+ -- Remove this job from whatever worker has it, if any
418
+ if worker and (worker ~= '') then
419
+ redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)
420
+ -- If necessary, send a message to the appropriate worker, too
421
+ Reqless.publish('w:' .. worker, encoded)
422
+ end
423
+
424
+ -- Remove it from that queue
425
+ if queue then
426
+ local queue = Reqless.queue(queue)
427
+ queue:remove_job(jid)
428
+ end
429
+
430
+ local job = Reqless.job(jid)
431
+
432
+ job:throttles_release(now)
433
+
434
+ -- We should probably go through all our dependencies and remove
435
+ -- ourselves from the list of dependents
436
+ for _, j in ipairs(redis.call(
437
+ 'smembers', ReqlessJob.ns .. jid .. '-dependencies')) do
438
+ redis.call('srem', ReqlessJob.ns .. j .. '-dependents', jid)
439
+ end
440
+
441
+ -- If we're in the failed state, remove all of our data
442
+ if state == 'failed' then
443
+ failure = cjson.decode(failure)
444
+ -- We need to make this remove it from the failed queues
445
+ redis.call('lrem', 'ql:f:' .. failure.group, 0, jid)
446
+ if redis.call('llen', 'ql:f:' .. failure.group) == 0 then
447
+ redis.call('srem', 'ql:failures', failure.group)
448
+ end
449
+ -- Remove one count from the failed count of the particular
450
+ -- queue
451
+ local bin = failure.when - (failure.when % 86400)
452
+ local failed = redis.call(
453
+ 'hget', 'ql:s:stats:' .. bin .. ':' .. queue, 'failed')
454
+ redis.call('hset',
455
+ 'ql:s:stats:' .. bin .. ':' .. queue, 'failed', failed - 1)
456
+ end
457
+
458
+ job:delete()
459
+
460
+ -- If the job was being tracked, we should notify
461
+ if redis.call('zscore', 'ql:tracked', jid) ~= false then
462
+ Reqless.publish('canceled', jid)
463
+ end
464
+ end
465
+ end
466
+
467
+ return arg
468
+ end
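+
+ -- Illustration only (never called): cancel a job together with its dependent
+ -- (both jids hypothetical); dependents must be canceled in the same call or
+ -- the function above raises an error.
+ local function example_cancel_usage(now)
+   return Reqless.cancel(now, 'parent-jid', 'dependent-jid')
+ end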
469
+ -------------------------------------------------------------------------------
470
+ -- Configuration interactions
471
+ -------------------------------------------------------------------------------
472
+
473
+ -- This represents our default configuration settings. Redis hash values are
474
+ -- strings, so use strings for the defaults for more consistent typing.
475
+ Reqless.config.defaults = {
476
+ ['application'] = 'reqless',
477
+ ['grace-period'] = '10',
478
+ ['heartbeat'] = '60',
479
+ ['jobs-history'] = '604800',
480
+ ['jobs-history-count'] = '50000',
481
+ ['max-job-history'] = '100',
482
+ ['max-pop-retry'] = '1',
483
+ ['max-worker-age'] = '86400',
484
+ }
485
+
486
+ -- Get one or more of the keys
487
+ Reqless.config.get = function(key, default)
488
+ if key then
489
+ return redis.call('hget', 'ql:config', key) or
490
+ Reqless.config.defaults[key] or default
491
+ end
492
+
493
+ -- Inspired by redis-lua https://github.com/nrk/redis-lua/blob/version-2.0/src/redis.lua
494
+ local reply = redis.call('hgetall', 'ql:config')
495
+ for i = 1, #reply, 2 do
496
+ Reqless.config.defaults[reply[i]] = reply[i + 1]
497
+ end
498
+ return Reqless.config.defaults
499
+ end
500
+
501
+ -- Set a configuration variable
502
+ Reqless.config.set = function(option, value)
503
+ assert(option, 'config.set(): Arg "option" missing')
504
+ assert(value , 'config.set(): Arg "value" missing')
505
+ -- Send out a log message
506
+ Reqless.publish('log', cjson.encode({
507
+ event = 'config_set',
508
+ option = option,
509
+ value = value
510
+ }))
511
+
512
+ redis.call('hset', 'ql:config', option, value)
513
+ end
514
+
515
+ -- Unset a configuration option
516
+ Reqless.config.unset = function(option)
517
+ assert(option, 'config.unset(): Arg "option" missing')
518
+ -- Send out a log message
519
+ Reqless.publish('log', cjson.encode({
520
+ event = 'config_unset',
521
+ option = option
522
+ }))
523
+
524
+ redis.call('hdel', 'ql:config', option)
525
+ end
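+
+ -- Illustration only (never called): reading and writing configuration through
+ -- the helpers above.
+ local function example_config_usage()
+   Reqless.config.set('heartbeat', '120')
+   local heartbeat = Reqless.config.get('heartbeat')  -- '120'
+   local everything = Reqless.config.get()            -- defaults merged with ql:config
+   Reqless.config.unset('heartbeat')
+   return heartbeat, everything
+ end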
526
+ -------------------------------------------------------------------------------
527
+ -- Job Class
528
+ --
529
+ -- It returns an object that represents the job with the provided JID
530
+ -------------------------------------------------------------------------------
531
+
532
+ -- This gets all the data associated with the job with the provided id. If the
533
+ -- job is not found, it returns nil. If found, it returns an object with the
534
+ -- appropriate properties
535
+ function ReqlessJob:data(...)
536
+ local job = redis.call(
537
+ 'hmget', ReqlessJob.ns .. self.jid, 'jid', 'klass', 'state', 'queue',
538
+ 'worker', 'priority', 'expires', 'retries', 'remaining', 'data',
539
+ 'tags', 'failure', 'throttles', 'spawned_from_jid')
540
+
541
+ -- Return nil if we haven't found it
542
+ if not job[1] then
543
+ return nil
544
+ end
545
+
546
+ local data = {
547
+ jid = job[1],
548
+ klass = job[2],
549
+ state = job[3],
550
+ queue = job[4],
551
+ worker = job[5] or '',
552
+ tracked = redis.call('zscore', 'ql:tracked', self.jid) ~= false,
553
+ priority = tonumber(job[6]),
554
+ expires = tonumber(job[7]) or 0,
555
+ retries = tonumber(job[8]),
556
+ remaining = math.floor(tonumber(job[9])),
557
+ data = job[10],
558
+ tags = cjson.decode(job[11]),
559
+ history = self:history(),
560
+ failure = cjson.decode(job[12] or '{}'),
561
+ throttles = cjson.decode(job[13] or '[]'),
562
+ spawned_from_jid = job[14],
563
+ dependents = redis.call('smembers', ReqlessJob.ns .. self.jid .. '-dependents'),
564
+ dependencies = redis.call('smembers', ReqlessJob.ns .. self.jid .. '-dependencies'),
565
+ }
566
+
567
+ if #arg > 0 then
568
+ -- This section could probably be optimized, but I wanted the interface
569
+ -- in place first
570
+ local response = {}
571
+ for _, key in ipairs(arg) do
572
+ table.insert(response, data[key])
573
+ end
574
+ return response
575
+ end
576
+
577
+ return data
578
+ end
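+
+ -- Illustration only (never called): fetch a hypothetical job's full data, or
+ -- just a few named fields.
+ local function example_job_data_usage()
+   local job = Reqless.job('example-jid')
+   local everything = job:data()   -- nil if the job is unknown
+   local state, queue = unpack(job:data('state', 'queue') or {})
+   return everything, state, queue
+ end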
579
+
580
+ -- Complete a job and optionally put it in another queue, either scheduled or
581
+ -- to be considered waiting immediately. It can also optionally accept other
582
+ -- jids on which this job will be considered dependent before it's considered
583
+ -- valid.
584
+ --
585
+ -- The variable-length arguments may be pairs of the form:
586
+ --
587
+ -- ('next' , queue) : The queue to advance it to next
588
+ -- ('delay' , delay) : The delay for the next queue
589
+ -- ('depends', : Json of jobs it depends on in the new queue
590
+ -- '["jid1", "jid2", ...]')
591
+ ---
592
+ function ReqlessJob:complete(now, worker, queue_name, raw_data, ...)
593
+ assert(worker, 'Complete(): Arg "worker" missing')
594
+ assert(queue_name , 'Complete(): Arg "queue_name" missing')
595
+ local data = assert(cjson.decode(raw_data),
596
+ 'Complete(): Arg "data" missing or not JSON: ' .. tostring(raw_data))
597
+
598
+ -- Read in all the optional parameters
599
+ local options = {}
600
+ for i = 1, #arg, 2 do options[arg[i]] = arg[i + 1] end
601
+
602
+ -- Sanity check on optional args
603
+ local next_queue_name = options['next']
604
+ local delay = assert(tonumber(options['delay'] or 0))
605
+ local depends = assert(cjson.decode(options['depends'] or '[]'),
606
+ 'Complete(): Arg "depends" not JSON: ' .. tostring(options['depends']))
607
+
608
+ -- Delay doesn't make sense without next_queue_name
609
+ if options['delay'] and next_queue_name == nil then
610
+ error('Complete(): "delay" cannot be used without a "next".')
611
+ end
612
+
613
+ -- Depends doesn't make sense without next_queue_name
614
+ if options['depends'] and next_queue_name == nil then
615
+ error('Complete(): "depends" cannot be used without a "next".')
616
+ end
617
+
618
+ -- The bin is midnight of the provided day
619
+ -- 24 * 60 * 60 = 86400
620
+ local bin = now - (now % 86400)
621
+
622
+ -- First things first, we should see if the worker still owns this job
623
+ local lastworker, state, priority, retries, current_queue = unpack(
624
+ redis.call('hmget', ReqlessJob.ns .. self.jid, 'worker', 'state',
625
+ 'priority', 'retries', 'queue'))
626
+
627
+ if lastworker == false then
628
+ error('Complete(): Job does not exist')
629
+ elseif (state ~= 'running') then
630
+ error('Complete(): Job is not currently running: ' .. state)
631
+ elseif lastworker ~= worker then
632
+ error('Complete(): Job has been handed out to another worker: ' ..
633
+ tostring(lastworker))
634
+ elseif queue_name ~= current_queue then
635
+ error('Complete(): Job running in another queue: ' ..
636
+ tostring(current_queue))
637
+ end
638
+
639
+ -- Now we can assume that the worker does own the job. We need to
640
+ -- 1) Remove the job from the 'locks' of the old queue
641
+ -- 2) Enqueue it in the next stage if necessary
642
+ -- 3) Update the data
643
+ -- 4) Mark the job as completed, remove the worker, remove expires, and
644
+ -- update history
645
+ self:history(now, 'done')
646
+
647
+ redis.call('hset', ReqlessJob.ns .. self.jid, 'data', raw_data)
648
+
649
+ -- Remove the job from the previous queue
650
+ local queue = Reqless.queue(queue_name)
651
+ queue:remove_job(self.jid)
652
+
653
+ self:throttles_release(now)
654
+
655
+ -- Calculate how long the job has been running.
656
+ local popped_time = tonumber(
657
+ redis.call('hget', ReqlessJob.ns .. self.jid, 'time') or now)
658
+ local run_time = now - popped_time
659
+ queue:stat(now, 'run', run_time)
660
+ redis.call('hset', ReqlessJob.ns .. self.jid,
661
+ 'time', string.format("%.20f", now))
662
+
663
+ -- Remove this job from the jobs that the worker that was running it has
664
+ redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid)
665
+
666
+ if redis.call('zscore', 'ql:tracked', self.jid) ~= false then
667
+ Reqless.publish('completed', self.jid)
668
+ end
669
+
670
+ if next_queue_name then
671
+ local next_queue = Reqless.queue(next_queue_name)
672
+ -- Send a message out to log
673
+ Reqless.publish('log', cjson.encode({
674
+ jid = self.jid,
675
+ event = 'advanced',
676
+ queue = queue_name,
677
+ to = next_queue_name,
678
+ }))
679
+
680
+ -- Enqueue the job
681
+ self:history(now, 'put', {queue = next_queue_name})
682
+
683
+ -- We're going to make sure that this queue is in the
684
+ -- set of known queues
685
+ if redis.call('zscore', 'ql:queues', next_queue_name) == false then
686
+ redis.call('zadd', 'ql:queues', now, next_queue_name)
687
+ end
688
+
689
+ redis.call('hmset', ReqlessJob.ns .. self.jid,
690
+ 'state', 'waiting',
691
+ 'worker', '',
692
+ 'failure', '{}',
693
+ 'queue', next_queue_name,
694
+ 'expires', 0,
695
+ 'remaining', tonumber(retries))
696
+
697
+ if (delay > 0) and (#depends == 0) then
698
+ next_queue.scheduled.add(now + delay, self.jid)
699
+ return 'scheduled'
700
+ end
701
+
702
+ -- These are the jids we legitimately have to wait on
703
+ local count = 0
704
+ for _, j in ipairs(depends) do
705
+ -- Make sure it's something other than 'nil' or complete.
706
+ local state = redis.call('hget', ReqlessJob.ns .. j, 'state')
707
+ if (state and state ~= 'complete') then
708
+ count = count + 1
709
+ redis.call(
710
+ 'sadd', ReqlessJob.ns .. j .. '-dependents',self.jid)
711
+ redis.call(
712
+ 'sadd', ReqlessJob.ns .. self.jid .. '-dependencies', j)
713
+ end
714
+ end
715
+ if count > 0 then
716
+ next_queue.depends.add(now, self.jid)
717
+ redis.call('hset', ReqlessJob.ns .. self.jid, 'state', 'depends')
718
+ if delay > 0 then
719
+ -- We've already put it in 'depends'. Now, we must just save the data
720
+ -- for when it's scheduled
721
+ next_queue.depends.add(now, self.jid)
722
+ redis.call('hset', ReqlessJob.ns .. self.jid, 'scheduled', now + delay)
723
+ end
724
+ return 'depends'
725
+ end
726
+
727
+ next_queue.work.add(now, priority, self.jid)
728
+ return 'waiting'
729
+ end
730
+ -- Send a message out to log
731
+ Reqless.publish('log', cjson.encode({
732
+ jid = self.jid,
733
+ event = 'completed',
734
+ queue = queue_name,
735
+ }))
736
+
737
+ redis.call('hmset', ReqlessJob.ns .. self.jid,
738
+ 'state', 'complete',
739
+ 'worker', '',
740
+ 'failure', '{}',
741
+ 'queue', '',
742
+ 'expires', 0,
743
+ 'remaining', tonumber(retries))
744
+
745
+ -- Do the completion dance
746
+ local count = Reqless.config.get('jobs-history-count')
747
+ local time = Reqless.config.get('jobs-history')
748
+
749
+ -- These are the default values
750
+ count = tonumber(count or 50000)
751
+ time = tonumber(time or 7 * 24 * 60 * 60)
752
+
753
+ -- Schedule this job for eventual destruction
754
+ redis.call('zadd', 'ql:completed', now, self.jid)
755
+
756
+ -- Now look at the expired job data. First, based on the current time
757
+ local jids = redis.call('zrangebyscore', 'ql:completed', 0, now - time)
758
+ -- Any jobs that need to be expired... delete
759
+ for _, jid in ipairs(jids) do
760
+ Reqless.job(jid):delete()
761
+ end
762
+
763
+ -- And now remove those from the queued-for-cleanup queue
764
+ redis.call('zremrangebyscore', 'ql:completed', 0, now - time)
765
+
766
+ -- Now remove all but the most recent 'count' ids
767
+ jids = redis.call('zrange', 'ql:completed', 0, (-1-count))
768
+ for _, jid in ipairs(jids) do
769
+ Reqless.job(jid):delete()
770
+ end
771
+ redis.call('zremrangebyrank', 'ql:completed', 0, (-1-count))
772
+
773
+ -- Alright, if this has any dependents, then we should go ahead
774
+ -- and unstick those guys.
775
+ for _, j in ipairs(redis.call(
776
+ 'smembers', ReqlessJob.ns .. self.jid .. '-dependents')) do
777
+ redis.call('srem', ReqlessJob.ns .. j .. '-dependencies', self.jid)
778
+ if redis.call(
779
+ 'scard', ReqlessJob.ns .. j .. '-dependencies') == 0 then
780
+ local other_queue_name, priority, scheduled = unpack(
781
+ redis.call('hmget', ReqlessJob.ns .. j, 'queue', 'priority', 'scheduled'))
782
+ if other_queue_name then
783
+ local other_queue = Reqless.queue(other_queue_name)
784
+ other_queue.depends.remove(j)
785
+ if scheduled then
786
+ other_queue.scheduled.add(scheduled, j)
787
+ redis.call('hset', ReqlessJob.ns .. j, 'state', 'scheduled')
788
+ redis.call('hdel', ReqlessJob.ns .. j, 'scheduled')
789
+ else
790
+ other_queue.work.add(now, priority, j)
791
+ redis.call('hset', ReqlessJob.ns .. j, 'state', 'waiting')
792
+ end
793
+ end
794
+ end
795
+ end
796
+
797
+ -- Delete our dependents key
798
+ redis.call('del', ReqlessJob.ns .. self.jid .. '-dependents')
799
+
800
+ return 'complete'
801
+ end
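+
+ -- Illustration only (never called): a worker completing a hypothetical job and
+ -- advancing it to another queue after a delay, using the ('next', ...) and
+ -- ('delay', ...) option pairs documented above.
+ local function example_complete_usage(now)
+   return Reqless.job('example-jid'):complete(now, 'worker-1', 'example-queue',
+     '{}', 'next', 'followup-queue', 'delay', 60)   -- returns 'scheduled'
+ end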
802
+
803
+ -- Fail(now, worker, group, message, [data])
804
+ -- -------------------------------------------------
805
+ -- Mark the particular job as failed, with the provided group, and a more
806
+ -- specific message. By `group`, we mean some phrase that might be one of
807
+ -- several categorical modes of failure. The `message` is something more
808
+ -- job-specific, like perhaps a traceback.
809
+ --
810
+ -- This method should __not__ be used to note that a job has been dropped or
811
+ -- has failed in a transient way. This method __should__ be used to note that
812
+ -- a job has something really wrong with it that must be remedied.
813
+ --
814
+ -- The motivation behind the `group` is so that similar errors can be grouped
815
+ -- together. Optionally, updated data can be provided for the job. A job in
816
+ -- any state can be marked as failed. If it has been given to a worker as a
817
+ -- job, then its subsequent requests to heartbeat or complete that job will
818
+ -- fail. Failed jobs are kept until they are canceled or completed.
819
+ --
820
+ -- __Returns__ the id of the failed job if successful, or `False` on failure.
821
+ --
822
+ -- Args:
823
+ -- 1) jid
824
+ -- 2) worker
825
+ -- 3) group
826
+ -- 4) message
827
+ -- 5) the current time
828
+ -- 6) [data]
829
+ function ReqlessJob:fail(now, worker, group, message, data)
830
+ local worker = assert(worker , 'Fail(): Arg "worker" missing')
831
+ local group = assert(group , 'Fail(): Arg "group" missing')
832
+ local message = assert(message , 'Fail(): Arg "message" missing')
833
+
834
+ -- The bin is midnight of the provided day
835
+ -- 24 * 60 * 60 = 86400
836
+ local bin = now - (now % 86400)
837
+
838
+ if data then
839
+ data = cjson.decode(data)
840
+ end
841
+
842
+ -- First things first, we should get the history
843
+ local queue_name, state, oldworker = unpack(redis.call(
844
+ 'hmget', ReqlessJob.ns .. self.jid, 'queue', 'state', 'worker'))
845
+
846
+ -- If the job has been completed, we cannot fail it
847
+ if not state then
848
+ error('Fail(): Job does not exist')
849
+ elseif state ~= 'running' then
850
+ error('Fail(): Job not currently running: ' .. state)
851
+ elseif worker ~= oldworker then
852
+ error('Fail(): Job running with another worker: ' .. oldworker)
853
+ end
854
+
855
+ -- Send out a log message
856
+ Reqless.publish('log', cjson.encode({
857
+ jid = self.jid,
858
+ event = 'failed',
859
+ worker = worker,
860
+ group = group,
861
+ message = message,
862
+ }))
863
+
864
+ if redis.call('zscore', 'ql:tracked', self.jid) ~= false then
865
+ Reqless.publish('failed', self.jid)
866
+ end
867
+
868
+ -- Remove this job from the jobs that the worker that was running it has
869
+ redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid)
870
+
871
+ -- Now, take the element of the history for which our provided worker is
872
+ -- the worker, and update 'failed'
873
+ self:history(now, 'failed', {worker = worker, group = group})
874
+
875
+ -- Increment the number of failures for that queue for the
876
+ -- given day.
877
+ redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue_name, 'failures', 1)
878
+ redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue_name, 'failed' , 1)
879
+
880
+ -- Now remove the instance from the schedule, and work queues for the
881
+ -- queue it's in
882
+ local queue = Reqless.queue(queue_name)
883
+ queue:remove_job(self.jid)
884
+
885
+ -- The reason that this appears here is that the above will fail if the
886
+ -- job doesn't exist
887
+ if data then
888
+ redis.call('hset', ReqlessJob.ns .. self.jid, 'data', cjson.encode(data))
889
+ end
890
+
891
+ redis.call('hmset', ReqlessJob.ns .. self.jid,
892
+ 'state', 'failed',
893
+ 'worker', '',
894
+ 'expires', '',
895
+ 'failure', cjson.encode({
896
+ group = group,
897
+ message = message,
898
+ when = math.floor(now),
899
+ worker = worker
900
+ }))
901
+
902
+ self:throttles_release(now)
903
+
904
+ -- Add this failure group to the list of failures
905
+ redis.call('sadd', 'ql:failures', group)
906
+ -- And add this particular instance to the failed groups
907
+ redis.call('lpush', 'ql:f:' .. group, self.jid)
908
+
909
+ -- Here is where we'd increment stats about the particular stage
910
+ -- and possibly the workers
911
+
912
+ return self.jid
913
+ end
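+
+ -- Illustration only (never called): a worker failing a running job with a
+ -- failure group and message (all names hypothetical), replacing its data.
+ local function example_fail_usage(now)
+   return Reqless.job('example-jid'):fail(now, 'worker-1', 'account-errors',
+     'traceback goes here', cjson.encode({attempted = true}))
+ end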
914
+
915
+ -- retry(now, queue_name, worker, [delay, [group, [message]]])
916
+ -- ------------------------------------------
917
+ -- This script accepts jid, queue, worker and delay for retrying a job. This
918
+ -- is similar in functionality to `put`, except that this counts against the
919
+ -- retries a job has for a stage.
920
+ --
921
+ -- Throws an exception if:
922
+ -- - the worker is not the worker with a lock on the job
923
+ -- - the job is not actually running
924
+ --
925
+ -- Otherwise, it returns the number of retries remaining. If the allowed
926
+ -- retries have been exhausted, then it is automatically failed, and a negative
927
+ -- number is returned.
928
+ --
929
+ -- If a group and message is provided, then if the retries are exhausted, then
930
+ -- the provided group and message will be used in place of the default
931
+ -- messaging about retries in the particular queue being exhausted
932
+ function ReqlessJob:retry(now, queue_name, worker, delay, group, message)
933
+ assert(queue_name , 'Retry(): Arg "queue_name" missing')
934
+ assert(worker, 'Retry(): Arg "worker" missing')
935
+ delay = assert(tonumber(delay or 0),
936
+ 'Retry(): Arg "delay" not a number: ' .. tostring(delay))
937
+
938
+ -- Let's see what the old queue, state, worker, and priority were
939
+ local old_queue_name, state, retries, oldworker, priority, failure = unpack(
940
+ redis.call('hmget', ReqlessJob.ns .. self.jid, 'queue', 'state',
941
+ 'retries', 'worker', 'priority', 'failure'))
942
+
943
+ -- Make sure the job exists, is running, and is owned by this worker
944
+ if oldworker == false then
945
+ error('Retry(): Job does not exist')
946
+ elseif state ~= 'running' then
947
+ error('Retry(): Job is not currently running: ' .. state)
948
+ elseif oldworker ~= worker then
949
+ error('Retry(): Job has been given to another worker: ' .. oldworker)
950
+ end
951
+
952
+ -- Decrement the job's remaining retries. If it has
953
+ -- exhausted its retries, then we should mark it as
954
+ -- failed.
955
+ local remaining = tonumber(redis.call(
956
+ 'hincrby', ReqlessJob.ns .. self.jid, 'remaining', -1))
957
+ redis.call('hdel', ReqlessJob.ns .. self.jid, 'grace')
958
+
959
+ -- Remove it from the locks key of the old queue
960
+ Reqless.queue(old_queue_name).locks.remove(self.jid)
961
+
962
+ -- Release the throttle for the job
963
+ self:throttles_release(now)
964
+
965
+ -- Remove this job from the worker that was previously working it
966
+ redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid)
967
+
968
+ if remaining < 0 then
969
+ -- Now remove the instance from the schedule, and work queues for the
970
+ -- queue it's in
971
+ local group = group or 'failed-retries-' .. queue_name
972
+ self:history(now, 'failed-retries', {group = group})
973
+
974
+ redis.call('hmset', ReqlessJob.ns .. self.jid, 'state', 'failed',
975
+ 'worker', '',
976
+ 'expires', '')
977
+ -- If the failure has not already been set, then set it
978
+ if group ~= nil and message ~= nil then
979
+ redis.call('hset', ReqlessJob.ns .. self.jid,
980
+ 'failure', cjson.encode({
981
+ group = group,
982
+ message = message,
983
+ when = math.floor(now),
984
+ worker = worker
985
+ })
986
+ )
987
+ else
988
+ redis.call('hset', ReqlessJob.ns .. self.jid,
989
+ 'failure', cjson.encode({
990
+ group = group,
991
+ message = 'Job exhausted retries in queue "' .. old_queue_name .. '"',
992
+ when = now,
993
+ worker = unpack(self:data('worker'))
994
+ }))
995
+ end
996
+
997
+ -- Add this type of failure to the list of failures
998
+ redis.call('sadd', 'ql:failures', group)
999
+ -- And add this particular instance to the failed types
1000
+ redis.call('lpush', 'ql:f:' .. group, self.jid)
1001
+ -- Increment the count of the failed jobs
1002
+ local bin = now - (now % 86400)
1003
+ redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue_name, 'failures', 1)
1004
+ redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue_name, 'failed' , 1)
1005
+ else
1006
+ -- Put it in the queue again with a delay. Like put()
1007
+ local queue = Reqless.queue(queue_name)
1008
+ if delay > 0 then
1009
+ queue.scheduled.add(now + delay, self.jid)
1010
+ redis.call('hset', ReqlessJob.ns .. self.jid, 'state', 'scheduled')
1011
+ else
1012
+ queue.work.add(now, priority, self.jid)
1013
+ redis.call('hset', ReqlessJob.ns .. self.jid, 'state', 'waiting')
1014
+ end
1015
+
1016
+ -- If a group and a message was provided, then we should save it
1017
+ if group ~= nil and message ~= nil then
1018
+ redis.call('hset', ReqlessJob.ns .. self.jid,
1019
+ 'failure', cjson.encode({
1020
+ group = group,
1021
+ message = message,
1022
+ when = math.floor(now),
1023
+ worker = worker
1024
+ })
1025
+ )
1026
+ end
1027
+ end
1028
+
1029
+ return math.floor(remaining)
1030
+ end
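+
+ -- Illustration only (never called): retry a running job in its current queue
+ -- with a 30 second delay and an explicit group/message (names hypothetical);
+ -- a negative return value means the retries were exhausted.
+ local function example_retry_usage(now)
+   return Reqless.job('example-jid'):retry(now, 'example-queue', 'worker-1',
+     30, 'transient-errors', 'upstream timed out')
+ end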
1031
+
1032
+ -- Depends(jid, 'on', [jid, [jid, [...]]]
1033
+ -- Depends(jid, 'off', [jid, [jid, [...]]])
1034
+ -- Depends(jid, 'off', 'all')
1035
+ -------------------------------------------------------------------------------
1036
+ -- Add or remove dependencies a job has. If 'on' is provided, the provided
1037
+ -- jids are added as dependencies. If 'off' and 'all' are provided, then all
1038
+ -- the current dependencies are removed. If 'off' is provided and the next
1039
+ -- argument is not 'all', then those jids are removed as dependencies.
1040
+ --
1041
+ -- If a job is not already in the 'depends' state, then this call will raise an
1042
+ -- error. Otherwise, it will return true.
1043
+ function ReqlessJob:depends(now, command, ...)
1044
+ assert(command, 'Depends(): Arg "command" missing')
1045
+ if command ~= 'on' and command ~= 'off' then
1046
+ error('Depends(): Argument "command" must be "on" or "off"')
1047
+ end
1048
+
1049
+ local state = redis.call('hget', ReqlessJob.ns .. self.jid, 'state')
1050
+ if state ~= 'depends' then
1051
+ error('Depends(): Job ' .. self.jid ..
1052
+ ' not in the depends state: ' .. tostring(state))
1053
+ end
1054
+
1055
+ if command == 'on' then
1056
+ -- These are the jids we legitimately have to wait on
1057
+ for _, j in ipairs(arg) do
1058
+ -- Make sure it's something other than 'nil' or complete.
1059
+ local state = redis.call('hget', ReqlessJob.ns .. j, 'state')
1060
+ if (state and state ~= 'complete') then
1061
+ redis.call(
1062
+ 'sadd', ReqlessJob.ns .. j .. '-dependents' , self.jid)
1063
+ redis.call(
1064
+ 'sadd', ReqlessJob.ns .. self.jid .. '-dependencies', j)
1065
+ end
1066
+ end
1067
+ return true
1068
+ end
1069
+
1070
+ if arg[1] == 'all' then
1071
+ for _, j in ipairs(redis.call(
1072
+ 'smembers', ReqlessJob.ns .. self.jid .. '-dependencies')) do
1073
+ redis.call('srem', ReqlessJob.ns .. j .. '-dependents', self.jid)
1074
+ end
1075
+ redis.call('del', ReqlessJob.ns .. self.jid .. '-dependencies')
1076
+ local queue_name, priority = unpack(redis.call(
1077
+ 'hmget', ReqlessJob.ns .. self.jid, 'queue', 'priority'))
1078
+ if queue_name then
1079
+ local queue = Reqless.queue(queue_name)
1080
+ queue.depends.remove(self.jid)
1081
+ queue.work.add(now, priority, self.jid)
1082
+ redis.call('hset', ReqlessJob.ns .. self.jid, 'state', 'waiting')
1083
+ end
1084
+ else
1085
+ for _, j in ipairs(arg) do
1086
+ redis.call('srem', ReqlessJob.ns .. j .. '-dependents', self.jid)
1087
+ redis.call(
1088
+ 'srem', ReqlessJob.ns .. self.jid .. '-dependencies', j)
1089
+ if redis.call('scard',
1090
+ ReqlessJob.ns .. self.jid .. '-dependencies') == 0 then
1091
+ local queue_name, priority = unpack(redis.call(
1092
+ 'hmget', ReqlessJob.ns .. self.jid, 'queue', 'priority'))
1093
+ if queue_name then
1094
+ local queue = Reqless.queue(queue_name)
1095
+ queue.depends.remove(self.jid)
1096
+ queue.work.add(now, priority, self.jid)
1097
+ redis.call('hset',
1098
+ ReqlessJob.ns .. self.jid, 'state', 'waiting')
1099
+ end
1100
+ end
1101
+ end
1102
+ end
1103
+ return true
1104
+ end
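+
+ -- Illustration only (never called): add dependencies to a job already in the
+ -- 'depends' state, then clear them all (jids hypothetical).
+ local function example_depends_usage(now)
+   local job = Reqless.job('example-jid')
+   job:depends(now, 'on', 'other-jid-1', 'other-jid-2')
+   return job:depends(now, 'off', 'all')
+ end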
1105
+
1106
+ -- Heartbeat
1107
+ ------------
1108
+ -- Renew this worker's lock on this job. Throws an exception if:
1109
+ -- - the job's been given to another worker
1110
+ -- - the job's been completed
1111
+ -- - the job's been canceled
1112
+ -- - the job's not running
1113
+ function ReqlessJob:heartbeat(now, worker, data)
1114
+ assert(worker, 'Heartbeat(): Arg "worker" missing')
1115
+
1116
+ -- We should find the heartbeat interval for this queue
1117
+ -- heartbeat. First, though, we need to find the queue
1118
+ -- this particular job is in
1119
+ local queue_name = redis.call('hget', ReqlessJob.ns .. self.jid, 'queue') or ''
1120
+ local expires = now + tonumber(
1121
+ Reqless.config.get(queue_name .. '-heartbeat') or
1122
+ Reqless.config.get('heartbeat', 60))
1123
+
1124
+ if data then
1125
+ data = cjson.decode(data)
1126
+ end
1127
+
1128
+ -- First, let's see if the worker still owns this job, and there is a
1129
+ -- worker
1130
+ local job_worker, state = unpack(
1131
+ redis.call('hmget', ReqlessJob.ns .. self.jid, 'worker', 'state'))
1132
+ if job_worker == false then
1133
+ -- This means the job doesn't exist
1134
+ error('Heartbeat(): Job does not exist')
1135
+ elseif state ~= 'running' then
1136
+ error('Heartbeat(): Job not currently running: ' .. state)
1137
+ elseif job_worker ~= worker or #job_worker == 0 then
1138
+ error('Heartbeat(): Job given out to another worker: ' .. job_worker)
1139
+ end
1140
+
1141
+ -- Otherwise, optionally update the user data, and the heartbeat
1142
+ if data then
1143
+ -- I don't know if this is wise, but I'm decoding and encoding
1144
+ -- the user data to hopefully ensure its sanity
1145
+ redis.call('hmset', ReqlessJob.ns .. self.jid, 'expires',
1146
+ expires, 'worker', worker, 'data', cjson.encode(data))
1147
+ else
1148
+ redis.call('hmset', ReqlessJob.ns .. self.jid,
1149
+ 'expires', expires, 'worker', worker)
1150
+ end
1151
+
1152
+ -- Update when this job was last updated on that worker
1153
+ -- Add this job to the list of jobs handled by this worker
1154
+ redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, self.jid)
1155
+
1156
+ -- And now we should just update the locks
1157
+ local queue = Reqless.queue(
1158
+ redis.call('hget', ReqlessJob.ns .. self.jid, 'queue'))
1159
+ queue.locks.add(expires, self.jid)
1160
+ return expires
1161
+ end
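+
+ -- Illustration only (never called): a worker renewing its lock and updating
+ -- the job's data, getting back the new expiration time (names hypothetical).
+ local function example_heartbeat_usage(now)
+   return Reqless.job('example-jid'):heartbeat(now, 'worker-1',
+     cjson.encode({progress = 0.5}))
+ end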
1162
+
1163
+ -- Priority
1164
+ -- --------
1165
+ -- Update the priority of this job. If the job doesn't exist, throws an
1166
+ -- exception
1167
+ function ReqlessJob:priority(priority)
1168
+ priority = assert(tonumber(priority),
1169
+ 'Priority(): Arg "priority" missing or not a number: ' ..
1170
+ tostring(priority))
1171
+
1172
+ -- Get the queue the job is currently in, if any
1173
+ local queue_name = redis.call('hget', ReqlessJob.ns .. self.jid, 'queue')
1174
+
1175
+ if queue_name == nil then
1176
+ -- If the job doesn't exist, throw an error
1177
+ error('Priority(): Job ' .. self.jid .. ' does not exist')
1178
+ end
1179
+
1180
+ -- See if the job is a candidate for updating its priority in the queue it's
1181
+ -- currently in
1182
+ if queue_name ~= '' then
1183
+ local queue = Reqless.queue(queue_name)
1184
+ if queue.work.score(self.jid) then
1185
+ queue.work.add(0, priority, self.jid)
1186
+ end
1187
+ end
1188
+
1189
+ redis.call('hset', ReqlessJob.ns .. self.jid, 'priority', priority)
1190
+ return priority
1191
+ end
1192
+
1193
+ -- Update the job's attributes with the provided dictionary
1194
+ function ReqlessJob:update(data)
1195
+ local tmp = {}
1196
+ for k, v in pairs(data) do
1197
+ table.insert(tmp, k)
1198
+ table.insert(tmp, v)
1199
+ end
1200
+ redis.call('hmset', ReqlessJob.ns .. self.jid, unpack(tmp))
1201
+ end
1202
+
1203
+ -- Times out the job now rather than when its lock is normally set to expire
1204
+ function ReqlessJob:timeout(now)
1205
+ local queue_name, state, worker = unpack(redis.call('hmget',
1206
+ ReqlessJob.ns .. self.jid, 'queue', 'state', 'worker'))
1207
+ if queue_name == nil then
1208
+ error('Timeout(): Job does not exist')
1209
+ elseif state ~= 'running' then
1210
+ error('Timeout(): Job ' .. self.jid .. ' not running')
1211
+ end
1212
+ -- Time out the job
1213
+ self:history(now, 'timed-out')
1214
+ local queue = Reqless.queue(queue_name)
1215
+ queue.locks.remove(self.jid)
1216
+
1217
+ -- Release acquired throttles
1218
+ self:throttles_release(now)
1219
+
1220
+ queue.work.add(now, math.huge, self.jid)
1221
+ redis.call('hmset', ReqlessJob.ns .. self.jid,
1222
+ 'state', 'stalled', 'expires', 0, 'worker', '')
1223
+ local encoded = cjson.encode({
1224
+ jid = self.jid,
1225
+ event = 'lock_lost',
1226
+ worker = worker,
1227
+ })
1228
+ Reqless.publish('w:' .. worker, encoded)
1229
+ Reqless.publish('log', encoded)
1230
+ return queue_name
1231
+ end
1232
+
1233
+ -- Return whether or not this job exists
1234
+ function ReqlessJob:exists()
1235
+ return redis.call('exists', ReqlessJob.ns .. self.jid) == 1
1236
+ end
1237
+
1238
+ -- Get or append to history
1239
+ function ReqlessJob:history(now, what, item)
1240
+ -- First, check if there's an old-style history, and update it if there is
1241
+ local history = redis.call('hget', ReqlessJob.ns .. self.jid, 'history')
1242
+ if history then
1243
+ history = cjson.decode(history)
1244
+ for _, value in ipairs(history) do
1245
+ redis.call('rpush', ReqlessJob.ns .. self.jid .. '-history',
1246
+ cjson.encode({math.floor(value.put), 'put', {queue = value.queue}}))
1247
+
1248
+ -- If there's any popped time
1249
+ if value.popped then
1250
+ redis.call('rpush', ReqlessJob.ns .. self.jid .. '-history',
1251
+ cjson.encode({math.floor(value.popped), 'popped',
1252
+ {worker = value.worker}}))
1253
+ end
1254
+
1255
+ -- If there's any failure
1256
+ if value.failed then
1257
+ redis.call('rpush', ReqlessJob.ns .. self.jid .. '-history',
1258
+ cjson.encode(
1259
+ {math.floor(value.failed), 'failed', nil}))
1260
+ end
1261
+
1262
+ -- If it was completed
1263
+ if value.done then
1264
+ redis.call('rpush', ReqlessJob.ns .. self.jid .. '-history',
1265
+ cjson.encode(
1266
+ {math.floor(value.done), 'done', nil}))
1267
+ end
1268
+ end
1269
+ -- With all this ported forward, delete the old-style history
1270
+ redis.call('hdel', ReqlessJob.ns .. self.jid, 'history')
1271
+ end
1272
+
1273
+ -- Now to the meat of the function
1274
+ if what == nil then
1275
+ -- Get the history
1276
+ local response = {}
1277
+ for _, value in ipairs(redis.call('lrange',
1278
+ ReqlessJob.ns .. self.jid .. '-history', 0, -1)) do
1279
+ value = cjson.decode(value)
1280
+ local dict = value[3] or {}
1281
+ dict['when'] = value[1]
1282
+ dict['what'] = value[2]
1283
+ table.insert(response, dict)
1284
+ end
1285
+ return response
1286
+ end
1287
+
1288
+ -- Append to the history. If the length of the history should be limited,
1289
+ -- then we'll truncate it.
1290
+ local count = tonumber(Reqless.config.get('max-job-history', 100))
1291
+ if count > 0 then
1292
+ -- We'll always keep the first item around
1293
+ local obj = redis.call('lpop', ReqlessJob.ns .. self.jid .. '-history')
1294
+ redis.call('ltrim', ReqlessJob.ns .. self.jid .. '-history', -count + 2, -1)
1295
+ if obj ~= nil and obj ~= false then
1296
+ redis.call('lpush', ReqlessJob.ns .. self.jid .. '-history', obj)
1297
+ end
1298
+ end
1299
+ return redis.call('rpush', ReqlessJob.ns .. self.jid .. '-history',
1300
+ cjson.encode({math.floor(now), what, item}))
1301
+ end
1302
+
1303
+ function ReqlessJob:throttles_release(now)
1304
+ local throttles = redis.call('hget', ReqlessJob.ns .. self.jid, 'throttles')
1305
+ throttles = cjson.decode(throttles or '[]')
1306
+
1307
+ for _, tid in ipairs(throttles) do
1308
+ Reqless.throttle(tid):release(now, self.jid)
1309
+ end
1310
+ end
1311
+
1312
+ function ReqlessJob:throttles_available()
1313
+ for _, tid in ipairs(self:throttles()) do
1314
+ if not Reqless.throttle(tid):available() then
1315
+ return false
1316
+ end
1317
+ end
1318
+
1319
+ return true
1320
+ end
1321
+
1322
+ function ReqlessJob:throttles_acquire(now)
1323
+ if not self:throttles_available() then
1324
+ return false
1325
+ end
1326
+
1327
+ for _, tid in ipairs(self:throttles()) do
1328
+ Reqless.throttle(tid):acquire(self.jid)
1329
+ end
1330
+
1331
+ return true
1332
+ end
1333
+
1334
+ -- Finds the first unavailable throttle and adds the job to its pending job set.
1335
+ function ReqlessJob:throttle(now)
1336
+ for _, tid in ipairs(self:throttles()) do
1337
+ local throttle = Reqless.throttle(tid)
1338
+ if not throttle:available() then
1339
+ throttle:pend(now, self.jid)
1340
+ return
1341
+ end
1342
+ end
1343
+ end
1344
+
1345
+ function ReqlessJob:throttles()
1346
+ -- memoize throttles for the job.
1347
+ if not self._throttles then
1348
+ self._throttles = cjson.decode(redis.call('hget', ReqlessJob.ns .. self.jid, 'throttles') or '[]')
1349
+ end
1350
+
1351
+ return self._throttles
1352
+ end
1353
+
1354
+ -- Completely removes all the data
1355
+ -- associated with this job, use
1356
+ -- with care.
1357
+ function ReqlessJob:delete()
1358
+ local tags = redis.call('hget', ReqlessJob.ns .. self.jid, 'tags') or '[]'
1359
+ tags = cjson.decode(tags)
1360
+ -- remove the jid from each tag
1361
+ for _, tag in ipairs(tags) do
1362
+ self:remove_tag(tag)
1363
+ end
1364
+ -- Delete the job's data
1365
+ redis.call('del', ReqlessJob.ns .. self.jid)
1366
+ -- Delete the job's history
1367
+ redis.call('del', ReqlessJob.ns .. self.jid .. '-history')
1368
+ -- Delete any notion of dependencies it has
1369
+ redis.call('del', ReqlessJob.ns .. self.jid .. '-dependencies')
1370
+ end
1371
+
1372
+ -- Inserts the jid into the specified tag.
1373
+ -- This should probably be moved to its own tag
1374
+ -- object.
1375
+ function ReqlessJob:insert_tag(now, tag)
1376
+ redis.call('zadd', 'ql:t:' .. tag, now, self.jid)
1377
+ redis.call('zincrby', 'ql:tags', 1, tag)
1378
+ end
1379
+
1380
+ -- Removes the jid from the specified tag.
1381
+ -- this should probably be moved to its own tag
1382
+ -- object.
1383
+ function ReqlessJob:remove_tag(tag)
1384
+ -- namespace the tag
1385
+ local namespaced_tag = 'ql:t:' .. tag
1386
+
1387
+ -- Remove the job from the specified tag
1388
+ redis.call('zrem', namespaced_tag, self.jid)
1389
+
1390
+ -- Check if any jids remain in the tag set.
1391
+ local remaining = redis.call('zcard', namespaced_tag)
1392
+
1393
+ -- If the number of jids in the tagged set
1394
+ -- is 0 it means we have no jobs with this tag
1395
+ -- and we should remove it from the set of all tags
1396
+ -- to prevent memory leaks.
1397
+ if tonumber(remaining) == 0 then
1398
+ redis.call('zrem', 'ql:tags', tag)
1399
+ else
1400
+ -- Decrement the tag in the set of all tags.
1401
+ redis.call('zincrby', 'ql:tags', -1, tag)
1402
+ end
1403
+ end
1404
+ -------------------------------------------------------------------------------
1405
+ -- Queue class
1406
+ -------------------------------------------------------------------------------
1407
+ -- Return a queue object
1408
+ function Reqless.queue(name)
1409
+ assert(name, 'Queue(): no queue name provided')
1410
+ local queue = {}
1411
+ setmetatable(queue, ReqlessQueue)
1412
+ queue.name = name
1413
+
1414
+ -- Access to our work
1415
+ queue.work = {
1416
+ peek = function(offset, limit)
1417
+ if limit <= 0 then
1418
+ return {}
1419
+ end
1420
+ return redis.call('zrevrange', queue:prefix('work'), offset, offset + limit - 1)
1421
+ end, remove = function(...)
1422
+ if #arg > 0 then
1423
+ return redis.call('zrem', queue:prefix('work'), unpack(arg))
1424
+ end
1425
+ end, add = function(now, priority, jid)
1426
+ return redis.call('zadd',
1427
+ queue:prefix('work'), priority - (now / 10000000000), jid)
1428
+ end, score = function(jid)
1429
+ return redis.call('zscore', queue:prefix('work'), jid)
1430
+ end, length = function()
1431
+ return redis.call('zcard', queue:prefix('work'))
1432
+ end
1433
+ }
1434
+
1435
+ -- Access to our locks
1436
+ queue.locks = {
1437
+ expired = function(now, offset, limit)
1438
+ return redis.call('zrangebyscore',
1439
+ queue:prefix('locks'), -math.huge, now, 'LIMIT', offset, limit)
1440
+ end, peek = function(now, offset, limit)
1441
+ return redis.call('zrangebyscore', queue:prefix('locks'),
1442
+ now, math.huge, 'LIMIT', offset, limit)
1443
+ end, add = function(expires, jid)
1444
+ redis.call('zadd', queue:prefix('locks'), expires, jid)
1445
+ end, remove = function(...)
1446
+ if #arg > 0 then
1447
+ return redis.call('zrem', queue:prefix('locks'), unpack(arg))
1448
+ end
1449
+ end, running = function(now)
1450
+ return redis.call('zcount', queue:prefix('locks'), now, math.huge)
1451
+ end, length = function(now)
1452
+ -- If a 'now' is provided, we're interested in how many are before
1453
+ -- that time
1454
+ if now then
1455
+ return redis.call('zcount', queue:prefix('locks'), 0, now)
1456
+ else
1457
+ return redis.call('zcard', queue:prefix('locks'))
1458
+ end
1459
+ end
1460
+ }
1461
+
1462
+ -- Access to our dependent jobs
1463
+ queue.depends = {
1464
+ peek = function(now, offset, limit)
1465
+ return redis.call('zrange',
1466
+ queue:prefix('depends'), offset, offset + limit - 1)
1467
+ end, add = function(now, jid)
1468
+ redis.call('zadd', queue:prefix('depends'), now, jid)
1469
+ end, remove = function(...)
1470
+ if #arg > 0 then
1471
+ return redis.call('zrem', queue:prefix('depends'), unpack(arg))
1472
+ end
1473
+ end, length = function()
1474
+ return redis.call('zcard', queue:prefix('depends'))
1475
+ end
1476
+ }
1477
+
1478
+
1479
+ -- Access to the queue level throttled jobs.
1480
+ queue.throttled = {
1481
+ length = function()
1482
+ return (redis.call('zcard', queue:prefix('throttled')) or 0)
1483
+ end, peek = function(now, offset, limit)
1484
+ return redis.call('zrange', queue:prefix('throttled'), offset, offset + limit - 1)
1485
+ end, add = function(...)
1486
+ if #arg > 0 then
1487
+ redis.call('zadd', queue:prefix('throttled'), unpack(arg))
1488
+ end
1489
+ end, remove = function(...)
1490
+ if #arg > 0 then
1491
+ return redis.call('zrem', queue:prefix('throttled'), unpack(arg))
1492
+ end
1493
+ end, pop = function(min, max)
1494
+ return redis.call('zremrangebyrank', queue:prefix('throttled'), min, max)
1495
+ end
1496
+ }
1497
+
1498
+ -- Access to our scheduled jobs
1499
+ queue.scheduled = {
1500
+ peek = function(now, offset, limit)
1501
+ return redis.call('zrange',
1502
+ queue:prefix('scheduled'), offset, offset + limit - 1)
1503
+ end, ready = function(now, offset, limit)
1504
+ return redis.call('zrangebyscore',
1505
+ queue:prefix('scheduled'), 0, now, 'LIMIT', offset, limit)
1506
+ end, add = function(when, jid)
1507
+ redis.call('zadd', queue:prefix('scheduled'), when, jid)
1508
+ end, remove = function(...)
1509
+ if #arg > 0 then
1510
+ return redis.call('zrem', queue:prefix('scheduled'), unpack(arg))
1511
+ end
1512
+ end, length = function()
1513
+ return redis.call('zcard', queue:prefix('scheduled'))
1514
+ end
1515
+ }
1516
+
1517
+ -- Access to our recurring jobs
1518
+ queue.recurring = {
1519
+ peek = function(now, offset, limit)
1520
+ return redis.call('zrangebyscore', queue:prefix('recur'),
1521
+ 0, now, 'LIMIT', offset, limit)
1522
+ end, ready = function(now, offset, limit)
1523
+ end, add = function(when, jid)
1524
+ redis.call('zadd', queue:prefix('recur'), when, jid)
1525
+ end, remove = function(...)
1526
+ if #arg > 0 then
1527
+ return redis.call('zrem', queue:prefix('recur'), unpack(arg))
1528
+ end
1529
+ end, update = function(increment, jid)
1530
+ redis.call('zincrby', queue:prefix('recur'), increment, jid)
1531
+ end, score = function(jid)
1532
+ return redis.call('zscore', queue:prefix('recur'), jid)
1533
+ end, length = function()
1534
+ return redis.call('zcard', queue:prefix('recur'))
1535
+ end
1536
+ }
1537
+ return queue
1538
+ end
1539
+
1540
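-- A worked sketch, not part of the packaged script, of the score used by queue.work.add
-- above: score = priority - now / 1e10. Peeking uses ZREVRANGE, which returns the highest
-- scores first, so a larger priority always wins, and within one priority an older `now`
-- yields a slightly larger score, i.e. FIFO. The timestamps below are illustrative.
local function sketch_work_scores()
  local t1, t2 = 1700000000, 1700000100        -- t1 was enqueued 100 seconds before t2
  local low_old  = 0 - t1 / 10000000000        -- priority 0, older
  local low_new  = 0 - t2 / 10000000000        -- priority 0, newer
  local high_new = 5 - t2 / 10000000000        -- priority 5, newer
  assert(high_new > low_old and low_old > low_new)
end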
+ -- Return the prefix for this particular queue
1541
+ function ReqlessQueue:prefix(group)
1542
+ if group then
1543
+ return ReqlessQueue.ns .. self.name .. '-' .. group
1544
+ end
1545
+
1546
+ return ReqlessQueue.ns .. self.name
1547
+ end
1548
+
1549
+ -- Stats(now, date)
1550
+ -- ---------------------
1551
+ -- Return the current statistics for a given queue on a given date. The
1552
+ -- results are returned as a JSON blob:
1553
+ --
1554
+ --
1555
+ -- {
1556
+ -- # These are unimplemented as of yet
1557
+ -- 'failed': 3,
1558
+ -- 'retries': 5,
1559
+ -- 'wait' : {
1560
+ -- 'total' : ...,
1561
+ -- 'mean' : ...,
1562
+ -- 'variance' : ...,
1563
+ -- 'histogram': [
1564
+ -- ...
1565
+ -- ]
1566
+ -- }, 'run': {
1567
+ -- 'total' : ...,
1568
+ -- 'mean' : ...,
1569
+ -- 'variance' : ...,
1570
+ -- 'histogram': [
1571
+ -- ...
1572
+ -- ]
1573
+ -- }
1574
+ -- }
1575
+ --
1576
+ -- The histogram's data points are at the second resolution for the first
1577
+ -- minute, the minute resolution for the first hour, the 15-minute resolution
1578
+ -- for the first day, the hour resolution for the first 3 days, and then at
1579
+ -- the day resolution from there on out. The `histogram` key is a list of
1580
+ -- those values.
1581
+ function ReqlessQueue:stats(now, date)
1582
+ date = assert(tonumber(date),
1583
+ 'Stats(): Arg "date" missing or not a number: ' .. (date or 'nil'))
1584
+
1585
+ -- The bin is midnight of the provided day
1586
+ -- 24 * 60 * 60 = 86400
1587
+ local bin = date - (date % 86400)
1588
+
1589
+ -- This is a table of all the keys we want to use in order to produce a histogram
1590
+ local histokeys = {
1591
+ 's0','s1','s2','s3','s4','s5','s6','s7','s8','s9','s10','s11','s12','s13','s14','s15','s16','s17','s18','s19','s20','s21','s22','s23','s24','s25','s26','s27','s28','s29','s30','s31','s32','s33','s34','s35','s36','s37','s38','s39','s40','s41','s42','s43','s44','s45','s46','s47','s48','s49','s50','s51','s52','s53','s54','s55','s56','s57','s58','s59',
1592
+ 'm1','m2','m3','m4','m5','m6','m7','m8','m9','m10','m11','m12','m13','m14','m15','m16','m17','m18','m19','m20','m21','m22','m23','m24','m25','m26','m27','m28','m29','m30','m31','m32','m33','m34','m35','m36','m37','m38','m39','m40','m41','m42','m43','m44','m45','m46','m47','m48','m49','m50','m51','m52','m53','m54','m55','m56','m57','m58','m59',
1593
+ 'h1','h2','h3','h4','h5','h6','h7','h8','h9','h10','h11','h12','h13','h14','h15','h16','h17','h18','h19','h20','h21','h22','h23',
1594
+ 'd1','d2','d3','d4','d5','d6'
1595
+ }
1596
+
1597
+ local mkstats = function(name, bin, queue)
1598
+ -- The results we'll be sending back
1599
+ local results = {}
1600
+
1601
+ local key = 'ql:s:' .. name .. ':' .. bin .. ':' .. queue
1602
+ local count, mean, vk = unpack(redis.call('hmget', key, 'total', 'mean', 'vk'))
1603
+
1604
+ count = tonumber(count) or 0
1605
+ mean = tonumber(mean) or 0
1606
+ vk = tonumber(vk)
1607
+
1608
+ results.count = count or 0
1609
+ results.mean = mean or 0
1610
+ results.histogram = {}
1611
+
1612
+ if not count then
1613
+ results.std = 0
1614
+ else
1615
+ if count > 1 then
1616
+ results.std = math.sqrt(vk / (count - 1))
1617
+ else
1618
+ results.std = 0
1619
+ end
1620
+ end
1621
+
1622
+ local histogram = redis.call('hmget', key, unpack(histokeys))
1623
+ for i=1, #histokeys do
1624
+ table.insert(results.histogram, tonumber(histogram[i]) or 0)
1625
+ end
1626
+ return results
1627
+ end
1628
+
1629
+ local retries, failed, failures = unpack(redis.call('hmget', 'ql:s:stats:' .. bin .. ':' .. self.name, 'retries', 'failed', 'failures'))
1630
+ return {
1631
+ retries = tonumber(retries or 0),
1632
+ failed = tonumber(failed or 0),
1633
+ failures = tonumber(failures or 0),
1634
+ wait = mkstats('wait', bin, self.name),
1635
+ run = mkstats('run' , bin, self.name)
1636
+ }
1637
+ end
1638
+
1639
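-- A sketch, not part of the packaged script, of what the table returned by
-- ReqlessQueue:stats above contains once a caller encodes it. The queue name and date are
-- hypothetical; `histogram` has one integer per key in `histokeys` (seconds, then minutes,
-- then hours, then days), and `std` is derived from the stored `vk` running-variance term.
local function sketch_stats_shape(now)
  local stats = Reqless.queue('example-queue'):stats(now, now)
  -- stats.retries, stats.failed, stats.failures  -> counters for that day's bin
  -- stats.wait.count, stats.wait.mean, stats.wait.std, stats.wait.histogram
  -- stats.run.count,  stats.run.mean,  stats.run.std,  stats.run.histogram
  return cjson.encode(stats)
end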
+ -- Peek
1640
+ -------
1641
+ -- Examine the next jobs that would be popped from the queue without actually
1642
+ -- popping them.
1643
+ function ReqlessQueue:peek(now, offset, limit)
1644
+ offset = assert(tonumber(offset),
1645
+ 'Peek(): Arg "offset" missing or not a number: ' .. tostring(offset))
1646
+
1647
+ limit = assert(tonumber(limit),
1648
+ 'Peek(): Arg "limit" missing or not a number: ' .. tostring(limit))
1649
+
1650
+ if limit <= 0 then
1651
+ return {}
1652
+ end
1653
+
1654
+ local offset_with_limit = offset + limit
1655
+
1656
+ -- These are the ids that we're going to return. We'll begin with any jobs
1657
+ -- that have lost their locks
1658
+ local jids = self.locks.expired(now, 0, offset_with_limit)
1659
+
1660
+ -- Since we can't just peek the range we want, we have to consider all offset
1661
+ -- + limit jobs before we can take the relevant range.
1662
+ local remaining_capacity = offset_with_limit - #jids
1663
+
1664
+ -- If we still need jobs in order to meet demand, then we should
1665
+ -- look for all the recurring jobs that need jobs run
1666
+ self:check_recurring(now, remaining_capacity)
1667
+
1668
+ -- Now we've checked __all__ the locks for this queue that could
1669
+ -- have expired, and are no more than the number requested. If
1670
+ -- we still need values in order to meet the demand, then we
1671
+ -- should check for any scheduled items, and if so, we should
1672
+ -- insert them to ensure correctness when pulling off the next
1673
+ -- unit of work.
1674
+ self:check_scheduled(now, remaining_capacity)
1675
+
1676
+ if offset > #jids then
1677
+ -- Offset takes us past the expired jids, so just return straight from the
1678
+ -- work queue
1679
+ return self.work.peek(offset - #jids, limit)
1680
+ end
1681
+
1682
+ -- Return a mix of expired jids and prioritized items from the work queue
1683
+ table_extend(jids, self.work.peek(0, remaining_capacity))
1684
+
1685
+ if #jids < offset then
1686
+ return {}
1687
+ end
1688
+
1689
+ return {unpack(jids, offset + 1, offset_with_limit)}
1690
+ end
1691
+
1692
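-- A worked sketch, not part of the packaged script, of the slicing done at the end of
-- ReqlessQueue:peek above. Suppose two jids have expired locks and the work queue holds
-- j3, j4, j5; with offset = 1 and limit = 2 the method gathers offset + limit = 3
-- candidates and then returns the window that starts just past the offset.
local function sketch_peek_window()
  local jids = {'j1', 'j2'}                 -- expired-lock jids always come first
  local from_work = {'j3'}                  -- work.peek(0, remaining_capacity = 1)
  for _, jid in ipairs(from_work) do table.insert(jids, jid) end
  return {unpack(jids, 2, 3)}               -- offset 1, limit 2  ->  {'j2', 'j3'}
end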
+ -- Return true if this queue is paused
1693
+ function ReqlessQueue:paused()
1694
+ return redis.call('sismember', 'ql:paused_queues', self.name) == 1
1695
+ end
1696
+
1697
+ -- Pause this queue
1698
+ --
1699
+ -- Note: long term, we have discussed adding a rate-limiting
1700
+ -- feature to reqless-core, which would be more flexible and
1701
+ -- could be used for pausing (i.e. pause = set the rate to 0).
1702
+ -- For now, this is far simpler, but we should rewrite this
1703
+ -- in terms of the rate limiting feature if/when that is added.
1704
+ function ReqlessQueue.pause(now, ...)
1705
+ redis.call('sadd', 'ql:paused_queues', unpack(arg))
1706
+ end
1707
+
1708
+ -- Unpause this queue
1709
+ function ReqlessQueue.unpause(...)
1710
+ redis.call('srem', 'ql:paused_queues', unpack(arg))
1711
+ end
1712
+
1713
+ -- Checks for expired locks, scheduled and recurring jobs, returning any
1714
+ -- jobs that are ready to be processed
1715
+ function ReqlessQueue:pop(now, worker, limit)
1716
+ assert(worker, 'Pop(): Arg "worker" missing')
1717
+ limit = assert(tonumber(limit),
1718
+ 'Pop(): Arg "limit" missing or not a number: ' .. tostring(limit))
1719
+
1720
+ -- If this queue is paused, then return no jobs
1721
+ if self:paused() then
1722
+ return {}
1723
+ end
1724
+
1725
+ -- Make sure we add this worker to the list of seen workers
1726
+ redis.call('zadd', 'ql:workers', now, worker)
1727
+
1728
+ local dead_jids = self:invalidate_locks(now, limit) or {}
1729
+ local popped = {}
1730
+
1731
+ for _, jid in ipairs(dead_jids) do
1732
+ local success = self:pop_job(now, worker, Reqless.job(jid))
1733
+ -- only track jid if a job was popped and it's not a phantom jid
1734
+ if success then
1735
+ table.insert(popped, jid)
1736
+ end
1737
+ end
1738
+
1739
+ -- If the queue is at max capacity, don't pop any further jobs.
1740
+ if not Reqless.throttle(ReqlessQueue.ns .. self.name):available() then
1741
+ return popped
1742
+ end
1743
+
1744
+ -- Now we've checked __all__ the locks for this queue that could
1745
+ -- have expired, and are no more than the number requested.
1746
+
1747
+ -- If we still need jobs in order to meet demand, then we should
1748
+ -- look for all the recurring jobs that need jobs run
1749
+ self:check_recurring(now, limit - #dead_jids)
1750
+
1751
+ -- If we still need values in order to meet the demand, then we
1752
+ -- should check for any scheduled items, and if so, we should
1753
+ -- insert them to ensure correctness when pulling off the next
1754
+ -- unit of work.
1755
+ self:check_scheduled(now, limit - #dead_jids)
1756
+
1757
+ -- With these in place, we can expand this list of jids based on the work
1758
+ -- queue itself and the priorities therein
1759
+
1760
+ -- Since throttles could prevent work queue items from being popped, we can
1761
+ -- retry a number of times till we find work items that are not throttled
1762
+ local pop_retry_limit = tonumber(
1763
+ Reqless.config.get(self.name .. '-max-pop-retry') or
1764
+ Reqless.config.get('max-pop-retry', 1)
1765
+ )
1766
+
1767
+ -- Keep trying to fulfill jobs from the work queue until we reach
1768
+ -- the desired limit or exhaust our retry limit
1769
+ while #popped < limit and pop_retry_limit > 0 do
1770
+
1771
+ local jids = self.work.peek(0, limit - #popped) or {}
1772
+
1773
+ -- If there is nothing in the work queue, then no need to keep looping
1774
+ if #jids == 0 then
1775
+ break
1776
+ end
1777
+
1778
+
1779
+ for _, jid in ipairs(jids) do
1780
+ local job = Reqless.job(jid)
1781
+ if job:throttles_acquire(now) then
1782
+ local success = self:pop_job(now, worker, job)
1783
+ -- only track jid if a job was popped and it's not a phantom jid
1784
+ if success then
1785
+ table.insert(popped, jid)
1786
+ end
1787
+ else
1788
+ self:throttle(now, job)
1789
+ end
1790
+ end
1791
+
1792
+ -- All jobs should have acquired locks or be throttled,
1793
+ -- ergo, remove all jids from work queue
1794
+ self.work.remove(unpack(jids))
1795
+
1796
+ pop_retry_limit = pop_retry_limit - 1
1797
+ end
1798
+
1799
+ return popped
1800
+ end
1801
+
1802
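-- A sketch, not part of the packaged script, isolating the retry-limit lookup used by
-- ReqlessQueue:pop above. A per-queue key '<name>-max-pop-retry' takes precedence over the
-- global 'max-pop-retry', which itself defaults to 1 when unset. 'example-queue' below is a
-- hypothetical queue name.
local function sketch_pop_retry_limit(queue_name)
  return tonumber(
    Reqless.config.get(queue_name .. '-max-pop-retry') or
    Reqless.config.get('max-pop-retry', 1))
end
-- sketch_pop_retry_limit('example-queue')  --> 1 unless either config key has been set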
+ -- Throttle a job
1803
+ function ReqlessQueue:throttle(now, job)
1804
+ job:throttle(now)
1805
+ self.throttled.add(now, job.jid)
1806
+ local state = unpack(job:data('state'))
1807
+ if state ~= 'throttled' then
1808
+ job:update({state = 'throttled'})
1809
+ job:history(now, 'throttled', {queue = self.name})
1810
+ end
1811
+ end
1812
+
1813
+ function ReqlessQueue:pop_job(now, worker, job)
1814
+ local state
1815
+ local jid = job.jid
1816
+ local job_state = job:data('state')
1817
+ -- if the job doesn't exist, short circuit
1818
+ if not job_state then
1819
+ return false
1820
+ end
1821
+
1822
+ state = unpack(job_state)
1823
+ job:history(now, 'popped', {worker = worker})
1824
+
1825
+ -- We should find the heartbeat interval for this queue
1826
+ local expires = now + tonumber(
1827
+ Reqless.config.get(self.name .. '-heartbeat') or
1828
+ Reqless.config.get('heartbeat', 60))
1829
+
1830
+ -- Update the wait time statistics
1831
+ -- Wouldn't job:data('time') do the same as this?
1832
+ local time = tonumber(redis.call('hget', ReqlessJob.ns .. jid, 'time') or now)
1833
+ local waiting = now - time
1834
+ self:stat(now, 'wait', waiting)
1835
+ redis.call('hset', ReqlessJob.ns .. jid,
1836
+ 'time', string.format("%.20f", now))
1837
+
1838
+ -- Add this job to the list of jobs handled by this worker
1839
+ redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid)
1840
+
1841
+ -- Update the jobs data, and add its locks, and return the job
1842
+ job:update({
1843
+ worker = worker,
1844
+ expires = expires,
1845
+ state = 'running'
1846
+ })
1847
+
1848
+ self.locks.add(expires, jid)
1849
+
1850
+ local tracked = redis.call('zscore', 'ql:tracked', jid) ~= false
1851
+ if tracked then
1852
+ Reqless.publish('popped', jid)
1853
+ end
1854
+ return true
1855
+ end
1856
+
1857
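-- A sketch, not part of the packaged script, of the lock-expiry computation in
-- ReqlessQueue:pop_job above: a per-queue '<name>-heartbeat' config overrides the global
-- 'heartbeat' (default 60 seconds), and the resulting `expires` is what lands in both the
-- queue's '-locks' zset and the worker's 'ql:w:<worker>:jobs' zset.
local function sketch_lock_expiry(now, queue_name)
  local heartbeat = tonumber(
    Reqless.config.get(queue_name .. '-heartbeat') or
    Reqless.config.get('heartbeat', 60))
  return now + heartbeat  -- e.g. now + 60 with the defaults
end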
+ -- Update the stats for this queue
1858
+ function ReqlessQueue:stat(now, stat, val)
1859
+ -- The bin is midnight of the provided day
1860
+ local bin = now - (now % 86400)
1861
+ local key = 'ql:s:' .. stat .. ':' .. bin .. ':' .. self.name
1862
+
1863
+ -- Get the current data
1864
+ local count, mean, vk = unpack(
1865
+ redis.call('hmget', key, 'total', 'mean', 'vk'))
1866
+
1867
+ -- If there isn't any data there presently, then we must initialize it
1868
+ count = count or 0
1869
+ if count == 0 then
1870
+ mean = val
1871
+ vk = 0
1872
+ count = 1
1873
+ else
1874
+ count = count + 1
1875
+ local oldmean = mean
1876
+ mean = mean + (val - mean) / count
1877
+ vk = vk + (val - mean) * (val - oldmean)
1878
+ end
1879
+
1880
+ -- Now, update the histogram
1881
+ -- - `s1`, `s2`, ..., -- second-resolution histogram counts
1882
+ -- - `m1`, `m2`, ..., -- minute-resolution
1883
+ -- - `h1`, `h2`, ..., -- hour-resolution
1884
+ -- - `d1`, `d2`, ..., -- day-resolution
1885
+ val = math.floor(val)
1886
+ if val < 60 then -- seconds
1887
+ redis.call('hincrby', key, 's' .. val, 1)
1888
+ elseif val < 3600 then -- minutes
1889
+ redis.call('hincrby', key, 'm' .. math.floor(val / 60), 1)
1890
+ elseif val < 86400 then -- hours
1891
+ redis.call('hincrby', key, 'h' .. math.floor(val / 3600), 1)
1892
+ else -- days
1893
+ redis.call('hincrby', key, 'd' .. math.floor(val / 86400), 1)
1894
+ end
1895
+ redis.call('hmset', key, 'total', count, 'mean', mean, 'vk', vk)
1896
+ end
1897
+
1898
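-- A worked sketch, not part of the packaged script, of the histogram bucket chosen by
-- ReqlessQueue:stat above for a few durations (in seconds). Buckets are per-second under a
-- minute, per-minute under an hour, per-hour under a day, and per-day beyond that.
local function sketch_stat_bucket(val)
  val = math.floor(val)
  if val < 60 then return 's' .. val end
  if val < 3600 then return 'm' .. math.floor(val / 60) end
  if val < 86400 then return 'h' .. math.floor(val / 3600) end
  return 'd' .. math.floor(val / 86400)
end
-- sketch_stat_bucket(45)     --> 's45'
-- sketch_stat_bucket(330)    --> 'm5'   (5.5 minutes)
-- sketch_stat_bucket(7200)   --> 'h2'
-- sketch_stat_bucket(200000) --> 'd2'   (~2.3 days)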
+ -- Put(now, jid, klass, data, delay,
1899
+ -- [priority, p],
1900
+ -- [tags, t],
1901
+ -- [retries, r],
1902
+ -- [depends, '[...]'])
1903
+ -- -----------------------
1904
+ -- Insert a job into the queue with the given priority, tags, delay, klass and
1905
+ -- data.
1906
+ function ReqlessQueue:put(now, worker, jid, klass, raw_data, delay, ...)
1907
+ assert(jid , 'Put(): Arg "jid" missing')
1908
+ assert(klass, 'Put(): Arg "klass" missing')
1909
+ local data = assert(cjson.decode(raw_data),
1910
+ 'Put(): Arg "data" missing or not JSON: ' .. tostring(raw_data))
1911
+ delay = assert(tonumber(delay),
1912
+ 'Put(): Arg "delay" not a number: ' .. tostring(delay))
1913
+
1914
+ -- Read in all the optional parameters. All of these must come in pairs, so
1915
+ -- if we have an odd number of extra args, raise an error
1916
+ if #arg % 2 == 1 then
1917
+ error('Odd number of additional args: ' .. tostring(arg))
1918
+ end
1919
+ local options = {}
1920
+ for i = 1, #arg, 2 do options[arg[i]] = arg[i + 1] end
1921
+
1922
+ -- Let's see what the old priority and tags were
1923
+ local job = Reqless.job(jid)
1924
+ local priority, tags, oldqueue, state, failure, retries, oldworker =
1925
+ unpack(redis.call('hmget', ReqlessJob.ns .. jid, 'priority', 'tags',
1926
+ 'queue', 'state', 'failure', 'retries', 'worker'))
1927
+
1928
+ -- If there are old tags, then we should remove the tags this job has
1929
+ if tags then
1930
+ Reqless.tag(now, 'remove', jid, unpack(cjson.decode(tags)))
1931
+ end
1932
+
1933
+ -- Sanity check on optional args
1934
+ local retries = assert(tonumber(options['retries'] or retries or 5) ,
1935
+ 'Put(): Arg "retries" not a number: ' .. tostring(options['retries']))
1936
+ local tags = assert(cjson.decode(options['tags'] or tags or '[]' ),
1937
+ 'Put(): Arg "tags" not JSON' .. tostring(options['tags']))
1938
+ local priority = assert(tonumber(options['priority'] or priority or 0),
1939
+ 'Put(): Arg "priority" not a number' .. tostring(options['priority']))
1940
+ local depends = assert(cjson.decode(options['depends'] or '[]') ,
1941
+ 'Put(): Arg "depends" not JSON: ' .. tostring(options['depends']))
1942
+ local throttles = assert(cjson.decode(options['throttles'] or '[]'),
1943
+ 'Put(): Arg "throttles" not JSON array: ' .. tostring(options['throttles']))
1944
+
1945
+ -- If the job has old dependencies, determine which dependencies are
1946
+ -- in the new dependencies but not in the old ones, and which are in the
1947
+ -- old ones but not in the new
1948
+ if #depends > 0 then
1949
+ -- This makes it easier to check if it's in the new list
1950
+ local new = {}
1951
+ for _, d in ipairs(depends) do new[d] = 1 end
1952
+
1953
+ -- Now find what's in the original, but not the new
1954
+ local original = redis.call(
1955
+ 'smembers', ReqlessJob.ns .. jid .. '-dependencies')
1956
+ for _, dep in pairs(original) do
1957
+ if new[dep] == nil then
1958
+ -- Remove dep as a dependency
1959
+ redis.call('srem', ReqlessJob.ns .. dep .. '-dependents' , jid)
1960
+ redis.call('srem', ReqlessJob.ns .. jid .. '-dependencies', dep)
1961
+ end
1962
+ end
1963
+ end
1964
+
1965
+ -- Send out a log message
1966
+ Reqless.publish('log', cjson.encode({
1967
+ jid = jid,
1968
+ event = 'put',
1969
+ queue = self.name
1970
+ }))
1971
+
1972
+ -- Update the history to include this new change
1973
+ job:history(now, 'put', {queue = self.name})
1974
+
1975
+ -- If this item was previously in another queue, then we should remove it from there
1976
+ -- and remove the associated throttle
1977
+ if oldqueue then
1978
+ local queue_obj = Reqless.queue(oldqueue)
1979
+ queue_obj:remove_job(jid)
1980
+ local old_qid = ReqlessQueue.ns .. oldqueue
1981
+ for index, throttle_name in ipairs(throttles) do
1982
+ if throttle_name == old_qid then
1983
+ table.remove(throttles, index)
1984
+ end
1985
+ end
1986
+ end
1987
+
1988
+ -- If this had previously been given out to a worker, make sure to remove it
1989
+ -- from that worker's jobs
1990
+ if oldworker and oldworker ~= '' then
1991
+ redis.call('zrem', 'ql:w:' .. oldworker .. ':jobs', jid)
1992
+ -- If it's a different worker that's putting this job, send a notification
1993
+ -- to the last owner of the job
1994
+ if oldworker ~= worker then
1995
+ -- We need to inform whatever worker had that job
1996
+ local encoded = cjson.encode({
1997
+ jid = jid,
1998
+ event = 'lock_lost',
1999
+ worker = oldworker
2000
+ })
2001
+ Reqless.publish('w:' .. oldworker, encoded)
2002
+ Reqless.publish('log', encoded)
2003
+ end
2004
+ end
2005
+
2006
+ -- If the job was previously in the 'completed' state, then we should
2007
+ -- remove it from being enqueued for destruction
2008
+ if state == 'complete' then
2009
+ redis.call('zrem', 'ql:completed', jid)
2010
+ end
2011
+
2012
+ -- Add this job to the list of jobs tagged with whatever tags were supplied
2013
+ for _, tag in ipairs(tags) do
2014
+ Reqless.job(jid):insert_tag(now, tag)
2015
+ end
2016
+
2017
+ -- If we're in the failed state, remove all of our data
2018
+ if state == 'failed' then
2019
+ failure = cjson.decode(failure)
2020
+ -- We need to make this remove it from the failed queues
2021
+ redis.call('lrem', 'ql:f:' .. failure.group, 0, jid)
2022
+ if redis.call('llen', 'ql:f:' .. failure.group) == 0 then
2023
+ redis.call('srem', 'ql:failures', failure.group)
2024
+ end
2025
+ -- The bin is midnight of the provided day
2026
+ -- 24 * 60 * 60 = 86400
2027
+ local bin = failure.when - (failure.when % 86400)
2028
+ -- We also need to decrement the stats about the queue on
2029
+ -- the day that this failure actually happened.
2030
+ redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. self.name, 'failed' , -1)
2031
+ end
2032
+
2033
+ -- insert default queue throttle
2034
+ table.insert(throttles, ReqlessQueue.ns .. self.name)
2035
+
2036
+ data = {
2037
+ 'jid' , jid,
2038
+ 'klass' , klass,
2039
+ 'data' , raw_data,
2040
+ 'priority' , priority,
2041
+ 'tags' , cjson.encode(tags),
2042
+ 'state' , ((delay > 0) and 'scheduled') or 'waiting',
2043
+ 'worker' , '',
2044
+ 'expires' , 0,
2045
+ 'queue' , self.name,
2046
+ 'retries' , retries,
2047
+ 'remaining', retries,
2048
+ 'time' , string.format("%.20f", now),
2049
+ 'throttles', cjson.encode(throttles)
2050
+ }
2051
+
2052
+ -- First, let's save its data
2053
+ redis.call('hmset', ReqlessJob.ns .. jid, unpack(data))
2054
+
2055
+ -- These are the jids we legitimately have to wait on
2056
+ for _, j in ipairs(depends) do
2057
+ -- Make sure it's something other than 'nil' or complete.
2058
+ local state = redis.call('hget', ReqlessJob.ns .. j, 'state')
2059
+ if (state and state ~= 'complete') then
2060
+ redis.call('sadd', ReqlessJob.ns .. j .. '-dependents' , jid)
2061
+ redis.call('sadd', ReqlessJob.ns .. jid .. '-dependencies', j)
2062
+ end
2063
+ end
2064
+
2065
+ -- Now, if a delay was provided, and if it's in the future,
2066
+ -- then we'll have to schedule it. Otherwise, we're just
2067
+ -- going to add it to the work queue.
2068
+ if delay > 0 then
2069
+ if redis.call('scard', ReqlessJob.ns .. jid .. '-dependencies') > 0 then
2070
+ -- We've already put it in 'depends'. Now, we must just save the data
2071
+ -- for when it's scheduled
2072
+ self.depends.add(now, jid)
2073
+ redis.call('hmset', ReqlessJob.ns .. jid,
2074
+ 'state', 'depends',
2075
+ 'scheduled', now + delay)
2076
+ else
2077
+ self.scheduled.add(now + delay, jid)
2078
+ end
2079
+ else
2080
+ -- To avoid false negatives when popping jobs, check if the job should be
2081
+ -- throttled immediately.
2082
+ local job = Reqless.job(jid)
2083
+ if redis.call('scard', ReqlessJob.ns .. jid .. '-dependencies') > 0 then
2084
+ self.depends.add(now, jid)
2085
+ redis.call('hset', ReqlessJob.ns .. jid, 'state', 'depends')
2086
+ elseif not job:throttles_available() then
2087
+ self:throttle(now, job)
2088
+ else
2089
+ self.work.add(now, priority, jid)
2090
+ end
2091
+ end
2092
+
2093
+ -- Lastly, we're going to make sure that this item is in the
2094
+ -- set of known queues. We should keep this sorted by the
2095
+ -- order in which we saw each of these queues
2096
+ if redis.call('zscore', 'ql:queues', self.name) == false then
2097
+ redis.call('zadd', 'ql:queues', now, self.name)
2098
+ end
2099
+
2100
+ if redis.call('zscore', 'ql:tracked', jid) ~= false then
2101
+ Reqless.publish('put', jid)
2102
+ end
2103
+
2104
+ return jid
2105
+ end
2106
+
2107
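-- A usage sketch, not part of the packaged script, of ReqlessQueue:put above. Optional
-- arguments come as name/value pairs after `delay`; 'throttles' is accepted in addition to
-- the options listed in the header comment. All identifiers below are hypothetical.
local function sketch_put(now)
  return Reqless.queue('example-queue'):put(
    now, 'worker-1', 'example-jid', 'ExampleJobClass',
    cjson.encode({foo = 'bar'}),        -- job data is always a JSON string
    0,                                  -- delay in seconds; > 0 schedules instead of queueing
    'priority', 10,
    'tags', cjson.encode({'reports'}),
    'retries', 3,
    'depends', cjson.encode({'other-jid'}),
    'throttles', cjson.encode({'example-api-throttle'}))
end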
+ -- Move `count` jobs out of the failed state and into this queue
2108
+ function ReqlessQueue:unfail(now, group, count)
2109
+ assert(group, 'Unfail(): Arg "group" missing')
2110
+ count = assert(tonumber(count or 25),
2111
+ 'Unfail(): Arg "count" not a number: ' .. tostring(count))
2112
+ assert(count > 0, 'Unfail(): Arg "count" must be greater than zero')
2113
+
2114
+ -- Get up to that many jobs, and we'll put them in the appropriate queue
2115
+ local jids = redis.call('lrange', 'ql:f:' .. group, -count, -1)
2116
+
2117
+ -- And now set each job's state, and put it into the appropriate queue
2118
+ local toinsert = {}
2119
+ for _, jid in ipairs(jids) do
2120
+ local job = Reqless.job(jid)
2121
+ local data = job:data()
2122
+ job:history(now, 'put', {queue = self.name})
2123
+ redis.call('hmset', ReqlessJob.ns .. data.jid,
2124
+ 'state' , 'waiting',
2125
+ 'worker' , '',
2126
+ 'expires' , 0,
2127
+ 'queue' , self.name,
2128
+ 'remaining', data.retries or 5)
2129
+ self.work.add(now, data.priority, data.jid)
2130
+ end
2131
+
2132
+ -- Remove these jobs from the failed state
2133
+ redis.call('ltrim', 'ql:f:' .. group, 0, -count - 1)
2134
+ if (redis.call('llen', 'ql:f:' .. group) == 0) then
2135
+ redis.call('srem', 'ql:failures', group)
2136
+ end
2137
+
2138
+ return #jids
2139
+ end
2140
+
2141
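-- A usage sketch, not part of the packaged script, of ReqlessQueue:unfail above: move up to
-- 25 of the most recently failed jobs in a hypothetical failure group back into this queue
-- as 'waiting' jobs.
local function sketch_unfail(now)
  return Reqless.queue('example-queue'):unfail(now, 'failed-retries-example-queue', 25)
end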
+ -- Recur a job of type klass in this queue
2142
+ function ReqlessQueue:recurAtInterval(now, jid, klass, raw_data, interval, offset, ...)
2143
+ assert(jid , 'Recur(): Arg "jid" missing')
2144
+ assert(klass, 'Recur(): Arg "klass" missing')
2145
+ local data = assert(cjson.decode(raw_data),
2146
+ 'Recur(): Arg "data" not JSON: ' .. tostring(raw_data))
2147
+
2148
+ local interval = assert(tonumber(interval),
2149
+ 'Recur(): Arg "interval" not a number: ' .. tostring(interval))
2150
+ local offset = assert(tonumber(offset),
2151
+ 'Recur(): Arg "offset" not a number: ' .. tostring(offset))
2152
+ if interval <= 0 then
2153
+ error('Recur(): Arg "interval" must be greater than 0')
2154
+ end
2155
+
2156
+ -- Read in all the optional parameters. All of these must come in
2157
+ -- pairs, so if we have an odd number of extra args, raise an error
2158
+ if #arg % 2 == 1 then
2159
+ error('Recur(): Odd number of additional args: ' .. tostring(arg))
2160
+ end
2161
+
2162
+ -- Read in all the optional parameters
2163
+ local options = {}
2164
+ for i = 1, #arg, 2 do options[arg[i]] = arg[i + 1] end
2165
+ options.tags = assert(cjson.decode(options.tags or '{}'),
2166
+ 'Recur(): Arg "tags" must be JSON string array: ' .. tostring(
2167
+ options.tags))
2168
+ options.priority = assert(tonumber(options.priority or 0),
2169
+ 'Recur(): Arg "priority" not a number: ' .. tostring(
2170
+ options.priority))
2171
+ options.retries = assert(tonumber(options.retries or 0),
2172
+ 'Recur(): Arg "retries" not a number: ' .. tostring(
2173
+ options.retries))
2174
+ options.backlog = assert(tonumber(options.backlog or 0),
2175
+ 'Recur(): Arg "backlog" not a number: ' .. tostring(
2176
+ options.backlog))
2177
+ options.throttles = assert(cjson.decode(options['throttles'] or '{}'),
2178
+ 'Recur(): Arg "throttles" not JSON array: ' .. tostring(options['throttles']))
2179
+
2180
+ local count, old_queue = unpack(redis.call('hmget', 'ql:r:' .. jid, 'count', 'queue'))
2181
+ count = count or 0
2182
+
2183
+ local throttles = options['throttles'] or {}
2184
+
2185
+ -- If it has previously been in another queue, then we should remove
2186
+ -- some information about it
2187
+ if old_queue then
2188
+ Reqless.queue(old_queue).recurring.remove(jid)
2189
+
2190
+ for index, throttle_name in ipairs(throttles) do
2191
+ if throttle_name == old_queue then
2192
+ table.remove(throttles, index)
2193
+ end
2194
+ end
2195
+ end
2196
+
2197
+ -- insert default queue throttle
2198
+ table.insert(throttles, ReqlessQueue.ns .. self.name)
2199
+
2200
+ -- Do some insertions
2201
+ redis.call('hmset', 'ql:r:' .. jid,
2202
+ 'jid' , jid,
2203
+ 'klass' , klass,
2204
+ 'data' , raw_data,
2205
+ 'priority' , options.priority,
2206
+ 'tags' , cjson.encode(options.tags or {}),
2207
+ 'state' , 'recur',
2208
+ 'queue' , self.name,
2209
+ 'type' , 'interval',
2210
+ -- How many jobs we've spawned from this
2211
+ 'count' , count,
2212
+ 'interval' , interval,
2213
+ 'retries' , options.retries,
2214
+ 'backlog' , options.backlog,
2215
+ 'throttles', cjson.encode(throttles))
2216
+ -- Now, we should schedule the next run of the job
2217
+ self.recurring.add(now + offset, jid)
2218
+
2219
+ -- Lastly, we're going to make sure that this item is in the
2220
+ -- set of known queues. We should keep this sorted by the
2221
+ -- order in which we saw each of these queues
2222
+ if redis.call('zscore', 'ql:queues', self.name) == false then
2223
+ redis.call('zadd', 'ql:queues', now, self.name)
2224
+ end
2225
+
2226
+ return jid
2227
+ end
2228
+
2229
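-- A usage sketch, not part of the packaged script, of ReqlessQueue:recurAtInterval above:
-- spawn a hypothetical job every 300 seconds, starting 60 seconds from now, and catch up at
-- most 5 missed occurrences after a quiet period.
local function sketch_recur(now)
  return Reqless.queue('example-queue'):recurAtInterval(
    now, 'example-recurring-jid', 'ExampleJobClass',
    cjson.encode({report = 'daily'}),
    300,                               -- interval between spawned jobs, in seconds
    60,                                -- offset before the first spawn
    'priority', 0,
    'retries', 3,
    'backlog', 5,
    'tags', cjson.encode({'reports'}))
end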
+ -- Return the length of the queue
2230
+ function ReqlessQueue:length()
2231
+ return self.locks.length() + self.work.length() + self.scheduled.length()
2232
+ end
2233
+
2234
+ -------------------------------------------------------------------------------
2235
+ -- Housekeeping methods
2236
+ -------------------------------------------------------------------------------
2237
+ function ReqlessQueue:remove_job(jid)
2238
+ self.work.remove(jid)
2239
+ self.locks.remove(jid)
2240
+ self.throttled.remove(jid)
2241
+ self.depends.remove(jid)
2242
+ self.scheduled.remove(jid)
2243
+ end
2244
+
2245
+ -- Instantiate any recurring jobs that are ready
2246
+ function ReqlessQueue:check_recurring(now, count)
2247
+ if count <= 0 then
2248
+ return
2249
+ end
2250
+ -- This is how many jobs we've moved so far
2251
+ local moved = 0
2252
+ -- These are the recurring jobs that need work
2253
+ local r = self.recurring.peek(now, 0, count)
2254
+ for _, jid in ipairs(r) do
2255
+ -- For each of the jids that need jobs scheduled, first
2256
+ -- get the last time each of them was run, and then increment
2257
+ -- it by its interval. While this time is less than now,
2258
+ -- we need to keep putting jobs on the queue
2259
+ local r = redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority',
2260
+ 'tags', 'retries', 'interval', 'backlog', 'throttles')
2261
+ local klass, data, priority, tags, retries, interval, backlog, throttles = unpack(
2262
+ redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority',
2263
+ 'tags', 'retries', 'interval', 'backlog', 'throttles'))
2264
+ local _tags = cjson.decode(tags)
2265
+ local score = math.floor(tonumber(self.recurring.score(jid)))
2266
+ interval = tonumber(interval)
2267
+
2268
+ -- If the backlog is set for this job, then see if it's been a long
2269
+ -- time since the last pop
2270
+ backlog = tonumber(backlog or 0)
2271
+ if backlog ~= 0 then
2272
+ -- Check how many jobs we could conceivably generate
2273
+ local num = ((now - score) / interval)
2274
+ if num > backlog then
2275
+ -- Update the score
2276
+ score = score + (
2277
+ math.ceil(num - backlog) * interval
2278
+ )
2279
+ end
2280
+ end
2281
+
2282
+ -- We're saving this value so that in the history, we can accurately
2283
+ -- reflect when the job would normally have been scheduled
2284
+ while (score <= now) and (moved < count) do
2285
+ local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1)
2286
+ moved = moved + 1
2287
+
2288
+ local child_jid = jid .. '-' .. count
2289
+
2290
+ -- Add this job to the list of jobs tagged with whatever tags were
2291
+ -- supplied
2292
+ for _, tag in ipairs(_tags) do
2293
+ Reqless.job(child_jid):insert_tag(now, tag)
2294
+ end
2295
+
2296
+ -- First, let's save its data
2297
+ redis.call('hmset', ReqlessJob.ns .. child_jid,
2298
+ 'jid' , child_jid,
2299
+ 'klass' , klass,
2300
+ 'data' , data,
2301
+ 'priority' , priority,
2302
+ 'tags' , tags,
2303
+ 'state' , 'waiting',
2304
+ 'worker' , '',
2305
+ 'expires' , 0,
2306
+ 'queue' , self.name,
2307
+ 'retries' , retries,
2308
+ 'remaining', retries,
2309
+ 'time' , string.format("%.20f", score),
2310
+ 'throttles', throttles,
2311
+ 'spawned_from_jid', jid)
2312
+
2313
+ Reqless.job(child_jid):history(score, 'put', {queue = self.name})
2314
+
2315
+ -- Now, if a delay was provided, and if it's in the future,
2316
+ -- then we'll have to schedule it. Otherwise, we're just
2317
+ -- going to add it to the work queue.
2318
+ self.work.add(score, priority, child_jid)
2319
+
2320
+ score = score + interval
2321
+ self.recurring.add(score, jid)
2322
+ end
2323
+ end
2324
+ end
2325
+
2326
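-- A worked sketch, not part of the packaged script, of the backlog clamp in
-- ReqlessQueue:check_recurring above. With interval = 60 and a recurring job whose score has
-- fallen 600 seconds behind `now`, num = 600 / 60 = 10 occurrences are owed; with backlog = 3
-- the score jumps forward by ceil(10 - 3) * 60 = 420 seconds, so only 3 catch-up jobs spawn.
local function sketch_backlog_clamp(now)
  local interval, backlog = 60, 3
  local score = now - 600
  local num = (now - score) / interval               -- 10 owed occurrences
  if num > backlog then
    score = score + math.ceil(num - backlog) * interval
  end
  return (now - score) / interval                    -- 3 occurrences remain to be spawned
end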
+ -- Check for any jobs that have been scheduled, and shovel them onto
2327
+ -- the work queue. Returns nothing, but afterwards, up to `count`
2328
+ -- scheduled jobs will be moved into the work queue
2329
+ function ReqlessQueue:check_scheduled(now, count)
2330
+ if count <= 0 then
2331
+ return
2332
+ end
2333
+ -- These are the scheduled jobs that are now ready to be moved
2334
+ -- onto the work queue
2335
+ local scheduled = self.scheduled.ready(now, 0, count)
2336
+ for _, jid in ipairs(scheduled) do
2337
+ -- With these in hand, we'll have to go out and find the
2338
+ -- priorities of these jobs, and then we'll insert them
2339
+ -- into the work queue and then when that's complete, we'll
2340
+ -- remove them from the scheduled queue
2341
+ local priority = tonumber(
2342
+ redis.call('hget', ReqlessJob.ns .. jid, 'priority') or 0)
2343
+ self.work.add(now, priority, jid)
2344
+ self.scheduled.remove(jid)
2345
+
2346
+ -- We should also update them to have the state 'waiting'
2347
+ -- instead of 'scheduled'
2348
+ redis.call('hset', ReqlessJob.ns .. jid, 'state', 'waiting')
2349
+ end
2350
+ end
2351
+
2352
+ -- Check for and invalidate any locks that have been lost. Returns the
2353
+ -- list of jids that have been invalidated
2354
+ function ReqlessQueue:invalidate_locks(now, count)
2355
+ local jids = {}
2356
+ -- Iterate through all the expired locks and add them to the list
2357
+ -- of keys that we'll return
2358
+ for _, jid in ipairs(self.locks.expired(now, 0, count)) do
2359
+ -- Remove this job from the set of jobs held by the worker
2360
+ -- that was running it
2361
+ local worker, failure = unpack(
2362
+ redis.call('hmget', ReqlessJob.ns .. jid, 'worker', 'failure'))
2363
+ redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)
2364
+
2365
+ -- We'll provide a grace period after jobs time out for them to give
2366
+ -- some indication of the failure mode. After that time, however, we'll
2367
+ -- consider the worker dust in the wind
2368
+ local grace_period = tonumber(Reqless.config.get('grace-period'))
2369
+
2370
+ -- Whether or not we've already sent a courtesy message
2371
+ local courtesy_sent = tonumber(
2372
+ redis.call('hget', ReqlessJob.ns .. jid, 'grace') or 0)
2373
+
2374
+ -- If we haven't yet sent the courtesy message, then we'll just
2375
+ -- send the message this time around. Otherwise, it's time to
2376
+ -- actually hand out the work to another worker
2377
+ local send_message = (courtesy_sent ~= 1)
2378
+ local invalidate = not send_message
2379
+
2380
+ -- If the grace period has been disabled, then we'll do both.
2381
+ if grace_period <= 0 then
2382
+ send_message = true
2383
+ invalidate = true
2384
+ end
2385
+
2386
+ if send_message then
2387
+ -- This is where we supply a courtesy message and give the worker
2388
+ -- time to provide a failure message
2389
+ if redis.call('zscore', 'ql:tracked', jid) ~= false then
2390
+ Reqless.publish('stalled', jid)
2391
+ end
2392
+ Reqless.job(jid):history(now, 'timed-out')
2393
+ redis.call('hset', ReqlessJob.ns .. jid, 'grace', 1)
2394
+
2395
+ -- Send a message to let the worker know that it has lost its lock on
2396
+ -- the job
2397
+ local encoded = cjson.encode({
2398
+ jid = jid,
2399
+ event = 'lock_lost',
2400
+ worker = worker,
2401
+ })
2402
+ Reqless.publish('w:' .. worker, encoded)
2403
+ Reqless.publish('log', encoded)
2404
+ self.locks.add(now + grace_period, jid)
2405
+
2406
+ -- If we got any expired locks, then we should increment the
2407
+ -- number of retries for this stage for this bin. The bin is
2408
+ -- midnight of the provided day
2409
+ local bin = now - (now % 86400)
2410
+ redis.call('hincrby',
2411
+ 'ql:s:stats:' .. bin .. ':' .. self.name, 'retries', 1)
2412
+ end
2413
+
2414
+ if invalidate then
2415
+ -- Unset the grace-period flag so that next time around we'll
2416
+ -- send the courtesy message again
2417
+ redis.call('hdel', ReqlessJob.ns .. jid, 'grace', 0)
2418
+
2419
+ -- See how many remaining retries the job has
2420
+ local remaining = tonumber(redis.call(
2421
+ 'hincrby', ReqlessJob.ns .. jid, 'remaining', -1))
2422
+
2423
+ -- This is where we actually have to time out the work
2424
+ if remaining < 0 then
2425
+ -- Now remove the instance from the scheduled and work queues
2426
+ -- for the queue it's in
2427
+ self.work.remove(jid)
2428
+ self.locks.remove(jid)
2429
+ self.scheduled.remove(jid)
2430
+
2431
+ local job = Reqless.job(jid)
2432
+ local job_data = Reqless.job(jid):data()
2433
+ local queue = job_data['queue']
2434
+ local group = 'failed-retries-' .. queue
2435
+
2436
+ job:throttles_release(now)
2437
+
2438
+ job:history(now, 'failed', {group = group})
2439
+ redis.call('hmset', ReqlessJob.ns .. jid, 'state', 'failed',
2440
+ 'worker', '',
2441
+ 'expires', '')
2442
+ -- Record the failure data for this job
2443
+ redis.call('hset', ReqlessJob.ns .. jid,
2444
+ 'failure', cjson.encode({
2445
+ group = group,
2446
+ message = 'Job exhausted retries in queue "' .. self.name .. '"',
2447
+ when = now,
2448
+ worker = unpack(job:data('worker'))
2449
+ }))
2450
+
2451
+ -- Add this type of failure to the list of failures
2452
+ redis.call('sadd', 'ql:failures', group)
2453
+ -- And add this particular instance to the failed types
2454
+ redis.call('lpush', 'ql:f:' .. group, jid)
2455
+
2456
+ if redis.call('zscore', 'ql:tracked', jid) ~= false then
2457
+ Reqless.publish('failed', jid)
2458
+ end
2459
+ Reqless.publish('log', cjson.encode({
2460
+ jid = jid,
2461
+ event = 'failed',
2462
+ group = group,
2463
+ worker = worker,
2464
+ message =
2465
+ 'Job exhausted retries in queue "' .. self.name .. '"'
2466
+ }))
2467
+
2468
+ -- Increment the count of the failed jobs
2469
+ local bin = now - (now % 86400)
2470
+ redis.call('hincrby',
2471
+ 'ql:s:stats:' .. bin .. ':' .. self.name, 'failures', 1)
2472
+ redis.call('hincrby',
2473
+ 'ql:s:stats:' .. bin .. ':' .. self.name, 'failed' , 1)
2474
+ else
2475
+ table.insert(jids, jid)
2476
+ end
2477
+ end
2478
+ end
2479
+
2480
+ return jids
2481
+ end
2482
+
2483
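-- A sketch, not part of the packaged script, of the two-phase behaviour implemented in
-- ReqlessQueue:invalidate_locks above. On the first expiry of a lock the script only warns:
-- it publishes 'lock_lost' (and 'stalled' for tracked jobs), sets the job's 'grace' flag,
-- and re-adds the lock for `grace-period` more seconds. On the next expiry it decrements
-- 'remaining' and either hands the jid back out for re-popping or fails it into the
-- 'failed-retries-<queue>' group. Setting the config to 0 collapses the two phases into one.
-- `Reqless.config.set` is assumed here to be the setter counterpart of the
-- `Reqless.config.get` calls used above, as defined in reqless-lib.lua.
local function sketch_disable_grace_period()
  Reqless.config.set('grace-period', 0)  -- assumed setter; time out and retry/fail in one pass
end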
+ -- Forget the provided queues. As in, remove them from the list of known queues
2484
+ function ReqlessQueue.deregister(...)
2485
+ redis.call('zrem', Reqless.ns .. 'queues', unpack(arg))
2486
+ end
2487
+
2488
+ -- Return information about a particular queue, or all queues
2489
+ -- [
2490
+ -- {
2491
+ -- 'name': 'testing',
2492
+ -- 'stalled': 2,
2493
+ -- 'waiting': 5,
2494
+ -- 'running': 5,
2495
+ -- 'scheduled': 10,
2496
+ -- 'depends': 5,
2497
+ -- 'recurring': 0
2498
+ -- }, {
2499
+ -- ...
2500
+ -- }
2501
+ -- ]
2502
+ function ReqlessQueue.counts(now, name)
2503
+ if name then
2504
+ local queue = Reqless.queue(name)
2505
+ local stalled = queue.locks.length(now)
2506
+ -- Check for any scheduled jobs that need to be moved
2507
+ queue:check_scheduled(now, queue.scheduled.length())
2508
+ return {
2509
+ name = name,
2510
+ waiting = queue.work.length(),
2511
+ stalled = stalled,
2512
+ running = queue.locks.length() - stalled,
2513
+ throttled = queue.throttled.length(),
2514
+ scheduled = queue.scheduled.length(),
2515
+ depends = queue.depends.length(),
2516
+ recurring = queue.recurring.length(),
2517
+ paused = queue:paused()
2518
+ }
2519
+ end
2520
+
2521
+ local queues = redis.call('zrange', 'ql:queues', 0, -1)
2522
+ local response = {}
2523
+ for _, qname in ipairs(queues) do
2524
+ table.insert(response, ReqlessQueue.counts(now, qname))
2525
+ end
2526
+ return response
2527
+ end
2528
+ local ReqlessQueuePatterns = {
2529
+ default_identifiers_default_pattern = '["*"]',
2530
+ ns = Reqless.ns .. "qp:",
2531
+ }
2532
+ ReqlessQueuePatterns.__index = ReqlessQueuePatterns
2533
+
2534
+ ReqlessQueuePatterns['getIdentifierPatterns'] = function(now)
2535
+ local reply = redis.call('hgetall', ReqlessQueuePatterns.ns .. 'identifiers')
2536
+
2537
+ if #reply == 0 then
2538
+ -- Check legacy key
2539
+ reply = redis.call('hgetall', 'qmore:dynamic')
2540
+ end
2541
+
2542
+ -- Include default pattern in case identifier patterns have never been set.
2543
+ local identifierPatterns = {
2544
+ ['default'] = ReqlessQueuePatterns.default_identifiers_default_pattern,
2545
+ }
2546
+ for i = 1, #reply, 2 do
2547
+ identifierPatterns[reply[i]] = reply[i + 1]
2548
+ end
2549
+
2550
+ return identifierPatterns
2551
+ end
2552
+
2553
+ -- Each key is a string and each value is a string containing a JSON list of
2554
+ -- patterns.
2555
+ ReqlessQueuePatterns['setIdentifierPatterns'] = function(now, ...)
2556
+ if #arg % 2 == 1 then
2557
+ error('Odd number of identifier patterns: ' .. tostring(arg))
2558
+ end
2559
+ local key = ReqlessQueuePatterns.ns .. 'identifiers'
2560
+
2561
+ local goodDefault = false;
2562
+ local identifierPatterns = {}
2563
+ for i = 1, #arg, 2 do
2564
+ local key = arg[i]
2565
+ local serializedValues = arg[i + 1]
2566
+
2567
+ -- Ensure that the value is valid JSON.
2568
+ local values = cjson.decode(serializedValues)
2569
+
2570
+ -- Only write the value if there are items in the list.
2571
+ if #values > 0 then
2572
+ if key == 'default' then
2573
+ goodDefault = true
2574
+ end
2575
+ table.insert(identifierPatterns, key)
2576
+ table.insert(identifierPatterns, serializedValues)
2577
+ end
2578
+ end
2579
+
2580
+ -- Ensure some kind of default value is persisted.
2581
+ if not goodDefault then
2582
+ table.insert(identifierPatterns, "default")
2583
+ table.insert(
2584
+ identifierPatterns,
2585
+ ReqlessQueuePatterns.default_identifiers_default_pattern
2586
+ )
2587
+ end
2588
+
2589
+ -- Clear out the legacy key too
2590
+ redis.call('del', key, 'qmore:dynamic')
2591
+ redis.call('hset', key, unpack(identifierPatterns))
2592
+ end
2593
+
2594
+ ReqlessQueuePatterns['getPriorityPatterns'] = function(now)
2595
+ local reply = redis.call('lrange', ReqlessQueuePatterns.ns .. 'priorities', 0, -1)
2596
+
2597
+ if #reply == 0 then
2598
+ -- Check legacy key
2599
+ reply = redis.call('lrange', 'qmore:priority', 0, -1)
2600
+ end
2601
+
2602
+ return reply
2603
+ end
2604
+
2605
+ -- Each key is a string and each value is a string containing a JSON object
2606
+ -- where the JSON object has a shape like:
2607
+ -- {"fairly": true, "pattern": ["string", "string", "string"]}
2608
+ ReqlessQueuePatterns['setPriorityPatterns'] = function(now, ...)
2609
+ local key = ReqlessQueuePatterns.ns .. 'priorities'
2610
+ redis.call('del', key)
2611
+ -- Clear out the legacy key
2612
+ redis.call('del', 'qmore:priority')
2613
+ if #arg > 0 then
2614
+ redis.call('rpush', key, unpack(arg))
2615
+ end
2616
+ end
2617
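-- A usage sketch, not part of the packaged script, of the ReqlessQueuePatterns helpers
-- above. Identifier patterns map a name to a JSON list of queue-name globs; priority
-- patterns are JSON objects of the shape {"fairly": ..., "pattern": [...]}. The pattern
-- values below are hypothetical.
local function sketch_queue_patterns(now)
  ReqlessQueuePatterns.setIdentifierPatterns(now,
    'default', '["*"]',
    'batch',   '["batch-*", "bulk-*"]')
  ReqlessQueuePatterns.setPriorityPatterns(now,
    '{"fairly": false, "pattern": ["critical-*"]}',
    '{"fairly": true,  "pattern": ["batch"]}')
  return ReqlessQueuePatterns.getIdentifierPatterns(now),
         ReqlessQueuePatterns.getPriorityPatterns(now)
end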
+ -- Get all the attributes of this particular job
2618
+ function ReqlessRecurringJob:data()
2619
+ local job = redis.call(
2620
+ 'hmget', 'ql:r:' .. self.jid, 'jid', 'klass', 'state', 'queue',
2621
+ 'priority', 'interval', 'retries', 'count', 'data', 'tags', 'backlog', 'throttles')
2622
+
2623
+ if not job[1] then
2624
+ return nil
2625
+ end
2626
+
2627
+ return {
2628
+ jid = job[1],
2629
+ klass = job[2],
2630
+ state = job[3],
2631
+ queue = job[4],
2632
+ priority = tonumber(job[5]),
2633
+ interval = tonumber(job[6]),
2634
+ retries = tonumber(job[7]),
2635
+ count = tonumber(job[8]),
2636
+ data = job[9],
2637
+ tags = cjson.decode(job[10]),
2638
+ backlog = tonumber(job[11] or 0),
2639
+ throttles = cjson.decode(job[12] or '[]'),
2640
+ }
2641
+ end
2642
+
2643
+ -- Update the recurring job data. Key can be:
2644
+ -- - priority
2645
+ -- - interval
2646
+ -- - retries
2647
+ -- - data
2648
+ -- - klass
2649
+ -- - queue
2650
+ -- - backlog
2651
+ function ReqlessRecurringJob:update(now, ...)
2652
+ local options = {}
2653
+ -- Make sure that the job exists
2654
+ if redis.call('exists', 'ql:r:' .. self.jid) == 0 then
2655
+ error('Recur(): No recurring job ' .. self.jid)
2656
+ end
2657
+
2658
+ for i = 1, #arg, 2 do
2659
+ local key = arg[i]
2660
+ local value = arg[i+1]
2661
+ assert(value, 'No value provided for ' .. tostring(key))
2662
+ if key == 'priority' or key == 'interval' or key == 'retries' then
2663
+ value = assert(tonumber(value), 'Recur(): Arg "' .. key .. '" must be a number: ' .. tostring(value))
2664
+ -- If the command is 'interval', then we need to update the
2665
+ -- time when it should next be scheduled
2666
+ if key == 'interval' then
2667
+ local queue, interval = unpack(redis.call('hmget', 'ql:r:' .. self.jid, 'queue', 'interval'))
2668
+ Reqless.queue(queue).recurring.update(
2669
+ value - tonumber(interval), self.jid)
2670
+ end
2671
+ redis.call('hset', 'ql:r:' .. self.jid, key, value)
2672
+ elseif key == 'data' then
2673
+ assert(cjson.decode(value), 'Recur(): Arg "data" is not JSON-encoded: ' .. tostring(value))
2674
+ redis.call('hset', 'ql:r:' .. self.jid, 'data', value)
2675
+ elseif key == 'klass' then
2676
+ redis.call('hset', 'ql:r:' .. self.jid, 'klass', value)
2677
+ elseif key == 'queue' then
2678
+ local old_queue_name = redis.call('hget', 'ql:r:' .. self.jid, 'queue')
2679
+ local queue_obj = Reqless.queue(old_queue_name)
2680
+ local score = queue_obj.recurring.score(self.jid)
2681
+
2682
+ -- Detach from the old queue
2683
+ queue_obj.recurring.remove(self.jid)
2684
+ local throttles = cjson.decode(redis.call('hget', 'ql:r:' .. self.jid, 'throttles') or '{}')
2685
+ for index, throttle_name in ipairs(throttles) do
2686
+ if throttle_name == ReqlessQueue.ns .. old_queue_name then
2687
+ table.remove(throttles, index)
2688
+ end
2689
+ end
2690
+
2691
+
2692
+ -- Attach to the new queue
2693
+ table.insert(throttles, ReqlessQueue.ns .. value)
2694
+ redis.call('hset', 'ql:r:' .. self.jid, 'throttles', cjson.encode(throttles))
2695
+
2696
+ Reqless.queue(value).recurring.add(score, self.jid)
2697
+ redis.call('hset', 'ql:r:' .. self.jid, 'queue', value)
2698
+ -- If we don't already know about the queue, learn about it
2699
+ if redis.call('zscore', 'ql:queues', value) == false then
2700
+ redis.call('zadd', 'ql:queues', now, value)
2701
+ end
2702
+ elseif key == 'backlog' then
2703
+ value = assert(tonumber(value),
2704
+ 'Recur(): Arg "backlog" not a number: ' .. tostring(value))
2705
+ redis.call('hset', 'ql:r:' .. self.jid, 'backlog', value)
2706
+ elseif key == 'throttles' then
2707
+ local throttles = assert(cjson.decode(value), 'Recur(): Arg "throttles" is not JSON-encoded: ' .. tostring(value))
2708
+ redis.call('hset', 'ql:r:' .. self.jid, 'throttles', cjson.encode(throttles))
2709
+ else
2710
+ error('Recur(): Unrecognized option "' .. key .. '"')
2711
+ end
2712
+ end
2713
+
2714
+ return true
2715
+ end
2716
+
2717
+ -- Tags this recurring job with the provided tags
2718
+ function ReqlessRecurringJob:tag(...)
2719
+ local tags = redis.call('hget', 'ql:r:' .. self.jid, 'tags')
2720
+ -- If the job has been canceled / deleted, then throw an error.
2721
+ if not tags then
2722
+ error('Tag(): Job ' .. self.jid .. ' does not exist')
2723
+ end
2724
+
2725
+ -- Decode the json blob, convert to dictionary
2726
+ tags = cjson.decode(tags)
2727
+ local _tags = {}
2728
+ for _, v in ipairs(tags) do
2729
+ _tags[v] = true
2730
+ end
2731
+
2732
+ -- Otherwise, add any of the provided tags that aren't already present
2733
+ for i = 1, #arg do
2734
+ if _tags[arg[i]] == nil then
2735
+ table.insert(tags, arg[i])
2736
+ end
2737
+ end
2738
+
2739
+ tags = cjsonArrayDegenerationWorkaround(tags)
2740
+ redis.call('hset', 'ql:r:' .. self.jid, 'tags', tags)
2741
+
2742
+ return tags
2743
+ end
2744
+
2745
+ -- Removes a tag from the recurring job
2746
+ function ReqlessRecurringJob:untag(...)
2747
+ -- Get the existing tags
2748
+ local tags = redis.call('hget', 'ql:r:' .. self.jid, 'tags')
2749
+
2750
+ -- If the job has been canceled / deleted, then throw an error
2751
+ if not tags then
2752
+ error('Untag(): Job ' .. self.jid .. ' does not exist')
2753
+ end
2754
+
2755
+ -- Decode the json blob, convert to dictionary
2756
+ tags = cjson.decode(tags)
2757
+
2758
+ local _tags = {}
2759
+ -- Make a dictionary
2760
+ for _, v in ipairs(tags) do
2761
+ _tags[v] = true
2762
+ end
2763
+
2764
+ -- Delete these from the hash
2765
+ for i = 1, #arg do
2766
+ _tags[arg[i]] = nil
2767
+ end
2768
+
2769
+ -- Back into a list
2770
+ local results = {}
2771
+ for _, tag in ipairs(tags) do
2772
+ if _tags[tag] then
2773
+ table.insert(results, tag)
2774
+ end
2775
+ end
2776
+
2777
+ -- json encode them, set, and return
2778
+ tags = cjson.encode(results)
2779
+ redis.call('hset', 'ql:r:' .. self.jid, 'tags', tags)
2780
+
2781
+ return tags
2782
+ end
2783
+
2784
+ -- Stop further occurrences of this job
2785
+ function ReqlessRecurringJob:cancel()
2786
+ -- First, find out what queue it was attached to
2787
+ local queue = redis.call('hget', 'ql:r:' .. self.jid, 'queue')
2788
+ if queue then
2789
+ -- Now, delete it from the queue it was attached to, and delete the
2790
+ -- thing itself
2791
+ Reqless.queue(queue).recurring.remove(self.jid)
2792
+ redis.call('del', 'ql:r:' .. self.jid)
2793
+ end
2794
+
2795
+ return true
2796
+ end
2797
+ -- Deregisters these workers from the list of known workers
2798
+ function ReqlessWorker.deregister(...)
2799
+ redis.call('zrem', 'ql:workers', unpack(arg))
2800
+ end
2801
+
2802
+ -- Provide data about all the workers, or if a specific worker is provided,
2803
+ -- then which jobs that worker is responsible for. If no worker is provided,
2804
+ -- expect a response of the form:
2805
+ --
2806
+ -- [
2807
+ -- # This is sorted by the recency of activity from that worker
2808
+ -- {
2809
+ -- 'name' : 'hostname1-pid1',
2810
+ -- 'jobs' : 20,
2811
+ -- 'stalled': 0
2812
+ -- }, {
2813
+ -- ...
2814
+ -- }
2815
+ -- ]
2816
+ --
2817
+ -- If a worker id is provided, then expect a response of the form:
2818
+ --
2819
+ -- {
2820
+ -- 'jobs': [
2821
+ -- jid1,
2822
+ -- jid2,
2823
+ -- ...
2824
+ -- ], 'stalled': [
2825
+ -- jid1,
2826
+ -- ...
2827
+ -- ]
2828
+ -- }
2829
+ --
2830
+ function ReqlessWorker.counts(now, worker)
2831
+ -- Clean up all the workers' job lists if they're too old. This is
2832
+ -- determined by the `max-worker-age` configuration, defaulting to the
2833
+ -- last day. Seems like a 'reasonable' default
2834
+ local interval = tonumber(Reqless.config.get('max-worker-age', 86400))
2835
+
2836
+ local workers = redis.call('zrangebyscore', 'ql:workers', 0, now - interval)
2837
+ for _, worker in ipairs(workers) do
2838
+ redis.call('del', 'ql:w:' .. worker .. ':jobs')
2839
+ end
2840
+
2841
+ -- And now remove them from the list of known workers
2842
+ redis.call('zremrangebyscore', 'ql:workers', 0, now - interval)
2843
+
2844
+ if worker then
2845
+ return {
2846
+ jobs = redis.call('zrevrangebyscore', 'ql:w:' .. worker .. ':jobs', now + 8640000, now),
2847
+ stalled = redis.call('zrevrangebyscore', 'ql:w:' .. worker .. ':jobs', now, 0)
2848
+ }
2849
+ end
2850
+
2851
+ local response = {}
2852
+ local workers = redis.call('zrevrange', 'ql:workers', 0, -1)
2853
+ for _, worker in ipairs(workers) do
2854
+ table.insert(response, {
2855
+ name = worker,
2856
+ jobs = redis.call('zcount', 'ql:w:' .. worker .. ':jobs', now, now + 8640000),
2857
+ stalled = redis.call('zcount', 'ql:w:' .. worker .. ':jobs', 0, now)
2858
+ })
2859
+ end
2860
+ return response
2861
+ end
2862
+ -- Retrieve the data for a throttled resource
2863
+ function ReqlessThrottle:data()
2864
+ -- Default values for the data
2865
+ local data = {
2866
+ id = self.id,
2867
+ maximum = 0
2868
+ }
2869
+
2870
+ -- Retrieve data stored in redis
2871
+ local throttle = redis.call('hmget', ReqlessThrottle.ns .. self.id, 'id', 'maximum')
2872
+
2873
+ if throttle[2] then
2874
+ data.maximum = tonumber(throttle[2])
2875
+ end
2876
+
2877
+ return data
2878
+ end
2879
+
2880
+ -- Like data, but includes ttl.
2881
+ function ReqlessThrottle:dataWithTtl()
2882
+ local data = self:data()
2883
+ data.ttl = self:ttl()
2884
+ return data
2885
+ end
2886
+
2887
+ -- Set the data for a throttled resource
2888
+ function ReqlessThrottle:set(data, expiration)
2889
+ redis.call('hmset', ReqlessThrottle.ns .. self.id, 'id', self.id, 'maximum', data.maximum)
2890
+ if expiration > 0 then
2891
+ redis.call('expire', ReqlessThrottle.ns .. self.id, expiration)
2892
+ end
2893
+ end
2894
+
2895
+ -- Delete a throttled resource
2896
+ function ReqlessThrottle:unset()
2897
+ redis.call('del', ReqlessThrottle.ns .. self.id)
2898
+ end
2899
+
2900
+ -- Acquire a throttled resource for a job.
2901
+ -- Returns true if the job acquired the resource, false otherwise
2902
+ function ReqlessThrottle:acquire(jid)
2903
+ if not self:available() then
2904
+ return false
2905
+ end
2906
+
2907
+ self.locks.add(1, jid)
2908
+ return true
2909
+ end
2910
+
2911
+ function ReqlessThrottle:pend(now, jid)
2912
+ self.pending.add(now, jid)
2913
+ end
2914
+
2915
+ -- Releases the lock taken by the specified jid.
2916
+ -- The number of jobs released back into the queues is determined by the locks_available method.
2917
+ function ReqlessThrottle:release(now, jid)
2918
+ -- Only attempt to remove from the pending set if the job wasn't found in the
2919
+ -- locks set
2920
+ if self.locks.remove(jid) == 0 then
2921
+ self.pending.remove(jid)
2922
+ end
2923
+
2924
+ local available_locks = self:locks_available()
2925
+ if self.pending.length() == 0 or available_locks < 1 then
2926
+ return
2927
+ end
2928
+
2929
+ -- subtract one to ensure we pop the correct amount. peek(0, 0) returns the first element
2930
+ -- peek(0, 1) returns the first two.
2931
+ for _, jid in ipairs(self.pending.peek(0, available_locks - 1)) do
2932
+ local job = Reqless.job(jid)
2933
+ local data = job:data()
2934
+ local queue = Reqless.queue(data['queue'])
2935
+
2936
+ queue.throttled.remove(jid)
2937
+ queue.work.add(now, data.priority, jid)
2938
+ end
2939
+
2940
+ -- subtract one to ensure we pop the correct amount. pop(0, 0) pops the first element
2941
+ -- pop(0,1) pops the first two.
2942
+ local popped = self.pending.pop(0, available_locks - 1)
2943
+ end
2944
+
2945
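-- A worked sketch, not part of the packaged script, of the capacity check used by
-- ReqlessThrottle:release above. With maximum = 2 and one lock still held, one slot is free,
-- so release promotes exactly one pending jid back onto its queue's work set (the
-- `available_locks - 1` bounds exist because peek/pop use inclusive, zero-based ranges).
-- With maximum = 0 the throttle is unlimited and up to 10 pending jids are drained per call.
local function sketch_release_capacity(maximum, held_locks)
  if maximum == 0 then
    return 10                        -- unlimited throttle: drain up to 10 pending jids
  end
  return maximum - held_locks        -- e.g. 2 - 1 = 1 pending jid promoted
end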
+ -- Returns true if the throttle has locks available, false otherwise.
2946
+ function ReqlessThrottle:available()
2947
+ return self.maximum == 0 or self.locks.length() < self.maximum
2948
+ end
2949
+
2950
+ -- Returns the TTL of the throttle
2951
+ function ReqlessThrottle:ttl()
2952
+ return redis.call('ttl', ReqlessThrottle.ns .. self.id)
2953
+ end
2954
+
2955
+ -- Returns the number of locks available for the throttle.
2956
+ -- Calculated as maximum - locks.length(); if the throttle is unlimited,
2957
+ -- then up to 10 jobs are released.
2958
+ function ReqlessThrottle:locks_available()
2959
+ if self.maximum == 0 then
2960
+ -- Arbitrarily chosen value; we might want to make it configurable in the future.
2961
+ return 10
2962
+ end
2963
+
2964
+ return self.maximum - self.locks.length()
2965
+ end