qless 0.9.1

Files changed (43)
  1. data/Gemfile +8 -0
  2. data/HISTORY.md +168 -0
  3. data/README.md +571 -0
  4. data/Rakefile +28 -0
  5. data/bin/qless-campfire +106 -0
  6. data/bin/qless-growl +99 -0
  7. data/bin/qless-web +23 -0
  8. data/lib/qless.rb +185 -0
  9. data/lib/qless/config.rb +31 -0
  10. data/lib/qless/job.rb +259 -0
  11. data/lib/qless/job_reservers/ordered.rb +23 -0
  12. data/lib/qless/job_reservers/round_robin.rb +34 -0
  13. data/lib/qless/lua.rb +25 -0
  14. data/lib/qless/qless-core/cancel.lua +71 -0
  15. data/lib/qless/qless-core/complete.lua +218 -0
  16. data/lib/qless/qless-core/config.lua +44 -0
  17. data/lib/qless/qless-core/depends.lua +65 -0
  18. data/lib/qless/qless-core/fail.lua +107 -0
  19. data/lib/qless/qless-core/failed.lua +83 -0
  20. data/lib/qless/qless-core/get.lua +37 -0
  21. data/lib/qless/qless-core/heartbeat.lua +50 -0
  22. data/lib/qless/qless-core/jobs.lua +41 -0
  23. data/lib/qless/qless-core/peek.lua +155 -0
  24. data/lib/qless/qless-core/pop.lua +278 -0
  25. data/lib/qless/qless-core/priority.lua +32 -0
  26. data/lib/qless/qless-core/put.lua +156 -0
  27. data/lib/qless/qless-core/queues.lua +58 -0
  28. data/lib/qless/qless-core/recur.lua +181 -0
  29. data/lib/qless/qless-core/retry.lua +73 -0
  30. data/lib/qless/qless-core/ruby/lib/qless-core.rb +1 -0
  31. data/lib/qless/qless-core/ruby/lib/qless/core.rb +13 -0
  32. data/lib/qless/qless-core/ruby/lib/qless/core/version.rb +5 -0
  33. data/lib/qless/qless-core/ruby/spec/qless_core_spec.rb +13 -0
  34. data/lib/qless/qless-core/stats.lua +92 -0
  35. data/lib/qless/qless-core/tag.lua +100 -0
  36. data/lib/qless/qless-core/track.lua +79 -0
  37. data/lib/qless/qless-core/workers.lua +69 -0
  38. data/lib/qless/queue.rb +141 -0
  39. data/lib/qless/server.rb +411 -0
  40. data/lib/qless/tasks.rb +10 -0
  41. data/lib/qless/version.rb +3 -0
  42. data/lib/qless/worker.rb +195 -0
  43. metadata +239 -0
data/lib/qless/qless-core/pop.lua
@@ -0,0 +1,278 @@
+ -- This script takes the name of the queue and then checks
+ -- for any expired locks, then inserts any scheduled items
+ -- that are now valid, and lastly returns any work items
+ -- that can be handed over.
+ --
+ -- Keys:
+ --    1) queue name
+ -- Args:
+ --    1) worker name
+ --    2) the number of items to return
+ --    3) the current time
+
+ if #KEYS ~= 1 then
+     if #KEYS < 1 then
+         error('Pop(): Expected 1 KEYS argument')
+     else
+         error('Pop(): Got ' .. #KEYS .. ', expected 1 KEYS argument')
+     end
+ end
+
+ local queue  = assert(KEYS[1], 'Pop(): Key "queue" missing')
+ local key    = 'ql:q:' .. queue
+ local worker = assert(ARGV[1], 'Pop(): Arg "worker" missing')
+ local count  = assert(tonumber(ARGV[2]), 'Pop(): Arg "count" missing or not a number: ' .. (ARGV[2] or 'nil'))
+ local now    = assert(tonumber(ARGV[3]), 'Pop(): Arg "now" missing or not a number: ' .. (ARGV[3] or 'nil'))
+
+ -- Find the heartbeat interval for this queue, falling back to the
+ -- global 'heartbeat' config (default 60 seconds)
+ local _hb, _qhb = unpack(redis.call('hmget', 'ql:config', 'heartbeat', queue .. '-heartbeat'))
+ local expires = now + tonumber(_qhb or _hb or 60)
+
+ -- The bin is midnight of the provided day
+ -- 24 * 60 * 60 = 86400
+ local bin = now - (now % 86400)
+
+ -- These are the ids that we're going to return
+ local keys = {}
+
+ -- Make sure we add this worker to the list of seen workers
+ redis.call('zadd', 'ql:workers', now, worker)
+
+ -- Iterate through all the expired locks and add them to the list
+ -- of keys that we'll return
+ for index, jid in ipairs(redis.call('zrangebyscore', key .. '-locks', 0, now, 'LIMIT', 0, count)) do
+     -- For each of these, decrement their retries. If any of them
+     -- have exhausted their retries, then we should mark them as
+     -- failed.
+     if redis.call('hincrby', 'ql:j:' .. jid, 'remaining', -1) < 0 then
+         -- Remove the instance from the work, locks and scheduled queues for the queue it's in
+         redis.call('zrem', 'ql:q:' .. queue .. '-work', jid)
+         redis.call('zrem', 'ql:q:' .. queue .. '-locks', jid)
+         redis.call('zrem', 'ql:q:' .. queue .. '-scheduled', jid)
+
+         local group = 'failed-retries-' .. queue
+         -- First things first, we should get the history
+         local history = redis.call('hget', 'ql:j:' .. jid, 'history')
+
+         -- Mark the most recent history entry as having failed now
+         history = cjson.decode(history or '[]')
+         history[#history]['failed'] = now
+
+         redis.call('hmset', 'ql:j:' .. jid, 'state', 'failed', 'worker', '',
+             'expires', '', 'history', cjson.encode(history), 'failure', cjson.encode({
+                 ['group']   = group,
+                 ['message'] = 'Job exhausted retries in queue "' .. queue .. '"',
+                 ['when']    = now,
+                 ['worker']  = history[#history]['worker']
+             }))
+
+         -- Add this type of failure to the list of failures
+         redis.call('sadd', 'ql:failures', group)
+         -- And add this particular instance to that failure group
+         redis.call('lpush', 'ql:f:' .. group, jid)
+
+         if redis.call('zscore', 'ql:tracked', jid) ~= false then
+             redis.call('publish', 'failed', jid)
+         end
+     else
+         table.insert(keys, jid)
+
+         if redis.call('zscore', 'ql:tracked', jid) ~= false then
+             redis.call('publish', 'stalled', jid)
+         end
+     end
+
+     -- Remove this job from the set of jobs assigned to its previous worker
+     local w = redis.call('hget', 'ql:j:' .. jid, 'worker')
+     redis.call('zrem', 'ql:w:' .. w .. ':jobs', jid)
+ end
+ -- Now we've checked all the locks for this queue that could have
+ -- expired, and we have no more than the number requested.
+
+ -- If we got any expired locks, then we should increment the
+ -- number of retries for this queue for this bin
+ redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'retries', #keys)
+
+ -- If we still need jobs in order to meet demand, then we should
+ -- look for any recurring jobs that are due to spawn a new instance
+ if #keys < count then
+     local r = redis.call('zrangebyscore', key .. '-recur', 0, now)
+     for index, jid in ipairs(r) do
+         -- For each of the jids that need jobs scheduled, first
+         -- get the last time each of them was run, and then increment
+         -- it by its interval. While this time is less than now,
+         -- we need to keep putting jobs on the queue
+         local klass, data, priority, tags, retries, interval = unpack(redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', 'tags', 'retries', 'interval'))
+         local _tags = cjson.decode(tags)
+
+         while math.floor(tonumber(redis.call('zscore', key .. '-recur', jid))) <= now do
+             local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1)
+
+             -- Add this job to the list of jobs tagged with whatever tags were supplied
+             for i, tag in ipairs(_tags) do
+                 redis.call('zadd', 'ql:t:' .. tag, now, jid .. '-' .. count)
+                 redis.call('zincrby', 'ql:tags', 1, tag)
+             end
+
+             -- First, let's save its data
+             redis.call('hmset', 'ql:j:' .. jid .. '-' .. count,
+                 'jid'      , jid .. '-' .. count,
+                 'klass'    , klass,
+                 'data'     , data,
+                 'priority' , priority,
+                 'tags'     , tags,
+                 'state'    , 'waiting',
+                 'worker'   , '',
+                 'expires'  , 0,
+                 'queue'    , queue,
+                 'retries'  , retries,
+                 'remaining', retries,
+                 'history'  , cjson.encode({{
+                     q   = queue,
+                     put = math.floor(now)
+                 }}))
+
+             -- Recurring instances go straight onto the work queue. The score
+             -- is the priority minus a tiny time-based offset, so higher
+             -- priorities pop first and older jobs win ties.
+             redis.call('zadd', key .. '-work', priority - (now / 10000000000), jid .. '-' .. count)
+
+             redis.call('zincrby', key .. '-recur', interval, jid)
+         end
+     end
+ end
+
+ -- If we still need values in order to meet the demand, then we
+ -- should check whether any scheduled items have become valid, and
+ -- if so, insert them to ensure correctness when pulling off the
+ -- next unit of work.
+ if #keys < count then
+     -- zadd is a list of arguments that we'll be able to use to
+     -- insert into the work queue
+     local zadd = {}
+     local r = redis.call('zrangebyscore', key .. '-scheduled', 0, now, 'LIMIT', 0, (count - #keys))
+     for index, jid in ipairs(r) do
+         -- With these in hand, we'll have to go out and find the
+         -- priorities of these jobs, and then we'll insert them
+         -- into the work queue and then when that's complete, we'll
+         -- remove them from the scheduled queue
+         table.insert(zadd, tonumber(redis.call('hget', 'ql:j:' .. jid, 'priority') or 0))
+         table.insert(zadd, jid)
+     end
+
+     -- Now add these to the work list, and then remove them
+     -- from the scheduled list
+     if #zadd > 0 then
+         redis.call('zadd', key .. '-work', unpack(zadd))
+         redis.call('zrem', key .. '-scheduled', unpack(r))
+     end
+
+     -- And now we should get up to the maximum number of requested
+     -- work items from the work queue.
+     for index, jid in ipairs(redis.call('zrevrange', key .. '-work', 0, (count - #keys) - 1)) do
+         table.insert(keys, jid)
+     end
+ end
+
+ -- Alright, now the `keys` table is filled with all the job
+ -- ids which we'll be returning. Now we need to get the
+ -- metadata about each of these, update that metadata to
+ -- reflect which worker they're on, when the lock expires,
+ -- etc., add them to the locks queue and then we have to
+ -- finally return a list of json blobs
+
+ local response = {}
+ local state
+ local history
+ for index, jid in ipairs(keys) do
+     -- First, we should get the state and history of the item
+     state, history = unpack(redis.call('hmget', 'ql:j:' .. jid, 'state', 'history'))
+
+     history = cjson.decode(history or '{}')
+     history[#history]['worker'] = worker
+     history[#history]['popped'] = math.floor(now)
+
+     ----------------------------------------------------------
+     -- This is the massive stats update that we have to do
+     ----------------------------------------------------------
+     -- This is how long we've been waiting to get popped
+     local waiting = math.floor(now) - history[#history]['put']
+     -- Update the running mean and variance of wait times for this bin
+     local count, mean, vk = unpack(redis.call('hmget', 'ql:s:wait:' .. bin .. ':' .. queue, 'total', 'mean', 'vk'))
+     count = count or 0
+     if count == 0 then
+         mean  = waiting
+         vk    = 0
+         count = 1
+     else
+         count = count + 1
+         local oldmean = mean
+         mean = mean + (waiting - mean) / count
+         vk   = vk + (waiting - mean) * (waiting - oldmean)
+     end
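+     -- (`mean` and `vk` follow Welford's online update: `vk` accumulates the
+     -- sum of squared deviations from the mean, so the wait-time variance for
+     -- this bin can later be recovered as vk / count.)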
+     -- Now, update the histogram
+     -- - `s1`, `s2`, ..., -- second-resolution histogram counts
+     -- - `m1`, `m2`, ..., -- minute-resolution
+     -- - `h1`, `h2`, ..., -- hour-resolution
+     -- - `d1`, `d2`, ..., -- day-resolution
+     waiting = math.floor(waiting)
+     if waiting < 60 then -- seconds
+         redis.call('hincrby', 'ql:s:wait:' .. bin .. ':' .. queue, 's' .. waiting, 1)
+     elseif waiting < 3600 then -- minutes
+         redis.call('hincrby', 'ql:s:wait:' .. bin .. ':' .. queue, 'm' .. math.floor(waiting / 60), 1)
+     elseif waiting < 86400 then -- hours
+         redis.call('hincrby', 'ql:s:wait:' .. bin .. ':' .. queue, 'h' .. math.floor(waiting / 3600), 1)
+     else -- days
+         redis.call('hincrby', 'ql:s:wait:' .. bin .. ':' .. queue, 'd' .. math.floor(waiting / 86400), 1)
+     end
+     redis.call('hmset', 'ql:s:wait:' .. bin .. ':' .. queue, 'total', count, 'mean', mean, 'vk', vk)
+     ----------------------------------------------------------
+
+     -- Add this job to the list of jobs handled by this worker
+     redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid)
+
+     -- Update the job's data, add its lock, and return the job
+     redis.call(
+         'hmset', 'ql:j:' .. jid, 'worker', worker, 'expires', expires,
+         'state', 'running', 'history', cjson.encode(history))
+
+     redis.call('zadd', key .. '-locks', expires, jid)
+     local job = redis.call(
+         'hmget', 'ql:j:' .. jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority',
+         'expires', 'retries', 'remaining', 'data', 'tags', 'history', 'failure')
+
+     local tracked = redis.call('zscore', 'ql:tracked', jid) ~= false
+     if tracked then
+         redis.call('publish', 'popped', jid)
+     end
+
+     table.insert(response, cjson.encode({
+         jid          = job[1],
+         klass        = job[2],
+         state        = job[3],
+         queue        = job[4],
+         worker       = job[5] or '',
+         tracked      = tracked,
+         priority     = tonumber(job[6]),
+         expires      = tonumber(job[7]) or 0,
+         retries      = tonumber(job[8]),
+         remaining    = tonumber(job[9]),
+         data         = cjson.decode(job[10]),
+         tags         = cjson.decode(job[11]),
+         history      = cjson.decode(job[12]),
+         failure      = cjson.decode(job[13] or '{}'),
+         dependents   = redis.call('smembers', 'ql:j:' .. jid .. '-dependents'),
+         -- A job in the waiting state can not have dependencies
+         -- because it has been popped off of a queue, which
+         -- means all of its dependencies have been satisfied
+         dependencies = {}
+     }))
+ end
+
+ if #keys > 0 then
+     redis.call('zrem', key .. '-work', unpack(keys))
+ end
+
+ return response
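
For reference, a minimal sketch of driving this script directly with the redis-rb client, following the key/argument layout documented in the header comment above (one key: the queue name; args: worker name, number of jobs, current time). The queue name, worker name and batch size here are placeholders, and the gem itself loads these scripts through its own wrapper (presumably data/lib/qless/lua.rb listed above) rather than raw EVAL.

    # Illustrative only: call pop.lua via EVAL with one key (the queue name)
    # and ARGV = [worker name, max number of jobs, current time].
    require 'redis'
    require 'json'

    redis  = Redis.new
    script = File.read('lib/qless/qless-core/pop.lua')

    blobs = redis.eval(script, keys: ['testing'], argv: ['worker-1', 5, Time.now.to_f])
    blobs.each { |blob| puts JSON.parse(blob)['jid'] }  # each blob is one popped job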
data/lib/qless/qless-core/priority.lua
@@ -0,0 +1,32 @@
+ -- priority(0, jid, priority)
+ -- --------------------------
+ -- Accepts a jid, and a new priority for the job. If the job
+ -- doesn't exist, then return false. Otherwise, return the
+ -- updated priority. If the job is waiting, then the change
+ -- will be reflected in the order in which it's popped
+
+ if #KEYS ~= 0 then
+     error('Priority(): Got ' .. #KEYS .. ', expected 0')
+ end
+
+ local jid      = assert(ARGV[1], 'Priority(): Arg "jid" missing')
+ local priority = assert(tonumber(ARGV[2]), 'Priority(): Arg "priority" missing or not a number: ' .. tostring(ARGV[2]))
+
+ -- Get the queue the job is currently in, if any
+ local queue = redis.call('hget', 'ql:j:' .. jid, 'queue')
+
+ -- Note that hget returns false (not nil) when the job doesn't exist
+ if queue == nil or queue == false then
+     return false
+ elseif queue == '' then
+     -- Just adjust the priority
+     redis.call('hset', 'ql:j:' .. jid, 'priority', priority)
+     return priority
+ else
+     -- Adjust the priority and see if it's a candidate for updating
+     -- its priority in the queue it's currently in
+     if redis.call('zscore', 'ql:q:' .. queue .. '-work', jid) then
+         redis.call('zadd', 'ql:q:' .. queue .. '-work', priority, jid)
+     end
+     redis.call('hset', 'ql:j:' .. jid, 'priority', priority)
+     return priority
+ end
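
A similarly minimal sketch for this script, following its header: no KEYS and two ARGV entries, the jid and the new priority. The jid below is a placeholder.

    # Illustrative only: change a job's priority via EVAL with zero keys.
    require 'redis'

    redis  = Redis.new
    script = File.read('lib/qless/qless-core/priority.lua')

    # Returns the new priority, or nil (Lua false) if the job doesn't exist.
    redis.eval(script, keys: [], argv: ['some-jid', 10])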
data/lib/qless/qless-core/put.lua
@@ -0,0 +1,156 @@
+ -- Put(1, queue, jid, klass, data, now, delay, [priority, p], [tags, t], [retries, r], [depends, '[...]'])
+ -- -------------------------------------------------------------------------------------------------------
+ -- This script takes the name of the queue and then the
+ -- info about the work item, and makes sure that it's
+ -- enqueued.
+ --
+ -- At some point, I'd like to be able to provide functionality
+ -- that enables this to generate a unique ID for this piece
+ -- of work. As such, client libraries should not expose
+ -- setting the id from the user, as this is an implementation
+ -- detail that's likely to change and users should not grow
+ -- to depend on it.
+ --
+ -- Keys:
+ --    1) queue name
+ -- Args:
+ --    1) jid
+ --    2) klass
+ --    3) data
+ --    4) now
+ --    5) delay
+ --    *) [priority, p], [tags, t], [retries, r], [depends, '[...]']
+
+ if #KEYS ~= 1 then
+     if #KEYS < 1 then
+         error('Put(): Expected 1 KEYS argument')
+     else
+         error('Put(): Got ' .. #KEYS .. ', expected 1 KEYS argument')
+     end
+ end
+
+ local queue = assert(KEYS[1], 'Put(): Key "queue" missing')
+ local jid   = assert(ARGV[1], 'Put(): Arg "jid" missing')
+ local klass = assert(ARGV[2], 'Put(): Arg "klass" missing')
+ local data  = assert(cjson.decode(ARGV[3]), 'Put(): Arg "data" missing or not JSON: ' .. tostring(ARGV[3]))
+ local now   = assert(tonumber(ARGV[4]), 'Put(): Arg "now" missing or not a number: ' .. tostring(ARGV[4]))
+ local delay = assert(tonumber(ARGV[5]), 'Put(): Arg "delay" not a number: ' .. tostring(ARGV[5]))
+
+ -- Read in all the optional parameters
+ local options = {}
+ for i = 6, #ARGV, 2 do options[ARGV[i]] = ARGV[i + 1] end
+
+ -- Let's see what the old priority, history and tags were
+ local history, priority, tags, oldqueue, state, failure, retries, worker = unpack(redis.call('hmget', 'ql:j:' .. jid, 'history', 'priority', 'tags', 'queue', 'state', 'failure', 'retries', 'worker'))
+
+ -- Sanity check on optional args
+ retries  = assert(tonumber(options['retries'] or retries or 5), 'Put(): Arg "retries" not a number: ' .. tostring(options['retries']))
+ tags     = assert(cjson.decode(options['tags'] or tags or '[]'), 'Put(): Arg "tags" not JSON: ' .. tostring(options['tags']))
+ priority = assert(tonumber(options['priority'] or priority or 0), 'Put(): Arg "priority" not a number: ' .. tostring(options['priority']))
+ local depends = assert(cjson.decode(options['depends'] or '[]'), 'Put(): Arg "depends" not JSON: ' .. tostring(options['depends']))
+
+ -- Delay and depends are not allowed together
+ if delay > 0 and #depends > 0 then
+     error('Put(): "delay" and "depends" are not allowed to be used together')
+ end
+
+ -- Update the history to include this new change
+ local history = cjson.decode(history or '{}')
+ table.insert(history, {
+     q   = queue,
+     put = math.floor(now)
+ })
+
+ -- If this item was previously in another queue, then we should remove it from there
+ if oldqueue then
+     redis.call('zrem', 'ql:q:' .. oldqueue .. '-work', jid)
+     redis.call('zrem', 'ql:q:' .. oldqueue .. '-locks', jid)
+     redis.call('zrem', 'ql:q:' .. oldqueue .. '-scheduled', jid)
+     redis.call('zrem', 'ql:q:' .. oldqueue .. '-depends', jid)
+ end
+
+ -- If this had previously been given out to a worker,
+ -- make sure to remove it from that worker's jobs
+ if worker then
+     redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)
+ end
+
+ -- If the job was previously in the 'completed' state, then we should
+ -- remove it from the set of jobs scheduled for destruction
+ if state == 'complete' then
+     redis.call('zrem', 'ql:completed', jid)
+ end
+
+ -- Add this job to the list of jobs tagged with whatever tags were supplied
+ for i, tag in ipairs(tags) do
+     redis.call('zadd', 'ql:t:' .. tag, now, jid)
+     redis.call('zincrby', 'ql:tags', 1, tag)
+ end
+
+ -- If we're in the failed state, remove all of our data
+ if state == 'failed' then
+     failure = cjson.decode(failure)
+     -- Remove this jid from that failure group's list of jobs
+     redis.call('lrem', 'ql:f:' .. failure.group, 0, jid)
+     if redis.call('llen', 'ql:f:' .. failure.group) == 0 then
+         redis.call('srem', 'ql:failures', failure.group)
+     end
+     -- The bin is midnight of the provided day
+     -- 24 * 60 * 60 = 86400
+     local bin = failure.when - (failure.when % 86400)
+     -- We also need to decrement the stats about the queue on
+     -- the day that this failure actually happened.
+     redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'failed', -1)
+ end
+
+ -- First, let's save its data
+ redis.call('hmset', 'ql:j:' .. jid,
+     'jid'      , jid,
+     'klass'    , klass,
+     'data'     , cjson.encode(data),
+     'priority' , priority,
+     'tags'     , cjson.encode(tags),
+     'state'    , ((delay > 0) and 'scheduled') or 'waiting',
+     'worker'   , '',
+     'expires'  , 0,
+     'queue'    , queue,
+     'retries'  , retries,
+     'remaining', retries,
+     'history'  , cjson.encode(history))
+
+ -- These are the jids we legitimately have to wait on
+ for i, j in ipairs(depends) do
+     -- Make sure it's something other than 'nil' or complete.
+     local state = redis.call('hget', 'ql:j:' .. j, 'state')
+     if (state and state ~= 'complete') then
+         redis.call('sadd', 'ql:j:' .. j .. '-dependents', jid)
+         redis.call('sadd', 'ql:j:' .. jid .. '-dependencies', j)
+     end
+ end
+
+ -- Now, if a delay was provided, and if it's in the future,
+ -- then we'll have to schedule it. Otherwise, we're just
+ -- going to add it to the work queue.
+ if delay > 0 then
+     redis.call('zadd', 'ql:q:' .. queue .. '-scheduled', now + delay, jid)
+ else
+     if redis.call('scard', 'ql:j:' .. jid .. '-dependencies') > 0 then
+         redis.call('zadd', 'ql:q:' .. queue .. '-depends', now, jid)
+         redis.call('hset', 'ql:j:' .. jid, 'state', 'depends')
+     else
+         redis.call('zadd', 'ql:q:' .. queue .. '-work', priority - (now / 10000000000), jid)
+     end
+ end
+
+ -- Lastly, we're going to make sure that this item is in the
+ -- set of known queues. We should keep this sorted by the
+ -- order in which we saw each of these queues
+ if redis.call('zscore', 'ql:queues', queue) == false then
+     redis.call('zadd', 'ql:queues', now, queue)
+ end
+
+ if redis.call('zscore', 'ql:tracked', jid) ~= false then
+     redis.call('publish', 'put', jid)
+ end
+
+ return jid
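
Finally, a minimal sketch of calling this script directly, following the argument layout in its header (one key: the queue name; then jid, klass, JSON data, now, delay, and optional name/value pairs). The queue name, class name, tags and data below are placeholders.

    # Illustrative only: enqueue a job by calling put.lua via EVAL.
    require 'redis'
    require 'json'
    require 'securerandom'

    redis  = Redis.new
    script = File.read('lib/qless/qless-core/put.lua')
    jid    = SecureRandom.hex(16)

    redis.eval(script,
               keys: ['testing'],
               argv: [jid, 'MyJobClass', JSON.generate('some' => 'data'),
                      Time.now.to_f, 0,                     # now, delay
                      'priority', 10, 'tags', '["daily"]',  # optional pairs
                      'retries', 3])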