qless 0.9.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. data/Gemfile +8 -0
  2. data/HISTORY.md +168 -0
  3. data/README.md +571 -0
  4. data/Rakefile +28 -0
  5. data/bin/qless-campfire +106 -0
  6. data/bin/qless-growl +99 -0
  7. data/bin/qless-web +23 -0
  8. data/lib/qless.rb +185 -0
  9. data/lib/qless/config.rb +31 -0
  10. data/lib/qless/job.rb +259 -0
  11. data/lib/qless/job_reservers/ordered.rb +23 -0
  12. data/lib/qless/job_reservers/round_robin.rb +34 -0
  13. data/lib/qless/lua.rb +25 -0
  14. data/lib/qless/qless-core/cancel.lua +71 -0
  15. data/lib/qless/qless-core/complete.lua +218 -0
  16. data/lib/qless/qless-core/config.lua +44 -0
  17. data/lib/qless/qless-core/depends.lua +65 -0
  18. data/lib/qless/qless-core/fail.lua +107 -0
  19. data/lib/qless/qless-core/failed.lua +83 -0
  20. data/lib/qless/qless-core/get.lua +37 -0
  21. data/lib/qless/qless-core/heartbeat.lua +50 -0
  22. data/lib/qless/qless-core/jobs.lua +41 -0
  23. data/lib/qless/qless-core/peek.lua +155 -0
  24. data/lib/qless/qless-core/pop.lua +278 -0
  25. data/lib/qless/qless-core/priority.lua +32 -0
  26. data/lib/qless/qless-core/put.lua +156 -0
  27. data/lib/qless/qless-core/queues.lua +58 -0
  28. data/lib/qless/qless-core/recur.lua +181 -0
  29. data/lib/qless/qless-core/retry.lua +73 -0
  30. data/lib/qless/qless-core/ruby/lib/qless-core.rb +1 -0
  31. data/lib/qless/qless-core/ruby/lib/qless/core.rb +13 -0
  32. data/lib/qless/qless-core/ruby/lib/qless/core/version.rb +5 -0
  33. data/lib/qless/qless-core/ruby/spec/qless_core_spec.rb +13 -0
  34. data/lib/qless/qless-core/stats.lua +92 -0
  35. data/lib/qless/qless-core/tag.lua +100 -0
  36. data/lib/qless/qless-core/track.lua +79 -0
  37. data/lib/qless/qless-core/workers.lua +69 -0
  38. data/lib/qless/queue.rb +141 -0
  39. data/lib/qless/server.rb +411 -0
  40. data/lib/qless/tasks.rb +10 -0
  41. data/lib/qless/version.rb +3 -0
  42. data/lib/qless/worker.rb +195 -0
  43. metadata +239 -0
data/lib/qless/job.rb ADDED
@@ -0,0 +1,259 @@
+ require "qless"
+ require "qless/queue"
+ require "qless/lua"
+ require "redis"
+ require "json"
+
+ module Qless
+   class BaseJob
+     def initialize(client, jid)
+       @client = client
+       @jid = jid
+     end
+
+     def klass
+       @klass ||= @klass_name.split('::').inject(Kernel) { |context, name| context.const_get(name) }
+     end
+
+     def queue
+       @queue ||= Queue.new(@queue_name, @client)
+     end
+   end
+
+   class Job < BaseJob
+     attr_reader :jid, :expires_at, :state, :queue_name, :history, :worker_name, :failure, :klass_name, :tracked, :dependencies, :dependents
+     attr_reader :original_retries, :retries_left
+     attr_accessor :data, :priority, :tags
+
+     def perform
+       klass.perform(self)
+     end
+
+     def self.build(client, klass, attributes = {})
+       defaults = {
+         "jid" => Qless.generate_jid,
+         "data" => {},
+         "klass" => klass.to_s,
+         "priority" => 0,
+         "tags" => [],
+         "worker" => "mock_worker",
+         "expires" => Time.now + (60 * 60), # an hour from now
+         "state" => "running",
+         "tracked" => false,
+         "queue" => "mock_queue",
+         "retries" => 5,
+         "remaining" => 5,
+         "failure" => {},
+         "history" => [],
+         "dependencies" => [],
+         "dependents" => []
+       }
+       attributes = defaults.merge(Qless.stringify_hash_keys(attributes))
+       attributes["data"] = JSON.load(JSON.dump attributes["data"])
+       new(client, attributes)
+     end
+
+     def initialize(client, atts)
+       super(client, atts.fetch('jid'))
+       %w{jid data priority tags state tracked
+          failure history dependencies dependents}.each do |att|
+         self.instance_variable_set("@#{att}".to_sym, atts.fetch(att))
+       end
+
+       @expires_at = atts.fetch('expires')
+       @klass_name = atts.fetch('klass')
+       @queue_name = atts.fetch('queue')
+       @worker_name = atts.fetch('worker')
+       @original_retries = atts.fetch('retries')
+       @retries_left = atts.fetch('remaining')
+
+       # This is a silly side-effect of Lua doing JSON parsing
+       @tags = [] if @tags == {}
+       @dependents = [] if @dependents == {}
+       @dependencies = [] if @dependencies == {}
+       @state_changed = false
+     end
+
+     def priority=(priority)
+       if @client._priority.call([], [@jid, priority])
+         @priority = priority
+       end
+     end
+
+     def [](key)
+       @data[key]
+     end
+
+     def []=(key, val)
+       @data[key] = val
+     end
+
+     def to_s
+       inspect
+     end
+
+     def description
+       "#{@jid} (#{@klass_name} / #{@queue_name})"
+     end
+
+     def inspect
+       "<Qless::Job #{description}>"
+     end
+
+     def ttl
+       @expires_at - Time.now.to_f
+     end
+
+     # Move this job from its current queue into another
+     def move(queue)
+       note_state_change do
+         @client._put.call([queue], [
+           @jid, @klass_name, JSON.generate(@data), Time.now.to_f, 0
+         ])
+       end
+     end
+
+     # Fail a job
+     def fail(group, message)
+       note_state_change do
+         @client._fail.call([], [
+           @jid,
+           @worker_name,
+           group, message,
+           Time.now.to_f,
+           JSON.generate(@data)]) || false
+       end
+     end
+
+     # Heartbeat a job
+     def heartbeat()
+       @client._heartbeat.call([], [
+         @jid,
+         @worker_name,
+         Time.now.to_f,
+         JSON.generate(@data)]) || false
+     end
+
+     # Complete a job
+     # Options include
+     # => next (String) the next queue
+     # => delay (int) how long to delay it in the next queue
+     def complete(nxt=nil, options={})
+       response = note_state_change do
+         if nxt.nil?
+           @client._complete.call([], [
+             @jid, @worker_name, @queue_name, Time.now.to_f, JSON.generate(@data)])
+         else
+           @client._complete.call([], [
+             @jid, @worker_name, @queue_name, Time.now.to_f, JSON.generate(@data), 'next', nxt, 'delay',
+             options.fetch(:delay, 0), 'depends', JSON.generate(options.fetch(:depends, []))])
+         end
+       end
+       response.nil? ? false : response
+     end
+
+     def state_changed?
+       @state_changed
+     end
+
+     def cancel
+       note_state_change do
+         @client._cancel.call([], [@jid])
+       end
+     end
+
+     def track()
+       @client._track.call([], ['track', @jid, Time.now.to_f])
+     end
+
+     def untrack
+       @client._track.call([], ['untrack', @jid, Time.now.to_f])
+     end
+
+     def tag(*tags)
+       @client._tag.call([], ['add', @jid, Time.now.to_f] + tags)
+     end
+
+     def untag(*tags)
+       @client._tag.call([], ['remove', @jid, Time.now.to_f] + tags)
+     end
+
+     def retry(delay=0)
+       results = @client._retry.call([], [@jid, @queue_name, @worker_name, Time.now.to_f, delay])
+       results.nil? ? false : results
+     end
+
+     def depend(*jids)
+       !!@client._depends.call([], [@jid, 'on'] + jids)
+     end
+
+     def undepend(*jids)
+       !!@client._depends.call([], [@jid, 'off'] + jids)
+     end
+
+     private
+
+     def note_state_change
+       result = yield
+       @state_changed = true
+       result
+     end
+   end
+
+   class RecurringJob < BaseJob
+     attr_reader :jid, :data, :priority, :tags, :retries, :interval, :count, :queue_name, :klass_name
+
+     def initialize(client, atts)
+       super(client, atts.fetch('jid'))
+       %w{jid data priority tags retries interval count}.each do |att|
+         self.instance_variable_set("@#{att}".to_sym, atts.fetch(att))
+       end
+
+       @klass_name = atts.fetch('klass')
+       @queue_name = atts.fetch('queue')
+       @tags = [] if @tags == {}
+     end
+
+     def priority=(value)
+       @client._recur.call([], ['update', @jid, 'priority', value])
+       @priority = value
+     end
+
+     def retries=(value)
+       @client._recur.call([], ['update', @jid, 'retries', value])
+       @retries = value
+     end
+
+     def interval=(value)
+       @client._recur.call([], ['update', @jid, 'interval', value])
+       @interval = value
+     end
+
+     def data=(value)
+       @client._recur.call([], ['update', @jid, 'data', JSON.generate(value)])
+       @data = value
+     end
+
+     def klass=(value)
+       @client._recur.call([], ['update', @jid, 'klass', value.to_s])
+       @klass_name = value.to_s
+     end
+
+     def move(queue)
+       @client._recur.call([], ['update', @jid, 'queue', queue])
+       @queue_name = queue
+     end
+
+     def cancel
+       @client._recur.call([], ['off', @jid])
+     end
+
+     def tag(*tags)
+       @client._recur.call([], ['tag', @jid] + tags)
+     end
+
+     def untag(*tags)
+       @client._recur.call([], ['untag', @jid] + tags)
+     end
+   end
+ end
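The Job API above is small enough to exercise directly. A minimal sketch, assuming `client` is a connected Qless::Client (defined in data/lib/qless.rb, which this diff lists but does not show); `ImageJob` is a hypothetical job class, and `Job.build` is the mock-friendly constructor defined above:

    # Hypothetical job class: qless dispatches to klass.perform(job), per Job#perform above.
    class ImageJob
      def self.perform(job)
        if job["source"].nil?               # read the payload via Job#[]
          job.fail("image-errors", "no source given")
        else
          job.complete                      # or job.complete("next_queue", :delay => 30)
        end
      end
    end

    job = Qless::Job.build(client, ImageJob, "data" => { "source" => "a.png" })
    job.description  # => "<jid> (ImageJob / mock_queue)", where <jid> is the generated id
    job.perform      # runs ImageJob.perform(job)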
data/lib/qless/job_reservers/ordered.rb ADDED
@@ -0,0 +1,23 @@
+ module Qless
+   module JobReservers
+     class Ordered
+       attr_reader :queues
+
+       def initialize(queues)
+         @queues = queues
+       end
+
+       def reserve
+         @queues.each do |q|
+           job = q.pop
+           return job if job
+         end
+         nil
+       end
+
+       def description
+         @description ||= @queues.map(&:name).join(', ') + " (ordered)"
+       end
+     end
+   end
+ end
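Ordered#reserve always scans the queue list from the front, so it is a strict priority scheme: a later queue is popped only when every earlier queue comes up empty. A small sketch with stand-in queue objects (OpenStruct stubs here take the place of Qless::Queue, which responds to #pop and #name):

    require "ostruct"

    high = OpenStruct.new(name: "high", pop: "high-job")
    low  = OpenStruct.new(name: "low",  pop: "low-job")

    reserver = Qless::JobReservers::Ordered.new([high, low])
    reserver.reserve      # => "high-job", and again on every call until "high" pops nil
    reserver.description  # => "high, low (ordered)"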
data/lib/qless/job_reservers/round_robin.rb ADDED
@@ -0,0 +1,34 @@
+ module Qless
+   module JobReservers
+     class RoundRobin
+       attr_reader :queues
+
+       def initialize(queues)
+         @queues = queues
+         @num_queues = queues.size
+         @last_popped_queue_index = @num_queues - 1
+       end
+
+       def reserve
+         @num_queues.times do |i|
+           if job = next_queue.pop
+             return job
+           end
+         end
+         nil
+       end
+
+       def description
+         @description ||= @queues.map(&:name).join(', ') + " (round robin)"
+       end
+
+       private
+
+       def next_queue
+         @last_popped_queue_index = (@last_popped_queue_index + 1) % @num_queues
+         @queues[@last_popped_queue_index]
+       end
+     end
+   end
+ end
+
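RoundRobin, by contrast, remembers the index of the queue it last popped from and starts the next scan one position later, so equally busy queues share the worker instead of starving one another. With the same kind of stubs:

    require "ostruct"

    a = OpenStruct.new(name: "a", pop: "job-a")
    b = OpenStruct.new(name: "b", pop: "job-b")

    rr = Qless::JobReservers::RoundRobin.new([a, b])
    rr.reserve  # => "job-a"
    rr.reserve  # => "job-b" (alternates even though both queues still have work)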
data/lib/qless/lua.rb ADDED
@@ -0,0 +1,25 @@
+ module Qless
+   class Lua
+     LUA_SCRIPT_DIR = File.expand_path("../qless-core/", __FILE__)
+
+     def initialize(name, redis)
+       @sha = nil
+       @name = name
+       @redis = redis
+       reload()
+     end
+
+     def reload()
+       @sha = @redis.script(:load, File.read(File.join(LUA_SCRIPT_DIR, "#{@name}.lua")))
+     end
+
+     def call(keys, args)
+       begin
+         return @redis.evalsha(@sha, keys.length, *(keys + args))
+       rescue
+         reload
+         return @redis.evalsha(@sha, keys.length, *(keys + args))
+       end
+     end
+   end
+ end
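Qless::Lua is the usual EVALSHA pattern: the script body is registered once via SCRIPT LOAD, each call goes through the resulting SHA, and the bare rescue reloads and retries once, which covers the NOSCRIPT error Redis raises for an unknown SHA after a restart or SCRIPT FLUSH. A hedged usage sketch (the jid is a placeholder):

    require "redis"
    require "qless/lua"

    redis  = Redis.new
    cancel = Qless::Lua.new("cancel", redis)  # loads qless-core/cancel.lua
    cancel.call([], ["some-jid"])             # EVALSHA with zero KEYS, the jid in ARGV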
data/lib/qless/qless-core/cancel.lua ADDED
@@ -0,0 +1,71 @@
+ -- Cancel(0, jid)
+ -- --------------
+ -- Cancel a job from taking place. It will be deleted from the system, and any
+ -- attempts to renew a heartbeat will fail, and any attempts to complete it
+ -- will fail. If you try to get the data on the object, you will get nothing.
+ --
+ -- Args:
+ --    1) jid
+
+ if #KEYS > 0 then error('Cancel(): No Keys should be provided') end
+
+ local jid = assert(ARGV[1], 'Cancel(): Arg "jid" missing.')
+
+ -- Find any stage it's associated with and remove it from that stage
+ local state, queue, failure, worker = unpack(redis.call('hmget', 'ql:j:' .. jid, 'state', 'queue', 'failure', 'worker'))
+
+ if state == 'complete' then
+     return false
+ else
+     -- If this job has dependents, then we should probably fail
+     if redis.call('scard', 'ql:j:' .. jid .. '-dependents') > 0 then
+         error('Cancel(): ' .. jid .. ' has un-canceled jobs that depend on it')
+     end
+
+     -- Remove this job from whatever worker has it, if any
+     if worker then
+         redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)
+     end
+
+     -- Remove it from that queue
+     if queue then
+         redis.call('zrem', 'ql:q:' .. queue .. '-work', jid)
+         redis.call('zrem', 'ql:q:' .. queue .. '-locks', jid)
+         redis.call('zrem', 'ql:q:' .. queue .. '-scheduled', jid)
+         redis.call('zrem', 'ql:q:' .. queue .. '-depends', jid)
+     end
+
+     -- We should probably go through all our dependencies and remove ourselves
+     -- from the list of dependents
+     for i, j in ipairs(redis.call('smembers', 'ql:j:' .. jid .. '-dependencies')) do
+         redis.call('srem', 'ql:j:' .. j .. '-dependents', jid)
+     end
+
+     -- Delete any notion of dependencies it has
+     redis.call('del', 'ql:j:' .. jid .. '-dependencies')
+
+     -- If we're in the failed state, remove all of our data
+     if state == 'failed' then
+         failure = cjson.decode(failure)
+         -- We need to make this remove it from the failed queues
+         redis.call('lrem', 'ql:f:' .. failure.group, 0, jid)
+         if redis.call('llen', 'ql:f:' .. failure.group) == 0 then
+             redis.call('srem', 'ql:failures', failure.group)
+         end
+     end
+
+     -- Remove it as a job that's tagged with this particular tag
+     local tags = cjson.decode(redis.call('hget', 'ql:j:' .. jid, 'tags') or '{}')
+     for i, tag in ipairs(tags) do
+         redis.call('zrem', 'ql:t:' .. tag, jid)
+         redis.call('zincrby', 'ql:tags', -1, tag)
+     end
+
+     -- If the job was being tracked, we should notify
+     if redis.call('zscore', 'ql:tracked', jid) ~= false then
+         redis.call('publish', 'canceled', jid)
+     end
+
+     -- Just go ahead and delete our data
+     redis.call('del', 'ql:j:' .. jid)
+ end
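Note the dependents guard near the top of this script: a job that still has uncanceled dependents cannot be canceled, and the script raises rather than returning. Through the Ruby wrapper that surfaces as a command error; a sketch with placeholder jids, assuming a redis-rb version that raises Redis::CommandError for script errors (older releases raised RuntimeError):

    cancel = Qless::Lua.new("cancel", redis)
    begin
      cancel.call([], ["parent-jid"])  # ql:j:parent-jid-dependents is non-empty
    rescue Redis::CommandError => e
      e.message                        # mentions "has un-canceled jobs that depend on it"
    end
    cancel.call([], ["leaf-jid"])      # deletes ql:j:leaf-jid and its queue entries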
data/lib/qless/qless-core/complete.lua ADDED
@@ -0,0 +1,218 @@
+ -- Complete(0, jid, worker, queue, now, data, [next, q, [(delay, d) | (depends, '["jid1","jid2",...]')])
+ -- -----------------------------------------------------------------------------------------------------
+ -- Complete a job and optionally put it in another queue, either scheduled or to
+ -- be considered waiting immediately. It can also optionally accept other jids
+ -- on which this job will be considered dependent before it's considered valid.
+ --
+ -- Args:
+ --    1) jid
+ --    2) worker
+ --    3) queue
+ --    4) now
+ --    5) data
+ --    *) [next, q, [delay, d]], [depends, '...']
+
+ if #KEYS > 0 then error('Complete(): No Keys should be provided') end
+
+ local jid     = assert(ARGV[1], 'Complete(): Arg "jid" missing.')
+ local worker  = assert(ARGV[2], 'Complete(): Arg "worker" missing.')
+ local queue   = assert(ARGV[3], 'Complete(): Arg "queue" missing.')
+ local now     = assert(tonumber(ARGV[4]), 'Complete(): Arg "now" not a number or missing: ' .. tostring(ARGV[4]))
+ local data    = assert(cjson.decode(ARGV[5]), 'Complete(): Arg "data" missing or not JSON: ' .. tostring(ARGV[5]))
+
+ -- Read in all the optional parameters
+ local options = {}
+ for i = 6, #ARGV, 2 do options[ARGV[i]] = ARGV[i + 1] end
+
+ -- Sanity check on optional args
+ local nextq   = options['next']
+ local delay   = assert(tonumber(options['delay'] or 0))
+ local depends = assert(cjson.decode(options['depends'] or '[]'), 'Complete(): Arg "depends" not JSON: ' .. tostring(options['depends']))
+
+ -- Delay and depends are not allowed together
+ if delay > 0 and #depends > 0 then
+     error('Complete(): "delay" and "depends" are not allowed to be used together')
+ end
+
+ -- Delay doesn't make sense without nextq
+ if options['delay'] and nextq == nil then
+     error('Complete(): "delay" cannot be used without a "next".')
+ end
+
+ -- Depends doesn't make sense without nextq
+ if options['depends'] and nextq == nil then
+     error('Complete(): "depends" cannot be used without a "next".')
+ end
+
+ -- The bin is midnight of the provided day
+ -- 24 * 60 * 60 = 86400
+ local bin = now - (now % 86400)
+
+ -- First things first, we should see if the worker still owns this job
+ local lastworker, history, state, priority, retries = unpack(redis.call('hmget', 'ql:j:' .. jid, 'worker', 'history', 'state', 'priority', 'retries', 'dependents'))
+
+ if (lastworker ~= worker) or (state ~= 'running') then
+     return false
+ end
+
+ -- Now we can assume that the worker does own the job. We need to
+ --    1) Remove the job from the 'locks' from the old queue
+ --    2) Enqueue it in the next stage if necessary
+ --    3) Update the data
+ --    4) Mark the job as completed, remove the worker, remove expires, and update history
+
+ -- Unpack the history, and update it
+ history = cjson.decode(history)
+ history[#history]['done'] = math.floor(now)
+
+ if data then
+     redis.call('hset', 'ql:j:' .. jid, 'data', cjson.encode(data))
+ end
+
+ -- Remove the job from the previous queue
+ redis.call('zrem', 'ql:q:' .. queue .. '-work', jid)
+ redis.call('zrem', 'ql:q:' .. queue .. '-locks', jid)
+ redis.call('zrem', 'ql:q:' .. queue .. '-scheduled', jid)
+
+ ----------------------------------------------------------
+ -- This is the massive stats update that we have to do
+ ----------------------------------------------------------
+ -- This is how long the job ran, from when it was popped until now
+ local waiting = math.floor(now) - history[#history]['popped']
+ -- Now we'll go through the apparently long and arduous process of updating the running stats
+ local count, mean, vk = unpack(redis.call('hmget', 'ql:s:run:' .. bin .. ':' .. queue, 'total', 'mean', 'vk'))
+ count = count or 0
+ if count == 0 then
+     mean  = waiting
+     vk    = 0
+     count = 1
+ else
+     count = count + 1
+     local oldmean = mean
+     mean = mean + (waiting - mean) / count
+     vk   = vk + (waiting - mean) * (waiting - oldmean)
+ end
+ -- Now, update the histogram
+ -- - `s1`, `s2`, ..., -- second-resolution histogram counts
+ -- - `m1`, `m2`, ..., -- minute-resolution
+ -- - `h1`, `h2`, ..., -- hour-resolution
+ -- - `d1`, `d2`, ..., -- day-resolution
+ waiting = math.floor(waiting)
+ if waiting < 60 then -- seconds
+     redis.call('hincrby', 'ql:s:run:' .. bin .. ':' .. queue, 's' .. waiting, 1)
+ elseif waiting < 3600 then -- minutes
+     redis.call('hincrby', 'ql:s:run:' .. bin .. ':' .. queue, 'm' .. math.floor(waiting / 60), 1)
+ elseif waiting < 86400 then -- hours
+     redis.call('hincrby', 'ql:s:run:' .. bin .. ':' .. queue, 'h' .. math.floor(waiting / 3600), 1)
+ else -- days
+     redis.call('hincrby', 'ql:s:run:' .. bin .. ':' .. queue, 'd' .. math.floor(waiting / 86400), 1)
+ end
+ redis.call('hmset', 'ql:s:run:' .. bin .. ':' .. queue, 'total', count, 'mean', mean, 'vk', vk)
+ ----------------------------------------------------------
+
+ -- Remove this job from the jobs that the worker that was running it has
+ redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)
+
+ if redis.call('zscore', 'ql:tracked', jid) ~= false then
+     redis.call('publish', 'completed', jid)
+ end
+
+ if nextq then
+     -- Enqueue the job
+     table.insert(history, {
+         q   = nextq,
+         put = math.floor(now)
+     })
+
+     -- We're going to make sure that this queue is in the
+     -- set of known queues
+     if redis.call('zscore', 'ql:queues', nextq) == false then
+         redis.call('zadd', 'ql:queues', now, nextq)
+     end
+
+     redis.call('hmset', 'ql:j:' .. jid, 'state', 'waiting', 'worker', '', 'failure', '{}',
+         'queue', nextq, 'expires', 0, 'history', cjson.encode(history), 'remaining', tonumber(retries))
+
+     if delay > 0 then
+         redis.call('zadd', 'ql:q:' .. nextq .. '-scheduled', now + delay, jid)
+         return 'scheduled'
+     else
+         -- These are the jids we legitimately have to wait on
+         local count = 0
+         for i, j in ipairs(depends) do
+             -- Make sure it's something other than 'nil' or complete.
+             local state = redis.call('hget', 'ql:j:' .. j, 'state')
+             if (state and state ~= 'complete') then
+                 count = count + 1
+                 redis.call('sadd', 'ql:j:' .. j .. '-dependents', jid)
+                 redis.call('sadd', 'ql:j:' .. jid .. '-dependencies', j)
+             end
+         end
+         if count > 0 then
+             redis.call('zadd', 'ql:q:' .. nextq .. '-depends', now, jid)
+             redis.call('hset', 'ql:j:' .. jid, 'state', 'depends')
+             return 'depends'
+         else
+             redis.call('zadd', 'ql:q:' .. nextq .. '-work', priority - (now / 10000000000), jid)
+             return 'waiting'
+         end
+     end
+ else
+     redis.call('hmset', 'ql:j:' .. jid, 'state', 'complete', 'worker', '', 'failure', '{}',
+         'queue', '', 'expires', 0, 'history', cjson.encode(history), 'remaining', tonumber(retries))
+
+     -- Do the completion dance
+     local count, time = unpack(redis.call('hmget', 'ql:config', 'jobs-history-count', 'jobs-history'))
+
+     -- These are the default values
+     count = tonumber(count or 50000)
+     time  = tonumber(time or 7 * 24 * 60 * 60)
+
+     -- Schedule this job for destruction eventually
+     redis.call('zadd', 'ql:completed', now, jid)
+
+     -- Now look at the expired job data. First, based on the current time
+     local jids = redis.call('zrangebyscore', 'ql:completed', 0, now - time)
+     -- Any jobs that need to be expired... delete
+     for index, jid in ipairs(jids) do
+         local tags = cjson.decode(redis.call('hget', 'ql:j:' .. jid, 'tags') or '{}')
+         for i, tag in ipairs(tags) do
+             redis.call('zrem', 'ql:t:' .. tag, jid)
+             redis.call('zincrby', 'ql:tags', -1, tag)
+         end
+         redis.call('del', 'ql:j:' .. jid)
+     end
+     -- And now remove those from the queued-for-cleanup queue
+     redis.call('zremrangebyscore', 'ql:completed', 0, now - time)
+
+     -- Now remove all but the most recent 'count' jids
+     jids = redis.call('zrange', 'ql:completed', 0, (-1-count))
+     for index, jid in ipairs(jids) do
+         local tags = cjson.decode(redis.call('hget', 'ql:j:' .. jid, 'tags') or '{}')
+         for i, tag in ipairs(tags) do
+             redis.call('zrem', 'ql:t:' .. tag, jid)
+             redis.call('zincrby', 'ql:tags', -1, tag)
+         end
+         redis.call('del', 'ql:j:' .. jid)
+     end
+     redis.call('zremrangebyrank', 'ql:completed', 0, (-1-count))
+
+     -- Alright, if this has any dependents, then we should go ahead
+     -- and unstick those guys.
+     for i, j in ipairs(redis.call('smembers', 'ql:j:' .. jid .. '-dependents')) do
+         redis.call('srem', 'ql:j:' .. j .. '-dependencies', jid)
+         if redis.call('scard', 'ql:j:' .. j .. '-dependencies') == 0 then
+             local q, p = unpack(redis.call('hmget', 'ql:j:' .. j, 'queue', 'priority'))
+             if q then
+                 redis.call('zrem', 'ql:q:' .. q .. '-depends', j)
+                 redis.call('zadd', 'ql:q:' .. q .. '-work', p, j)
+                 redis.call('hset', 'ql:j:' .. j, 'state', 'waiting')
+             end
+         end
+     end
+
+     -- Delete our dependents key
+     redis.call('del', 'ql:j:' .. jid .. '-dependents')
+
+     return 'complete'
+ end
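The stats block in the middle of this script is Welford's online algorithm: `mean` is the running mean of run times for the day's bin, and `vk` accumulates the sum of squared deviations, so the variance can be recovered later as vk / total without storing any samples. A standalone Ruby sketch (not part of the gem) of the same update:

    # Welford's online mean/variance, mirroring the count/mean/vk update above.
    def update_stats(count, mean, vk, x)
      count += 1
      old_mean = mean
      mean += (x - mean) / count.to_f
      vk   += (x - mean) * (x - old_mean)
      [count, mean, vk]
    end

    count, mean, vk = 0, 0.0, 0.0
    [3, 5, 10].each { |x| count, mean, vk = update_stats(count, mean, vk, x) }
    mean        # => 6.0
    vk / count  # => 8.666..., the population variance of [3, 5, 10]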