qless 0.9.3 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/Gemfile +9 -3
- data/README.md +70 -25
- data/Rakefile +125 -9
- data/exe/install_phantomjs +21 -0
- data/lib/qless.rb +115 -76
- data/lib/qless/config.rb +11 -9
- data/lib/qless/failure_formatter.rb +43 -0
- data/lib/qless/job.rb +201 -102
- data/lib/qless/job_reservers/ordered.rb +7 -1
- data/lib/qless/job_reservers/round_robin.rb +16 -6
- data/lib/qless/job_reservers/shuffled_round_robin.rb +9 -2
- data/lib/qless/lua/qless-lib.lua +2463 -0
- data/lib/qless/lua/qless.lua +2012 -0
- data/lib/qless/lua_script.rb +63 -12
- data/lib/qless/middleware/memory_usage_monitor.rb +62 -0
- data/lib/qless/middleware/metriks.rb +45 -0
- data/lib/qless/middleware/redis_reconnect.rb +6 -3
- data/lib/qless/middleware/requeue_exceptions.rb +94 -0
- data/lib/qless/middleware/retry_exceptions.rb +38 -9
- data/lib/qless/middleware/sentry.rb +3 -7
- data/lib/qless/middleware/timeout.rb +64 -0
- data/lib/qless/queue.rb +90 -55
- data/lib/qless/server.rb +177 -130
- data/lib/qless/server/views/_job.erb +33 -15
- data/lib/qless/server/views/completed.erb +11 -0
- data/lib/qless/server/views/layout.erb +70 -11
- data/lib/qless/server/views/overview.erb +93 -53
- data/lib/qless/server/views/queue.erb +9 -8
- data/lib/qless/server/views/queues.erb +18 -1
- data/lib/qless/subscriber.rb +37 -22
- data/lib/qless/tasks.rb +5 -10
- data/lib/qless/test_helpers/worker_helpers.rb +55 -0
- data/lib/qless/version.rb +3 -1
- data/lib/qless/worker.rb +4 -413
- data/lib/qless/worker/base.rb +247 -0
- data/lib/qless/worker/forking.rb +245 -0
- data/lib/qless/worker/serial.rb +41 -0
- metadata +135 -52
- data/lib/qless/qless-core/cancel.lua +0 -101
- data/lib/qless/qless-core/complete.lua +0 -233
- data/lib/qless/qless-core/config.lua +0 -56
- data/lib/qless/qless-core/depends.lua +0 -65
- data/lib/qless/qless-core/deregister_workers.lua +0 -12
- data/lib/qless/qless-core/fail.lua +0 -117
- data/lib/qless/qless-core/failed.lua +0 -83
- data/lib/qless/qless-core/get.lua +0 -37
- data/lib/qless/qless-core/heartbeat.lua +0 -51
- data/lib/qless/qless-core/jobs.lua +0 -41
- data/lib/qless/qless-core/pause.lua +0 -18
- data/lib/qless/qless-core/peek.lua +0 -165
- data/lib/qless/qless-core/pop.lua +0 -314
- data/lib/qless/qless-core/priority.lua +0 -32
- data/lib/qless/qless-core/put.lua +0 -169
- data/lib/qless/qless-core/qless-lib.lua +0 -2354
- data/lib/qless/qless-core/qless.lua +0 -1862
- data/lib/qless/qless-core/queues.lua +0 -58
- data/lib/qless/qless-core/recur.lua +0 -190
- data/lib/qless/qless-core/retry.lua +0 -73
- data/lib/qless/qless-core/stats.lua +0 -92
- data/lib/qless/qless-core/tag.lua +0 -100
- data/lib/qless/qless-core/track.lua +0 -79
- data/lib/qless/qless-core/unfail.lua +0 -54
- data/lib/qless/qless-core/unpause.lua +0 -12
- data/lib/qless/qless-core/workers.lua +0 -69
- data/lib/qless/wait_until.rb +0 -19
data/lib/qless/worker/serial.rb ADDED

@@ -0,0 +1,41 @@
+# Encoding: utf-8
+
+# Qless requires
+require 'qless'
+require 'qless/worker/base'
+
+module Qless
+  module Workers
+    # A worker that keeps popping off jobs and processing them
+    class SerialWorker < BaseWorker
+      def initialize(reserver, options = {})
+        super(reserver, options)
+      end
+
+      def run
+        log(:info, "Starting #{reserver.description} in #{Process.pid}")
+        procline "Starting #{reserver.description}"
+        register_signal_handlers
+
+        reserver.prep_for_work!
+
+        listen_for_lost_lock do
+          procline "Running #{reserver.description}"
+
+          jobs.each do |job|
+            # Run the job we're working on
+            log(:debug, "Starting job #{job.klass_name} (#{job.jid} from #{job.queue_name})")
+            perform(job)
+            log(:debug, "Finished job #{job.klass_name} (#{job.jid} from #{job.queue_name})")
+
+            # So long as we're paused, we should wait
+            while paused
+              log(:debug, 'Paused...')
+              sleep interval
+            end
+          end
+        end
+      end
+    end
+  end
+end
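The new Qless::Workers::SerialWorker above pops and performs jobs one at a time in a single process, driven by a job reserver. As a rough sketch (not taken from this diff; the connection options and require paths are assumptions based on the file list above), starting one might look like:

    # Minimal sketch, assuming the 0.10.0 client, reserver and worker APIs.
    require 'qless'
    require 'qless/job_reservers/ordered'
    require 'qless/worker/serial'

    client   = Qless::Client.new(host: 'localhost', port: 6379) # Redis connection options
    queue    = client.queues['example_queue']                   # the queue to drain
    reserver = Qless::JobReservers::Ordered.new([queue])        # pop from queues in order

    # Registers signal handlers, then pops and performs jobs in a loop
    # in the current process (see the run method above).
    Qless::Workers::SerialWorker.new(reserver).run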
metadata CHANGED

@@ -1,15 +1,16 @@
 --- !ruby/object:Gem::Specification
 name: qless
 version: !ruby/object:Gem::Version
-  version: 0.9.3
+  version: 0.10.0
 prerelease:
 platform: ruby
 authors:
 - Dan Lecocq
+- Myron Marston
 autorequire:
 bindir: exe
 cert_chain: []
-date:
+date: 2016-02-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: redis
@@ -28,37 +29,37 @@ dependencies:
       - !ruby/object:Gem::Version
         version: '2.2'
 - !ruby/object:Gem::Dependency
-  name:
+  name: sinatra
   requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
       - !ruby/object:Gem::Version
-        version:
-  type: :
+        version: 1.3.2
+  type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
       - !ruby/object:Gem::Version
-        version:
+        version: 1.3.2
 - !ruby/object:Gem::Dependency
-  name:
+  name: vegas
   requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
       - !ruby/object:Gem::Version
-        version: 1.
-  type: :
+        version: 0.1.11
+  type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ~>
       - !ruby/object:Gem::Version
-        version: 1.
+        version: 0.1.11
 - !ruby/object:Gem::Dependency
   name: rspec
   requirement: !ruby/object:Gem::Requirement
@@ -130,7 +131,23 @@ dependencies:
     requirements:
     - - ~>
       - !ruby/object:Gem::Version
-        version:
+        version: 1.0.0
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 1.0.0
+- !ruby/object:Gem::Dependency
+  name: faye-websocket
+  requirement: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 0.4.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -138,7 +155,7 @@ dependencies:
     requirements:
     - - ~>
       - !ruby/object:Gem::Version
-        version:
+        version: 0.4.0
 - !ruby/object:Gem::Dependency
   name: launchy
   requirement: !ruby/object:Gem::Requirement
@@ -162,7 +179,7 @@ dependencies:
     requirements:
    - - ~>
      - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.7.1
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
@@ -170,7 +187,7 @@ dependencies:
     requirements:
     - - ~>
       - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.7.1
 - !ruby/object:Gem::Dependency
   name: sentry-raven
   requirement: !ruby/object:Gem::Requirement
@@ -187,16 +204,96 @@ dependencies:
     - - ~>
       - !ruby/object:Gem::Version
         version: '0.4'
-
-
-
-
-
-
-
-
+- !ruby/object:Gem::Dependency
+  name: metriks
+  requirement: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: '0.9'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: '0.9'
+- !ruby/object:Gem::Dependency
+  name: rubocop
+  requirement: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 0.13.1
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 0.13.1
+- !ruby/object:Gem::Dependency
+  name: rusage
+  requirement: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 0.2.0
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 0.2.0
+- !ruby/object:Gem::Dependency
+  name: timecop
+  requirement: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 0.7.1
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 0.7.1
+- !ruby/object:Gem::Dependency
+  name: thin
+  requirement: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 1.6.4
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 1.6.4
+description: ! "\n`qless` is meant to be a performant alternative to other queueing\nsystems,
+  with statistics collection, a browser interface, and\nstrong guarantees about job
+  losses.\n\nIt's written as a collection of Lua scipts that are loaded into the\nRedis
+  instance to be used, and then executed by the client library.\nAs such, it's intended
+  to be extremely easy to port to other languages,\nwithout sacrificing performance
+  and not requiring a lot of logic\nreplication between clients. Keep the Lua scripts
+  updated, and your\nlanguage-specific extension will also remain up to date.\n "
 email:
-- dan@
+- dan@moz.com
+- myron@moz.com
 executables:
 - qless-web
 extensions: []
@@ -207,48 +304,34 @@ files:
 - Rakefile
 - HISTORY.md
 - lib/qless/config.rb
+- lib/qless/failure_formatter.rb
 - lib/qless/job.rb
 - lib/qless/job_reservers/ordered.rb
 - lib/qless/job_reservers/round_robin.rb
 - lib/qless/job_reservers/shuffled_round_robin.rb
 - lib/qless/lua_script.rb
+- lib/qless/middleware/memory_usage_monitor.rb
+- lib/qless/middleware/metriks.rb
 - lib/qless/middleware/redis_reconnect.rb
+- lib/qless/middleware/requeue_exceptions.rb
 - lib/qless/middleware/retry_exceptions.rb
 - lib/qless/middleware/sentry.rb
+- lib/qless/middleware/timeout.rb
 - lib/qless/queue.rb
 - lib/qless/server.rb
 - lib/qless/subscriber.rb
 - lib/qless/tasks.rb
+- lib/qless/test_helpers/worker_helpers.rb
 - lib/qless/version.rb
-- lib/qless/
+- lib/qless/worker/base.rb
+- lib/qless/worker/forking.rb
+- lib/qless/worker/serial.rb
 - lib/qless/worker.rb
 - lib/qless.rb
-- lib/qless/qless-
-- lib/qless/qless
--
--
-- lib/qless/qless-core/deregister_workers.lua
-- lib/qless/qless-core/fail.lua
-- lib/qless/qless-core/failed.lua
-- lib/qless/qless-core/get.lua
-- lib/qless/qless-core/heartbeat.lua
-- lib/qless/qless-core/jobs.lua
-- lib/qless/qless-core/pause.lua
-- lib/qless/qless-core/peek.lua
-- lib/qless/qless-core/pop.lua
-- lib/qless/qless-core/priority.lua
-- lib/qless/qless-core/put.lua
-- lib/qless/qless-core/qless-lib.lua
-- lib/qless/qless-core/qless.lua
-- lib/qless/qless-core/queues.lua
-- lib/qless/qless-core/recur.lua
-- lib/qless/qless-core/retry.lua
-- lib/qless/qless-core/stats.lua
-- lib/qless/qless-core/tag.lua
-- lib/qless/qless-core/track.lua
-- lib/qless/qless-core/unfail.lua
-- lib/qless/qless-core/unpause.lua
-- lib/qless/qless-core/workers.lua
+- lib/qless/lua/qless-lib.lua
+- lib/qless/lua/qless.lua
+- exe/install_phantomjs
+- exe/qless-web
 - lib/qless/server/static/css/bootstrap-responsive.css
 - lib/qless/server/static/css/bootstrap-responsive.min.css
 - lib/qless/server/static/css/bootstrap.css
@@ -284,6 +367,7 @@ files:
 - lib/qless/server/views/_job_list.erb
 - lib/qless/server/views/_pagination.erb
 - lib/qless/server/views/about.erb
+- lib/qless/server/views/completed.erb
 - lib/qless/server/views/config.erb
 - lib/qless/server/views/failed.erb
 - lib/qless/server/views/failed_type.erb
@@ -296,7 +380,6 @@ files:
 - lib/qless/server/views/track.erb
 - lib/qless/server/views/worker.erb
 - lib/qless/server/views/workers.erb
-- exe/qless-web
 homepage: http://github.com/seomoz/qless
 licenses: []
 post_install_message:
@@ -317,7 +400,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: '0'
 requirements: []
 rubyforge_project: qless
-rubygems_version: 1.8.
+rubygems_version: 1.8.23
 signing_key:
 specification_version: 3
 summary: A Redis-Based Queueing System
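The description added above summarizes the architecture: the client library loads the Lua scripts into Redis and drives them, so the Ruby API is a thin wrapper around script calls. As a hedged illustration of that client surface (standard qless usage, not part of this diff), the basic enqueue/work cycle looks roughly like:

    # Minimal sketch, assuming the usual Qless client API.
    require 'qless'

    # Job classes expose a class-level perform(job) hook.
    class GreetingJob
      def self.perform(job)
        puts "Hello, #{job.data['name']}!"
      end
    end

    client = Qless::Client.new               # defaults to the local Redis
    queue  = client.queues['greetings']

    jid = queue.put(GreetingJob, 'name' => 'qless')  # enqueue; returns the jid
    job = queue.pop                                  # reserve the next job
    job.perform                                      # dispatches to GreetingJob.perform(job)
    job.complete                                     # hand it back as finished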
data/lib/qless/qless-core/cancel.lua REMOVED

@@ -1,101 +0,0 @@
--- Cancel(0, jid)
--- --------------
--- Cancel a job from taking place. It will be deleted from the system, and any
--- attempts to renew a heartbeat will fail, and any attempts to complete it
--- will fail. If you try to get the data on the object, you will get nothing.
---
--- Args:
---    1) jid
-
-if #KEYS > 0 then error('Cancel(): No Keys should be provided') end
-
-local function cancel(jid, jid_set)
-  if not jid_set[jid] then
-    error('Cancel(): ' .. jid .. ' is a dependency of one of the jobs but is not in the provided jid set')
-  end
-
-  -- Find any stage it's associated with and remove its from that stage
-  local state, queue, failure, worker = unpack(redis.call('hmget', 'ql:j:' .. jid, 'state', 'queue', 'failure', 'worker'))
-
-  if state == 'complete' then
-    return false
-  else
-    -- If this job has dependents, then we should probably fail
-    local dependents = redis.call('smembers', 'ql:j:' .. jid .. '-dependents')
-    for _, dependent_jid in ipairs(dependents) do
-      cancel(dependent_jid, jid_set)
-    end
-
-    -- Send a message out on the appropriate channels
-    local encoded = cjson.encode({
-      jid    = jid,
-      worker = worker,
-      event  = 'canceled',
-      queue  = queue
-    })
-    redis.call('publish', 'ql:log', encoded)
-
-    -- Remove this job from whatever worker has it, if any
-    if worker then
-      redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)
-      -- If necessary, send a message to the appropriate worker, too
-      redis.call('publish', 'ql:w:' .. worker, encoded)
-    end
-
-    -- Remove it from that queue
-    if queue then
-      redis.call('zrem', 'ql:q:' .. queue .. '-work', jid)
-      redis.call('zrem', 'ql:q:' .. queue .. '-locks', jid)
-      redis.call('zrem', 'ql:q:' .. queue .. '-scheduled', jid)
-      redis.call('zrem', 'ql:q:' .. queue .. '-depends', jid)
-    end
-
-    -- We should probably go through all our dependencies and remove ourselves
-    -- from the list of dependents
-    for i, j in ipairs(redis.call('smembers', 'ql:j:' .. jid .. '-dependencies')) do
-      redis.call('srem', 'ql:j:' .. j .. '-dependents', jid)
-    end
-
-    -- Delete any notion of dependencies it has
-    redis.call('del', 'ql:j:' .. jid .. '-dependencies')
-
-    -- If we're in the failed state, remove all of our data
-    if state == 'failed' then
-      failure = cjson.decode(failure)
-      -- We need to make this remove it from the failed queues
-      redis.call('lrem', 'ql:f:' .. failure.group, 0, jid)
-      if redis.call('llen', 'ql:f:' .. failure.group) == 0 then
-        redis.call('srem', 'ql:failures', failure.group)
-      end
-    end
-
-    -- Remove it as a job that's tagged with this particular tag
-    local tags = cjson.decode(redis.call('hget', 'ql:j:' .. jid, 'tags') or '{}')
-    for i, tag in ipairs(tags) do
-      redis.call('zrem', 'ql:t:' .. tag, jid)
-      redis.call('zincrby', 'ql:tags', -1, tag)
-    end
-
-    -- If the job was being tracked, we should notify
-    if redis.call('zscore', 'ql:tracked', jid) ~= false then
-      redis.call('publish', 'canceled', jid)
-    end
-
-    -- Just go ahead and delete our data
-    redis.call('del', 'ql:j:' .. jid)
-  end
-end
-
--- Taken from: http://www.lua.org/pil/11.5.html
-local function to_set(list)
-  local set = {}
-  for _, l in ipairs(list) do set[l] = true end
-  return set
-end
-
-local jids    = assert(ARGV, 'Cancel(): Arg "jid" missing.')
-local jid_set = to_set(jids)
-
-for _, jid in ipairs(jids) do
-  cancel(jid, jid_set)
-end
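The removed cancel.lua recursively cancels a job's dependents and errors if any dependent jid is missing from the supplied jid set; equivalent logic presumably moves into the consolidated lua/qless-lib.lua added in this release. From the Ruby client the usual entry point is Qless::Job#cancel, roughly (a hedged sketch, not part of this diff):

    # Look a job up by jid and cancel it; per the script above, cancellation
    # fails if the job still has dependents that are not cancelled with it.
    job = client.jobs['some-jid']
    job.cancel if job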
data/lib/qless/qless-core/complete.lua REMOVED

@@ -1,233 +0,0 @@
--- Complete(0, jid, worker, queue, now, data, [next, q, [(delay, d) | (depends, '["jid1","jid2",...]')])
--- -----------------------------------------------------------------------------------------------------
--- Complete a job and optionally put it in another queue, either scheduled or to
--- be considered waiting immediately. It can also optionally accept other jids
--- on which this job will be considered dependent before it's considered valid.
---
--- Args:
---    1) jid
---    2) worker
---    3) queue
---    4) now
---    5) data
---    *) [next, q, [delay, d]], [depends, '...']
-
-if #KEYS > 0 then error('Complete(): No Keys should be provided') end
-
-local jid     = assert(ARGV[1], 'Complete(): Arg "jid" missing.')
-local worker  = assert(ARGV[2], 'Complete(): Arg "worker" missing.')
-local queue   = assert(ARGV[3], 'Complete(): Arg "queue" missing.')
-local now     = assert(tonumber(ARGV[4]), 'Complete(): Arg "now" not a number or missing: ' .. tostring(ARGV[4]))
-local data    = assert(cjson.decode(ARGV[5]), 'Complete(): Arg "data" missing or not JSON: ' .. tostring(ARGV[5]))
-
--- Read in all the optional parameters
-local options = {}
-for i = 6, #ARGV, 2 do options[ARGV[i]] = ARGV[i + 1] end
-
--- Sanity check on optional args
-local nextq   = options['next']
-local delay   = assert(tonumber(options['delay'] or 0))
-local depends = assert(cjson.decode(options['depends'] or '[]'), 'Complete(): Arg "depends" not JSON: ' .. tostring(options['depends']))
-
--- Delay and depends are not allowed together
-if delay > 0 and #depends > 0 then
-  error('Complete(): "delay" and "depends" are not allowed to be used together')
-end
-
--- Depends doesn't make sense without nextq
-if options['delay'] and nextq == nil then
-  error('Complete(): "delay" cannot be used without a "next".')
-end
-
--- Depends doesn't make sense without nextq
-if options['depends'] and nextq == nil then
-  error('Complete(): "depends" cannot be used without a "next".')
-end
-
--- The bin is midnight of the provided day
--- 24 * 60 * 60 = 86400
-local bin = now - (now % 86400)
-
--- First things first, we should see if the worker still owns this job
-local lastworker, history, state, priority, retries = unpack(redis.call('hmget', 'ql:j:' .. jid, 'worker', 'history', 'state', 'priority', 'retries', 'dependents'))
-
-if (lastworker ~= worker) or (state ~= 'running') then
-  return false
-end
-
--- Now we can assume that the worker does own the job. We need to
---    1) Remove the job from the 'locks' from the old queue
---    2) Enqueue it in the next stage if necessary
---    3) Update the data
---    4) Mark the job as completed, remove the worker, remove expires, and update history
-
--- Unpack the history, and update it
-history = cjson.decode(history)
-history[#history]['done'] = math.floor(now)
-
-if data then
-  redis.call('hset', 'ql:j:' .. jid, 'data', cjson.encode(data))
-end
-
--- Remove the job from the previous queue
-redis.call('zrem', 'ql:q:' .. queue .. '-work', jid)
-redis.call('zrem', 'ql:q:' .. queue .. '-locks', jid)
-redis.call('zrem', 'ql:q:' .. queue .. '-scheduled', jid)
-
-----------------------------------------------------------
--- This is the massive stats update that we have to do
-----------------------------------------------------------
--- This is how long we've been waiting to get popped
-local waiting = math.floor(now) - history[#history]['popped']
--- Now we'll go through the apparently long and arduous process of update
-local count, mean, vk = unpack(redis.call('hmget', 'ql:s:run:' .. bin .. ':' .. queue, 'total', 'mean', 'vk'))
-count = count or 0
-if count == 0 then
-  mean  = waiting
-  vk    = 0
-  count = 1
-else
-  count = count + 1
-  local oldmean = mean
-  mean = mean + (waiting - mean) / count
-  vk   = vk + (waiting - mean) * (waiting - oldmean)
-end
--- Now, update the histogram
--- - `s1`, `s2`, ..., -- second-resolution histogram counts
--- - `m1`, `m2`, ..., -- minute-resolution
--- - `h1`, `h2`, ..., -- hour-resolution
--- - `d1`, `d2`, ..., -- day-resolution
-waiting = math.floor(waiting)
-if waiting < 60 then -- seconds
-  redis.call('hincrby', 'ql:s:run:' .. bin .. ':' .. queue, 's' .. waiting, 1)
-elseif waiting < 3600 then -- minutes
-  redis.call('hincrby', 'ql:s:run:' .. bin .. ':' .. queue, 'm' .. math.floor(waiting / 60), 1)
-elseif waiting < 86400 then -- hours
-  redis.call('hincrby', 'ql:s:run:' .. bin .. ':' .. queue, 'h' .. math.floor(waiting / 3600), 1)
-else -- days
-  redis.call('hincrby', 'ql:s:run:' .. bin .. ':' .. queue, 'd' .. math.floor(waiting / 86400), 1)
-end
-redis.call('hmset', 'ql:s:run:' .. bin .. ':' .. queue, 'total', count, 'mean', mean, 'vk', vk)
-----------------------------------------------------------
-
--- Remove this job from the jobs that the worker that was running it has
-redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)
-
-if redis.call('zscore', 'ql:tracked', jid) ~= false then
-  redis.call('publish', 'completed', jid)
-end
-
-if nextq then
-  -- Send a message out to log
-  redis.call('publish', 'ql:log', cjson.encode({
-    jid   = jid,
-    event = 'advanced',
-    queue = queue,
-    to    = nextq
-  }))
-
-  -- Enqueue the job
-  table.insert(history, {
-    q   = nextq,
-    put = math.floor(now)
-  })
-
-  -- We're going to make sure that this queue is in the
-  -- set of known queues
-  if redis.call('zscore', 'ql:queues', nextq) == false then
-    redis.call('zadd', 'ql:queues', now, nextq)
-  end
-
-  redis.call('hmset', 'ql:j:' .. jid, 'state', 'waiting', 'worker', '', 'failure', '{}',
-    'queue', nextq, 'expires', 0, 'history', cjson.encode(history), 'remaining', tonumber(retries))
-
-  if delay > 0 then
-    redis.call('zadd', 'ql:q:' .. nextq .. '-scheduled', now + delay, jid)
-    return 'scheduled'
-  else
-    -- These are the jids we legitimately have to wait on
-    local count = 0
-    for i, j in ipairs(depends) do
-      -- Make sure it's something other than 'nil' or complete.
-      local state = redis.call('hget', 'ql:j:' .. j, 'state')
-      if (state and state ~= 'complete') then
-        count = count + 1
-        redis.call('sadd', 'ql:j:' .. j .. '-dependents', jid)
-        redis.call('sadd', 'ql:j:' .. jid .. '-dependencies', j)
-      end
-    end
-    if count > 0 then
-      redis.call('zadd', 'ql:q:' .. nextq .. '-depends', now, jid)
-      redis.call('hset', 'ql:j:' .. jid, 'state', 'depends')
-      return 'depends'
-    else
-      redis.call('zadd', 'ql:q:' .. nextq .. '-work', priority - (now / 10000000000), jid)
-      return 'waiting'
-    end
-  end
-else
-  -- Send a message out to log
-  redis.call('publish', 'ql:log', cjson.encode({
-    jid   = jid,
-    event = 'completed',
-    queue = queue
-  }))
-
-  redis.call('hmset', 'ql:j:' .. jid, 'state', 'complete', 'worker', '', 'failure', '{}',
-    'queue', '', 'expires', 0, 'history', cjson.encode(history), 'remaining', tonumber(retries))
-
-  -- Do the completion dance
-  local count, time = unpack(redis.call('hmget', 'ql:config', 'jobs-history-count', 'jobs-history'))
-
-  -- These are the default values
-  count = tonumber(count or 50000)
-  time  = tonumber(time or 7 * 24 * 60 * 60)
-
-  -- Schedule this job for destructination eventually
-  redis.call('zadd', 'ql:completed', now, jid)
-
-  -- Now look at the expired job data. First, based on the current time
-  local jids = redis.call('zrangebyscore', 'ql:completed', 0, now - time)
-  -- Any jobs that need to be expired... delete
-  for index, jid in ipairs(jids) do
-    local tags = cjson.decode(redis.call('hget', 'ql:j:' .. jid, 'tags') or '{}')
-    for i, tag in ipairs(tags) do
-      redis.call('zrem', 'ql:t:' .. tag, jid)
-      redis.call('zincrby', 'ql:tags', -1, tag)
-    end
-    redis.call('del', 'ql:j:' .. jid)
-  end
-  -- And now remove those from the queued-for-cleanup queue
-  redis.call('zremrangebyscore', 'ql:completed', 0, now - time)
-
-  -- Now take the all by the most recent 'count' ids
-  jids = redis.call('zrange', 'ql:completed', 0, (-1-count))
-  for index, jid in ipairs(jids) do
-    local tags = cjson.decode(redis.call('hget', 'ql:j:' .. jid, 'tags') or '{}')
-    for i, tag in ipairs(tags) do
-      redis.call('zrem', 'ql:t:' .. tag, jid)
-      redis.call('zincrby', 'ql:tags', -1, tag)
-    end
-    redis.call('del', 'ql:j:' .. jid)
-  end
-  redis.call('zremrangebyrank', 'ql:completed', 0, (-1-count))
-
-  -- Alright, if this has any dependents, then we should go ahead
-  -- and unstick those guys.
-  for i, j in ipairs(redis.call('smembers', 'ql:j:' .. jid .. '-dependents')) do
-    redis.call('srem', 'ql:j:' .. j .. '-dependencies', jid)
-    if redis.call('scard', 'ql:j:' .. j .. '-dependencies') == 0 then
-      local q, p = unpack(redis.call('hmget', 'ql:j:' .. j, 'queue', 'priority'))
-      if q then
-        redis.call('zrem', 'ql:q:' .. q .. '-depends', j)
-        redis.call('zadd', 'ql:q:' .. q .. '-work', p, j)
-        redis.call('hset', 'ql:j:' .. j, 'state', 'waiting')
-      end
-    end
-  end
-
-  -- Delete our dependents key
-  redis.call('del', 'ql:j:' .. jid .. '-dependents')
-
-  return 'complete'
-end
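One detail worth noting in the removed script: the per-queue run-time statistics (total, mean, vk) are maintained with Welford's online algorithm, so the mean and variance of how long jobs ran (the time since each job was popped) are updated in constant space as each job completes. A standalone sketch of the same update (illustrative only, not from the package):

    # Welford's online mean/variance update, as used in the stats block above.
    count = 0
    mean  = 0.0
    vk    = 0.0

    [12.0, 7.5, 30.2, 4.8].each do |waiting|   # per-job run times, in seconds
      count   += 1
      old_mean = mean
      mean    += (waiting - mean) / count
      vk      += (waiting - mean) * (waiting - old_mean)
    end

    variance = count > 1 ? vk / (count - 1) : 0.0
    puts format('count=%d mean=%.2f variance=%.2f', count, mean, variance)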