qless 0.9.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. data/Gemfile +8 -0
  2. data/HISTORY.md +168 -0
  3. data/README.md +571 -0
  4. data/Rakefile +28 -0
  5. data/bin/qless-campfire +106 -0
  6. data/bin/qless-growl +99 -0
  7. data/bin/qless-web +23 -0
  8. data/lib/qless.rb +185 -0
  9. data/lib/qless/config.rb +31 -0
  10. data/lib/qless/job.rb +259 -0
  11. data/lib/qless/job_reservers/ordered.rb +23 -0
  12. data/lib/qless/job_reservers/round_robin.rb +34 -0
  13. data/lib/qless/lua.rb +25 -0
  14. data/lib/qless/qless-core/cancel.lua +71 -0
  15. data/lib/qless/qless-core/complete.lua +218 -0
  16. data/lib/qless/qless-core/config.lua +44 -0
  17. data/lib/qless/qless-core/depends.lua +65 -0
  18. data/lib/qless/qless-core/fail.lua +107 -0
  19. data/lib/qless/qless-core/failed.lua +83 -0
  20. data/lib/qless/qless-core/get.lua +37 -0
  21. data/lib/qless/qless-core/heartbeat.lua +50 -0
  22. data/lib/qless/qless-core/jobs.lua +41 -0
  23. data/lib/qless/qless-core/peek.lua +155 -0
  24. data/lib/qless/qless-core/pop.lua +278 -0
  25. data/lib/qless/qless-core/priority.lua +32 -0
  26. data/lib/qless/qless-core/put.lua +156 -0
  27. data/lib/qless/qless-core/queues.lua +58 -0
  28. data/lib/qless/qless-core/recur.lua +181 -0
  29. data/lib/qless/qless-core/retry.lua +73 -0
  30. data/lib/qless/qless-core/ruby/lib/qless-core.rb +1 -0
  31. data/lib/qless/qless-core/ruby/lib/qless/core.rb +13 -0
  32. data/lib/qless/qless-core/ruby/lib/qless/core/version.rb +5 -0
  33. data/lib/qless/qless-core/ruby/spec/qless_core_spec.rb +13 -0
  34. data/lib/qless/qless-core/stats.lua +92 -0
  35. data/lib/qless/qless-core/tag.lua +100 -0
  36. data/lib/qless/qless-core/track.lua +79 -0
  37. data/lib/qless/qless-core/workers.lua +69 -0
  38. data/lib/qless/queue.rb +141 -0
  39. data/lib/qless/server.rb +411 -0
  40. data/lib/qless/tasks.rb +10 -0
  41. data/lib/qless/version.rb +3 -0
  42. data/lib/qless/worker.rb +195 -0
  43. metadata +239 -0
@@ -0,0 +1,58 @@
-- Queues(0, now, [queue])
-- -----------------------
--
-- Return all the queues we know about, with how many jobs are scheduled, waiting,
-- and running in that queue. If a queue name is provided, then only the appropriate
-- response hash should be returned. The response is JSON:
--
-- [
--     {
--         'name': 'testing',
--         'stalled': 2,
--         'waiting': 5,
--         'running': 5,
--         'scheduled': 10,
--         'depends': 5,
--         'recurring': 0
--     }, {
--         ...
--     }
-- ]

if #KEYS > 0 then
  error('Queues(): Got '.. #KEYS .. ' expected 0 KEYS arguments')
end

local now   = assert(tonumber(ARGV[1]), 'Queues(): Arg "now" missing or not a number: ' .. (ARGV[1] or 'nil'))
local queue = ARGV[2]

-- Build the stats hash for one queue. A job whose lock score is <= now has
-- an expired heartbeat and counts as stalled rather than running.
local function queue_counts(name)
  local stalled = redis.call('zcount', 'ql:q:' .. name .. '-locks', 0, now)
  return {
    name      = name,
    waiting   = redis.call('zcard', 'ql:q:' .. name .. '-work'),
    stalled   = stalled,
    running   = redis.call('zcard', 'ql:q:' .. name .. '-locks') - stalled,
    scheduled = redis.call('zcard', 'ql:q:' .. name .. '-scheduled'),
    depends   = redis.call('zcard', 'ql:q:' .. name .. '-depends'),
    recurring = redis.call('zcard', 'ql:q:' .. name .. '-recur')
  }
end

local response
if queue then
  -- Single-queue request: no need to enumerate ql:queues at all.
  -- (Previously the zrange was issued unconditionally and its result
  -- discarded on this path.)
  response = queue_counts(queue)
else
  response = {}
  for _, qname in ipairs(redis.call('zrange', 'ql:queues', 0, -1)) do
    table.insert(response, queue_counts(qname))
  end
end

return cjson.encode(response)
@@ -0,0 +1,181 @@
-- Recur(0, 'on', queue, jid, klass, data, now, 'interval', second, offset, [priority p], [tags t], [retries r])
-- Recur(0, 'off', jid)
-- Recur(0, 'get', jid)
-- Recur(0, 'update', jid, ['priority', priority], ['interval', interval], ['retries', retries], ['data', data], ['klass', klass], ['queue', queue])
-- Recur(0, 'tag', jid, tag, [tag, [...]])
-- Recur(0, 'untag', jid, tag, [tag, [...]])
-- -------------------------------------------------------------------------------------------------------
-- This script takes the name of a queue, and then the info
-- about the work item, and makes sure that jobs matching
-- its criteria are regularly made available.

if #KEYS ~= 0 then
  error('Recur(): Got ' .. #KEYS .. ', expected 0 KEYS arguments')
end

local command = assert(ARGV[1], 'Recur(): Missing first argument')

if command == 'on' then
  local queue = assert(ARGV[2], 'Recur(): Arg "queue" missing')
  local jid   = assert(ARGV[3], 'Recur(): Arg "jid" missing')
  local klass = assert(ARGV[4], 'Recur(): Arg "klass" missing')
  local data  = assert(cjson.decode(ARGV[5]), 'Recur(): Arg "data" missing or not JSON: ' .. tostring(ARGV[5]))
  local now   = assert(tonumber(ARGV[6]), 'Recur(): Arg "now" missing or not a number: ' .. tostring(ARGV[6]))
  local spec  = assert(ARGV[7], 'Recur(): Arg "schedule type" missing')
  if spec == 'interval' then
    local interval = assert(tonumber(ARGV[8]), 'Recur(): Arg "interval" must be a number: ' .. tostring(ARGV[8]))
    local offset   = assert(tonumber(ARGV[9]), 'Recur(): Arg "offset" must be a number: ' .. tostring(ARGV[9]))
    if interval <= 0 then
      -- BUGFIX: the check rejects 0 as well, so the message must say
      -- "greater than 0" (it previously said "greater than or equal to 0").
      error('Recur(): Arg "interval" must be greater than 0')
    end

    -- Read in all the optional parameters
    local options = {}
    for i = 10, #ARGV, 2 do options[ARGV[i]] = ARGV[i + 1] end
    -- BUGFIX: the default must be the JSON string '{}' -- cjson.decode
    -- raises when handed a Lua table, so `options.tags or {}` made any
    -- 'on' call without explicit tags fail.
    options.tags     = assert(cjson.decode(options.tags or '{}'), 'Recur(): Arg "tags" must be JSON-encoded array of string. Got: ' .. tostring(options.tags))
    options.priority = assert(tonumber(options.priority or 0), 'Recur(): Arg "priority" must be a number. Got: ' .. tostring(options.priority))
    options.retries  = assert(tonumber(options.retries or 0), 'Recur(): Arg "retries" must be a number. Got: ' .. tostring(options.retries))

    -- Do some insertions
    redis.call('hmset', 'ql:r:' .. jid,
      'jid'     , jid,
      'klass'   , klass,
      'data'    , cjson.encode(data),
      'priority', options.priority,
      'tags'    , cjson.encode(options.tags or {}),
      'state'   , 'recur',
      'queue'   , queue,
      'type'    , 'interval',
      -- How many jobs we've spawned from this
      'count'   , 0,
      'interval', interval,
      'retries' , options.retries)
    -- Now, we should schedule the next run of the job
    redis.call('zadd', 'ql:q:' .. queue .. '-recur', now + offset, jid)

    -- Lastly, we're going to make sure that this item is in the
    -- set of known queues. We should keep this sorted by the
    -- order in which we saw each of these queues.
    -- (zscore on a missing member comes back as boolean false here.)
    if redis.call('zscore', 'ql:queues', queue) == false then
      redis.call('zadd', 'ql:queues', now, queue)
    end

    return jid
  else
    error('Recur(): schedule type "' .. tostring(spec) .. '" unknown')
  end
elseif command == 'off' then
  local jid = assert(ARGV[2], 'Recur(): Arg "jid" missing')
  -- First, find out what queue it was attached to
  local queue = redis.call('hget', 'ql:r:' .. jid, 'queue')
  if queue then
    -- Now, delete it from the queue it was attached to, and delete the thing itself
    redis.call('zrem', 'ql:q:' .. queue .. '-recur', jid)
    redis.call('del', 'ql:r:' .. jid)
  end
  -- 'off' is idempotent: turning off an unknown jid is still a success.
  return true
elseif command == 'get' then
  local jid = assert(ARGV[2], 'Recur(): Arg "jid" missing')
  local job = redis.call(
    'hmget', 'ql:r:' .. jid, 'jid', 'klass', 'state', 'queue',
    'priority', 'interval', 'retries', 'count', 'data', 'tags')

  -- A missing hash yields false fields; report the job as not found.
  if not job[1] then
    return false
  end

  return cjson.encode({
    jid      = job[1],
    klass    = job[2],
    state    = job[3],
    queue    = job[4],
    priority = tonumber(job[5]),
    interval = tonumber(job[6]),
    retries  = tonumber(job[7]),
    count    = tonumber(job[8]),
    data     = cjson.decode(job[9]),
    tags     = cjson.decode(job[10])
  })
elseif command == 'update' then
  local jid = assert(ARGV[2], 'Recur(): Arg "jid" missing')

  -- Make sure that the job exists.
  -- BUGFIX: EXISTS returns the integer 0 or 1, and 0 is truthy in Lua, so
  -- the old bare truth test always took the success path (and then blew up
  -- deeper in the branch for missing jids).
  if redis.call('exists', 'ql:r:' .. jid) == 1 then
    for i = 3, #ARGV, 2 do
      local key   = ARGV[i]
      local value = ARGV[i + 1]
      if key == 'priority' or key == 'interval' or key == 'retries' then
        value = assert(tonumber(value), 'Recur(): Arg "' .. key .. '" must be a number: ' .. tostring(value))
        -- If the command is 'interval', then we need to update the time
        -- when it should next be scheduled
        if key == 'interval' then
          local queue, interval = unpack(redis.call('hmget', 'ql:r:' .. jid, 'queue', 'interval'))
          redis.call('zincrby', 'ql:q:' .. queue .. '-recur', value - tonumber(interval), jid)
        end
        redis.call('hset', 'ql:r:' .. jid, key, value)
      elseif key == 'data' then
        value = assert(cjson.decode(value), 'Recur(): Arg "data" is not JSON-encoded: ' .. tostring(value))
        redis.call('hset', 'ql:r:' .. jid, 'data', cjson.encode(value))
      elseif key == 'klass' then
        redis.call('hset', 'ql:r:' .. jid, 'klass', value)
      elseif key == 'queue' then
        -- Move the recurrence entry to the new queue, preserving its score
        -- so the next spawn time is unchanged.
        local queue = redis.call('hget', 'ql:r:' .. jid, 'queue')
        local score = redis.call('zscore', 'ql:q:' .. queue .. '-recur', jid)
        redis.call('zrem', 'ql:q:' .. queue .. '-recur', jid)
        redis.call('zadd', 'ql:q:' .. value .. '-recur', score, jid)
        redis.call('hset', 'ql:r:' .. jid, 'queue', value)
      else
        error('Recur(): Unrecognized option "' .. key .. '"')
      end
    end
    return true
  else
    return false
  end
elseif command == 'tag' then
  local jid  = assert(ARGV[2], 'Recur(): Arg "jid" missing')
  local tags = redis.call('hget', 'ql:r:' .. jid, 'tags')
  -- If the job has been canceled / deleted, then return false
  if tags then
    -- Decode the json blob, convert to dictionary
    tags = cjson.decode(tags)
    local _tags = {}
    for i, v in ipairs(tags) do _tags[v] = true end

    -- Append only the tags not already present
    for i = 3, #ARGV do
      if _tags[ARGV[i]] == nil then table.insert(tags, ARGV[i]) end
    end

    tags = cjson.encode(tags)
    redis.call('hset', 'ql:r:' .. jid, 'tags', tags)
    return tags
  else
    return false
  end
elseif command == 'untag' then
  local jid = assert(ARGV[2], 'Recur(): Arg "jid" missing')
  -- Get the existing tags
  local tags = redis.call('hget', 'ql:r:' .. jid, 'tags')
  -- If the job has been canceled / deleted, then return false
  if tags then
    -- Decode the json blob, convert to dictionary
    tags = cjson.decode(tags)
    local _tags = {}
    -- Make a hash
    for i, v in ipairs(tags) do _tags[v] = true end
    -- Delete these from the hash
    for i = 3, #ARGV do _tags[ARGV[i]] = nil end
    -- Back into a list, preserving original order
    local results = {}
    for i, tag in ipairs(tags) do
      if _tags[tag] then table.insert(results, tag) end
    end
    -- json encode them, set, and return
    tags = cjson.encode(results)
    redis.call('hset', 'ql:r:' .. jid, 'tags', tags)
    return tags
  else
    return false
  end
else
  error('Recur(): First argument must be one of [on, off, get, update, tag, untag]. Got ' .. tostring(ARGV[1]))
end
@@ -0,0 +1,73 @@
-- retry(0, jid, queue, worker, now, [delay])
-- ------------------------------------------
-- This script accepts jid, queue, worker and delay for
-- retrying a job. This is similar in functionality to
-- `put`, except that this counts against the retries
-- a job has for a stage.
--
-- If the worker is not the worker with a lock on the job,
-- then it returns false. If the job is not actually running,
-- then it returns false. Otherwise, it returns the number
-- of retries remaining. If the allowed retries have been
-- exhausted, then it is automatically failed, and a negative
-- number is returned.

if #KEYS ~= 0 then
  error('Retry(): Got ' .. #KEYS .. ', expected 0')
end

local jid    = assert(ARGV[1], 'Retry(): Arg "jid" missing')
local queue  = assert(ARGV[2], 'Retry(): Arg "queue" missing')
local worker = assert(ARGV[3], 'Retry(): Arg "worker" missing')
local now    = assert(tonumber(ARGV[4]), 'Retry(): Arg "now" missing or not a number: ' .. tostring(ARGV[4]))
local delay  = assert(tonumber(ARGV[5] or 0), 'Retry(): Arg "delay" not a number: ' .. tostring(ARGV[5]))

-- Let's see what the old priority, history and tags were
local oldqueue, state, retries, oldworker, priority = unpack(redis.call('hmget', 'ql:j:' .. jid, 'queue', 'state', 'retries', 'worker', 'priority'))

-- Only the worker that currently holds the lock on a running job may retry it
if oldworker ~= worker or (state ~= 'running') then
  return false
end

-- Remove it from the locks key of the old queue
redis.call('zrem', 'ql:q:' .. oldqueue .. '-locks', jid)

local remaining = redis.call('hincrby', 'ql:j:' .. jid, 'remaining', -1)

-- Remove this job from the worker that was previously working it
redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)

if remaining < 0 then
  -- Retries exhausted: fail the job instead of requeueing it.
  -- NOTE(review): the failure group and the requeue below use the caller's
  -- `queue` argument, while the lock removal above used the stored
  -- `oldqueue` -- confirm callers always pass the job's current queue.
  local group = 'failed-retries-' .. queue
  -- First things first, we should get the history
  local history = redis.call('hget', 'ql:j:' .. jid, 'history')
  -- Now, take the element of the history for which our provided worker is
  -- the worker, and update 'failed'
  history = cjson.decode(history or '[]')
  -- BUGFIX: guard against an empty history -- indexing history[0] would
  -- silently do nothing but history[#history] on an empty table is nil and
  -- the field assignment would raise.
  if #history > 0 then
    history[#history]['failed'] = now
  end

  redis.call('hmset', 'ql:j:' .. jid, 'state', 'failed', 'worker', '',
    'expires', '', 'history', cjson.encode(history), 'failure', cjson.encode({
      -- BUGFIX: corrected 'exhuasted' typo in the user-facing message
      ['group']   = group,
      ['message'] = 'Job exhausted retries in queue "' .. queue .. '"',
      ['when']    = now,
      ['worker']  = worker
    }))

  -- Add this type of failure to the list of failures
  redis.call('sadd', 'ql:failures', group)
  -- And add this particular instance to the failed types
  redis.call('lpush', 'ql:f:' .. group, jid)
else
  -- Put it in the queue again with a delay. Like put()
  if delay > 0 then
    redis.call('zadd', 'ql:q:' .. queue .. '-scheduled', now + delay, jid)
    redis.call('hset', 'ql:j:' .. jid, 'state', 'scheduled')
  else
    -- Score blends priority with arrival time so equal priorities pop FIFO
    redis.call('zadd', 'ql:q:' .. queue .. '-work', priority - (now / 10000000000), jid)
    redis.call('hset', 'ql:j:' .. jid, 'state', 'waiting')
  end
end

return remaining
@@ -0,0 +1 @@
1
+ require "qless/core"
@@ -0,0 +1,13 @@
require "qless/core/version"

module Qless
  # Access to the lua scripts bundled with qless-core.
  module Core
    extend self

    # Directory containing the packaged lua scripts.
    LUA_SCRIPT_DIR = File.expand_path("../core/lua_scripts", __FILE__)

    # Returns the source text of the named script, e.g. "put" -> put.lua.
    def script_contents(name)
      path = File.join(LUA_SCRIPT_DIR, "#{name}.lua")
      File.read(path)
    end
  end
end
@@ -0,0 +1,5 @@
module Qless
  module Core
    # Released version of the qless-core gem.
    VERSION = "0.0.1"
  end
end
@@ -0,0 +1,13 @@
# Smoke test: the Core helper can locate and read the bundled lua scripts.
require 'minitest/autorun'
require 'minitest/spec'

# Make the gem's lib directory loadable when run from the repo root.
$LOAD_PATH.unshift "lib"
require 'qless/core'

module Qless
  describe Core do
    it("can read the lua files") do
      contents = Core.script_contents("put")
      contents.must_include "Put"
    end
  end
end
+
@@ -0,0 +1,92 @@
-- Stats(0, queue, date)
-- ---------------------
-- Return the current statistics for a given queue on a given date. The results
-- are returned are a JSON blob:
--
--
--  {
--      # These are unimplemented as of yet
--      'failed': 3,
--      'retries': 5,
--      'wait' : {
--          'total'    : ...,
--          'mean'     : ...,
--          'variance' : ...,
--          'histogram': [
--              ...
--          ]
--      }, 'run': {
--          'total'    : ...,
--          'mean'     : ...,
--          'variance' : ...,
--          'histogram': [
--              ...
--          ]
--      }
--  }
--
-- The histogram's data points are at the second resolution for the first minute,
-- the minute resolution for the first hour, the 15-minute resolution for the first
-- day, the hour resolution for the first 3 days, and then at the day resolution
-- from there on out. The `histogram` key is a list of those values.
--
-- Args:
--    1) queue
--    2) time

if #KEYS > 0 then error('Stats(): No Keys should be provided') end

local queue = assert(ARGV[1], 'Stats(): Arg "queue" missing')
local time  = assert(tonumber(ARGV[2]), 'Stats(): Arg "time" missing or not a number: ' .. (ARGV[2] or 'nil'))

-- The bin is midnight of the provided day
-- 24 * 60 * 60 = 86400
local bin = time - (time % 86400)

-- This a table of all the keys we want to use in order to produce a histogram
local histokeys = {
  's0','s1','s2','s3','s4','s5','s6','s7','s8','s9','s10','s11','s12','s13','s14','s15','s16','s17','s18','s19','s20','s21','s22','s23','s24','s25','s26','s27','s28','s29','s30','s31','s32','s33','s34','s35','s36','s37','s38','s39','s40','s41','s42','s43','s44','s45','s46','s47','s48','s49','s50','s51','s52','s53','s54','s55','s56','s57','s58','s59',
  'm1','m2','m3','m4','m5','m6','m7','m8','m9','m10','m11','m12','m13','m14','m15','m16','m17','m18','m19','m20','m21','m22','m23','m24','m25','m26','m27','m28','m29','m30','m31','m32','m33','m34','m35','m36','m37','m38','m39','m40','m41','m42','m43','m44','m45','m46','m47','m48','m49','m50','m51','m52','m53','m54','m55','m56','m57','m58','m59',
  'h1','h2','h3','h4','h5','h6','h7','h8','h9','h10','h11','h12','h13','h14','h15','h16','h17','h18','h19','h20','h21','h22','h23',
  'd1','d2','d3','d4','d5','d6'
}

-- Build the {count, mean, std, histogram} summary for either the 'wait' or
-- 'run' statistics of this queue/bin.
local mkstats = function(name, bin, queue)
  -- The results we'll be sending back
  local results = {}

  local count, mean, vk = unpack(redis.call('hmget', 'ql:s:' .. name .. ':' .. bin .. ':' .. queue, 'total', 'mean', 'vk'))

  -- Missing fields come back as false; coerce everything to numbers.
  -- BUGFIX: `vk` was previously left as nil when absent, so the sqrt below
  -- raised; it now defaults to 0 like the other fields. The old
  -- `if not count then` branch was dead code (count was already coerced
  -- with `or 0`) and has been removed.
  count = tonumber(count) or 0
  mean  = tonumber(mean) or 0
  vk    = tonumber(vk) or 0

  results.count     = count
  results.mean      = mean
  results.histogram = {}

  if count > 1 then
    -- Welford's accumulator: variance = vk / (n - 1)
    results.std = math.sqrt(vk / (count - 1))
  else
    results.std = 0
  end

  local histogram = redis.call('hmget', 'ql:s:' .. name .. ':' .. bin .. ':' .. queue, unpack(histokeys))
  for i = 1, #histokeys do
    table.insert(results.histogram, tonumber(histogram[i]) or 0)
  end
  return results
end

local retries, failed, failures = unpack(redis.call('hmget', 'ql:s:stats:' .. bin .. ':' .. queue, 'retries', 'failed', 'failures'))
return cjson.encode({
  retries  = tonumber(retries) or 0,
  failed   = tonumber(failed) or 0,
  failures = tonumber(failures) or 0,
  wait     = mkstats('wait', bin, queue),
  run      = mkstats('run' , bin, queue)
})