qless 0.9.1

Files changed (43)
  1. data/Gemfile +8 -0
  2. data/HISTORY.md +168 -0
  3. data/README.md +571 -0
  4. data/Rakefile +28 -0
  5. data/bin/qless-campfire +106 -0
  6. data/bin/qless-growl +99 -0
  7. data/bin/qless-web +23 -0
  8. data/lib/qless.rb +185 -0
  9. data/lib/qless/config.rb +31 -0
  10. data/lib/qless/job.rb +259 -0
  11. data/lib/qless/job_reservers/ordered.rb +23 -0
  12. data/lib/qless/job_reservers/round_robin.rb +34 -0
  13. data/lib/qless/lua.rb +25 -0
  14. data/lib/qless/qless-core/cancel.lua +71 -0
  15. data/lib/qless/qless-core/complete.lua +218 -0
  16. data/lib/qless/qless-core/config.lua +44 -0
  17. data/lib/qless/qless-core/depends.lua +65 -0
  18. data/lib/qless/qless-core/fail.lua +107 -0
  19. data/lib/qless/qless-core/failed.lua +83 -0
  20. data/lib/qless/qless-core/get.lua +37 -0
  21. data/lib/qless/qless-core/heartbeat.lua +50 -0
  22. data/lib/qless/qless-core/jobs.lua +41 -0
  23. data/lib/qless/qless-core/peek.lua +155 -0
  24. data/lib/qless/qless-core/pop.lua +278 -0
  25. data/lib/qless/qless-core/priority.lua +32 -0
  26. data/lib/qless/qless-core/put.lua +156 -0
  27. data/lib/qless/qless-core/queues.lua +58 -0
  28. data/lib/qless/qless-core/recur.lua +181 -0
  29. data/lib/qless/qless-core/retry.lua +73 -0
  30. data/lib/qless/qless-core/ruby/lib/qless-core.rb +1 -0
  31. data/lib/qless/qless-core/ruby/lib/qless/core.rb +13 -0
  32. data/lib/qless/qless-core/ruby/lib/qless/core/version.rb +5 -0
  33. data/lib/qless/qless-core/ruby/spec/qless_core_spec.rb +13 -0
  34. data/lib/qless/qless-core/stats.lua +92 -0
  35. data/lib/qless/qless-core/tag.lua +100 -0
  36. data/lib/qless/qless-core/track.lua +79 -0
  37. data/lib/qless/qless-core/workers.lua +69 -0
  38. data/lib/qless/queue.rb +141 -0
  39. data/lib/qless/server.rb +411 -0
  40. data/lib/qless/tasks.rb +10 -0
  41. data/lib/qless/version.rb +3 -0
  42. data/lib/qless/worker.rb +195 -0
  43. metadata +239 -0
@@ -0,0 +1,100 @@ data/lib/qless/qless-core/tag.lua
+ -- tag(0, ('add' | 'remove'), jid, now, tag, [tag, ...])
+ -- tag(0, 'get', tag, [offset, [count]])
+ -- tag(0, 'top', [offset, [count]])
+ -- ------------------------------------------------------------------------------------------------------------------
+ -- Accepts a jid, 'add' or 'remove', and then a list of tags to either add to
+ -- or remove from the job. Alternatively, accepts 'get', a tag to look up the
+ -- jobs associated with that tag, and an optional offset and count.
+ --
+ -- For 'add' or 'remove', the response is a list of the job's current tags,
+ -- or False if the job doesn't exist. For 'get', the response is of the form:
+ --
+ --     {
+ --         total: ...,
+ --         jobs: [
+ --             jid,
+ --             ...
+ --         ]
+ --     }
+ --
+ -- If 'top' is supplied, it returns the most commonly-used tags
+ -- in a paginated fashion.
+
+ if #KEYS ~= 0 then
+     error('Tag(): Got ' .. #KEYS .. ', expected 0')
+ end
+
+ local command = assert(ARGV[1], 'Tag(): Missing first arg "add", "remove", "get" or "top"')
+
+ if command == 'add' then
+     local jid  = assert(ARGV[2]          , 'Tag(): Arg "jid" missing')
+     local now  = assert(tonumber(ARGV[3]), 'Tag(): Arg "now" is not a number')
+     local tags = redis.call('hget', 'ql:j:' .. jid, 'tags')
+     -- If the job has been canceled / deleted, then return false
+     if tags then
+         -- Decode the json blob, convert to dictionary
+         tags = cjson.decode(tags)
+         local _tags = {}
+         for i,v in ipairs(tags) do _tags[v] = true end
+
+         -- Add the job to the sorted set for each of those tags
+         for i=4,#ARGV do
+             local tag = ARGV[i]
+             if _tags[tag] == nil then
+                 table.insert(tags, tag)
+             end
+             redis.call('zadd', 'ql:t:' .. tag, now, jid)
+             redis.call('zincrby', 'ql:tags', 1, tag)
+         end
+
+         tags = cjson.encode(tags)
+         redis.call('hset', 'ql:j:' .. jid, 'tags', tags)
+         return tags
+     else
+         return false
+     end
+ elseif command == 'remove' then
+     local jid  = assert(ARGV[2]          , 'Tag(): Arg "jid" missing')
+     local now  = assert(tonumber(ARGV[3]), 'Tag(): Arg "now" is not a number')
+     local tags = redis.call('hget', 'ql:j:' .. jid, 'tags')
+     -- If the job has been canceled / deleted, then return false
+     if tags then
+         -- Decode the json blob, convert to dictionary
+         tags = cjson.decode(tags)
+         local _tags = {}
+         for i,v in ipairs(tags) do _tags[v] = true end
+
+         -- Remove the job from the sorted set for each of those tags
+         for i=4,#ARGV do
+             local tag = ARGV[i]
+             _tags[tag] = nil
+             redis.call('zrem', 'ql:t:' .. tag, jid)
+             redis.call('zincrby', 'ql:tags', -1, tag)
+         end
+
+         local results = {}
+         for i,tag in ipairs(tags) do if _tags[tag] then table.insert(results, tag) end end
+
+         tags = cjson.encode(results)
+         redis.call('hset', 'ql:j:' .. jid, 'tags', tags)
+         return tags
+     else
+         return false
+     end
+ elseif command == 'get' then
+     local tag    = assert(ARGV[2]                , 'Tag(): Arg "tag" missing')
+     local offset = assert(tonumber(ARGV[3] or 0) , 'Tag(): Arg "offset" not a number: ' .. tostring(ARGV[3]))
+     local count  = assert(tonumber(ARGV[4] or 25), 'Tag(): Arg "count" not a number: ' .. tostring(ARGV[4]))
+     return cjson.encode({
+         total = redis.call('zcard', 'ql:t:' .. tag),
+         -- zrange is inclusive of both endpoints, so the stop index is offset + count - 1
+         jobs  = redis.call('zrange', 'ql:t:' .. tag, offset, offset + count - 1)
+     })
+ elseif command == 'top' then
+     local offset = assert(tonumber(ARGV[2] or 0) , 'Tag(): Arg "offset" not a number: ' .. tostring(ARGV[2]))
+     local count  = assert(tonumber(ARGV[3] or 25), 'Tag(): Arg "count" not a number: ' .. tostring(ARGV[3]))
+     return cjson.encode(redis.call('zrevrangebyscore', 'ql:tags', '+inf', 2, 'limit', offset, count))
+ else
+     error('Tag(): First argument must be "add", "remove", "get" or "top"')
+ end
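
In the Ruby client these core scripts are driven through the Lua wrapper required by queue.rb, but the calling convention documented above can also be exercised directly from redis-rb. A minimal sketch, assuming a local Redis, a checked-out script path, and a placeholder jid:

    require 'redis'
    require 'json'

    redis  = Redis.new
    script = File.read('lib/qless/qless-core/tag.lua')   # path is an assumption

    # Add two tags to a hypothetical job; the script returns the job's tags as
    # JSON, or false (nil in Ruby) if the job no longer exists.
    tags = redis.eval(script, keys: [], argv: ['add', 'example-jid', Time.now.to_f, 'urgent', 'reports'])
    puts tags.inspect

    # Page through the jobs carrying the 'urgent' tag
    page = JSON.parse(redis.eval(script, keys: [], argv: ['get', 'urgent', 0, 25]))
    puts page['total']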
@@ -0,0 +1,79 @@ data/lib/qless/qless-core/track.lua
+ -- Track(0)
+ -- Track(0, ('track' | 'untrack'), jid, now)
+ -- ------------------------------------------
+ -- If no arguments are provided, it returns details of all currently-tracked jobs.
+ -- If the first argument is 'track', then it will start tracking the job associated
+ -- with that id, and 'untrack' stops tracking it. In this context, tracking is
+ -- nothing more than saving the job to a list of jobs that are considered special.
+ -- __Returns__ JSON:
+ --
+ --     {
+ --         'jobs': [
+ --             {
+ --                 'jid': ...,
+ --                 # All the other details you'd get from 'get'
+ --             }, {
+ --                 ...
+ --             }
+ --         ], 'expired': [
+ --             # These are all the jids that are completed and whose data expired
+ --             'deadbeef',
+ --             ...,
+ --             ...,
+ --         ]
+ --     }
+ --
+
+ if #KEYS ~= 0 then
+     error('Track(): No keys expected. Got ' .. #KEYS)
+ end
+
+ if ARGV[1] ~= nil then
+     local jid = assert(ARGV[2]          , 'Track(): Arg "jid" missing')
+     local now = assert(tonumber(ARGV[3]), 'Track(): Arg "now" missing or not a number: ' .. (ARGV[3] or 'nil'))
+     if string.lower(ARGV[1]) == 'track' then
+         redis.call('publish', 'track', jid)
+         return redis.call('zadd', 'ql:tracked', now, jid)
+     elseif string.lower(ARGV[1]) == 'untrack' then
+         redis.call('publish', 'untrack', jid)
+         return redis.call('zrem', 'ql:tracked', jid)
+     else
+         error('Track(): Unknown action "' .. ARGV[1] .. '"')
+     end
+ else
+     local response = {
+         jobs    = {},
+         expired = {}
+     }
+     local jids = redis.call('zrange', 'ql:tracked', 0, -1)
+     for index, jid in ipairs(jids) do
+         local job = redis.call(
+             'hmget', 'ql:j:' .. jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority',
+             'expires', 'retries', 'remaining', 'data', 'tags', 'history', 'failure')
+
+         if job[1] then
+             table.insert(response.jobs, {
+                 jid          = job[1],
+                 klass        = job[2],
+                 state        = job[3],
+                 queue        = job[4],
+                 worker       = job[5] or '',
+                 tracked      = true,
+                 priority     = tonumber(job[6]),
+                 expires      = tonumber(job[7]) or 0,
+                 retries      = tonumber(job[8]),
+                 remaining    = tonumber(job[9]),
+                 data         = cjson.decode(job[10]),
+                 tags         = cjson.decode(job[11]),
+                 history      = cjson.decode(job[12]),
+                 failure      = cjson.decode(job[13] or '{}'),
+                 dependents   = redis.call('smembers', 'ql:j:' .. jid .. '-dependents'),
+                 dependencies = redis.call('smembers', 'ql:j:' .. jid .. '-dependencies')
+             })
+         else
+             table.insert(response.expired, jid)
+         end
+     end
+     return cjson.encode(response)
+ end
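
The same direct-invocation approach works for track.lua; the sketch below is illustrative only, with a placeholder jid and an assumed script path. It starts tracking a job and then pulls the full report:

    require 'redis'
    require 'json'

    redis  = Redis.new
    script = File.read('lib/qless/qless-core/track.lua')   # path is an assumption

    # Start tracking a job, then fetch details for everything being tracked
    redis.eval(script, keys: [], argv: ['track', 'example-jid', Time.now.to_f])
    report = JSON.parse(redis.eval(script, keys: [], argv: []))

    # cjson encodes empty lists as objects, so guard before iterating
    Array(report['jobs']).each { |job| puts "#{job['jid']} (#{job['state']})" }
    puts "expired: #{Array(report['expired']).inspect}"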
@@ -0,0 +1,69 @@ data/lib/qless/qless-core/workers.lua
+ -- Workers(0, now, [worker])
+ -- -------------------------
+ -- Provide data about all the workers, or if a specific worker is provided, then
+ -- which jobs that worker is responsible for. If no worker is provided, expect a
+ -- response of the form:
+ --
+ --     [
+ --         # This is sorted by the recency of activity from that worker
+ --         {
+ --             'name'   : 'hostname1-pid1',
+ --             'jobs'   : 20,
+ --             'stalled': 0
+ --         }, {
+ --             ...
+ --         }
+ --     ]
+ --
+ -- If a worker id is provided, then expect a response of the form:
+ --
+ --     {
+ --         'jobs': [
+ --             jid1,
+ --             jid2,
+ --             ...
+ --         ], 'stalled': [
+ --             jid1,
+ --             ...
+ --         ]
+ --     }
+ --
+ if #KEYS > 0 then
+     error('Workers(): No key arguments expected')
+ end
+
+ local now = assert(tonumber(ARGV[1]), 'Workers(): Arg "now" missing or not a number: ' .. (ARGV[1] or 'nil'))
+
+ -- Clean up all the workers' job lists if they're too old. This is determined
+ -- by the `max-worker-age` configuration, defaulting to the last day. Seems
+ -- like a 'reasonable' default
+ local interval = tonumber(
+     redis.call('hget', 'ql:config', 'max-worker-age')) or 86400
+
+ local workers = redis.call('zrangebyscore', 'ql:workers', 0, now - interval)
+ for index, worker in ipairs(workers) do
+     redis.call('del', 'ql:w:' .. worker .. ':jobs')
+ end
+
+ -- And now remove them from the list of known workers
+ redis.call('zremrangebyscore', 'ql:workers', 0, now - interval)
+
+ if #ARGV == 1 then
+     local response = {}
+     local workers  = redis.call('zrevrange', 'ql:workers', 0, -1)
+     for index, worker in ipairs(workers) do
+         table.insert(response, {
+             name    = worker,
+             -- 8640000 seconds (100 days) acts as an effectively unbounded upper limit
+             jobs    = redis.call('zcount', 'ql:w:' .. worker .. ':jobs', now, now + 8640000),
+             stalled = redis.call('zcount', 'ql:w:' .. worker .. ':jobs', 0, now)
+         })
+     end
+     return cjson.encode(response)
+ else
+     local worker   = assert(ARGV[2], 'Workers(): Arg "worker" missing.')
+     local response = {
+         jobs    = redis.call('zrevrangebyscore', 'ql:w:' .. worker .. ':jobs', now + 8640000, now),
+         stalled = redis.call('zrevrangebyscore', 'ql:w:' .. worker .. ':jobs', now, 0)
+     }
+     return cjson.encode(response)
+ end
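
For completeness, workers.lua can be queried the same way. The worker name below is a placeholder and the script path an assumption; the guards account for cjson encoding empty tables as objects:

    require 'redis'
    require 'json'

    redis  = Redis.new
    script = File.read('lib/qless/qless-core/workers.lua')   # path is an assumption

    # Overview of all workers, most recently active first
    overview = JSON.parse(redis.eval(script, keys: [], argv: [Time.now.to_f]))
    Array(overview).each do |w|
      puts "#{w['name']}: #{w['jobs']} running, #{w['stalled']} stalled" if w.is_a?(Hash)
    end

    # Jobs and stalled jobs for one specific worker
    detail = JSON.parse(redis.eval(script, keys: [], argv: [Time.now.to_f, 'hostname1-pid1']))
    puts Array(detail['jobs']).length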
@@ -0,0 +1,141 @@ data/lib/qless/queue.rb
+ require "qless/lua"
+ require "qless/job"
+ require "redis"
+ require "json"
+
+ module Qless
+   class QueueJobs
+     def initialize(name, client)
+       @name   = name
+       @client = client
+     end
+
+     def running(start=0, count=25)
+       @client._jobs.call([], ['running', Time.now.to_f, @name, start, count])
+     end
+
+     def stalled(start=0, count=25)
+       @client._jobs.call([], ['stalled', Time.now.to_f, @name, start, count])
+     end
+
+     def scheduled(start=0, count=25)
+       @client._jobs.call([], ['scheduled', Time.now.to_f, @name, start, count])
+     end
+
+     def depends(start=0, count=25)
+       @client._jobs.call([], ['depends', Time.now.to_f, @name, start, count])
+     end
+
+     def recurring(start=0, count=25)
+       @client._jobs.call([], ['recurring', Time.now.to_f, @name, start, count])
+     end
+   end
+
+   class Queue
+     attr_reader   :name
+     attr_accessor :worker_name
+
+     def initialize(name, client)
+       @client = client
+       @name   = name
+       self.worker_name = Qless.worker_name
+     end
+
+     def jobs
+       @jobs ||= QueueJobs.new(@name, @client)
+     end
+
+     def counts
+       JSON.parse(@client._queues.call([], [Time.now.to_i, @name]))
+     end
+
+     def heartbeat
+       @client.config["#{@name}-heartbeat"]
+     end
+
+     def heartbeat=(value)
+       @client.config["#{@name}-heartbeat"] = value
+     end
+
+     # Put the described job in this queue
+     # Options include:
+     # => jid      (string; generated if not supplied)
+     # => priority (int)
+     # => tags     (array of strings)
+     # => delay    (int)
+     # => retries  (int)
+     # => depends  (array of jids this job depends on)
+     def put(klass, data, opts={})
+       opts = job_options(klass, data, opts)
+
+       @client._put.call([@name], [
+         (opts[:jid] or Qless.generate_jid),
+         klass.name,
+         JSON.generate(data),
+         Time.now.to_f,
+         opts.fetch(:delay, 0),
+         'priority', opts.fetch(:priority, 0),
+         'tags', JSON.generate(opts.fetch(:tags, [])),
+         'retries', opts.fetch(:retries, 5),
+         'depends', JSON.generate(opts.fetch(:depends, []))
+       ])
+     end
+
+     # Make a recurring job in this queue
+     # Options include:
+     # => priority (int)
+     # => tags     (array of strings)
+     # => retries  (int)
+     # => offset   (int)
+     def recur(klass, data, interval, opts={})
+       opts = job_options(klass, data, opts)
+
+       @client._recur.call([], [
+         'on',
+         @name,
+         (opts[:jid] or Qless.generate_jid),
+         klass.to_s,
+         JSON.generate(data),
+         Time.now.to_f,
+         'interval', interval, opts.fetch(:offset, 0),
+         'priority', opts.fetch(:priority, 0),
+         'tags', JSON.generate(opts.fetch(:tags, [])),
+         'retries', opts.fetch(:retries, 5)
+       ])
+     end
+
+     # Pop a work item off the queue
+     def pop(count=nil)
+       results = @client._pop.call([@name], [worker_name, (count || 1), Time.now.to_f]).map { |j| Job.new(@client, JSON.parse(j)) }
+       count.nil? ? results[0] : results
+     end
+
+     # Peek at a work item
+     def peek(count=nil)
+       results = @client._peek.call([@name], [(count || 1), Time.now.to_f]).map { |j| Job.new(@client, JSON.parse(j)) }
+       count.nil? ? results[0] : results
+     end
+
+     def stats(date=nil)
+       JSON.parse(@client._stats.call([], [@name, (date || Time.now.to_f)]))
+     end
+
+     # How many items are in the queue?
+     def length
+       (@client.redis.multi do
+         @client.redis.zcard("ql:q:#{@name}-locks")
+         @client.redis.zcard("ql:q:#{@name}-work")
+         @client.redis.zcard("ql:q:#{@name}-scheduled")
+       end).inject(0, :+)
+     end
+
+     def to_s
+       "#<Qless::Queue #{@name}>"
+     end
+     alias inspect to_s
+
+     private
+
+     def job_options(klass, data, opts)
+       return opts unless klass.respond_to?(:default_job_options)
+       klass.default_job_options(data).merge(opts)
+     end
+   end
+ end
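
Taken together, the Queue API above supports a workflow along these lines. MyJob and the queue name are placeholders, and the `client.queues[...]` accessor follows the style used by the web server further down:

    require 'qless'

    # Placeholder job class; the qless worker conventionally invokes a
    # class-level perform method with the popped job.
    class MyJob
      def self.perform(job)
        # do the work described by job.data
      end
    end

    client = Qless::Client.new         # default client, as in server.rb
    queue  = client.queues['example']

    queue.put(MyJob, { 'some' => 'data' },
              :priority => 10, :tags => ['urgent'], :retries => 3)
    queue.recur(MyJob, { 'nightly' => true }, 86400)   # interval, assumed in seconds

    job  = queue.pop                   # a single job (or nil) when no count is given
    jobs = queue.pop(5)                # an array when a count is given
    puts queue.length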
@@ -0,0 +1,411 @@ data/lib/qless/server.rb
+ require 'sinatra/base'
+ require 'qless'
+
+ # Much of this is shamelessly poached from the resque web client
+
+ module Qless
+   class Server < Sinatra::Base
+     # Path-y-ness
+     dir = File.dirname(File.expand_path(__FILE__))
+     set :views        , "#{dir}/server/views"
+     set :public_folder, "#{dir}/server/static"
+
+     # For debugging purposes at least, I want this
+     set :reload_templates, true
+
+     # I'm not sure what this option is -- I'll look it up later
+     # set :static, true
+
+     def self.client
+       @client ||= Qless::Client.new
+     end
+
+     def self.client=(client)
+       @client = client
+     end
+
+     helpers do
+       include Rack::Utils
+
+       def url_path(*path_parts)
+         [ path_prefix, path_parts ].join("/").squeeze('/')
+       end
+       alias_method :u, :url_path
+
+       def path_prefix
+         request.env['SCRIPT_NAME']
+       end
+
+       def tabs
+         return [
+           {:name => 'Queues' , :path => '/queues' },
+           {:name => 'Workers', :path => '/workers'},
+           {:name => 'Track'  , :path => '/track'  },
+           {:name => 'Failed' , :path => '/failed' },
+           {:name => 'Config' , :path => '/config' },
+           {:name => 'About'  , :path => '/about'  }
+         ]
+       end
+
+       def application_name
+         return Server.client.config['application']
+       end
+
+       def queues
+         return Server.client.queues.counts
+       end
+
+       def tracked
+         return Server.client.jobs.tracked
+       end
+
+       def workers
+         return Server.client.workers.counts
+       end
+
+       def failed
+         return Server.client.jobs.failed
+       end
+
+       # Return the supplied object back as JSON
+       def json(obj)
+         content_type :json
+         obj.to_json
+       end
+
+       # Make the id acceptable as an id / att in HTML
+       def sanitize_attr(attr)
+         return attr.gsub(/[^a-zA-Z\:\_]/, '-')
+       end
+
+       # What are the top tags? Since it might go on, say, every
+       # page, then we should probably be caching it
+       def top_tags
+         @top_tags ||= {
+           :top     => Server.client.tags,
+           :fetched => Time.now
+         }
+         if (Time.now - @top_tags[:fetched]) > 60 then
+           @top_tags = {
+             :top     => Server.client.tags,
+             :fetched => Time.now
+           }
+         end
+         @top_tags[:top]
+       end
+
+       def strftime(t)
+         # From http://stackoverflow.com/questions/195740/how-do-you-do-relative-time-in-rails
+         diff_seconds = Time.now - t
+         case diff_seconds
+         when 0 .. 59
+           "#{diff_seconds.to_i} seconds ago"
+         when 60 ... 3600
+           "#{(diff_seconds/60).to_i} minutes ago"
+         when 3600 ... 3600*24
+           "#{(diff_seconds/3600).to_i} hours ago"
+         when (3600*24) ... (3600*24*30)
+           "#{(diff_seconds/(3600*24)).to_i} days ago"
+         else
+           t.strftime('%b %e, %Y %H:%M:%S %Z (%z)')
+         end
+       end
+     end
+
+     get '/?' do
+       erb :overview, :layout => true, :locals => { :title => "Overview" }
+     end
+
+     # Returns a JSON blob with the job counts for various queues
+     get '/queues.json' do
+       json(Server.client.queues.counts)
+     end
+
+     get '/queues/?' do
+       erb :queues, :layout => true, :locals => {
+         :title => 'Queues'
+       }
+     end
+
+     # Return the job counts for a specific queue
+     get '/queues/:name.json' do
+       json(Server.client.queues[params[:name]].counts)
+     end
+
+     get '/queues/:name/?:tab?' do
+       queue = Server.client.queues[params[:name]]
+       tab   = params.fetch('tab', 'stats')
+       jobs  = []
+       case tab
+       when 'running'
+         jobs = queue.jobs.running
+       when 'scheduled'
+         jobs = queue.jobs.scheduled
+       when 'stalled'
+         jobs = queue.jobs.stalled
+       when 'depends'
+         jobs = queue.jobs.depends
+       when 'recurring'
+         jobs = queue.jobs.recurring
+       end
+       jobs = jobs.map { |jid| Server.client.jobs[jid] }
+       if tab == 'waiting'
+         jobs = queue.peek(20)
+       end
+       erb :queue, :layout => true, :locals => {
+         :title => "Queue #{params[:name]}",
+         :tab   => tab,
+         :jobs  => jobs,
+         :queue => Server.client.queues[params[:name]].counts,
+         :stats => queue.stats
+       }
+     end
+
+     get '/failed/?' do
+       # qless-core doesn't provide functionality this way, so we'll
+       # do it ourselves. I'm not sure if this is how the core library
+       # should behave or not.
+       erb :failed, :layout => true, :locals => {
+         :title  => 'Failed',
+         :failed => Server.client.jobs.failed.keys.map { |t| Server.client.jobs.failed(t).tap { |f| f['type'] = t } }
+       }
+     end
+
+     get '/failed/:type/?' do
+       erb :failed_type, :layout => true, :locals => {
+         :title  => 'Failed | ' + params[:type],
+         :type   => params[:type],
+         :failed => Server.client.jobs.failed(params[:type])
+       }
+     end
+
+     get '/track/?' do
+       erb :track, :layout => true, :locals => {
+         :title => 'Track'
+       }
+     end
+
+     get '/jobs/:jid' do
+       erb :job, :layout => true, :locals => {
+         :title => "Job | #{params[:jid]}",
+         :jid   => params[:jid],
+         :job   => Server.client.jobs[params[:jid]]
+       }
+     end
+
+     get '/workers/?' do
+       erb :workers, :layout => true, :locals => {
+         :title => 'Workers'
+       }
+     end
+
+     get '/workers/:worker' do
+       erb :worker, :layout => true, :locals => {
+         :title  => 'Worker | ' + params[:worker],
+         :worker => Server.client.workers[params[:worker]].tap { |w|
+           w['jobs']    = w['jobs'].map { |j| Server.client.jobs[j] }
+           w['stalled'] = w['stalled'].map { |j| Server.client.jobs[j] }
+           w['name']    = params[:worker]
+         }
+       }
+     end
+
+     get '/tag/?' do
+       jobs = Server.client.jobs.tagged(params[:tag])
+       erb :tag, :layout => true, :locals => {
+         :title => "Tag | #{params[:tag]}",
+         :tag   => params[:tag],
+         :jobs  => jobs['jobs'].map { |jid| Server.client.jobs[jid] },
+         :total => jobs['total']
+       }
+     end
+
+     get '/config/?' do
+       erb :config, :layout => true, :locals => {
+         :title   => 'Config',
+         :options => Server.client.config.all
+       }
+     end
+
+     get '/about/?' do
+       erb :about, :layout => true, :locals => {
+         :title => 'About'
+       }
+     end
+
+     # These are the bits where we accept AJAX requests
+     post "/track/?" do
+       # Expects a JSON-encoded hash with a job id, and optionally some tags
+       data = JSON.parse(request.body.read)
+       job  = Server.client.jobs[data["id"]]
+       if not job.nil?
+         data.fetch("tags", false) ? job.track(*data["tags"]) : job.track()
+         if request.xhr?
+           json({ :tracked => [job.jid] })
+         else
+           redirect to('/track')
+         end
+       else
+         if request.xhr?
+           json({ :tracked => [] })
+         else
+           redirect to(request.referrer)
+         end
+       end
+     end
+
+     post "/untrack/?" do
+       # Expects a JSON-encoded array of job ids to stop tracking
+       jobs = JSON.parse(request.body.read).map { |jid| Server.client.jobs[jid] }.select { |j| not j.nil? }
+       # Go ahead and untrack all the jobs!
+       jobs.each do |job|
+         job.untrack()
+       end
+       return json({ :untracked => jobs.map { |job| job.jid } })
+     end
+
+     post "/priority/?" do
+       # Expects a JSON-encoded dictionary of jid => priority
+       response = Hash.new
+       r = JSON.parse(request.body.read)
+       r.each_pair do |jid, priority|
+         begin
+           Server.client.jobs[jid].priority = priority
+           response[jid] = priority
+         rescue
+           response[jid] = 'failed'
+         end
+       end
+       return json(response)
+     end
+
+     post "/tag/?" do
+       # Expects a JSON-encoded dictionary of jid => [tag, tag, tag]
+       response = Hash.new
+       JSON.parse(request.body.read).each_pair do |jid, tags|
+         begin
+           Server.client.jobs[jid].tag(*tags)
+           response[jid] = tags
+         rescue
+           response[jid] = 'failed'
+         end
+       end
+       return json(response)
+     end
+
+     post "/untag/?" do
+       # Expects a JSON-encoded dictionary of jid => [tag, tag, tag]
+       response = Hash.new
+       JSON.parse(request.body.read).each_pair do |jid, tags|
+         begin
+           Server.client.jobs[jid].untag(*tags)
+           response[jid] = tags
+         rescue
+           response[jid] = 'failed'
+         end
+       end
+       return json(response)
+     end
+
+     post "/move/?" do
+       # Expects a JSON-encoded hash of id: jid, and queue: queue_name
+       data = JSON.parse(request.body.read)
+       if data["id"].nil? or data["queue"].nil?
+         halt 400, "Need id and queue arguments"
+       else
+         job = Server.client.jobs[data["id"]]
+         if job.nil?
+           halt 404, "Could not find job"
+         else
+           job.move(data["queue"])
+           return json({ :id => data["id"], :queue => data["queue"] })
+         end
+       end
+     end
+
+     post "/undepend/?" do
+       # Expects a JSON-encoded hash of id: jid, and dependency: jid
+       data = JSON.parse(request.body.read)
+       if data["id"].nil?
+         halt 400, "Need id"
+       else
+         job = Server.client.jobs[data["id"]]
+         if job.nil?
+           halt 404, "Could not find job"
+         else
+           job.undepend(data['dependency'])
+           return json({ :id => data["id"] })
+         end
+       end
+     end
+
+     post "/retry/?" do
+       # Expects a JSON-encoded hash with the job id
+       data = JSON.parse(request.body.read)
+       if data["id"].nil?
+         halt 400, "Need id"
+       else
+         job = Server.client.jobs[data["id"]]
+         if job.nil?
+           halt 404, "Could not find job"
+         else
+           queue = job.history[-1]["q"]
+           job.move(queue)
+           return json({ :id => data["id"], :queue => queue })
+         end
+       end
+     end
+
+     # Retry all the failures of a particular type
+     post "/retryall/?" do
+       # Expects a JSON-encoded hash of type: failure-type
+       data = JSON.parse(request.body.read)
+       if data["type"].nil?
+         halt 400, "Need type"
+       else
+         return json(Server.client.jobs.failed(data["type"], 0, 500)['jobs'].map do |job|
+           queue = job.history[-1]["q"]
+           job.move(queue)
+           { :id => job.jid, :queue => queue }
+         end)
+       end
+     end
+
+     post "/cancel/?" do
+       # Expects a JSON-encoded array of job ids to cancel
+       jobs = JSON.parse(request.body.read).map { |jid| Server.client.jobs[jid] }.select { |j| not j.nil? }
+       # Go ahead and cancel all the jobs!
+       jobs.each do |job|
+         job.cancel()
+       end
+
+       if request.xhr?
+         return json({ :canceled => jobs.map { |job| job.jid } })
+       else
+         redirect to(request.referrer)
+       end
+     end
+
+     post "/cancelall/?" do
+       # Expects a JSON-encoded hash of type: failure-type
+       data = JSON.parse(request.body.read)
+       if data["type"].nil?
+         halt 400, "Need type"
+       else
+         return json(Server.client.jobs.failed(data["type"])['jobs'].map do |job|
+           job.cancel()
+           { :id => job.jid }
+         end)
+       end
+     end
+
+     # start the server if this ruby file is executed directly
+     run! if app_file == $0
+   end
+ end
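
Since Server is a plain Sinatra::Base subclass, it can be mounted with any Rack server (the gem also ships a bin/qless-web executable). A minimal config.ru sketch, assuming the default client and a local Redis:

    # config.ru -- run with `rackup`
    require 'qless'
    require 'qless/server'

    # Point the UI at a specific client if needed:
    # Qless::Server.client = Qless::Client.new

    run Qless::Server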