qless 0.9.1 → 0.9.2

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (63)
  1. data/bin/install_phantomjs +7 -0
  2. data/lib/qless.rb +4 -0
  3. data/lib/qless/job.rb +40 -38
  4. data/lib/qless/qless-core/cancel.lua +9 -9
  5. data/lib/qless/qless-core/failed.lua +1 -1
  6. data/lib/qless/qless-core/peek.lua +22 -12
  7. data/lib/qless/qless-core/pop.lua +31 -16
  8. data/lib/qless/qless-core/recur.lua +12 -3
  9. data/lib/qless/server.rb +96 -66
  10. data/lib/qless/server/static/css/bootstrap-responsive.css +686 -0
  11. data/lib/qless/server/static/css/bootstrap-responsive.min.css +12 -0
  12. data/lib/qless/server/static/css/bootstrap.css +3991 -0
  13. data/lib/qless/server/static/css/bootstrap.min.css +689 -0
  14. data/lib/qless/server/static/css/codemirror.css +112 -0
  15. data/lib/qless/server/static/css/docs.css +819 -0
  16. data/lib/qless/server/static/css/jquery.noty.css +105 -0
  17. data/lib/qless/server/static/css/noty_theme_twitter.css +137 -0
  18. data/lib/qless/server/static/css/style.css +204 -0
  19. data/lib/qless/server/static/favicon.ico +0 -0
  20. data/lib/qless/server/static/img/glyphicons-halflings-white.png +0 -0
  21. data/lib/qless/server/static/img/glyphicons-halflings.png +0 -0
  22. data/lib/qless/server/static/js/bootstrap-alert.js +94 -0
  23. data/lib/qless/server/static/js/bootstrap-scrollspy.js +125 -0
  24. data/lib/qless/server/static/js/bootstrap-tab.js +130 -0
  25. data/lib/qless/server/static/js/bootstrap-tooltip.js +270 -0
  26. data/lib/qless/server/static/js/bootstrap-typeahead.js +285 -0
  27. data/lib/qless/server/static/js/bootstrap.js +1726 -0
  28. data/lib/qless/server/static/js/bootstrap.min.js +6 -0
  29. data/lib/qless/server/static/js/codemirror.js +2972 -0
  30. data/lib/qless/server/static/js/jquery.noty.js +220 -0
  31. data/lib/qless/server/static/js/mode/javascript.js +360 -0
  32. data/lib/qless/server/static/js/theme/cobalt.css +18 -0
  33. data/lib/qless/server/static/js/theme/eclipse.css +25 -0
  34. data/lib/qless/server/static/js/theme/elegant.css +10 -0
  35. data/lib/qless/server/static/js/theme/lesser-dark.css +45 -0
  36. data/lib/qless/server/static/js/theme/monokai.css +28 -0
  37. data/lib/qless/server/static/js/theme/neat.css +9 -0
  38. data/lib/qless/server/static/js/theme/night.css +21 -0
  39. data/lib/qless/server/static/js/theme/rubyblue.css +21 -0
  40. data/lib/qless/server/static/js/theme/xq-dark.css +46 -0
  41. data/lib/qless/server/views/_job.erb +219 -0
  42. data/lib/qless/server/views/_job_list.erb +8 -0
  43. data/lib/qless/server/views/_pagination.erb +7 -0
  44. data/lib/qless/server/views/about.erb +130 -0
  45. data/lib/qless/server/views/config.erb +14 -0
  46. data/lib/qless/server/views/failed.erb +48 -0
  47. data/lib/qless/server/views/failed_type.erb +18 -0
  48. data/lib/qless/server/views/job.erb +17 -0
  49. data/lib/qless/server/views/layout.erb +341 -0
  50. data/lib/qless/server/views/overview.erb +90 -0
  51. data/lib/qless/server/views/queue.erb +122 -0
  52. data/lib/qless/server/views/queues.erb +26 -0
  53. data/lib/qless/server/views/tag.erb +6 -0
  54. data/lib/qless/server/views/track.erb +69 -0
  55. data/lib/qless/server/views/worker.erb +34 -0
  56. data/lib/qless/server/views/workers.erb +14 -0
  57. data/lib/qless/version.rb +1 -1
  58. data/lib/qless/worker.rb +11 -2
  59. metadata +72 -6
  60. data/lib/qless/qless-core/ruby/lib/qless-core.rb +0 -1
  61. data/lib/qless/qless-core/ruby/lib/qless/core.rb +0 -13
  62. data/lib/qless/qless-core/ruby/lib/qless/core/version.rb +0 -5
  63. data/lib/qless/qless-core/ruby/spec/qless_core_spec.rb +0 -13
data/bin/install_phantomjs
@@ -0,0 +1,7 @@
+ #!/bin/bash
+
+ version=phantomjs-1.7.0-linux-i686
+ wget http://phantomjs.googlecode.com/files/$version.tar.bz2
+ tar xjf $version.tar.bz2
+ mv $version phantomjs
+
data/lib/qless.rb
@@ -153,6 +153,10 @@ module Qless
  @queues = ClientQueues.new(self)
  @workers = ClientWorkers.new(self)
  end
+
+ def inspect
+ "<Qless::Client #{@options} >"
+ end

  def events
  # Events needs its own redis instance of the same configuration, because
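The new Qless::Client#inspect keeps console output short: rather than dumping the whole client object graph (Redis connection, cached queues and workers), it prints only the options the client was constructed with. A minimal, hypothetical usage sketch (the connection options are illustrative):

    require 'qless'

    # Whatever options are passed to Qless::Client.new are what appear
    # in the inspect string, instead of a full object dump.
    client = Qless::Client.new(:host => 'localhost', :port => 6379)
    client.inspect  # => <Qless::Client {:host=>"localhost", :port=>6379} >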
data/lib/qless/job.rb
@@ -10,21 +10,21 @@ module Qless
  @client = client
  @jid = jid
  end
-
+
  def klass
  @klass ||= @klass_name.split('::').inject(Kernel) { |context, name| context.const_get(name) }
  end
-
+
  def queue
  @queue ||= Queue.new(@queue_name, @client)
  end
  end
-
+
  class Job < BaseJob
  attr_reader :jid, :expires_at, :state, :queue_name, :history, :worker_name, :failure, :klass_name, :tracked, :dependencies, :dependents
  attr_reader :original_retries, :retries_left
  attr_accessor :data, :priority, :tags
-
+
  def perform
  klass.perform(self)
  end
@@ -52,14 +52,14 @@ module Qless
  attributes["data"] = JSON.load(JSON.dump attributes["data"])
  new(client, attributes)
  end
-
+
  def initialize(client, atts)
  super(client, atts.fetch('jid'))
  %w{jid data priority tags state tracked
  failure history dependencies dependents}.each do |att|
  self.instance_variable_set("@#{att}".to_sym, atts.fetch(att))
  end
-
+
  @expires_at = atts.fetch('expires')
  @klass_name = atts.fetch('klass')
  @queue_name = atts.fetch('queue')
@@ -73,21 +73,21 @@ module Qless
  @dependencies = [] if @dependencies == {}
  @state_changed = false
  end
-
+
  def priority=(priority)
  if @client._priority.call([], [@jid, priority])
  @priority = priority
  end
  end
-
+
  def [](key)
  @data[key]
  end
-
+
  def []=(key, val)
  @data[key] = val
  end
-
+
  def to_s
  inspect
  end
@@ -95,15 +95,15 @@ module Qless
  def description
  "#{@jid} (#{@klass_name} / #{@queue_name})"
  end
-
+
  def inspect
  "<Qless::Job #{description}>"
  end
-
+
  def ttl
  @expires_at - Time.now.to_f
  end
-
+
  # Move this from it's current queue into another
  def move(queue)
  note_state_change do
@@ -112,7 +112,7 @@
  ])
  end
  end
-
+
  # Fail a job
  def fail(group, message)
  note_state_change do
@@ -124,7 +124,7 @@
  JSON.generate(@data)]) || false
  end
  end
-
+
  # Heartbeat a job
  def heartbeat()
  @client._heartbeat.call([], [
@@ -133,7 +133,7 @@
  Time.now.to_f,
  JSON.generate(@data)]) || false
  end
-
+
  # Complete a job
  # Options include
  # => next (String) the next queue
@@ -155,38 +155,40 @@ module Qless
  def state_changed?
  @state_changed
  end
-
+
  def cancel
  note_state_change do
  @client._cancel.call([], [@jid])
  end
  end
-
+
  def track()
  @client._track.call([], ['track', @jid, Time.now.to_f])
  end
-
+
  def untrack
  @client._track.call([], ['untrack', @jid, Time.now.to_f])
  end
-
+
  def tag(*tags)
  @client._tag.call([], ['add', @jid, Time.now.to_f] + tags)
  end
-
+
  def untag(*tags)
  @client._tag.call([], ['remove', @jid, Time.now.to_f] + tags)
  end
-
+
  def retry(delay=0)
- results = @client._retry.call([], [@jid, @queue_name, @worker_name, Time.now.to_f, delay])
- results.nil? ? false : results
+ note_state_change do
+ results = @client._retry.call([], [@jid, @queue_name, @worker_name, Time.now.to_f, delay])
+ results.nil? ? false : results
+ end
  end
-
+
  def depend(*jids)
  !!@client._depends.call([], [@jid, 'on'] + jids)
  end
-
+
  def undepend(*jids)
  !!@client._depends.call([], [@jid, 'off'] + jids)
  end
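The only behavioral change in the hunk above is that Job#retry is now wrapped in note_state_change, the same helper already used by move, fail, and cancel, so state_changed? returns true after a retry. A hedged sketch of why that matters inside a job's perform method (FlakyJob, do_work, and TransientError are illustrative names):

    class FlakyJob
      def self.perform(job)
        do_work(job.data)        # hypothetical unit of work
      rescue TransientError      # hypothetical error class
        # Re-queue the job with a 30-second delay. Because retry now goes
        # through note_state_change, job.state_changed? is true afterwards,
        # so code that auto-completes jobs after perform can check the flag
        # and avoid completing a job that was just retried.
        job.retry(30)
      end
    end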
@@ -199,59 +201,59 @@ module Qless
  result
  end
  end
-
+
  class RecurringJob < BaseJob
  attr_reader :jid, :data, :priority, :tags, :retries, :interval, :count, :queue_name, :klass_name
-
+
  def initialize(client, atts)
  super(client, atts.fetch('jid'))
  %w{jid data priority tags retries interval count}.each do |att|
  self.instance_variable_set("@#{att}".to_sym, atts.fetch(att))
  end
-
+
  @klass_name = atts.fetch('klass')
  @queue_name = atts.fetch('queue')
  @tags = [] if @tags == {}
  end
-
+
  def priority=(value)
  @client._recur.call([], ['update', @jid, 'priority', value])
  @priority = value
  end
-
+
  def retries=(value)
  @client._recur.call([], ['update', @jid, 'retries', value])
  @retries = value
  end
-
+
  def interval=(value)
  @client._recur.call([], ['update', @jid, 'interval', value])
  @interval = value
  end
-
+
  def data=(value)
  @client._recur.call([], ['update', @jid, 'data', JSON.generate(value)])
  @data = value
  end
-
+
  def klass=(value)
  @client._recur.call([], ['update', @jid, 'klass', value.to_s])
  @klass_name = value.to_s
  end
-
+
  def move(queue)
  @client._recur.call([], ['update', @jid, 'queue', queue])
  @queue_name = queue
  end
-
+
  def cancel
  @client._recur.call([], ['off', @jid])
  end
-
+
  def tag(*tags)
  @client._recur.call([], ['tag', @jid] + tags)
  end
-
+
  def untag(*tags)
  @client._recur.call([], ['untag', @jid] + tags)
  end
data/lib/qless/qless-core/cancel.lua
@@ -15,18 +15,18 @@ local jid = assert(ARGV[1], 'Cancel(): Arg "jid" missing.')
  local state, queue, failure, worker = unpack(redis.call('hmget', 'ql:j:' .. jid, 'state', 'queue', 'failure', 'worker'))

  if state == 'complete' then
- return False
+ return false
  else
  -- If this job has dependents, then we should probably fail
  if redis.call('scard', 'ql:j:' .. jid .. '-dependents') > 0 then
  error('Cancel(): ' .. jid .. ' has un-canceled jobs that depend on it')
  end
-
+
  -- Remove this job from whatever worker has it, if any
  if worker then
  redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)
  end
-
+
  -- Remove it from that queue
  if queue then
  redis.call('zrem', 'ql:q:' .. queue .. '-work', jid)
@@ -34,16 +34,16 @@ else
  redis.call('zrem', 'ql:q:' .. queue .. '-scheduled', jid)
  redis.call('zrem', 'ql:q:' .. queue .. '-depends', jid)
  end
-
+
  -- We should probably go through all our dependencies and remove ourselves
  -- from the list of dependents
  for i, j in ipairs(redis.call('smembers', 'ql:j:' .. jid .. '-dependencies')) do
  redis.call('srem', 'ql:j:' .. j .. '-dependents', jid)
  end
-
+
  -- Delete any notion of dependencies it has
  redis.call('del', 'ql:j:' .. jid .. '-dependencies')
-
+
  -- If we're in the failed state, remove all of our data
  if state == 'failed' then
  failure = cjson.decode(failure)
@@ -53,19 +53,19 @@ else
  redis.call('srem', 'ql:failures', failure.group)
  end
  end
-
+
  -- Remove it as a job that's tagged with this particular tag
  local tags = cjson.decode(redis.call('hget', 'ql:j:' .. jid, 'tags') or '{}')
  for i, tag in ipairs(tags) do
  redis.call('zrem', 'ql:t:' .. tag, jid)
  redis.call('zincrby', 'ql:tags', -1, tag)
  end
-
+
  -- If the job was being tracked, we should notify
  if redis.call('zscore', 'ql:tracked', jid) ~= false then
  redis.call('publish', 'canceled', jid)
  end
-
+
  -- Just go ahead and delete our data
  redis.call('del', 'ql:j:' .. jid)
  end
data/lib/qless/qless-core/failed.lua
@@ -43,7 +43,7 @@ if group then
  total = redis.call('llen', 'ql:f:' .. group),
  jobs = {}
  }
- local jids = redis.call('lrange', 'ql:f:' .. group, start, limit)
+ local jids = redis.call('lrange', 'ql:f:' .. group, start, limit - 1)
  for index, jid in ipairs(jids) do
  local job = redis.call(
  'hmget', 'ql:j:' .. jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority',
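The failed.lua change fixes an off-by-one: Redis's LRANGE stop argument is an inclusive index, not a count, so the old call pulled one extra jid per page; the fix trims the range by one. A small illustration of the inclusive stop index, assuming the redis Ruby gem and an illustrative key:

    require 'redis'

    redis = Redis.new
    redis.rpush('ql:f:example-group', %w[jid-1 jid-2 jid-3 jid-4 jid-5 jid-6])

    limit = 5
    # The stop index is inclusive, so 0..limit is limit + 1 elements.
    redis.lrange('ql:f:example-group', 0, limit).size       # => 6
    # The fixed call asks for 0..(limit - 1), exactly `limit` elements.
    redis.lrange('ql:f:example-group', 0, limit - 1).size   # => 5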
data/lib/qless/qless-core/peek.lua
@@ -34,7 +34,10 @@ end
  -- If we still need jobs in order to meet demand, then we should
  -- look for all the recurring jobs that need jobs run
  if #keys < count then
- local r = redis.call('zrangebyscore', key .. '-recur', 0, now)
+ -- This is how many jobs we've moved so far
+ local moved = 0
+ -- These are the recurring jobs that need work
+ local r = redis.call('zrangebyscore', key .. '-recur', 0, now, 'LIMIT', 0, count)
  for index, jid in ipairs(r) do
  -- For each of the jids that need jobs scheduled, first
  -- get the last time each of them was run, and then increment
@@ -43,8 +46,12 @@ if #keys < count then
  local klass, data, priority, tags, retries, interval = unpack(redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', 'tags', 'retries', 'interval'))
  local _tags = cjson.decode(tags)

- while math.floor(tonumber(redis.call('zscore', key .. '-recur', jid))) < now do
+ -- We're saving this value so that in the history, we can accurately
+ -- reflect when the job would normally have been scheduled
+ local score = math.floor(tonumber(redis.call('zscore', key .. '-recur', jid)))
+ while (score <= now) and (moved < (count - #keys)) do
  local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1)
+ moved = moved + 1

  -- Add this job to the list of jobs tagged with whatever tags were supplied
  for i, tag in ipairs(_tags) do
@@ -54,28 +61,31 @@ if #keys < count then

  -- First, let's save its data
  redis.call('hmset', 'ql:j:' .. jid .. '-' .. count,
- 'jid' , jid .. '-' .. count,
+ 'jid' , jid .. '-' .. count,
  'klass' , klass,
- 'data' , data,
- 'priority' , priority,
- 'tags' , tags,
- 'state' , 'waiting',
- 'worker' , '',
+ 'data' , data,
+ 'priority' , priority,
+ 'tags' , tags,
+ 'state' , 'waiting',
+ 'worker' , '',
  'expires' , 0,
- 'queue' , queue,
+ 'queue' , queue,
  'retries' , retries,
  'remaining', retries,
- 'history' , cjson.encode({{
+ 'history' , cjson.encode({{
+ -- The job was essentially put in this queue at this time,
+ -- and not the current time
  q = queue,
- put = math.floor(now)
+ put = math.floor(score)
  }}))

  -- Now, if a delay was provided, and if it's in the future,
  -- then we'll have to schedule it. Otherwise, we're just
  -- going to add it to the work queue.
- redis.call('zadd', key .. '-work', priority - (now / 10000000000), jid .. '-' .. count)
+ redis.call('zadd', key .. '-work', priority - (score / 10000000000), jid .. '-' .. count)

  redis.call('zincrby', key .. '-recur', interval, jid)
+ score = score + interval
  end
  end
  end
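Taken together, the peek.lua hunks above (the same change is applied to pop.lua below) rework how recurring jobs are materialized: zrangebyscore now uses LIMIT so only as many recurring jids as could possibly be needed are considered, a moved counter stops spawning once demand (count - #keys) is met, and each spawned job's history entry and work-queue score use the time it was originally due (score) rather than the current time, with score advanced by interval on every pass. A hedged Ruby sketch of the same catch-up logic, using illustrative names:

    # Model of the catch-up loop: `score` is when the recurring job should
    # have fired, `now` is the current time, and at most `demand` jobs are
    # spawned per peek/pop.
    def materialize_recurrences(score, interval, now, demand)
      spawned = []
      while score <= now && spawned.size < demand
        # Each spawn is stamped with the time it *should* have run,
        # not the time we happened to notice it was due.
        spawned << { :put => score.floor }
        score += interval
      end
      spawned
    end

    # A job due every 60 seconds whose last due time was five minutes ago,
    # with demand capped at 3, yields three spawns instead of six:
    now = Time.now.to_f
    materialize_recurrences(now - 300, 60, now, 3).size  # => 3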
data/lib/qless/qless-core/pop.lua
@@ -42,6 +42,10 @@ redis.call('zadd', 'ql:workers', now, worker)
  -- Iterate through all the expired locks and add them to the list
  -- of keys that we'll return
  for index, jid in ipairs(redis.call('zrangebyscore', key .. '-locks', 0, now, 'LIMIT', 0, count)) do
+ -- Remove this job from the jobs that the worker that was running it has
+ local w = redis.call('hget', 'ql:j:' .. jid, 'worker')
+ redis.call('zrem', 'ql:w:' .. w .. ':jobs', jid)
+
  -- For each of these, decrement their retries. If any of them
  -- have exhausted their retries, then we should mark them as
  -- failed.
@@ -82,10 +86,6 @@ for index, jid in ipairs(redis.call('zrangebyscore', key .. '-locks', 0, now, 'L
  redis.call('publish', 'stalled', jid)
  end
  end
-
- -- Remove this job from the jobs that the worker that was running it has
- local w = redis.call('hget', 'ql:j:' .. jid, 'worker')
- redis.call('zrem', 'ql:w:' .. w .. ':jobs', jid)
  end
  -- Now we've checked __all__ the locks for this queue the could
  -- have expired, and are no more than the number requested.
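These two pop.lua hunks move the cleanup of a stalled jid from the end of the expired-lock loop to its top: the job's worker field is read, and the jid removed from that worker's ql:w:<worker>:jobs set, before the rest of the iteration touches the job. A hedged Ruby restatement of the moved snippet (the jid is illustrative; the keys follow the scheme visible in the diff):

    require 'redis'

    redis = Redis.new
    jid = 'example-jid'   # illustrative

    # ql:w:<worker>:jobs is the sorted set of jids a worker currently holds.
    # Read the job's 'worker' field first, while it still names the worker
    # that let the job stall, then drop the jid from that worker's set.
    old_worker = redis.hget("ql:j:#{jid}", 'worker')
    redis.zrem("ql:w:#{old_worker}:jobs", jid) if old_worker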
@@ -97,7 +97,10 @@ redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'retries', #keys)
  -- If we still need jobs in order to meet demand, then we should
  -- look for all the recurring jobs that need jobs run
  if #keys < count then
- local r = redis.call('zrangebyscore', key .. '-recur', 0, now)
+ -- This is how many jobs we've moved so far
+ local moved = 0
+ -- These are the recurring jobs that need work
+ local r = redis.call('zrangebyscore', key .. '-recur', 0, now, 'LIMIT', 0, (count - #keys))
  for index, jid in ipairs(r) do
  -- For each of the jids that need jobs scheduled, first
  -- get the last time each of them was run, and then increment
@@ -106,7 +109,16 @@ if #keys < count then
  local klass, data, priority, tags, retries, interval = unpack(redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', 'tags', 'retries', 'interval'))
  local _tags = cjson.decode(tags)

- while math.floor(tonumber(redis.call('zscore', key .. '-recur', jid))) <= now do
+ -- We're saving this value so that in the history, we can accurately
+ -- reflect when the job would normally have been scheduled
+ local score = math.floor(tonumber(redis.call('zscore', key .. '-recur', jid)))
+
+ while (score <= now) and (moved < (count - #keys)) do
+ -- Increment the count of how many jobs we've moved from recurring
+ -- to 'work'
+ moved = moved + 1
+
+ -- the count'th job that we've moved from this recurring job
  local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1)

  -- Add this job to the list of jobs tagged with whatever tags were supplied
@@ -117,28 +129,31 @@ if #keys < count then

  -- First, let's save its data
  redis.call('hmset', 'ql:j:' .. jid .. '-' .. count,
- 'jid' , jid .. '-' .. count,
+ 'jid' , jid .. '-' .. count,
  'klass' , klass,
- 'data' , data,
- 'priority' , priority,
- 'tags' , tags,
- 'state' , 'waiting',
- 'worker' , '',
+ 'data' , data,
+ 'priority' , priority,
+ 'tags' , tags,
+ 'state' , 'waiting',
+ 'worker' , '',
  'expires' , 0,
- 'queue' , queue,
+ 'queue' , queue,
  'retries' , retries,
  'remaining', retries,
- 'history' , cjson.encode({{
+ 'history' , cjson.encode({{
+ -- The job was essentially put in this queue at this time,
+ -- and not the current time
  q = queue,
- put = math.floor(now)
+ put = math.floor(score)
  }}))

  -- Now, if a delay was provided, and if it's in the future,
  -- then we'll have to schedule it. Otherwise, we're just
  -- going to add it to the work queue.
- redis.call('zadd', key .. '-work', priority - (now / 10000000000), jid .. '-' .. count)
+ redis.call('zadd', key .. '-work', priority - (score / 10000000000), jid .. '-' .. count)

  redis.call('zincrby', key .. '-recur', interval, jid)
+ score = score + interval
  end
  end
  end
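One detail worth calling out in the zadd line: the work-queue score is priority - (timestamp / 10000000000), so the timestamp acts only as a tiny tie-breaker between jobs of equal priority. Using the originally due score instead of now means catch-up spawns of a recurring job keep distinct, correctly ordered tie-breakers instead of all sharing the current time. A small arithmetic sketch (the timestamps are illustrative):

    # Tie-break term: priority - (timestamp / 1e10), for two equal-priority
    # spawns of the same recurring job that were due 60 seconds apart.
    earlier = 0 - (1_600_000_000.0 / 10_000_000_000)   # => -0.16
    later   = 0 - (1_600_000_060.0 / 10_000_000_000)   # => -0.160000006
    earlier > later   # => true: the spawn that was due first gets the
                      # slightly higher score, preserving the order in
                      # which the spawns were due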