sideq 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG +7 -0
- data/README.md +2 -2
- data/bin/sideq +46 -331
- data/lib/sideq.rb +6 -0
- data/lib/sideq/dead.rb +88 -0
- data/lib/sideq/processes.rb +36 -0
- data/lib/sideq/queue.rb +82 -0
- data/lib/sideq/retries.rb +97 -0
- data/lib/sideq/stats.rb +31 -0
- data/lib/sideq/workers.rb +19 -0
- metadata +10 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 95870eb124f8f2adde844e2f60e789792f1bd625
+  data.tar.gz: ad25d13479358fa547fe22a73835aed32989e2ca
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 811fd2cbbbc2a3f8998418e9fe1e7a83dd979aaac7c7ea966d00656d7aa7f071ca76e3ad92c043ed489bbbff481c55b5e6703209aa722ae7bf79fcaf24982fb6
+  data.tar.gz: d411abb0ca7d411b58efbdb9c1db0d0a3137b2b05845c7a2b01ba21eb609b0ada923ebd33ca496f7f904ccaf6960974521a6d3a023d67455dbadb4e774f2793d
data/CHANGELOG
ADDED
data/README.md
CHANGED
data/bin/sideq
CHANGED
@@ -4,6 +4,8 @@ require 'optparse'
 require 'sidekiq'
 require 'sidekiq/api'
 
+require_relative '../lib/sideq'
+
 RACK_ENV = ENV['RACK_ENV'] || "development"
 
 class Parser
@@ -16,24 +18,28 @@ class Parser
     opts.banner = "Usage: #{$0} [options] command [subcommand]\n" <<
                   "\n" <<
                   "Commands and subcommands:\n" <<
-                  "stats
-                  "queue list
-                  "
-                  "
-                  "
-                  "
-                  "retry
-                  "retry
-                  "
-                  "
-                  "
-                  "
-                  "dead
-                  "
-                  "
-                  "
-                  "
-                  "
+                  "stats                          Print sidekiq statistics\n" <<
+                  "queue list                     List all known queues\n" <<
+                  "queue list name                List contents of named queue\n" <<
+                  "queue show name jid [jid...]   Show details of jobs in named queue\n" <<
+                  "queue del name jid [jid...]    Delete jobs from the named queue\n" <<
+                  "queue clear name               Clear all jobs from the named queue\n" <<
+                  "retry list                     List contents of the retry set\n" <<
+                  "retry show jid [jid ...]       Show details of entrie sin the retry set\n" <<
+                  "retry del jid [jid ...]        Delete entries from the retry set\n" <<
+                  "retry kill jid [jid ...]       Move jobs from the retry set to the dead set\n" <<
+                  "retry now jid [jid ...]        Retry jobs in the retry set right now\n" <<
+                  "retry clear                    Clears all entries in the retry set\n" <<
+                  "dead list                      List contents of the dead set\n" <<
+                  "dead show jid [jid...]         Show details of entries in the dead set\n" <<
+                  "dead del jid [jid...]          Delete jobs from the dead set\n" <<
+                  "dead now jid [jid...]          Retry jobs from the dead set right now\n" <<
+                  "dead clear                     Clears all entries of the dead set\n" <<
+                  "processes list                 Lists all processes known to sidekiq\n" <<
+                  "processes quiet                Send the quiet signal to all sidekiq processes\n" <<
+                  "processes kill                 Send the kill signal to all sidekiq processes\n" <<
+                  "processes clean                Clear dead process entries from the process list\n" <<
+                  "workers list                   List all workers\n"
     opts.separator "\nOptions:\n"
 
     opts.on("-n redisdb", "--database=redisdb", "Number of the redis database") do |n|
@@ -63,311 +69,6 @@ class Parser
   end
 end
 
-class Stats
-  attr_reader :stats, :retry_set, :dead_set
-
-  def initialize
-    @stats = Sidekiq::Stats.new
-    @retry_set = Sidekiq::RetrySet.new
-    @dead_set = Sidekiq::DeadSet.new
-  end
-
-  def to_s
-    stat_ary = [ "Processed: #{stats.processed}",
-                 "Failed: #{stats.failed}",
-                 "Scheduled size: #{stats.scheduled_size}",
-                 "Retry size: #{stats.retry_size}",
-                 "Dead size: #{stats.dead_size}",
-                 "Enqueued: #{stats.enqueued}",
-                 "Processes: #{stats.processes_size}",
-                 "Workers: #{stats.workers_size}",
-                 "Default queue latency: #{stats.default_queue_latency}",
-
-                 "Queues: dead: #{dead_set.size}",
-                 " retry: #{retry_set.size}" ]
-    stats.queues.each do |(queue_name, queue_size)|
-      stat_ary << " #{queue_name}: #{queue_size}"
-    end
-
-    stat_ary.join( "\n" )
-  end
-end
-
-class Queues
-  attr_reader :retry_set, :dead_set
-
-  def initialize
-    @retry_set = Sidekiq::RetrySet.new
-    @dead_set = Sidekiq::DeadSet.new
-  end
-
-  def to_s
-    ary = Sidekiq::Queue.all.each_with_object( [] ) do |queue, memo|
-      memo << sprintf( "%-30s %5d (%8.2f s latency), %spaused",
-                       queue.name,
-                       queue.size,
-                       queue.latency,
-                       queue.paused? ? '' : "not " )
-    end
-    ary << sprintf( "%-30s %5d", "retry", retry_set.size )
-    ary << sprintf( "%-30s %5d", "dead", dead_set.size )
-    ary.join( "\n" )
-  end
-end
-
-class Retries
-  attr_reader :retry_set
-
-  def initialize
-    @retry_set = Sidekiq::RetrySet.new
-  end
-
-  def to_s
-    retry_set.each_with_object( [ "Retry entries: #{retry_set.size}" ] ) do |job, memo|
-      memo << sprintf( "%24s - %19s\n %-22s - %-37s\n e: %19s - f: %19s\n retry (%2d) at %-19s Continue retries?: %s\n %s\n",
-                       job.jid,
-                       job.created_at.strftime( "%F %T" ),
-                       job.display_class,
-                       job.item["error_class"],
-                       job.enqueued_at.strftime( "%F %T" ),
-                       Time.at( job.item["failed_at"] ).strftime( "%F %T" ),
-                       job.item["retry_count"],
-                       job.item["retried_at"] ? Time.at( job.item["retried_at"] ).strftime( "%F %T" ) : "never",
-                       job.item["retry"],
-                       "#{job.item["error_class"]}: #{job.item["error_message"][0,77-job.item["error_class"].size]}" )
-    end.join( "\n" )
-  end
-
-  def details( job_ids )
-    retry_set.each_with_object( [] ) do |job, memo|
-      next unless job_ids.include?( job.jid )
-      memo << job_details( job )
-    end.join( "\n\n" )
-  end
-
-  def delete_entries( job_ids )
-    deleted = 0
-    job_ids.each do |job_id|
-      # TODO: Inefficient in the free(beer) sidekiq version;
-      #       find something more efficient here (sr 2016-04-06)
-      job = retry_set.find_job( job_id )
-      if job
-        job.delete
-        puts "#{job_id}: deleted"
-        deleted += 1
-      else
-        puts "#{job_id}: not found"
-      end
-    end
-    puts "Retry Set: Deleted #{deleted} entries"
-  end
-
-  def kill_entries( job_ids )
-    killed = 0
-    job_ids.each do |job_id|
-      # TODO: Inefficient in the free(beer) sidekiq version;
-      #       find something more efficient here (sr 2016-04-06)
-      job = retry_set.find_job( job_id )
-      if job
-        begin
-          job.kill
-          puts "#{job_id}: moved to dead set"
-          killed += 1
-        rescue
-          puts "#{job_id}: failed - #{$!.message}"
-        end
-      else
-        puts "#{job_id}: not found"
-      end
-    end
-
-    puts "Retry Set: Moved #{killed} entries to Dead Set"
-  end
-
-  def retry_entries( job_ids )
-    retried = 0
-    job_ids.each do |job_id|
-      # TODO: Inefficient in the free(beer) sidekiq version;
-      #       find something more efficient here (sr 2016-04-06)
-      job = retry_set.find_job( job_id )
-      if job
-        begin
-          job.retry
-          puts "#{job_id}: retrying"
-          retried += 1
-        rescue
-          puts "#{job_id}: failed - #{$!.message}"
-        end
-      else
-        puts "#{job_id}: not found"
-      end
-    end
-
-    puts "Retry Set: Retried #{retried} entries"
-  end
-
-  def clear
-    puts "Retry Set: Deleted #{retry_set.clear} entries"
-  end
-
-  protected
-  def job_details( job )
-    [ "JobID: #{job.jid}",
-      "Created at: #{job.created_at.strftime( "%F %T" )}",
-      "Enqueued at: #{job.enqueued_at.strftime( "%F %T")}",
-      "Worker class: #{job.display_class}",
-      "Arguments: #{job.display_args}",
-      "Failed at: #{Time.at( job.item["failed_at"] ).strftime( "%F %T" )}",
-      "Retried at: #{job.item["retried_at"] ? Time.at( job.item["retried_at"] ).strftime( "%F %T" ) : "never"}",
-      "Retries: #{job.item["retry_count"]}",
-      "Retry?: #{job.item["retry"]}",
-      "Error Class: #{job.item["error_class"]}",
-      "Error Message: #{job.item["error_message"]}" ].join( "\n" )
-  end
-end
-
-class Dead
-  attr_reader :dead_set
-
-  def initialize
-    @dead_set = Sidekiq::DeadSet.new
-  end
-
-  def to_s
-    dead_set.each_with_object( [ "Dead entries: #{dead_set.size}" ] ) do |job, memo|
-      memo << sprintf( "%24s - %19s\n %-22s - %-37s\n e: %19s - f: %19s\n retry (%2d) at %-19s Continue retries?: %s\n %s\n",
-                       job.jid,
-                       job.created_at.strftime( "%F %T" ),
-                       job.display_class,
-                       job.item["error_class"],
-                       job.enqueued_at.strftime( "%F %T" ),
-                       Time.at( job.item["failed_at"] ).strftime( "%F %T" ),
-                       job.item["retry_count"],
-                       job.item["retried_at"] ? Time.at( job.item["retried_at"] ).strftime( "%F %T" ) : "never",
-                       job.item["retry"],
-                       "#{job.item["error_class"]}: #{job.item["error_message"][0,77-job.item["error_class"].size]}" )
-    end.join( "\n" )
-  end
-
-  def details( job_ids )
-    dead_set.each_with_object( [] ) do |job, memo|
-      next unless job_ids.include?( job.jid )
-      memo << job_details( job )
-    end.join( "\n\n" )
-  end
-
-  def delete_entries( job_ids )
-    deleted = 0
-    job_ids.each do |job_id|
-      # TODO: Inefficient in the free(beer) sidekiq version;
-      #       find something more efficient here (sr 2016-04-06)
-      job = dead_set.find_job( job_id )
-      if job
-        job.delete
-        puts "#{job_id}: deleted"
-        deleted += 1
-      else
-        puts "#{job_id}: not found"
-      end
-    end
-    puts "Dead Set: Deleted #{deleted} entries"
-  end
-
-  def retry_entries( job_ids )
-    retried = 0
-    job_ids.each do |job_id|
-      # TODO: Inefficient in the free(beer) sidekiq version;
-      #       find something more efficient here (sr 2016-04-06)
-      job = dead_set.find_job( job_id )
-      if job
-        begin
-          job.retry
-          puts "#{job_id}: retrying"
-          retried += 1
-        rescue
-          puts "#{job_id}: failed - #{$!.message}"
-        end
-      else
-        puts "#{job_id}: not found"
-      end
-    end
-
-    puts "Dead Set: Retried #{retried} entries"
-  end
-
-  def clear
-    puts "Dead Set: Deleted #{dead_set.clear} entries"
-  end
-
-  protected
-  def job_details( job )
-    [ "JobID: #{job.jid}",
-      "Created at: #{job.created_at.strftime( "%F %T" )}",
-      "Enqueued at: #{job.enqueued_at.strftime( "%F %T")}",
-      "Worker class: #{job.display_class}",
-      "Arguments: #{job.display_args}",
-      "Failed at: #{Time.at( job.item["failed_at"] ).strftime( "%F %T" )}",
-      "Retried at: #{job.item["retried_at"] ? Time.at( job.item["retried_at"] ).strftime( "%F %T" ) : "never"}",
-      "Retries: #{job.item["retry_count"]}",
-      "Retry?: #{job.item["retry"]}",
-      "Error Class: #{job.item["error_class"]}",
-      "Error Message: #{job.item["error_message"]}"
-    ].join( "\n" )
-  end
-end
-
-class Processes
-  attr_reader :process_set
-
-  def initialize
-    @process_set = Sidekiq::ProcessSet.new
-  end
-
-  def to_s
-    process_set.each_with_object( ["Processes: #{process_set.size}"] ) do |process, memo|
-      memo << process.inspect
-    end.join( "\n" )
-  end
-
-  def quiet
-    size = process_set.size
-    process_set.each do |process|
-      process.quiet!
-    end
-    puts "Quieted #{size} processes"
-  end
-
-  def kill
-    size = process_set.size
-    process_set.each do |process|
-      process.kill!
-    end
-    puts "Killed #{size} processes"
-  end
-
-  def clean
-    cleaned_up = Sidekiq::ProcessSet.cleanup
-    puts "Cleaned up #{cleaned_up} processes"
-  end
-end
-
-class Workers
-  attr_reader :worker_set
-
-  def initialize
-    @worker_set = Sidekiq::Workers.new
-  end
-
-  def to_s
-    ary = [ "Workers: #{worker_set.size}" ]
-
-    worker_set.each do |key, tid, json|
-      ary << sprintf( "%15s %15s %20s\n", key, tid, json )
-    end
-
-    ary.join( "\n" )
-  end
-end
 
 options = Parser.parse( ARGV )
 
@@ -376,17 +77,31 @@ Sidekiq.configure_client do |config|
   config.redis = { :url => url, :size => 1 }
 end
 
-
-
+object = ARGV.shift
+case object
+when "stats" then puts Sideq::Stats.new
 
-when "
+when "queues"
   case ARGV.shift
-  when "list"
+  when "list" then puts Sideq::Queue.overview
+  else Parser.parse( %w[ --help ] )
+  end
+
+when "queue"
+  command = ARGV.shift
+  queue_name = ARGV.shift
+  queue = Sideq::Queue.new( queue_name )
+
+  case command
+  when "list" then puts queue
+  when "show" then puts queue.details( ARGV )
+  when "del" then queue.delete_entries( ARGV )
+  when "clear" then queue.clear
   else Parser.parse( %w[ --help ] )
   end
 
 when "retry"
-  retries = Retries.new
+  retries = Sideq::Retries.new
 
   case ARGV.shift
   when "list" then puts retries
@@ -399,7 +114,7 @@ case ARGV.shift
   end
 
 when "dead"
-  dead = Dead.new
+  dead = Sideq::Dead.new
 
   case ARGV.shift
   when "list" then puts dead
@@ -411,7 +126,7 @@ case ARGV.shift
   end
 
 when "processes"
-  processes = Processes.new
+  processes = Sideq::Processes.new
 
   case ARGV.shift
   when "list" then puts processes
@@ -422,7 +137,7 @@ case ARGV.shift
   end
 
 when "workers"
-  workers = Workers.new
+  workers = Sideq::Workers.new
   case ARGV.shift
   when "list" then puts workers
   else Parser.parse( %w[ --help ] )
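
Taken together, the bin/sideq changes drop the in-script helper classes in favour of require_relative '../lib/sideq' plus a thin dispatcher over the new Sideq::* classes. The same classes can be driven outside the CLI; the following is a minimal sketch (not part of the diff), assuming a reachable Redis, the gem's lib directory on the load path, and a placeholder Redis URL:

    require 'sidekiq'
    require 'sidekiq/api'
    require 'sideq'            # the same entry point bin/sideq pulls in via require_relative

    Sidekiq.configure_client do |config|
      config.redis = { :url => "redis://localhost:6379/0", :size => 1 }   # placeholder URL
    end

    puts Sideq::Stats.new      # what `sideq stats` prints
    puts Sideq::Queue.overview # what `sideq queues list` prints
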
data/lib/sideq.rb
ADDED
data/lib/sideq/dead.rb
ADDED
@@ -0,0 +1,88 @@
+module Sideq
+  class Dead
+    attr_reader :dead_set
+
+    def initialize
+      @dead_set = Sidekiq::DeadSet.new
+    end
+
+    def to_s
+      dead_set.each_with_object( [ "Dead entries: #{dead_set.size}" ] ) do |job, memo|
+        memo << sprintf( "%24s - %19s\n %-22s - %-37s\n e: %19s - f: %19s\n retry (%2d) at %-19s Continue retries?: %s\n %s\n",
+                         job.jid,
+                         job.created_at.strftime( "%F %T" ),
+                         job.display_class,
+                         job["error_class"],
+                         job.enqueued_at.strftime( "%F %T" ),
+                         Time.at( job["failed_at"] ).strftime( "%F %T" ),
+                         job["retry_count"],
+                         job["retried_at"] ? Time.at( job["retried_at"] ).strftime( "%F %T" ) : "never",
+                         job["retry"],
+                         "#{job["error_class"]}: #{job["error_message"][0,77-job["error_class"].size]}" )
+      end.join( "\n" )
+    end
+
+    def details( job_ids )
+      dead_set.each_with_object( [] ) do |job, memo|
+        next unless job_ids.include?( job.jid )
+        memo << job_details( job )
+      end.join( "\n\n" )
+    end
+
+    def delete_entries( job_ids )
+      deleted = 0
+      each_job( job_ids ) do |job|
+        job.delete
+        puts "#{job_id}: deleted"
+        deleted += 1
+      end
+      puts "Dead Set: Deleted #{deleted} entries"
+    end
+
+    def retry_entries( job_ids )
+      retried = 0
+      each_job( job_ids ) do |job|
+        job.retry
+        puts "#{job_id}: retrying"
+        retried += 1
+      end
+
+      puts "Dead Set: Retried #{retried} entries"
+    end
+
+    def clear
+      puts "Dead Set: Deleted #{dead_set.clear} entries"
+    end
+
+    protected
+    def each_job( job_ids )
+      job_ids.each do |job_id|
+        job = dead_set.find_job( job_id )
+        if job
+          begin
+            yield( job )
+          rescue
+            puts "#{job_id}: failed - #{$!.message}"
+          end
+        else
+          puts "#{job_id}: not found"
+        end
+      end
+    end
+
+    def job_details( job )
+      [ "JobID: #{job.jid}",
+        "Created at: #{job.created_at.strftime( "%F %T" )}",
+        "Enqueued at: #{job.enqueued_at.strftime( "%F %T")}",
+        "Worker class: #{job.display_class}",
+        "Arguments: #{job.display_args}",
+        "Failed at: #{Time.at( job["failed_at"] ).strftime( "%F %T" )}",
+        "Retried at: #{job["retried_at"] ? Time.at( job["retried_at"] ).strftime( "%F %T" ) : "never"}",
+        "Retries: #{job["retry_count"]}",
+        "Retry?: #{job["retry"]}",
+        "Error Class: #{job["error_class"]}",
+        "Error Message: #{job["error_message"]}"
+      ].join( "\n" )
+    end
+  end
+end
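
Compared with the old Dead class that lived in bin/sideq, the per-jid lookup and error handling is now concentrated in the protected each_job helper: it resolves each requested jid with find_job, yields the jobs that exist, and prints a message for jids that are missing or raise. A condensed illustration of that pattern (simplified, not the gem's exact code):

    # Simplified sketch of the each_job pattern shared by Sideq::Dead,
    # Sideq::Retries and Sideq::Queue (illustrative only).
    def each_job( set, job_ids )
      job_ids.each do |job_id|
        job = set.find_job( job_id )   # DeadSet/RetrySet/Queue all respond to find_job
        if job
          begin
            yield( job )               # e.g. job.delete, job.retry, job.kill
          rescue => e
            puts "#{job_id}: failed - #{e.message}"
          end
        else
          puts "#{job_id}: not found"
        end
      end
    end
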
data/lib/sideq/processes.rb
ADDED
@@ -0,0 +1,36 @@
+module Sideq
+  class Processes
+    attr_reader :process_set
+
+    def initialize
+      @process_set = Sidekiq::ProcessSet.new
+    end
+
+    def to_s
+      process_set.each_with_object( ["Processes: #{process_set.size}"] ) do |process, memo|
+        memo << process.inspect
+      end.join( "\n" )
+    end
+
+    def quiet
+      size = process_set.size
+      process_set.each do |process|
+        process.quiet!
+      end
+      puts "Quieted #{size} processes"
+    end
+
+    def kill
+      size = process_set.size
+      process_set.each do |process|
+        process.kill!
+      end
+      puts "Killed #{size} processes"
+    end
+
+    def clean
+      cleaned_up = Sidekiq::ProcessSet.cleanup
+      puts "Cleaned up #{cleaned_up} processes"
+    end
+  end
+end
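
As a usage note (not part of the diff), the processes subcommands listed in the bin/sideq banner map directly onto this class:

    processes = Sideq::Processes.new
    puts processes        # sideq processes list
    processes.quiet       # sideq processes quiet  (send the quiet signal)
    processes.kill        # sideq processes kill   (send the kill signal)
    processes.clean       # sideq processes clean  (clear dead process entries)
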
data/lib/sideq/queue.rb
ADDED
@@ -0,0 +1,82 @@
+module Sideq
+  class Queue
+    attr_reader :queue, :name
+
+    def self.overview
+      retry_set = Sidekiq::RetrySet.new
+      dead_set = Sidekiq::DeadSet.new
+
+      ary = Sidekiq::Queue.all.each_with_object( [] ) do |queue, memo|
+        memo << sprintf( "%-30s %5d (%8.2f s latency), %spaused",
+                         queue.name,
+                         queue.size,
+                         queue.latency,
+                         queue.paused? ? '' : "not " )
+      end
+      ary << sprintf( "%-30s %5d", "retry", retry_set.size )
+      ary << sprintf( "%-30s %5d", "dead", dead_set.size )
+      ary.join( "\n" )
+    end
+
+    def initialize( name )
+      @name = name
+      @queue = Sidekiq::Queue.new( name )
+    end
+
+    def to_s
+      queue.each_with_object( [ "Queue entries: #{queue.size}" ] ) do |job, memo|
+        memo << sprintf( "%24s - %19s\n %-77s\n e: %19s - lat: %19s\n",
+                         job.jid,
+                         job.created_at.strftime( "%F %T" ),
+                         job.display_class,
+                         job.enqueued_at.strftime( "%F %T" ),
+                         job.latency )
+      end.join( "\n" )
+    end
+
+    def details( job_ids )
+      queue.each_with_object( [] ) do |job, memo|
+        next unless job_ids.include?( job.jid )
+        memo << job_details( job )
+      end.join( "\n\n" )
+    end
+
+    def delete_entries( job_ids )
+      deleted = 0
+      each_job( job_ids ) do |job|
+        job.delete
+        puts "#{job_id}: deleted"
+        deleted += 1
+      end
+    end
+
+    def clear
+      puts "Queue #{name}: Deleted #{queue.clear.first} entries"
+    end
+
+    protected
+    def each_job( job_ids )
+      job_ids.each do |job_id|
+        job = queue.find_job( job_id )
+        if job
+          begin
+            yield( job )
+          rescue
+            puts "#{job_id}: failed - #{$!.message}"
+          end
+        else
+          puts "#{job_id}: not found"
+        end
+      end
+    end
+
+    def job_details( job )
+      [ "JobID: #{job.jid}",
+        "Created at: #{job.created_at.strftime( "%F %T" )}",
+        "Enqueued at: #{job.enqueued_at.strftime( "%F %T")}",
+        "Latency: #{job.latency}",
+        "Worker class: #{job.display_class}",
+        "Arguments: #{job.display_args}" ].join( "\n" )
+    end
+  end
+end
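
Likewise, the queues/queue subcommands are thin wrappers around Sideq::Queue; a short sketch (not part of the diff, with a placeholder queue name and jid):

    puts Sideq::Queue.overview                     # sideq queues list

    queue = Sideq::Queue.new( "mailers" )          # "mailers" is a placeholder name
    puts queue                                     # sideq queue list mailers
    puts queue.details( [ "some24charjid" ] )      # sideq queue show mailers <jid>
    queue.delete_entries( [ "some24charjid" ] )    # sideq queue del mailers <jid>
    queue.clear                                    # sideq queue clear mailers
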
data/lib/sideq/retries.rb
ADDED
@@ -0,0 +1,97 @@
+module Sideq
+  class Retries
+    attr_reader :retry_set
+
+    def initialize
+      @retry_set = Sidekiq::RetrySet.new
+    end
+
+    def to_s
+      retry_set.each_with_object( [ "Retry entries: #{retry_set.size}" ] ) do |job, memo|
+        memo << sprintf( "%24s - %19s\n %-22s - %-37s\n e: %19s - f: %19s\n retry (%2d) at %-19s Continue retries?: %s\n %s\n",
+                         job.jid,
+                         job.created_at.strftime( "%F %T" ),
+                         job.display_class,
+                         job["error_class"],
+                         job.enqueued_at.strftime( "%F %T" ),
+                         Time.at( job["failed_at"] ).strftime( "%F %T" ),
+                         job["retry_count"],
+                         job["retried_at"] ? Time.at( job["retried_at"] ).strftime( "%F %T" ) : "never",
+                         job["retry"],
+                         "#{job["error_class"]}: #{job["error_message"][0,77-job["error_class"].size]}" )
+      end.join( "\n" )
+    end
+
+    def details( job_ids )
+      retry_set.each_with_object( [] ) do |job, memo|
+        next unless job_ids.include?( job.jid )
+        memo << job_details( job )
+      end.join( "\n\n" )
+    end
+
+    def delete_entries( job_ids )
+      deleted = 0
+      each_job( job_ids ) do |job_id|
+        job.delete
+        puts "#{job_id}: deleted"
+        deleted += 1
+      end
+      puts "Retry Set: Deleted #{deleted} entries"
+    end
+
+    def kill_entries( job_ids )
+      killed = 0
+      each_job( job_ids ) do |job|
+        job.kill
+        puts "#{job_id}: moved to dead set"
+        killed += 1
+      end
+      puts "Retry Set: Moved #{killed} entries to Dead Set"
+    end
+
+    def retry_entries( job_ids )
+      retried = 0
+      each_job( job_ids ) do |job|
+        job.retry
+        puts "#{job_id}: retrying"
+        retried += 1
+      end
+      puts "Retry Set: Retried #{retried} entries"
+    end
+
+    def clear
+      puts "Retry Set: Deleted #{retry_set.clear} entries"
+    end
+
+    protected
+    def each_job( job_ids )
+      job_ids.each do |job_id|
+        job = retry_set.find_job( job_id )
+        if job
+          begin
+            yield( job )
+          rescue
+            puts "#{job_id}: failed - #{$!.message}"
+          end
+        else
+          puts "#{job_id}: not found"
+        end
+      end
+    end
+
+    def job_details( job )
+      [ "JobID: #{job.jid}",
+        "Created at: #{job.created_at.strftime( "%F %T" )}",
+        "Enqueued at: #{job.enqueued_at.strftime( "%F %T")}",
+        "Worker class: #{job.display_class}",
+        "Arguments: #{job.display_args}",
+        "Failed at: #{Time.at( job.item["failed_at"] ).strftime( "%F %T" )}",
+        "Retried at: #{job["retried_at"] ? Time.at( job["retried_at"] ).strftime( "%F %T" ) : "never"}",
+        "Retries: #{job["retry_count"]}",
+        "Retry?: #{job["retry"]}",
+        "Next retry at #{job.at.strftime( "%F %T" )}",
+        "Error Class: #{job["error_class"]}",
+        "Error Message: #{job["error_message"]}" ].join( "\n" )
+    end
+  end
+end
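
The retry-set operations mirror the CLI one to one; per the banner, retry now re-enqueues entries immediately while retry kill moves them to the dead set. A sketch (not part of the diff, with placeholder jids):

    retries = Sideq::Retries.new
    puts retries                            # sideq retry list
    retries.retry_entries( %w[ jid1 ] )     # sideq retry now jid1   (retry right now)
    retries.kill_entries( %w[ jid2 ] )      # sideq retry kill jid2  (move to the dead set)
    retries.clear                           # sideq retry clear
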
data/lib/sideq/stats.rb
ADDED
@@ -0,0 +1,31 @@
+module Sideq
+  class Stats
+    attr_reader :stats, :retry_set, :dead_set
+
+    def initialize
+      @stats = Sidekiq::Stats.new
+      @retry_set = Sidekiq::RetrySet.new
+      @dead_set = Sidekiq::DeadSet.new
+    end
+
+    def to_s
+      stat_ary = [ "Processed: #{stats.processed}",
+                   "Failed: #{stats.failed}",
+                   "Scheduled size: #{stats.scheduled_size}",
+                   "Retry size: #{stats.retry_size}",
+                   "Dead size: #{stats.dead_size}",
+                   "Enqueued: #{stats.enqueued}",
+                   "Processes: #{stats.processes_size}",
+                   "Workers: #{stats.workers_size}",
+                   "Default queue latency: #{stats.default_queue_latency}",
+
+                   "Queues: dead: #{dead_set.size}",
+                   " retry: #{retry_set.size}" ]
+      stats.queues.each do |(queue_name, queue_size)|
+        stat_ary << " #{queue_name}: #{queue_size}"
+      end
+
+      stat_ary.join( "\n" )
+    end
+  end
+end
data/lib/sideq/workers.rb
ADDED
@@ -0,0 +1,19 @@
+module Sideq
+  class Workers
+    attr_reader :worker_set
+
+    def initialize
+      @worker_set = Sidekiq::Workers.new
+    end
+
+    def to_s
+      ary = [ "Workers: #{worker_set.size}" ]
+
+      worker_set.each do |key, tid, json|
+        ary << sprintf( "%15s %15s %20s\n", key, tid, json )
+      end
+
+      ary.join( "\n" )
+    end
+  end
+end
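
Sideq::Workers#to_s is what sideq workers list prints: one line per entry of Sidekiq::Workers, whose each yields the (key, tid, json) triple seen in the block signature above. A sketch (not part of the diff):

    puts Sideq::Workers.new                 # sideq workers list

    # Walking the same data directly through Sidekiq's API:
    Sidekiq::Workers.new.each do |key, tid, work|
      puts "#{key} #{tid} #{work}"          # process key, thread id, in-progress work payload
    end
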
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: sideq
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.1.1
 platform: ruby
 authors:
 - Sven Riedel
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-04-
+date: 2016-04-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: sidekiq
@@ -38,8 +38,16 @@ executables:
 extensions: []
 extra_rdoc_files: []
 files:
+- CHANGELOG
 - README.md
 - bin/sideq
+- lib/sideq.rb
+- lib/sideq/dead.rb
+- lib/sideq/processes.rb
+- lib/sideq/queue.rb
+- lib/sideq/retries.rb
+- lib/sideq/stats.rb
+- lib/sideq/workers.rb
 homepage: https://github.com/sriedel/sideq
 licenses:
 - GPL-2.0