sidekiq_cleaner 5.3.6

Sign up to get free protection for your applications and to get access to all the features.
Files changed (122)
  1. checksums.yaml +7 -0
  2. data/.circleci/config.yml +61 -0
  3. data/.github/contributing.md +32 -0
  4. data/.github/issue_template.md +11 -0
  5. data/.gitignore +15 -0
  6. data/.travis.yml +11 -0
  7. data/3.0-Upgrade.md +70 -0
  8. data/4.0-Upgrade.md +53 -0
  9. data/5.0-Upgrade.md +56 -0
  10. data/COMM-LICENSE +97 -0
  11. data/Changes.md +1536 -0
  12. data/Ent-Changes.md +238 -0
  13. data/Gemfile +23 -0
  14. data/LICENSE +9 -0
  15. data/Pro-2.0-Upgrade.md +138 -0
  16. data/Pro-3.0-Upgrade.md +44 -0
  17. data/Pro-4.0-Upgrade.md +35 -0
  18. data/Pro-Changes.md +759 -0
  19. data/README.md +55 -0
  20. data/Rakefile +9 -0
  21. data/bin/sidekiq +18 -0
  22. data/bin/sidekiqctl +20 -0
  23. data/bin/sidekiqload +149 -0
  24. data/cleaner/assets/images/favicon.ico +0 -0
  25. data/cleaner/assets/images/logo.png +0 -0
  26. data/cleaner/assets/images/status.png +0 -0
  27. data/cleaner/assets/javascripts/application.js +172 -0
  28. data/cleaner/assets/javascripts/dashboard.js +315 -0
  29. data/cleaner/assets/stylesheets/application-rtl.css +246 -0
  30. data/cleaner/assets/stylesheets/application.css +1144 -0
  31. data/cleaner/assets/stylesheets/bootstrap-rtl.min.css +9 -0
  32. data/cleaner/assets/stylesheets/bootstrap.css +5 -0
  33. data/cleaner/locales/ar.yml +81 -0
  34. data/cleaner/locales/cs.yml +78 -0
  35. data/cleaner/locales/da.yml +68 -0
  36. data/cleaner/locales/de.yml +69 -0
  37. data/cleaner/locales/el.yml +68 -0
  38. data/cleaner/locales/en.yml +81 -0
  39. data/cleaner/locales/es.yml +70 -0
  40. data/cleaner/locales/fa.yml +80 -0
  41. data/cleaner/locales/fr.yml +78 -0
  42. data/cleaner/locales/he.yml +79 -0
  43. data/cleaner/locales/hi.yml +75 -0
  44. data/cleaner/locales/it.yml +69 -0
  45. data/cleaner/locales/ja.yml +80 -0
  46. data/cleaner/locales/ko.yml +68 -0
  47. data/cleaner/locales/nb.yml +77 -0
  48. data/cleaner/locales/nl.yml +68 -0
  49. data/cleaner/locales/pl.yml +59 -0
  50. data/cleaner/locales/pt-br.yml +68 -0
  51. data/cleaner/locales/pt.yml +67 -0
  52. data/cleaner/locales/ru.yml +78 -0
  53. data/cleaner/locales/sv.yml +68 -0
  54. data/cleaner/locales/ta.yml +75 -0
  55. data/cleaner/locales/uk.yml +76 -0
  56. data/cleaner/locales/ur.yml +80 -0
  57. data/cleaner/locales/zh-cn.yml +68 -0
  58. data/cleaner/locales/zh-tw.yml +68 -0
  59. data/cleaner/views/_footer.erb +20 -0
  60. data/cleaner/views/_job_info.erb +88 -0
  61. data/cleaner/views/_nav.erb +52 -0
  62. data/cleaner/views/_paging.erb +23 -0
  63. data/cleaner/views/_poll_link.erb +7 -0
  64. data/cleaner/views/_status.erb +4 -0
  65. data/cleaner/views/_summary.erb +40 -0
  66. data/cleaner/views/busy.erb +98 -0
  67. data/cleaner/views/dashboard.erb +75 -0
  68. data/cleaner/views/dead.erb +34 -0
  69. data/cleaner/views/errors.erb +84 -0
  70. data/cleaner/views/layout.erb +40 -0
  71. data/cleaner/views/morgue.erb +75 -0
  72. data/cleaner/views/queue.erb +46 -0
  73. data/cleaner/views/queues.erb +30 -0
  74. data/cleaner/views/retries.erb +80 -0
  75. data/cleaner/views/retry.erb +34 -0
  76. data/cleaner/views/scheduled.erb +54 -0
  77. data/cleaner/views/scheduled_job_info.erb +8 -0
  78. data/cleaner-stats.png +0 -0
  79. data/cleaner.png +0 -0
  80. data/code_of_conduct.md +50 -0
  81. data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
  82. data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
  83. data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
  84. data/lib/generators/sidekiq/worker_generator.rb +49 -0
  85. data/lib/sidekiq/api.rb +940 -0
  86. data/lib/sidekiq/cleaner/action.rb +89 -0
  87. data/lib/sidekiq/cleaner/application.rb +385 -0
  88. data/lib/sidekiq/cleaner/helpers.rb +325 -0
  89. data/lib/sidekiq/cleaner/router.rb +100 -0
  90. data/lib/sidekiq/cleaner.rb +214 -0
  91. data/lib/sidekiq/cli.rb +445 -0
  92. data/lib/sidekiq/client.rb +243 -0
  93. data/lib/sidekiq/core_ext.rb +1 -0
  94. data/lib/sidekiq/ctl.rb +221 -0
  95. data/lib/sidekiq/delay.rb +42 -0
  96. data/lib/sidekiq/exception_handler.rb +29 -0
  97. data/lib/sidekiq/extensions/action_mailer.rb +57 -0
  98. data/lib/sidekiq/extensions/active_record.rb +40 -0
  99. data/lib/sidekiq/extensions/class_methods.rb +40 -0
  100. data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
  101. data/lib/sidekiq/fetch.rb +81 -0
  102. data/lib/sidekiq/job_logger.rb +25 -0
  103. data/lib/sidekiq/job_retry.rb +262 -0
  104. data/lib/sidekiq/launcher.rb +173 -0
  105. data/lib/sidekiq/logging.rb +122 -0
  106. data/lib/sidekiq/manager.rb +137 -0
  107. data/lib/sidekiq/middleware/chain.rb +150 -0
  108. data/lib/sidekiq/middleware/i18n.rb +42 -0
  109. data/lib/sidekiq/middleware/server/active_record.rb +23 -0
  110. data/lib/sidekiq/paginator.rb +43 -0
  111. data/lib/sidekiq/processor.rb +279 -0
  112. data/lib/sidekiq/rails.rb +58 -0
  113. data/lib/sidekiq/redis_connection.rb +144 -0
  114. data/lib/sidekiq/scheduled.rb +174 -0
  115. data/lib/sidekiq/testing/inline.rb +29 -0
  116. data/lib/sidekiq/testing.rb +333 -0
  117. data/lib/sidekiq/util.rb +66 -0
  118. data/lib/sidekiq/version.rb +4 -0
  119. data/lib/sidekiq/worker.rb +220 -0
  120. data/lib/sidekiq.rb +237 -0
  121. data/sidekiq_cleaner.gemspec +21 -0
  122. metadata +235 -0
@@ -0,0 +1,940 @@
1
+ # frozen_string_literal: true
2
+ require 'sidekiq'
3
+
4
+ module Sidekiq
5
+
6
# Helper mixin: exhaustively iterate a Redis set with SSCAN, following
# the cursor until Redis reports completion ('0'), and return all members.
module RedisScanner
  def sscan(conn, key)
    members = []
    cursor = '0'
    loop do
      cursor, chunk = conn.sscan(key, cursor)
      members.concat(chunk)
      break if cursor == '0'
    end
    members
  end
end
18
+
19
# Snapshot of global Sidekiq statistics, fetched from Redis once at
# construction time (call #fetch_stats! to refresh).
class Stats
  include RedisScanner

  def initialize
    fetch_stats!
  end

  def processed
    stat :processed
  end

  def failed
    stat :failed
  end

  def scheduled_size
    stat :scheduled_size
  end

  def retry_size
    stat :retry_size
  end

  def dead_size
    stat :dead_size
  end

  def enqueued
    stat :enqueued
  end

  def processes_size
    stat :processes_size
  end

  def workers_size
    stat :workers_size
  end

  def default_queue_latency
    stat :default_queue_latency
  end

  # Hash of queue name => length, longest first.
  def queues
    Sidekiq::Stats::Queues.new.lengths
  end

  # Refresh all cached statistics from Redis using as few round trips
  # as possible: one pipeline for the fixed keys, two SSCANs, and a
  # second pipeline sized by the number of processes and queues.
  def fetch_stats!
    pipe1_res = Sidekiq.redis do |conn|
      conn.pipelined do
        conn.get('stat:processed')
        conn.get('stat:failed')
        conn.zcard('schedule')
        conn.zcard('retry')
        conn.zcard('dead')
        conn.scard('processes')
        conn.lrange('queue:default', -1, -1)
      end
    end

    processes = Sidekiq.redis do |conn|
      sscan(conn, 'processes')
    end

    queues = Sidekiq.redis do |conn|
      sscan(conn, 'queues')
    end

    pipe2_res = Sidekiq.redis do |conn|
      conn.pipelined do
        processes.each { |key| conn.hget(key, 'busy') }
        queues.each { |queue| conn.llen("queue:#{queue}") }
      end
    end

    # pipe2_res is busy counts for each process followed by queue lengths.
    s = processes.size
    workers_size = pipe2_res[0...s].map(&:to_i).inject(0, &:+)
    enqueued = pipe2_res[s..-1].map(&:to_i).inject(0, &:+)

    default_queue_latency = if (entry = pipe1_res[6].first)
      # Only swallow malformed JSON (treat as "no data"); the previous
      # bare `rescue {}` hid every StandardError, masking real bugs.
      job = begin
        Sidekiq.load_json(entry)
      rescue JSON::ParserError
        {}
      end
      now = Time.now.to_f
      thence = job['enqueued_at'] || now
      now - thence
    else
      0
    end
    @stats = {
      processed: pipe1_res[0].to_i,
      failed: pipe1_res[1].to_i,
      scheduled_size: pipe1_res[2],
      retry_size: pipe1_res[3],
      dead_size: pipe1_res[4],
      processes_size: pipe1_res[5],

      default_queue_latency: default_queue_latency,
      workers_size: workers_size,
      enqueued: enqueued
    }
  end

  # Reset the named counters ("processed" and/or "failed") to zero.
  # With no arguments, resets both.
  def reset(*stats)
    all = %w(failed processed)
    stats = stats.empty? ? all : all & stats.flatten.compact.map(&:to_s)
    # Guard: if callers pass only unknown names the intersection is empty
    # and MSET with no arguments would raise a Redis error.
    return if stats.empty?

    mset_args = []
    stats.each do |stat|
      mset_args << "stat:#{stat}"
      mset_args << 0
    end
    Sidekiq.redis do |conn|
      conn.mset(*mset_args)
    end
  end

  private

  def stat(s)
    @stats[s]
  end

  class Queues
    include RedisScanner

    # Returns a Hash of queue name => length, sorted longest first.
    def lengths
      Sidekiq.redis do |conn|
        queues = sscan(conn, 'queues')

        lengths = conn.pipelined do
          queues.each do |queue|
            conn.llen("queue:#{queue}")
          end
        end

        i = 0
        array_of_arrays = queues.inject({}) do |memo, queue|
          memo[queue] = lengths[i]
          i += 1
          memo
        end.sort_by { |_, size| size }

        Hash[array_of_arrays.reverse]
      end
    end
  end

  # Daily processed/failed counts for the last +days_previous+ days.
  class History
    def initialize(days_previous, start_date = nil)
      @days_previous = days_previous
      @start_date = start_date || Time.now.utc.to_date
    end

    def processed
      @processed ||= date_stat_hash("processed")
    end

    def failed
      @failed ||= date_stat_hash("failed")
    end

    private

    # Builds a Hash of "YYYY-MM-DD" => count, newest date first.
    def date_stat_hash(stat)
      i = 0
      stat_hash = {}
      keys = []
      dates = []

      while i < @days_previous
        date = @start_date - i
        datestr = date.strftime("%Y-%m-%d")
        keys << "stat:#{stat}:#{datestr}"
        dates << datestr
        i += 1
      end

      begin
        Sidekiq.redis do |conn|
          conn.mget(keys).each_with_index do |value, idx|
            stat_hash[dates[idx]] = value ? value.to_i : 0
          end
        end
      rescue Redis::CommandError
        # mget will trigger a CROSSSLOT error when run against a Cluster
        # TODO Someone want to add Cluster support?
      end

      stat_hash
    end
  end
end
210
+
211
##
# Encapsulates a queue within Sidekiq.
# Allows enumeration of all jobs within the queue
# and deletion of jobs.
#
#   queue = Sidekiq::Queue.new("mailer")
#   queue.each do |job|
#     job.klass # => 'MyWorker'
#     job.args # => [1, 2, 3]
#     job.delete if job.jid == 'abcdef1234567890'
#   end
#
class Queue
  include Enumerable
  extend RedisScanner

  ##
  # Return all known queues within Redis.
  #
  def self.all
    Sidekiq.redis { |c| sscan(c, 'queues') }.sort.map { |q| Sidekiq::Queue.new(q) }
  end

  attr_reader :name

  def initialize(name="default")
    @name = name.to_s
    @rname = "queue:#{name}"
  end

  def size
    Sidekiq.redis { |con| con.llen(@rname) }
  end

  # Sidekiq Pro overrides this
  def paused?
    false
  end

  ##
  # Calculates this queue's latency, the difference in seconds since the oldest
  # job in the queue was enqueued.
  #
  # @return Float
  def latency
    entry = Sidekiq.redis do |conn|
      conn.lrange(@rname, -1, -1)
    end.first
    return 0 unless entry
    job = Sidekiq.load_json(entry)
    now = Time.now.to_f
    thence = job['enqueued_at'] || now
    now - thence
  end

  # Iterates jobs in pages of 50, shifting the window back by the number
  # of jobs deleted during iteration so no entry is skipped.
  def each
    initial_size = size
    deleted_size = 0
    page = 0
    page_size = 50

    # `loop do` replaces the unidiomatic `while true do`.
    loop do
      range_start = page * page_size - deleted_size
      range_end = range_start + page_size - 1
      entries = Sidekiq.redis do |conn|
        conn.lrange @rname, range_start, range_end
      end
      break if entries.empty?
      page += 1
      entries.each do |entry|
        yield Job.new(entry, @name)
      end
      deleted_size = initial_size - size
    end
  end

  ##
  # Find the job with the given JID within this queue.
  #
  # This is a slow, inefficient operation.  Do not use under
  # normal conditions.  Sidekiq Pro contains a faster version.
  def find_job(jid)
    detect { |j| j.jid == jid }
  end

  # Delete the queue's list and deregister it from the 'queues' set,
  # atomically.
  def clear
    Sidekiq.redis do |conn|
      conn.multi do
        conn.del(@rname)
        conn.srem("queues", name)
      end
    end
  end
  alias_method :💣, :clear
end
306
+
307
##
# Encapsulates a pending job within a Sidekiq queue or
# sorted set.
#
# The job should be considered immutable but may be
# removed from the queue via Job#delete.
#
class Job
  # Parsed job payload (Hash).
  attr_reader :item
  # Raw value as read from Redis (JSON String, or a Hash when built directly).
  attr_reader :value
  # Name of the queue this job belongs to.
  attr_reader :queue

  def initialize(item, queue_name=nil)
    @args = nil
    @value = item
    @item = item.is_a?(Hash) ? item : parse(item)
    @queue = queue_name || @item['queue']
  end

  def parse(item)
    Sidekiq.load_json(item)
  rescue JSON::ParserError
    # If the job payload in Redis is invalid JSON, we'll load
    # the item as an empty hash and store the invalid JSON as
    # the job 'args' for display in the Web UI.
    @invalid = true
    @args = [item]
    {}
  end

  def klass
    self['class']
  end

  # Unwrap known wrappers (Delayed extensions, ActiveJob) so the class
  # shows up in a human-friendly manner in the Web UI.
  def display_class
    @klass ||= case klass
               when /\ASidekiq::Extensions::Delayed/
                 safe_load(args[0], klass) do |target, method, _|
                   "#{target}.#{method}"
                 end
               when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
                 wrapped = @item['wrapped'] || args[0]
                 if 'ActionMailer::DeliveryJob' == wrapped
                   # MailerClass#mailer_method
                   args[0]['arguments'][0..1].join('#')
                 else
                   wrapped
                 end
               else
                 klass
               end
  end

  # Unwrap known wrappers so the arguments show up in a human-friendly
  # manner in the Web UI.
  def display_args
    @display_args ||= case klass
                      when /\ASidekiq::Extensions::Delayed/
                        safe_load(args[0], args) do |_, _, arg|
                          arg
                        end
                      when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
                        job_args = self['wrapped'] ? args[0]["arguments"] : []
                        if 'ActionMailer::DeliveryJob' == (self['wrapped'] || args[0])
                          # remove MailerClass, mailer_method and 'deliver_now'
                          job_args.drop(3)
                        else
                          job_args
                        end
                      else
                        if self['encrypt']
                          # no point in showing 150+ bytes of random garbage
                          args[-1] = '[encrypted data]'
                        end
                        args
                      end
  end

  def args
    @args || @item['args']
  end

  def jid
    self['jid']
  end

  # UTC time the job was pushed, or nil when unknown.
  def enqueued_at
    ts = self['enqueued_at']
    ts ? Time.at(ts).utc : nil
  end

  # UTC creation time, falling back to enqueue time, then the epoch.
  def created_at
    Time.at(self['created_at'] || self['enqueued_at'] || 0).utc
  end

  # Seconds elapsed since the job was enqueued (or created).
  def latency
    now = Time.now.to_f
    reference = @item['enqueued_at'] || @item['created_at'] || now
    now - reference
  end

  ##
  # Remove this job from the queue.
  def delete
    removed = Sidekiq.redis do |conn|
      conn.lrem("queue:#{@queue}", 1, @value)
    end
    removed != 0
  end

  def [](name)
    # nil will happen if the JSON fails to parse.
    # We don't guarantee Sidekiq will work with bad job JSON but we should
    # make a best effort to minimize the damage.
    return nil unless @item
    @item[name]
  end

  private

  # NOTE(review): deserializes YAML from Delayed extension payloads with
  # YAML.load; payloads come from this app's own Redis, but treat as
  # sensitive — a safe_load variant would be preferable if compatible.
  def safe_load(content, default)
    yield(*YAML.load(content))
  rescue => ex
    # #1761 in dev mode, it's possible to have jobs enqueued which haven't been loaded into
    # memory yet so the YAML can't be loaded.
    Sidekiq.logger.warn "Unable to load YAML: #{ex.message}" unless Sidekiq.options[:environment] == 'development'
    default
  end
end
438
+
439
# An entry in a sorted set (schedule/retry/dead), pairing a Job payload
# with its zset score (a Unix timestamp).
class SortedEntry < Job
  attr_reader :score
  attr_reader :parent

  def initialize(parent, score, item)
    super(item)
    @score = score
    @parent = parent
  end

  # The time this entry is scheduled for, derived from its score.
  def at
    Time.at(score).utc
  end

  def delete
    if @value
      @parent.delete_by_value(@parent.name, @value)
    else
      # No raw payload available (item was built from a Hash); fall back
      # to the slower score+JID lookup.
      @parent.delete_by_jid(score, jid)
    end
  end

  # Move this entry to run at a different time: remove it, then re-add
  # the same item under the new score.
  def reschedule(at)
    delete
    @parent.schedule(at, item)
  end

  # Remove this entry from the sorted set and push it onto its queue
  # for immediate execution.
  def add_to_queue
    remove_job do |message|
      msg = Sidekiq.load_json(message)
      Sidekiq::Client.push(msg)
    end
  end

  # Re-enqueue immediately, decrementing retry_count so this manual
  # retry does not count against the job's retry limit.
  def retry
    remove_job do |message|
      msg = Sidekiq.load_json(message)
      msg['retry_count'] -= 1 if msg['retry_count']
      Sidekiq::Client.push(msg)
    end
  end

  ##
  # Place job in the dead set
  def kill
    remove_job do |message|
      DeadSet.new.kill(message)
    end
  end

  def error?
    !!item['error_class']
  end

  private

  # Atomically pops ALL payloads sharing this entry's score, yields the
  # one matching our JID, and pushes any same-score siblings back onto
  # the sorted set.
  def remove_job
    Sidekiq.redis do |conn|
      results = conn.multi do
        conn.zrangebyscore(parent.name, score, score)
        conn.zremrangebyscore(parent.name, score, score)
      end.first

      if results.size == 1
        yield results.first
      else
        # multiple jobs with the same score
        # find the one with the right JID and push it
        hash = results.group_by do |message|
          if message.index(jid)
            # cheap substring check passed; confirm with a real parse
            msg = Sidekiq.load_json(message)
            msg['jid'] == jid
          else
            false
          end
        end

        msg = hash.fetch(true, []).first
        yield msg if msg

        # push the rest back onto the sorted set
        conn.multi do
          hash.fetch(false, []).each do |message|
            conn.zadd(parent.name, score.to_f.to_s, message)
          end
        end
      end
    end
  end

end
530
+
531
# Base class for collections backed by a Redis sorted set.
# Caches the size at construction time in @_size for paging math.
class SortedSet
  include Enumerable

  # Redis key of the underlying sorted set.
  attr_reader :name

  def initialize(name)
    @name = name
    @_size = size
  end

  def size
    Sidekiq.redis { |conn| conn.zcard(name) }
  end

  # Drop the entire sorted set.
  def clear
    Sidekiq.redis { |conn| conn.del(name) }
  end
  alias_method :💣, :clear
end
552
+
553
# A sorted set whose members are serialized job payloads scored by
# timestamp (schedule, retry, dead).
class JobSet < SortedSet

  # Add a job payload to the set, scored by +timestamp+.
  def schedule(timestamp, message)
    Sidekiq.redis do |conn|
      conn.zadd(name, timestamp.to_f.to_s, Sidekiq.dump_json(message))
    end
  end

  # Enumerates entries by walking pages backwards from the end of the
  # set, compensating for elements removed during iteration.
  def each
    initial_size = @_size
    offset_size = 0
    page = -1
    page_size = 50

    loop do
      range_start = page * page_size + offset_size
      range_end = range_start + page_size - 1
      elements = Sidekiq.redis do |conn|
        conn.zrange name, range_start, range_end, with_scores: true
      end
      break if elements.empty?
      page -= 1
      elements.reverse.each do |element, score|
        yield SortedEntry.new(self, score, element)
      end
      offset_size = initial_size - @_size
    end
  end

  # Returns every entry at +score+, optionally narrowed to a single JID.
  def fetch(score, jid = nil)
    elements = Sidekiq.redis do |conn|
      conn.zrangebyscore(name, score, score)
    end

    elements.each_with_object([]) do |element, result|
      entry = SortedEntry.new(self, score, element)
      result << entry if !jid || entry.jid == jid
    end
  end

  ##
  # Find the job with the given JID within this sorted set.
  #
  # This is a slow, inefficient operation.  Do not use under
  # normal conditions.  Sidekiq Pro contains a faster version.
  def find_job(jid)
    detect { |j| j.jid == jid }
  end

  # Remove the exact payload +value+ from the set named +name+.
  def delete_by_value(name, value)
    Sidekiq.redis do |conn|
      removed = conn.zrem(name, value)
      @_size -= 1 if removed
      removed
    end
  end

  # Remove the entry at +score+ whose payload carries +jid+.
  def delete_by_jid(score, jid)
    Sidekiq.redis do |conn|
      elements = conn.zrangebyscore(name, score, score)
      elements.each do |element|
        message = Sidekiq.load_json(element)
        if message["jid"] == jid
          removed = conn.zrem(name, element)
          @_size -= 1 if removed
          break removed
        end
        false
      end
    end
  end

  alias_method :delete, :delete_by_jid
end
632
+
633
##
# Allows enumeration of scheduled jobs within Sidekiq.
# Based on this, you can search/filter for jobs.  Here's an
# example where I'm selecting all jobs of a certain type
# and deleting them from the schedule queue.
#
#   r = Sidekiq::ScheduledSet.new
#   r.select do |scheduled|
#     scheduled.klass == 'Sidekiq::Extensions::DelayedClass' &&
#     scheduled.args[0] == 'User' &&
#     scheduled.args[1] == 'setup_new_subscriber'
#   end.map(&:delete)
class ScheduledSet < JobSet
  # Backed by the 'schedule' sorted set in Redis.
  def initialize
    super 'schedule'
  end
end
650
+
651
##
# Allows enumeration of retries within Sidekiq.
# Based on this, you can search/filter for jobs.  Here's an
# example where I'm selecting all jobs of a certain type
# and deleting them from the retry queue.
#
#   r = Sidekiq::RetrySet.new
#   r.select do |retri|
#     retri.klass == 'Sidekiq::Extensions::DelayedClass' &&
#     retri.args[0] == 'User' &&
#     retri.args[1] == 'setup_new_subscriber'
#   end.map(&:delete)
class RetrySet < JobSet
  # Backed by the 'retry' sorted set in Redis.
  def initialize
    super 'retry'
  end

  # Immediately retry every job, looping until the set is drained.
  def retry_all
    each(&:retry) while size > 0
  end

  # Move every retry to the dead set, looping until the set is drained.
  def kill_all
    each(&:kill) while size > 0
  end
end
680
+
681
##
# Allows enumeration of dead jobs within Sidekiq.
#
class DeadSet < JobSet
  # Backed by the 'dead' sorted set in Redis.
  def initialize
    super 'dead'
  end

  # Add +message+ to the dead set, trimming the set by age and size.
  # Invokes configured death handlers unless opts[:notify_failure] is false.
  def kill(message, opts={})
    now = Time.now.to_f
    Sidekiq.redis do |conn|
      conn.multi do
        conn.zadd(name, now.to_s, message)
        # prune entries older than the configured timeout...
        conn.zremrangebyscore(name, '-inf', now - self.class.timeout)
        # ...and cap the set at max_jobs entries
        conn.zremrangebyrank(name, 0, - self.class.max_jobs)
      end
    end

    unless opts[:notify_failure] == false
      job = Sidekiq.load_json(message)
      error = RuntimeError.new("Job killed by API")
      error.set_backtrace(caller)
      Sidekiq.death_handlers.each do |handler|
        handler.call(job, error)
      end
    end
    true
  end

  # Retry every dead job, looping until the set is drained.
  def retry_all
    each(&:retry) while size > 0
  end

  def self.max_jobs
    Sidekiq.options[:dead_max_jobs]
  end

  def self.timeout
    Sidekiq.options[:dead_timeout_in_seconds]
  end
end
724
+
725
##
# Enumerates the set of Sidekiq processes which are actively working
# right now.  Each process send a heartbeat to Redis every 5 seconds
# so this set should be relatively accurate, barring network partitions.
#
# Yields a Sidekiq::Process.
#
class ProcessSet
  include Enumerable
  include RedisScanner

  def initialize(clean_plz=true)
    cleanup if clean_plz
  end

  # Cleans up dead processes recorded in Redis.
  # Returns the number of processes cleaned.
  def cleanup
    count = 0
    Sidekiq.redis do |conn|
      procs = sscan(conn, 'processes').sort
      heartbeats = conn.pipelined do
        procs.each do |key|
          conn.hget(key, 'info')
        end
      end

      # the hash named key has an expiry of 60 seconds.
      # if it's not found, that means the process has not reported
      # in to Redis and probably died.
      to_prune = procs.zip(heartbeats)
                      .select { |_, beat| beat.nil? }
                      .map(&:first)
      count = conn.srem('processes', to_prune) unless to_prune.empty?
    end
    count
  end

  def each
    procs = Sidekiq.redis { |conn| sscan(conn, 'processes') }.sort

    Sidekiq.redis do |conn|
      # We're making a tradeoff here between consuming more memory instead of
      # making more roundtrips to Redis, but if you have hundreds or thousands of workers,
      # you'll be happier this way
      rows = conn.pipelined do
        procs.each do |key|
          conn.hmget(key, 'info', 'busy', 'beat', 'quiet')
        end
      end

      rows.each do |info, busy, at_s, quiet|
        # If a process is stopped between when we query Redis for `procs` and
        # when we query for `rows`, we will have an item in `rows` that is
        # composed of `nil` values.
        next if info.nil?

        attrs = Sidekiq.load_json(info)
        yield Process.new(attrs.merge('busy' => busy.to_i, 'beat' => at_s.to_f, 'quiet' => quiet))
      end
    end

    nil
  end

  # This method is not guaranteed accurate since it does not prune the set
  # based on current heartbeat.  #each does that and ensures the set only
  # contains Sidekiq processes which have sent a heartbeat within the last
  # 60 seconds.
  def size
    Sidekiq.redis { |conn| conn.scard('processes') }
  end

  # Returns the identity of the current cluster leader or "" if no leader.
  # This is a Sidekiq Enterprise feature, will always return "" in Sidekiq
  # or Sidekiq Pro.
  def leader
    @leader ||= begin
      x = Sidekiq.redis { |c| c.get("dear-leader") }
      # need a non-falsy value so we can memoize
      x || ""
    end
  end
end
811
+
812
#
# Sidekiq::Process represents an active Sidekiq process talking with Redis.
# Each process has a set of attributes which look like this:
#
# {
#   'hostname' => 'app-1.example.com',
#   'started_at' => <process start time>,
#   'pid' => 12345,
#   'tag' => 'myapp'
#   'concurrency' => 25,
#   'queues' => ['default', 'low'],
#   'busy' => 10,
#   'beat' => <last heartbeat>,
#   'identity' => <unique string identifying the process>,
# }
class Process
  def initialize(hash)
    @attribs = hash
  end

  # Deploy/launch tag for this process, if any.
  def tag
    self['tag']
  end

  # Arbitrary labels attached to the process; always returns an Array.
  def labels
    Array(self['labels'])
  end

  # Raw attribute access by string key.
  def [](key)
    @attribs[key]
  end

  # Unique identity string for this process.
  def identity
    self['identity']
  end

  # Ask the process to stop fetching new jobs (delivers TSTP).
  def quiet!
    signal('TSTP')
  end

  # Ask the process to shut down (delivers TERM).
  def stop!
    signal('TERM')
  end

  # Ask the process to log its thread backtraces (delivers TTIN).
  def dump_threads
    signal('TTIN')
  end

  # True once the process has been quieted and will take no new work.
  def stopping?
    self['quiet'] == 'true'
  end

  private

  # "Signals" are delivered by pushing onto a per-process Redis list
  # which the remote process polls; the key expires after 60 seconds.
  def signal(sig)
    key = "#{identity}-signals"
    Sidekiq.redis do |c|
      c.multi do
        c.lpush(key, sig)
        c.expire(key, 60)
      end
    end
  end

end
877
+
878
##
# A worker is a thread that is currently processing a job.
# Programmatic access to the current active worker set.
#
# WARNING WARNING WARNING
#
# This is live data that can change every millisecond.
# If you call #size => 5 and then expect #each to be
# called 5 times, you're going to have a bad time.
#
#    workers = Sidekiq::Workers.new
#    workers.size => 2
#    workers.each do |process_id, thread_id, work|
#      # process_id is a unique identifier per Sidekiq process
#      # thread_id is a unique identifier per thread
#      # work is a Hash which looks like:
#      # { 'queue' => name, 'run_at' => timestamp, 'payload' => msg }
#      # run_at is an epoch Integer.
#    end
#
class Workers
  include Enumerable
  include RedisScanner

  # Yields [process_key, thread_id, work_hash] for every in-progress job.
  def each
    Sidekiq.redis do |conn|
      procs = sscan(conn, 'processes')
      procs.sort.each do |key|
        valid, workers = conn.pipelined do
          conn.exists(key)
          conn.hgetall("#{key}:workers")
        end
        # skip processes whose heartbeat hash has expired (process died)
        next unless valid
        workers.each_pair do |tid, json|
          yield key, tid, Sidekiq.load_json(json)
        end
      end
    end
  end

  # Note that #size is only as accurate as Sidekiq's heartbeat,
  # which happens every 5 seconds.  It is NOT real-time.
  #
  # Not very efficient if you have lots of Sidekiq
  # processes but the alternative is a global counter
  # which can easily get out of sync with crashy processes.
  def size
    Sidekiq.redis do |conn|
      procs = sscan(conn, 'processes')
      if procs.empty?
        0
      else
        # sum the per-process 'busy' counters
        conn.pipelined do
          procs.each do |key|
            conn.hget(key, 'busy')
          end
        end.map(&:to_i).inject(:+)
      end
    end
  end
end
939
+
940
+ end