sidekiq 6.0.0

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of sidekiq might be problematic. Click here for more details.

Files changed (121) hide show
  1. checksums.yaml +7 -0
  2. data/.circleci/config.yml +61 -0
  3. data/.github/contributing.md +32 -0
  4. data/.github/issue_template.md +11 -0
  5. data/.gitignore +13 -0
  6. data/.standard.yml +20 -0
  7. data/3.0-Upgrade.md +70 -0
  8. data/4.0-Upgrade.md +53 -0
  9. data/5.0-Upgrade.md +56 -0
  10. data/6.0-Upgrade.md +70 -0
  11. data/COMM-LICENSE +97 -0
  12. data/Changes.md +1570 -0
  13. data/Ent-2.0-Upgrade.md +37 -0
  14. data/Ent-Changes.md +250 -0
  15. data/Gemfile +24 -0
  16. data/Gemfile.lock +196 -0
  17. data/LICENSE +9 -0
  18. data/Pro-2.0-Upgrade.md +138 -0
  19. data/Pro-3.0-Upgrade.md +44 -0
  20. data/Pro-4.0-Upgrade.md +35 -0
  21. data/Pro-5.0-Upgrade.md +25 -0
  22. data/Pro-Changes.md +768 -0
  23. data/README.md +95 -0
  24. data/Rakefile +10 -0
  25. data/bin/sidekiq +18 -0
  26. data/bin/sidekiqload +153 -0
  27. data/bin/sidekiqmon +9 -0
  28. data/code_of_conduct.md +50 -0
  29. data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
  30. data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
  31. data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
  32. data/lib/generators/sidekiq/worker_generator.rb +47 -0
  33. data/lib/sidekiq.rb +248 -0
  34. data/lib/sidekiq/api.rb +927 -0
  35. data/lib/sidekiq/cli.rb +380 -0
  36. data/lib/sidekiq/client.rb +242 -0
  37. data/lib/sidekiq/delay.rb +41 -0
  38. data/lib/sidekiq/exception_handler.rb +27 -0
  39. data/lib/sidekiq/extensions/action_mailer.rb +47 -0
  40. data/lib/sidekiq/extensions/active_record.rb +42 -0
  41. data/lib/sidekiq/extensions/class_methods.rb +42 -0
  42. data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
  43. data/lib/sidekiq/fetch.rb +80 -0
  44. data/lib/sidekiq/job_logger.rb +55 -0
  45. data/lib/sidekiq/job_retry.rb +249 -0
  46. data/lib/sidekiq/launcher.rb +181 -0
  47. data/lib/sidekiq/logger.rb +69 -0
  48. data/lib/sidekiq/manager.rb +135 -0
  49. data/lib/sidekiq/middleware/chain.rb +151 -0
  50. data/lib/sidekiq/middleware/i18n.rb +40 -0
  51. data/lib/sidekiq/monitor.rb +148 -0
  52. data/lib/sidekiq/paginator.rb +42 -0
  53. data/lib/sidekiq/processor.rb +282 -0
  54. data/lib/sidekiq/rails.rb +52 -0
  55. data/lib/sidekiq/redis_connection.rb +138 -0
  56. data/lib/sidekiq/scheduled.rb +172 -0
  57. data/lib/sidekiq/testing.rb +332 -0
  58. data/lib/sidekiq/testing/inline.rb +30 -0
  59. data/lib/sidekiq/util.rb +69 -0
  60. data/lib/sidekiq/version.rb +5 -0
  61. data/lib/sidekiq/web.rb +205 -0
  62. data/lib/sidekiq/web/action.rb +93 -0
  63. data/lib/sidekiq/web/application.rb +356 -0
  64. data/lib/sidekiq/web/helpers.rb +324 -0
  65. data/lib/sidekiq/web/router.rb +103 -0
  66. data/lib/sidekiq/worker.rb +247 -0
  67. data/sidekiq.gemspec +21 -0
  68. data/web/assets/images/favicon.ico +0 -0
  69. data/web/assets/images/logo.png +0 -0
  70. data/web/assets/images/status.png +0 -0
  71. data/web/assets/javascripts/application.js +92 -0
  72. data/web/assets/javascripts/dashboard.js +296 -0
  73. data/web/assets/stylesheets/application-rtl.css +246 -0
  74. data/web/assets/stylesheets/application.css +1144 -0
  75. data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
  76. data/web/assets/stylesheets/bootstrap.css +5 -0
  77. data/web/locales/ar.yml +81 -0
  78. data/web/locales/cs.yml +78 -0
  79. data/web/locales/da.yml +68 -0
  80. data/web/locales/de.yml +69 -0
  81. data/web/locales/el.yml +68 -0
  82. data/web/locales/en.yml +81 -0
  83. data/web/locales/es.yml +70 -0
  84. data/web/locales/fa.yml +80 -0
  85. data/web/locales/fr.yml +78 -0
  86. data/web/locales/he.yml +79 -0
  87. data/web/locales/hi.yml +75 -0
  88. data/web/locales/it.yml +69 -0
  89. data/web/locales/ja.yml +81 -0
  90. data/web/locales/ko.yml +68 -0
  91. data/web/locales/nb.yml +77 -0
  92. data/web/locales/nl.yml +68 -0
  93. data/web/locales/pl.yml +59 -0
  94. data/web/locales/pt-br.yml +68 -0
  95. data/web/locales/pt.yml +67 -0
  96. data/web/locales/ru.yml +78 -0
  97. data/web/locales/sv.yml +68 -0
  98. data/web/locales/ta.yml +75 -0
  99. data/web/locales/uk.yml +76 -0
  100. data/web/locales/ur.yml +80 -0
  101. data/web/locales/zh-cn.yml +68 -0
  102. data/web/locales/zh-tw.yml +68 -0
  103. data/web/views/_footer.erb +20 -0
  104. data/web/views/_job_info.erb +88 -0
  105. data/web/views/_nav.erb +52 -0
  106. data/web/views/_paging.erb +23 -0
  107. data/web/views/_poll_link.erb +7 -0
  108. data/web/views/_status.erb +4 -0
  109. data/web/views/_summary.erb +40 -0
  110. data/web/views/busy.erb +98 -0
  111. data/web/views/dashboard.erb +75 -0
  112. data/web/views/dead.erb +34 -0
  113. data/web/views/layout.erb +40 -0
  114. data/web/views/morgue.erb +75 -0
  115. data/web/views/queue.erb +46 -0
  116. data/web/views/queues.erb +30 -0
  117. data/web/views/retries.erb +80 -0
  118. data/web/views/retry.erb +34 -0
  119. data/web/views/scheduled.erb +54 -0
  120. data/web/views/scheduled_job_info.erb +8 -0
  121. metadata +220 -0
@@ -0,0 +1,927 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "sidekiq"
4
+
5
+ module Sidekiq
6
# Mixin that walks a Redis set with SSCAN, accumulating every member.
# SSCAN pages through the set server-side: each call returns the next
# cursor plus a batch of members, and a returned cursor of "0" signals
# that the full iteration has completed.
module RedisScanner
  # Return all members of the Redis set stored at +key+.
  #
  # conn - a Redis connection responding to #sscan(key, cursor)
  # key  - the Redis key of the set to enumerate
  def sscan(conn, key)
    members = []
    cursor = "0"
    loop do
      cursor, batch = conn.sscan(key, cursor)
      members.concat(batch)
      # The server returns "0" once the scan has wrapped around.
      break if cursor == "0"
    end
    members
  end
end
18
+
19
##
# Point-in-time snapshot of Sidekiq's global statistics. All counters are
# read from Redis once, in #fetch_stats! (called by #initialize), and
# served from memory afterwards; build a new Stats object for fresh data.
class Stats
  include RedisScanner

  def initialize
    fetch_stats!
  end

  # Total jobs processed since the counters were last reset.
  def processed
    stat :processed
  end

  # Total job failures since the counters were last reset.
  def failed
    stat :failed
  end

  # Number of jobs in the "schedule" sorted set.
  def scheduled_size
    stat :scheduled_size
  end

  # Number of jobs in the "retry" sorted set.
  def retry_size
    stat :retry_size
  end

  # Number of jobs in the "dead" sorted set.
  def dead_size
    stat :dead_size
  end

  # Total number of jobs waiting across all queues.
  def enqueued
    stat :enqueued
  end

  # Number of live Sidekiq processes.
  def processes_size
    stat :processes_size
  end

  # Number of jobs currently being worked on across all processes.
  def workers_size
    stat :workers_size
  end

  # Seconds the oldest job in the default queue has been waiting.
  def default_queue_latency
    stat :default_queue_latency
  end

  # Hash of queue name => length, largest first (live, not cached).
  def queues
    Sidekiq::Stats::Queues.new.lengths
  end

  # Load all statistics from Redis into @stats. Called automatically by
  # #initialize; call again to refresh an existing instance.
  def fetch_stats!
    # Pipelined replies come back in command order: [0] stat:processed,
    # [1] stat:failed, [2..4] the three sorted-set sizes, [5] the
    # process count, [6] the tail entry of the default queue.
    pipe1_res = Sidekiq.redis { |conn|
      conn.pipelined do
        conn.get("stat:processed")
        conn.get("stat:failed")
        conn.zcard("schedule")
        conn.zcard("retry")
        conn.zcard("dead")
        conn.scard("processes")
        conn.lrange("queue:default", -1, -1)
      end
    }

    processes = Sidekiq.redis { |conn|
      sscan(conn, "processes")
    }

    queues = Sidekiq.redis { |conn|
      sscan(conn, "queues")
    }

    pipe2_res = Sidekiq.redis { |conn|
      conn.pipelined do
        processes.each { |key| conn.hget(key, "busy") }
        queues.each { |queue| conn.llen("queue:#{queue}") }
      end
    }

    # The first `s` replies belong to the HGETs (busy counts per
    # process); the remainder are the LLENs (queue lengths).
    s = processes.size
    workers_size = pipe2_res[0...s].map(&:to_i).inject(0, &:+)
    enqueued = pipe2_res[s..-1].map(&:to_i).inject(0, &:+)

    # Latency is measured against the oldest (tail) entry of the default
    # queue; an unparseable payload counts as zero latency.
    default_queue_latency = if (entry = pipe1_res[6].first)
      job = begin
        Sidekiq.load_json(entry)
      rescue
        {}
      end
      now = Time.now.to_f
      thence = job["enqueued_at"] || now
      now - thence
    else
      0
    end
    @stats = {
      processed: pipe1_res[0].to_i,
      failed: pipe1_res[1].to_i,
      scheduled_size: pipe1_res[2],
      retry_size: pipe1_res[3],
      dead_size: pipe1_res[4],
      processes_size: pipe1_res[5],

      default_queue_latency: default_queue_latency,
      workers_size: workers_size,
      enqueued: enqueued,
    }
  end

  # Reset the "processed" and/or "failed" counters to zero.
  #
  # With no arguments both counters are reset; otherwise only the
  # recognized names among the arguments are reset. Unknown names are
  # ignored and, if nothing valid remains, no Redis call is issued —
  # MSET requires at least one key/value pair and would raise a
  # CommandError on an empty argument list.
  def reset(*stats)
    all = %w[failed processed]
    stats = stats.empty? ? all : all & stats.flatten.compact.map(&:to_s)
    return if stats.empty?

    mset_args = []
    stats.each do |stat|
      mset_args << "stat:#{stat}"
      mset_args << 0
    end
    Sidekiq.redis do |conn|
      conn.mset(*mset_args)
    end
  end

  private

  # Read one entry from the cached snapshot.
  def stat(s)
    @stats[s]
  end

  # Computes per-queue lengths.
  class Queues
    include RedisScanner

    # Hash of queue name => length, sorted by length descending.
    def lengths
      Sidekiq.redis do |conn|
        queues = sscan(conn, "queues")

        # One LLEN per queue; replies line up with `queues` by index.
        lengths = conn.pipelined {
          queues.each do |queue|
            conn.llen("queue:#{queue}")
          end
        }

        i = 0
        array_of_arrays = queues.each_with_object({}) { |queue, memo|
          memo[queue] = lengths[i]
          i += 1
        }.sort_by { |_, size| size }

        # Reverse the ascending sort so the longest queue comes first.
        Hash[array_of_arrays.reverse]
      end
    end
  end

  # Daily processed/failed counts for the last N days.
  class History
    # days_previous - number of days to report, counting back from start_date
    # start_date    - Date to start from; defaults to today (UTC)
    def initialize(days_previous, start_date = nil)
      @days_previous = days_previous
      @start_date = start_date || Time.now.utc.to_date
    end

    # Hash of "YYYY-MM-DD" => processed count, memoized.
    def processed
      @processed ||= date_stat_hash("processed")
    end

    # Hash of "YYYY-MM-DD" => failed count, memoized.
    def failed
      @failed ||= date_stat_hash("failed")
    end

    private

    # Build a date => count hash for the given stat by MGETting the
    # per-day counter keys ("stat:<stat>:<date>"). Missing days are 0.
    def date_stat_hash(stat)
      i = 0
      stat_hash = {}
      keys = []
      dates = []

      while i < @days_previous
        date = @start_date - i
        datestr = date.strftime("%Y-%m-%d")
        keys << "stat:#{stat}:#{datestr}"
        dates << datestr
        i += 1
      end

      begin
        Sidekiq.redis do |conn|
          conn.mget(keys).each_with_index do |value, idx|
            stat_hash[dates[idx]] = value ? value.to_i : 0
          end
        end
      rescue Redis::CommandError
        # mget will trigger a CROSSSLOT error when run against a Cluster
        # TODO Someone want to add Cluster support?
      end

      stat_hash
    end
  end
end
213
+
214
##
# Encapsulates a queue within Sidekiq.
# Allows enumeration of all jobs within the queue
# and deletion of jobs.
#
#   queue = Sidekiq::Queue.new("mailer")
#   queue.each do |job|
#     job.klass # => 'MyWorker'
#     job.args # => [1, 2, 3]
#     job.delete if job.jid == 'abcdef1234567890'
#   end
#
class Queue
  include Enumerable
  # sscan is needed as a class-level method here (see Queue.all),
  # hence extend rather than include.
  extend RedisScanner

  ##
  # Return all known queues within Redis.
  #
  def self.all
    Sidekiq.redis { |c| sscan(c, "queues") }.sort.map { |q| Sidekiq::Queue.new(q) }
  end

  # The queue name, e.g. "default".
  attr_reader :name

  def initialize(name = "default")
    @name = name.to_s
    # Redis key of the list holding this queue's job payloads.
    @rname = "queue:#{name}"
  end

  # Number of jobs currently enqueued.
  def size
    Sidekiq.redis { |con| con.llen(@rname) }
  end

  # Sidekiq Pro overrides this
  def paused?
    false
  end

  ##
  # Calculates this queue's latency, the difference in seconds since the oldest
  # job in the queue was enqueued.
  #
  # @return Float
  def latency
    # The tail of the list is the oldest entry.
    entry = Sidekiq.redis { |conn|
      conn.lrange(@rname, -1, -1)
    }.first
    return 0 unless entry
    job = Sidekiq.load_json(entry)
    now = Time.now.to_f
    # A payload without enqueued_at yields zero latency.
    thence = job["enqueued_at"] || now
    now - thence
  end

  # Yields a Sidekiq::Job for every entry, paging through LRANGE in
  # chunks of 50. The window start is shifted left by the number of jobs
  # that have disappeared since enumeration began, so concurrent
  # processing does not cause entries to be skipped.
  def each
    initial_size = size
    deleted_size = 0
    page = 0
    page_size = 50

    loop do
      range_start = page * page_size - deleted_size
      range_end = range_start + page_size - 1
      entries = Sidekiq.redis { |conn|
        conn.lrange @rname, range_start, range_end
      }
      break if entries.empty?
      page += 1
      entries.each do |entry|
        yield Job.new(entry, @name)
      end
      # Recompute the shrinkage so the next page lines up with the
      # (possibly smaller) list.
      deleted_size = initial_size - size
    end
  end

  ##
  # Find the job with the given JID within this queue.
  #
  # This is a slow, inefficient operation. Do not use under
  # normal conditions. Sidekiq Pro contains a faster version.
  def find_job(jid)
    detect { |j| j.jid == jid }
  end

  # Atomically delete the queue's list and deregister the queue name
  # from the global "queues" set.
  def clear
    Sidekiq.redis do |conn|
      conn.multi do
        conn.del(@rname)
        conn.srem("queues", name)
      end
    end
  end
  alias_method :💣, :clear
end
309
+
310
##
# Encapsulates a pending job within a Sidekiq queue or
# sorted set.
#
# The job should be considered immutable but may be
# removed from the queue via Job#delete.
#
class Job
  # The parsed job payload (a Hash; empty if the JSON was invalid).
  attr_reader :item
  # The raw payload exactly as stored in Redis.
  attr_reader :value

  # item       - raw JSON string or an already-parsed Hash
  # queue_name - queue this job was read from; falls back to the
  #              payload's own "queue" attribute
  def initialize(item, queue_name = nil)
    @args = nil
    @value = item
    @item = item.is_a?(Hash) ? item : parse(item)
    @queue = queue_name || @item["queue"]
  end

  def parse(item)
    Sidekiq.load_json(item)
  rescue JSON::ParserError
    # If the job payload in Redis is invalid JSON, we'll load
    # the item as an empty hash and store the invalid JSON as
    # the job 'args' for display in the Web UI.
    @invalid = true
    @args = [item]
    {}
  end

  # The worker class name stored in the payload.
  def klass
    self["class"]
  end

  def display_class
    # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
    @klass ||= case klass
               when /\ASidekiq::Extensions::Delayed/
                 # Delayed extension payloads are YAML: [target, method, args].
                 safe_load(args[0], klass) do |target, method, _|
                   "#{target}.#{method}"
                 end
               when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
                 job_class = @item["wrapped"] || args[0]
                 if job_class == "ActionMailer::DeliveryJob"
                   # MailerClass#mailer_method
                   args[0]["arguments"][0..1].join("#")
                 else
                   job_class
                 end
               else
                 klass
               end
  end

  def display_args
    # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
    @display_args ||= case klass
                      when /\ASidekiq::Extensions::Delayed/
                        safe_load(args[0], args) do |_, _, arg|
                          arg
                        end
                      when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
                        job_args = self["wrapped"] ? args[0]["arguments"] : []
                        if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob"
                          # remove MailerClass, mailer_method and 'deliver_now'
                          job_args.drop(3)
                        else
                          job_args
                        end
                      else
                        if self["encrypt"]
                          # no point in showing 150+ bytes of random garbage
                          args[-1] = "[encrypted data]"
                        end
                        args
                      end
  end

  # Job arguments; for invalid-JSON payloads this is the raw string
  # captured by #parse.
  def args
    @args || @item["args"]
  end

  # Unique job id.
  def jid
    self["jid"]
  end

  # When the job was pushed onto its queue, or nil if unknown.
  def enqueued_at
    self["enqueued_at"] ? Time.at(self["enqueued_at"]).utc : nil
  end

  # When the job was created; falls back to enqueued_at, then epoch 0.
  def created_at
    Time.at(self["created_at"] || self["enqueued_at"] || 0).utc
  end

  attr_reader :queue

  # Seconds since the job was enqueued (or created); 0 if neither
  # timestamp is present.
  def latency
    now = Time.now.to_f
    now - (@item["enqueued_at"] || @item["created_at"] || now)
  end

  ##
  # Remove this job from the queue.
  # Returns true if the exact payload was found and removed.
  def delete
    count = Sidekiq.redis { |conn|
      conn.lrem("queue:#{@queue}", 1, @value)
    }
    count != 0
  end

  def [](name)
    # nil will happen if the JSON fails to parse.
    # We don't guarantee Sidekiq will work with bad job JSON but we should
    # make a best effort to minimize the damage.
    @item ? @item[name] : nil
  end

  private

  # Deserialize a delayed-extension YAML payload and yield its parts,
  # returning +default+ if the YAML cannot be loaded.
  # NOTE(review): YAML.load deserializes arbitrary Ruby objects; these
  # payloads are produced by Sidekiq's own delay extensions, but confirm
  # this data path never carries untrusted input.
  def safe_load(content, default)
    yield(*YAML.load(content))
  rescue => ex
    # #1761 in dev mode, it's possible to have jobs enqueued which haven't been loaded into
    # memory yet so the YAML can't be loaded.
    Sidekiq.logger.warn "Unable to load YAML: #{ex.message}" unless Sidekiq.options[:environment] == "development"
    default
  end
end
437
+
438
# A single entry within one of Sidekiq's sorted sets (schedule, retry,
# dead). Carries the score (the epoch timestamp used for ordering) and
# a reference back to the owning JobSet.
class SortedEntry < Job
  attr_reader :score
  attr_reader :parent

  def initialize(parent, score, item)
    super(item)
    @score = score
    @parent = parent
  end

  # Time this entry fires, i.e. its score interpreted as epoch seconds.
  def at
    Time.at(score).utc
  end

  # Remove this entry from its sorted set. Deleting by the raw value is
  # exact; without it, fall back to a score + jid lookup.
  def delete
    if @value
      @parent.delete_by_value(@parent.name, @value)
    else
      @parent.delete_by_jid(score, jid)
    end
  end

  # Move this job to fire at the given time (delete, then re-add).
  def reschedule(at)
    delete
    @parent.schedule(at, item)
  end

  # Pull the job out of the sorted set and push it onto its queue for
  # immediate execution.
  def add_to_queue
    remove_job do |message|
      msg = Sidekiq.load_json(message)
      Sidekiq::Client.push(msg)
    end
  end

  # Re-enqueue the job now, decrementing retry_count so this manual
  # retry does not count against the job's retry limit.
  def retry
    remove_job do |message|
      msg = Sidekiq.load_json(message)
      msg["retry_count"] -= 1 if msg["retry_count"]
      Sidekiq::Client.push(msg)
    end
  end

  ##
  # Place job in the dead set
  def kill
    remove_job do |message|
      DeadSet.new.kill(message)
    end
  end

  # True if the payload records an error from a previous run.
  def error?
    !!item["error_class"]
  end

  private

  # Atomically pop every entry sharing this entry's score, yield the one
  # matching our jid, and push the rest back. Scores are timestamps, so
  # several jobs can legitimately collide on the same score.
  def remove_job
    Sidekiq.redis do |conn|
      # MULTI guarantees the read and the removal see the same members.
      results = conn.multi {
        conn.zrangebyscore(parent.name, score, score)
        conn.zremrangebyscore(parent.name, score, score)
      }.first

      if results.size == 1
        # Only one entry at this score — it must be ours.
        yield results.first
      else
        # multiple jobs with the same score
        # find the one with the right JID and push it
        # (cheap substring check first, full JSON parse only on a hit)
        hash = results.group_by { |message|
          if message.index(jid)
            msg = Sidekiq.load_json(message)
            msg["jid"] == jid
          else
            false
          end
        }

        msg = hash.fetch(true, []).first
        yield msg if msg

        # push the rest back onto the sorted set
        conn.multi do
          hash.fetch(false, []).each do |message|
            conn.zadd(parent.name, score.to_f.to_s, message)
          end
        end
      end
    end
  end
end
528
+
529
##
# Base class wrapping one of Sidekiq's Redis sorted sets
# (schedule, retry, dead).
class SortedSet
  include Enumerable

  # The Redis key of the underlying sorted set.
  attr_reader :name

  def initialize(name)
    @name = name
    # Snapshot the size at construction; subclasses use this cached
    # value to compensate for concurrent deletions while enumerating.
    @_size = size
  end

  # Current number of entries, read straight from Redis.
  def size
    Sidekiq.redis { |conn| conn.zcard(name) }
  end

  # Delete the entire sorted set.
  def clear
    Sidekiq.redis { |conn| conn.del(name) }
  end
  alias_method :💣, :clear
end
550
+
551
# A SortedSet whose members are serialized job payloads scored by epoch
# timestamp (schedule, retry, dead). Adds scheduling, enumeration and
# deletion on top of SortedSet.
class JobSet < SortedSet
  # Add a job payload to the set, scored by the given timestamp.
  def schedule(timestamp, message)
    Sidekiq.redis do |conn|
      conn.zadd(name, timestamp.to_f.to_s, Sidekiq.dump_json(message))
    end
  end

  # Yields a SortedEntry per element, highest-scored first. Pages
  # backwards through the set using negative ZRANGE indexes in chunks of
  # 50, shifting the window by the number of entries removed since
  # enumeration began so concurrent deletions don't skip elements.
  def each
    initial_size = @_size
    offset_size = 0
    page = -1
    page_size = 50

    loop do
      range_start = page * page_size + offset_size
      range_end = range_start + page_size - 1
      elements = Sidekiq.redis { |conn|
        conn.zrange name, range_start, range_end, with_scores: true
      }
      break if elements.empty?
      page -= 1
      # reverse_each so callers see highest scores first within the page.
      elements.reverse_each do |element, score|
        yield SortedEntry.new(self, score, element)
      end
      offset_size = initial_size - @_size
    end
  end

  # Return all entries at exactly the given score; when a jid is
  # supplied, only the entries whose payload carries that jid.
  def fetch(score, jid = nil)
    elements = Sidekiq.redis { |conn|
      conn.zrangebyscore(name, score, score)
    }

    elements.each_with_object([]) do |element, result|
      entry = SortedEntry.new(self, score, element)
      if jid
        result << entry if entry.jid == jid
      else
        result << entry
      end
    end
  end

  ##
  # Find the job with the given JID within this sorted set.
  #
  # This is a slow, inefficient operation. Do not use under
  # normal conditions. Sidekiq Pro contains a faster version.
  def find_job(jid)
    detect { |j| j.jid == jid }
  end

  # Remove the exact payload string from the set. Decrements the cached
  # size so any in-flight #each enumeration stays aligned.
  def delete_by_value(name, value)
    Sidekiq.redis do |conn|
      ret = conn.zrem(name, value)
      @_size -= 1 if ret
      ret
    end
  end

  # Remove the entry at the given score whose payload jid matches.
  def delete_by_jid(score, jid)
    Sidekiq.redis do |conn|
      elements = conn.zrangebyscore(name, score, score)
      elements.each do |element|
        message = Sidekiq.load_json(element)
        if message["jid"] == jid
          ret = conn.zrem(name, element)
          @_size -= 1 if ret
          # break with the zrem result so it becomes the return value.
          break ret
        end
      end
    end
  end

  alias_method :delete, :delete_by_jid
end
627
+
628
##
# Allows enumeration of scheduled jobs within Sidekiq.
# Based on this, you can search/filter for jobs. Here's an
# example where I'm selecting all jobs of a certain type
# and deleting them from the schedule queue.
#
#   r = Sidekiq::ScheduledSet.new
#   r.select do |scheduled|
#     scheduled.klass == 'Sidekiq::Extensions::DelayedClass' &&
#       scheduled.args[0] == 'User' &&
#       scheduled.args[1] == 'setup_new_subscriber'
#   end.map(&:delete)
class ScheduledSet < JobSet
  # Wraps the "schedule" sorted set.
  def initialize
    super("schedule")
  end
end
645
+
646
##
# Allows enumeration of retries within Sidekiq.
# Based on this, you can search/filter for jobs. Here's an
# example where I'm selecting all jobs of a certain type
# and deleting them from the retry queue.
#
#   r = Sidekiq::RetrySet.new
#   r.select do |retri|
#     retri.klass == 'Sidekiq::Extensions::DelayedClass' &&
#       retri.args[0] == 'User' &&
#       retri.args[1] == 'setup_new_subscriber'
#   end.map(&:delete)
class RetrySet < JobSet
  # Wraps the "retry" sorted set.
  def initialize
    super("retry")
  end

  # Re-enqueue every pending retry immediately, draining the set.
  def retry_all
    each(&:retry) until size.zero?
  end

  # Move every pending retry to the dead set, draining the set.
  def kill_all
    each(&:kill) until size.zero?
  end
end
671
+
672
##
# Allows enumeration of dead jobs within Sidekiq.
#
class DeadSet < JobSet
  def initialize
    super "dead"
  end

  # Add a raw job payload to the dead set, trimming the set by age and
  # size, then fire any configured death handlers.
  #
  # message - the serialized (JSON string) job payload
  # opts    - pass notify_failure: false to skip the death handlers
  def kill(message, opts = {})
    now = Time.now.to_f
    Sidekiq.redis do |conn|
      conn.multi do
        conn.zadd(name, now.to_s, message)
        # Drop entries older than the configured retention window...
        conn.zremrangebyscore(name, "-inf", now - self.class.timeout)
        # ...and cap the set at max_jobs, evicting the oldest first.
        conn.zremrangebyrank(name, 0, - self.class.max_jobs)
      end
    end

    if opts[:notify_failure] != false
      job = Sidekiq.load_json(message)
      r = RuntimeError.new("Job killed by API")
      r.set_backtrace(caller)
      Sidekiq.death_handlers.each do |handle|
        handle.call(job, r)
      end
    end
    true
  end

  # Re-enqueue every dead job, draining the set.
  def retry_all
    each(&:retry) while size > 0
  end

  # Maximum number of jobs retained (option :dead_max_jobs).
  def self.max_jobs
    Sidekiq.options[:dead_max_jobs]
  end

  # Retention window in seconds (option :dead_timeout_in_seconds).
  def self.timeout
    Sidekiq.options[:dead_timeout_in_seconds]
  end
end
713
+
714
##
# Enumerates the set of Sidekiq processes which are actively working
# right now. Each process sends a heartbeat to Redis every 5 seconds
# so this set should be relatively accurate, barring network partitions.
#
# Yields a Sidekiq::Process.
#
class ProcessSet
  include Enumerable
  include RedisScanner

  # clean_plz - prune dead process records immediately (default true)
  def initialize(clean_plz = true)
    cleanup if clean_plz
  end

  # Cleans up dead processes recorded in Redis.
  # Returns the number of processes cleaned.
  def cleanup
    count = 0
    Sidekiq.redis do |conn|
      procs = sscan(conn, "processes").sort
      # One HGET per process; replies line up with `procs` by index
      # since pipelined results come back in command order.
      heartbeats = conn.pipelined {
        procs.each do |key|
          conn.hget(key, "info")
        end
      }

      # the hash named key has an expiry of 60 seconds.
      # if it's not found, that means the process has not reported
      # in to Redis and probably died.
      to_prune = []
      heartbeats.each_with_index do |beat, i|
        to_prune << procs[i] if beat.nil?
      end
      count = conn.srem("processes", to_prune) unless to_prune.empty?
    end
    count
  end

  # Yields a Sidekiq::Process for each live process, in sorted key order.
  def each
    procs = Sidekiq.redis { |conn| sscan(conn, "processes") }.sort

    Sidekiq.redis do |conn|
      # We're making a tradeoff here between consuming more memory instead of
      # making more roundtrips to Redis, but if you have hundreds or thousands of workers,
      # you'll be happier this way
      result = conn.pipelined {
        procs.each do |key|
          conn.hmget(key, "info", "busy", "beat", "quiet")
        end
      }

      result.each do |info, busy, at_s, quiet|
        # If a process is stopped between when we query Redis for `procs` and
        # when we query for `result`, we will have an item in `result` that is
        # composed of `nil` values.
        next if info.nil?

        hash = Sidekiq.load_json(info)
        yield Process.new(hash.merge("busy" => busy.to_i, "beat" => at_s.to_f, "quiet" => quiet))
      end
    end

    nil
  end

  # This method is not guaranteed accurate since it does not prune the set
  # based on current heartbeat. #each does that and ensures the set only
  # contains Sidekiq processes which have sent a heartbeat within the last
  # 60 seconds.
  def size
    Sidekiq.redis { |conn| conn.scard("processes") }
  end

  # Returns the identity of the current cluster leader or "" if no leader.
  # This is a Sidekiq Enterprise feature, will always return "" in Sidekiq
  # or Sidekiq Pro.
  def leader
    @leader ||= begin
      x = Sidekiq.redis { |c| c.get("dear-leader") }
      # need a non-falsy value so we can memoize
      x ||= ""
      x
    end
  end
end
800
+
801
#
# Sidekiq::Process represents an active Sidekiq process talking with Redis.
# Each process has a set of attributes which look like this:
#
#   {
#     'hostname' => 'app-1.example.com',
#     'started_at' => <process start time>,
#     'pid' => 12345,
#     'tag' => 'myapp'
#     'concurrency' => 25,
#     'queues' => ['default', 'low'],
#     'busy' => 10,
#     'beat' => <last heartbeat>,
#     'identity' => <unique string identifying the process>,
#   }
class Process
  # hash - the attribute Hash described above
  def initialize(hash)
    @attribs = hash
  end

  # Raw attribute access by string key.
  def [](key)
    @attribs[key]
  end

  # The process tag, typically used to distinguish applications.
  def tag
    self["tag"]
  end

  # Any labels attached to this process (always returns an Array).
  def labels
    Array(self["labels"])
  end

  # Unique string identifying this process across the cluster.
  def identity
    self["identity"]
  end

  # Ask the process to stop fetching new jobs (remote SIGTSTP analogue).
  def quiet!
    signal("TSTP")
  end

  # Ask the process to shut down (remote SIGTERM analogue).
  def stop!
    signal("TERM")
  end

  # Ask the process to log its thread backtraces (remote SIGTTIN analogue).
  def dump_threads
    signal("TTIN")
  end

  # True once the process has been told to quiet down.
  def stopping?
    self["quiet"] == "true"
  end

  private

  # Push a signal name onto the process' private signal list in Redis;
  # the remote process polls this list and reacts as if it had received
  # the corresponding Unix signal. The key expires so a dead process
  # doesn't leak its signal list.
  def signal(sig)
    signal_key = "#{identity}-signals"
    Sidekiq.redis do |c|
      c.multi do
        c.lpush(signal_key, sig)
        c.expire(signal_key, 60)
      end
    end
  end
end
865
+
866
##
# A worker is a thread that is currently processing a job.
# Programmatic access to the current active worker set.
#
# WARNING WARNING WARNING
#
# This is live data that can change every millisecond.
# If you call #size => 5 and then expect #each to be
# called 5 times, you're going to have a bad time.
#
#   workers = Sidekiq::Workers.new
#   workers.size => 2
#   workers.each do |process_id, thread_id, work|
#     # process_id is a unique identifier per Sidekiq process
#     # thread_id is a unique identifier per thread
#     # work is a Hash which looks like:
#     # { 'queue' => name, 'run_at' => timestamp, 'payload' => msg }
#     # run_at is an epoch Integer.
#   end
#
class Workers
  include Enumerable
  include RedisScanner

  # Yields [process_key, thread_id, work_hash] for each in-progress job.
  def each
    Sidekiq.redis do |conn|
      procs = sscan(conn, "processes")
      procs.sort.each do |key|
        # Pipelined replies come back in command order: the existence
        # check first, then the workers hash for that process.
        valid, workers = conn.pipelined {
          conn.exists(key)
          conn.hgetall("#{key}:workers")
        }
        # Skip processes whose heartbeat key has already expired.
        next unless valid
        workers.each_pair do |tid, json|
          yield key, tid, Sidekiq.load_json(json)
        end
      end
    end
  end

  # Note that #size is only as accurate as Sidekiq's heartbeat,
  # which happens every 5 seconds. It is NOT real-time.
  #
  # Not very efficient if you have lots of Sidekiq
  # processes but the alternative is a global counter
  # which can easily get out of sync with crashy processes.
  def size
    Sidekiq.redis do |conn|
      procs = sscan(conn, "processes")
      if procs.empty?
        0
      else
        # Sum the per-process "busy" counts.
        conn.pipelined {
          procs.each do |key|
            conn.hget(key, "busy")
          end
        }.map(&:to_i).inject(:+)
      end
    end
  end
end
927
+ end