sidekiq 5.2.10 → 6.0.0.pre1
Potentially problematic release: this version of sidekiq might be problematic.
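To try this pre-release in an application, the gem has to be pinned explicitly, since Bundler skips pre-release versions unless the requirement itself names one. A minimal Gemfile sketch (assuming Bundler and rubygems.org as the gem source):

    source "https://rubygems.org"

    # "~> 6.0" alone would not match 6.0.0.pre1; pin the exact pre-release under review.
    gem "sidekiq", "6.0.0.pre1"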
- checksums.yaml +4 -4
- data/.standard.yml +20 -0
- data/.travis.yml +5 -2
- data/6.0-Upgrade.md +58 -0
- data/Changes.md +21 -16
- data/Gemfile +15 -10
- data/Rakefile +5 -4
- data/bin/sidekiqctl +1 -10
- data/lib/generators/sidekiq/worker_generator.rb +12 -14
- data/lib/sidekiq/api.rb +133 -148
- data/lib/sidekiq/cli.rb +95 -147
- data/lib/sidekiq/client.rb +44 -45
- data/lib/sidekiq/ctl.rb +35 -109
- data/lib/sidekiq/delay.rb +5 -6
- data/lib/sidekiq/exception_handler.rb +10 -12
- data/lib/sidekiq/extensions/action_mailer.rb +10 -20
- data/lib/sidekiq/extensions/active_record.rb +9 -7
- data/lib/sidekiq/extensions/class_methods.rb +9 -7
- data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
- data/lib/sidekiq/fetch.rb +5 -6
- data/lib/sidekiq/job_logger.rb +37 -7
- data/lib/sidekiq/job_retry.rb +45 -58
- data/lib/sidekiq/launcher.rb +59 -48
- data/lib/sidekiq/logger.rb +69 -0
- data/lib/sidekiq/manager.rb +6 -8
- data/lib/sidekiq/middleware/chain.rb +2 -1
- data/lib/sidekiq/middleware/i18n.rb +5 -7
- data/lib/sidekiq/paginator.rb +11 -12
- data/lib/sidekiq/processor.rb +42 -45
- data/lib/sidekiq/rails.rb +2 -26
- data/lib/sidekiq/redis_connection.rb +31 -37
- data/lib/sidekiq/scheduled.rb +17 -19
- data/lib/sidekiq/testing/inline.rb +2 -1
- data/lib/sidekiq/testing.rb +22 -23
- data/lib/sidekiq/util.rb +18 -15
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/action.rb +15 -11
- data/lib/sidekiq/web/application.rb +59 -59
- data/lib/sidekiq/web/helpers.rb +66 -67
- data/lib/sidekiq/web/router.rb +17 -14
- data/lib/sidekiq/web.rb +36 -44
- data/lib/sidekiq/worker.rb +12 -13
- data/lib/sidekiq.rb +53 -42
- data/sidekiq.gemspec +7 -7
- metadata +20 -32
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
data/lib/sidekiq/api.rb
CHANGED
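Most of the churn in this file is mechanical: single-quoted string literals become double-quoted, and value-returning Sidekiq.redis do ... end blocks become brace blocks. For example, this call from the new side of the diff is behaviourally identical to its single-quoted predecessor:

    Sidekiq.redis { |conn| conn.scard("processes") }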
@@ -1,16 +1,16 @@
 # frozen_string_literal: true
-require 'sidekiq'
 
-module Sidekiq
+require "sidekiq"
 
+module Sidekiq
   module RedisScanner
     def sscan(conn, key)
-      cursor = '0'
+      cursor = "0"
       result = []
       loop do
         cursor, values = conn.sscan(key, cursor)
         result.push(*values)
-        break if cursor == '0'
+        break if cursor == "0"
       end
       result
     end
@@ -64,61 +64,65 @@ module Sidekiq
     end
 
     def fetch_stats!
-      pipe1_res = Sidekiq.redis do |conn|
+      pipe1_res = Sidekiq.redis { |conn|
         conn.pipelined do
-          conn.get('stat:processed')
-          conn.get('stat:failed')
-          conn.zcard('schedule')
-          conn.zcard('retry')
-          conn.zcard('dead')
-          conn.scard('processes')
-          conn.lrange('queue:default', -1, -1)
+          conn.get("stat:processed")
+          conn.get("stat:failed")
+          conn.zcard("schedule")
+          conn.zcard("retry")
+          conn.zcard("dead")
+          conn.scard("processes")
+          conn.lrange("queue:default", -1, -1)
         end
-      end
+      }
 
-      processes = Sidekiq.redis do |conn|
-        sscan(conn, 'processes')
-      end
+      processes = Sidekiq.redis { |conn|
+        sscan(conn, "processes")
+      }
 
-      queues = Sidekiq.redis do |conn|
-        sscan(conn, 'queues')
-      end
+      queues = Sidekiq.redis { |conn|
+        sscan(conn, "queues")
+      }
 
-      pipe2_res = Sidekiq.redis do |conn|
+      pipe2_res = Sidekiq.redis { |conn|
         conn.pipelined do
-          processes.each {|key| conn.hget(key, 'busy') }
+          processes.each {|key| conn.hget(key, "busy") }
           queues.each {|queue| conn.llen("queue:#{queue}") }
         end
-      end
+      }
 
       s = processes.size
       workers_size = pipe2_res[0...s].map(&:to_i).inject(0, &:+)
       enqueued = pipe2_res[s..-1].map(&:to_i).inject(0, &:+)
 
       default_queue_latency = if (entry = pipe1_res[6].first)
-                                job = Sidekiq.load_json(entry) rescue {}
-                                now = Time.now.to_f
-                                thence = job['enqueued_at'] || now
-                                now - thence
-                              else
-                                0
-                              end
+        job = begin
+          Sidekiq.load_json(entry)
+        rescue
+          {}
+        end
+        now = Time.now.to_f
+        thence = job["enqueued_at"] || now
+        now - thence
+      else
+        0
+      end
       @stats = {
-        processed:       pipe1_res[0].to_i,
-        failed:          pipe1_res[1].to_i,
-        scheduled_size:  pipe1_res[2],
-        retry_size:      pipe1_res[3],
-        dead_size:       pipe1_res[4],
-        processes_size:  pipe1_res[5],
+        processed: pipe1_res[0].to_i,
+        failed: pipe1_res[1].to_i,
+        scheduled_size: pipe1_res[2],
+        retry_size: pipe1_res[3],
+        dead_size: pipe1_res[4],
+        processes_size: pipe1_res[5],
 
         default_queue_latency: default_queue_latency,
-        workers_size:    workers_size,
-        enqueued:        enqueued
+        workers_size: workers_size,
+        enqueued: enqueued,
       }
     end
 
     def reset(*stats)
-      all = %w(failed processed)
+      all = %w[failed processed]
       stats = stats.empty? ? all : all & stats.flatten.compact.map(&:to_s)
 
       mset_args = []
@@ -142,20 +146,19 @@ module Sidekiq
 
       def lengths
         Sidekiq.redis do |conn|
-          queues = sscan(conn, 'queues')
+          queues = sscan(conn, "queues")
 
-          lengths = conn.pipelined do
+          lengths = conn.pipelined {
             queues.each do |queue|
               conn.llen("queue:#{queue}")
             end
-          end
+          }
 
           i = 0
-          array_of_arrays = queues.inject({}) do |memo, queue|
+          array_of_arrays = queues.each_with_object({}) { |queue, memo|
            memo[queue] = lengths[i]
            i += 1
-            memo
-          end.sort_by { |_, size| size }
+          }.sort_by { |_, size| size }
 
           Hash[array_of_arrays.reverse]
         end
@@ -164,8 +167,6 @@ module Sidekiq
 
     class History
       def initialize(days_previous, start_date = nil)
-        #we only store five years of data in Redis
-        raise ArgumentError if days_previous < 1 || days_previous > (5 * 365)
        @days_previous = days_previous
        @start_date = start_date || Time.now.utc.to_date
      end
@@ -230,12 +231,12 @@ module Sidekiq
     # Return all known queues within Redis.
     #
     def self.all
-      Sidekiq.redis { |c| sscan(c, 'queues') }.sort.map { |q| Sidekiq::Queue.new(q) }
+      Sidekiq.redis { |c| sscan(c, "queues") }.sort.map { |q| Sidekiq::Queue.new(q) }
     end
 
     attr_reader :name
 
-    def initialize(name="default")
+    def initialize(name = "default")
       @name = name.to_s
       @rname = "queue:#{name}"
     end
@@ -255,13 +256,13 @@ module Sidekiq
     #
     # @return Float
     def latency
-      entry = Sidekiq.redis do |conn|
+      entry = Sidekiq.redis { |conn|
         conn.lrange(@rname, -1, -1)
-      end.first
+      }.first
       return 0 unless entry
       job = Sidekiq.load_json(entry)
       now = Time.now.to_f
-      thence = job['enqueued_at'] || now
+      thence = job["enqueued_at"] || now
       now - thence
     end
 
@@ -271,12 +272,12 @@ module Sidekiq
       page = 0
       page_size = 50
 
-      while true do
+      loop do
         range_start = page * page_size - deleted_size
         range_end = range_start + page_size - 1
-        entries = Sidekiq.redis do |conn|
+        entries = Sidekiq.redis { |conn|
           conn.lrange @rname, range_start, range_end
-        end
+        }
         break if entries.empty?
         page += 1
         entries.each do |entry|
@@ -317,11 +318,11 @@ module Sidekiq
     attr_reader :item
     attr_reader :value
 
-    def initialize(item, queue_name=nil)
+    def initialize(item, queue_name = nil)
       @args = nil
       @value = item
       @item = item.is_a?(Hash) ? item : parse(item)
-      @queue = queue_name || @item['queue']
+      @queue = queue_name || @item["queue"]
     end
 
     def parse(item)
@@ -336,7 +337,7 @@ module Sidekiq
     end
 
     def klass
-      self['class']
+      self["class"]
     end
 
     def display_class
@@ -347,16 +348,16 @@ module Sidekiq
           "#{target}.#{method}"
         end
       when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
-        job_class = @item['wrapped'] || args[0]
-        if 'ActionMailer::DeliveryJob' == job_class
+        job_class = @item["wrapped"] || args[0]
+        if job_class == "ActionMailer::DeliveryJob"
           # MailerClass#mailer_method
-          args[0]['arguments'][0..1].join('#')
+          args[0]["arguments"][0..1].join("#")
         else
-         job_class
+          job_class
         end
       else
         klass
-       end
+      end
     end
 
     def display_args
@@ -367,53 +368,51 @@ module Sidekiq
           arg
         end
       when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
-        job_args = self['wrapped'] ? args[0]['arguments'] : []
-        if 'ActionMailer::DeliveryJob' == (self['wrapped'] || args[0])
+        job_args = self["wrapped"] ? args[0]["arguments"] : []
+        if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob"
          # remove MailerClass, mailer_method and 'deliver_now'
          job_args.drop(3)
         else
          job_args
         end
       else
-        if self['encrypt']
+        if self["encrypt"]
          # no point in showing 150+ bytes of random garbage
-          args[-1] = '[encrypted data]'
+          args[-1] = "[encrypted data]"
         end
         args
-       end
+      end
     end
 
     def args
-      @args || @item['args']
+      @args || @item["args"]
     end
 
     def jid
-      self['jid']
+      self["jid"]
     end
 
     def enqueued_at
-      self['enqueued_at'] ? Time.at(self['enqueued_at']).utc : nil
+      self["enqueued_at"] ? Time.at(self["enqueued_at"]).utc : nil
     end
 
     def created_at
-      Time.at(self['created_at'] || self['enqueued_at'] || 0).utc
+      Time.at(self["created_at"] || self["enqueued_at"] || 0).utc
     end
 
-    def queue
-      @queue
-    end
+    attr_reader :queue
 
     def latency
       now = Time.now.to_f
-      now - (@item['enqueued_at'] || @item['created_at'] || now)
+      now - (@item["enqueued_at"] || @item["created_at"] || now)
     end
 
     ##
     # Remove this job from the queue.
     def delete
-      count = Sidekiq.redis do |conn|
+      count = Sidekiq.redis { |conn|
         conn.lrem("queue:#{@queue}", 1, @value)
-      end
+      }
       count != 0
     end
 
@@ -427,14 +426,12 @@ module Sidekiq
     private
 
     def safe_load(content, default)
-      begin
-        yield(*YAML.load(content))
-      rescue => ex
-        # #1761 in dev mode, it's possible to have jobs enqueued which haven't been loaded into
-        # memory yet so the YAML can't be loaded.
-        Sidekiq.logger.warn "Unable to load YAML: #{ex.message}" unless Sidekiq.options[:environment] == 'development'
-        default
-      end
+      yield(*YAML.load(content))
+    rescue => ex
+      # #1761 in dev mode, it's possible to have jobs enqueued which haven't been loaded into
+      # memory yet so the YAML can't be loaded.
+      Sidekiq.logger.warn "Unable to load YAML: #{ex.message}" unless Sidekiq.options[:environment] == "development"
+      default
     end
   end
 
@@ -475,7 +472,7 @@ module Sidekiq
     def retry
       remove_job do |message|
         msg = Sidekiq.load_json(message)
-        msg['retry_count'] -= 1 if msg['retry_count']
+        msg["retry_count"] -= 1 if msg["retry_count"]
         Sidekiq::Client.push(msg)
       end
     end
@@ -489,31 +486,31 @@ module Sidekiq
     end
 
     def error?
-      !!item['error_class']
+      !!item["error_class"]
     end
 
     private
 
     def remove_job
       Sidekiq.redis do |conn|
-        results = conn.multi do
+        results = conn.multi {
           conn.zrangebyscore(parent.name, score, score)
           conn.zremrangebyscore(parent.name, score, score)
-        end.first
+        }.first
 
         if results.size == 1
          yield results.first
        else
          # multiple jobs with the same score
          # find the one with the right JID and push it
-          hash = results.group_by do |message|
+          hash = results.group_by { |message|
            if message.index(jid)
              msg = Sidekiq.load_json(message)
-              msg['jid'] == jid
+              msg["jid"] == jid
            else
              false
            end
-          end
+          }
 
          msg = hash.fetch(true, []).first
          yield msg if msg
@@ -527,7 +524,6 @@ module Sidekiq
        end
      end
    end
-
  end
 
  class SortedSet
@@ -553,7 +549,6 @@ module Sidekiq
  end
 
  class JobSet < SortedSet
-
    def schedule(timestamp, message)
      Sidekiq.redis do |conn|
        conn.zadd(name, timestamp.to_f.to_s, Sidekiq.dump_json(message))
@@ -566,15 +561,15 @@ module Sidekiq
      page = -1
      page_size = 50
 
-      while true do
+      loop do
        range_start = page * page_size + offset_size
        range_end = range_start + page_size - 1
-        elements = Sidekiq.redis do |conn|
+        elements = Sidekiq.redis { |conn|
          conn.zrange name, range_start, range_end, with_scores: true
-        end
+        }
        break if elements.empty?
        page -= 1
-        elements.reverse.each do |element, score|
+        elements.reverse_each do |element, score|
          yield SortedEntry.new(self, score, element)
        end
        offset_size = initial_size - @_size
@@ -582,18 +577,17 @@ module Sidekiq
    end
 
    def fetch(score, jid = nil)
-      elements = Sidekiq.redis do |conn|
+      elements = Sidekiq.redis { |conn|
        conn.zrangebyscore(name, score, score)
-      end
+      }
 
-      elements.inject([]) do |result, element|
+      elements.each_with_object([]) do |element, result|
        entry = SortedEntry.new(self, score, element)
        if jid
          result << entry if entry.jid == jid
        else
          result << entry
        end
-        result
      end
    end
 
@@ -603,7 +597,7 @@ module Sidekiq
    # This is a slow, inefficient operation. Do not use under
    # normal conditions. Sidekiq Pro contains a faster version.
    def find_job(jid)
-      self.detect { |j| j.jid == jid }
+      detect { |j| j.jid == jid }
    end
 
    def delete_by_value(name, value)
@@ -624,7 +618,6 @@ module Sidekiq
            @_size -= 1 if ret
            break ret
          end
-          false
        end
      end
    end
@@ -646,7 +639,7 @@ module Sidekiq
  # end.map(&:delete)
  class ScheduledSet < JobSet
    def initialize
-      super 'schedule'
+      super "schedule"
    end
  end
 
@@ -664,19 +657,15 @@ module Sidekiq
  # end.map(&:delete)
  class RetrySet < JobSet
    def initialize
-      super 'retry'
+      super "retry"
    end
 
    def retry_all
-      while size > 0
-        each(&:retry)
-      end
+      each(&:retry) while size > 0
    end
 
    def kill_all
-      while size > 0
-        each(&:kill)
-      end
+      each(&:kill) while size > 0
    end
  end
 
@@ -685,15 +674,15 @@ module Sidekiq
  #
  class DeadSet < JobSet
    def initialize
-      super 'dead'
+      super "dead"
    end
 
-    def kill(message, opts={})
+    def kill(message, opts = {})
      now = Time.now.to_f
      Sidekiq.redis do |conn|
        conn.multi do
          conn.zadd(name, now.to_s, message)
-          conn.zremrangebyscore(name, '-inf', now - self.class.timeout)
+          conn.zremrangebyscore(name, "-inf", now - self.class.timeout)
          conn.zremrangebyrank(name, 0, - self.class.max_jobs)
        end
      end
@@ -710,9 +699,7 @@ module Sidekiq
    end
 
    def retry_all
-      while size > 0
-        each(&:retry)
-      end
+      each(&:retry) while size > 0
    end
 
    def self.max_jobs
@@ -735,7 +722,7 @@ module Sidekiq
    include Enumerable
    include RedisScanner
 
-    def initialize(clean_plz=true)
+    def initialize(clean_plz = true)
      cleanup if clean_plz
    end
 
@@ -744,12 +731,12 @@ module Sidekiq
    def cleanup
      count = 0
      Sidekiq.redis do |conn|
-        procs = sscan(conn, 'processes').sort
-        heartbeats = conn.pipelined do
+        procs = sscan(conn, "processes").sort
+        heartbeats = conn.pipelined {
          procs.each do |key|
-            conn.hget(key, 'info')
+            conn.hget(key, "info")
          end
-        end
+        }
 
        # the hash named key has an expiry of 60 seconds.
        # if it's not found, that means the process has not reported
@@ -758,23 +745,23 @@ module Sidekiq
        heartbeats.each_with_index do |beat, i|
          to_prune << procs[i] if beat.nil?
        end
-        count = conn.srem('processes', to_prune) unless to_prune.empty?
+        count = conn.srem("processes", to_prune) unless to_prune.empty?
      end
      count
    end
 
    def each
-      procs = Sidekiq.redis { |conn| sscan(conn, 'processes') }.sort
+      procs = Sidekiq.redis { |conn| sscan(conn, "processes") }.sort
 
      Sidekiq.redis do |conn|
        # We're making a tradeoff here between consuming more memory instead of
        # making more roundtrips to Redis, but if you have hundreds or thousands of workers,
        # you'll be happier this way
-        result = conn.pipelined do
+        result = conn.pipelined {
          procs.each do |key|
-            conn.hmget(key, 'info', 'busy', 'beat', 'quiet')
+            conn.hmget(key, "info", "busy", "beat", "quiet")
          end
-        end
+        }
 
        result.each do |info, busy, at_s, quiet|
          # If a process is stopped between when we query Redis for `procs` and
@@ -783,7 +770,7 @@ module Sidekiq
          next if info.nil?
 
          hash = Sidekiq.load_json(info)
-          yield Process.new(hash.merge('busy' => busy.to_i, 'beat' => at_s.to_f, 'quiet' => quiet))
+          yield Process.new(hash.merge("busy" => busy.to_i, "beat" => at_s.to_f, "quiet" => quiet))
        end
      end
 
@@ -795,7 +782,7 @@ module Sidekiq
    # contains Sidekiq processes which have sent a heartbeat within the last
    # 60 seconds.
    def size
-      Sidekiq.redis { |conn| conn.scard('processes') }
+      Sidekiq.redis { |conn| conn.scard("processes") }
    end
 
    # Returns the identity of the current cluster leader or "" if no leader.
@@ -805,7 +792,7 @@ module Sidekiq
      @leader ||= begin
        x = Sidekiq.redis {|c| c.get("dear-leader") }
        # need a non-falsy value so we can memoize
-        x = "" unless x
+        x ||= ""
        x
      end
    end
@@ -832,11 +819,11 @@ module Sidekiq
    end
 
    def tag
-      self['tag']
+      self["tag"]
    end
 
    def labels
-      Array(self['labels'])
+      Array(self["labels"])
    end
 
    def [](key)
@@ -844,23 +831,23 @@ module Sidekiq
    end
 
    def identity
-      self['identity']
+      self["identity"]
    end
 
    def quiet!
-      signal('TSTP')
+      signal("TSTP")
    end
 
    def stop!
-      signal('TERM')
+      signal("TERM")
    end
 
    def dump_threads
-      signal('TTIN')
+      signal("TTIN")
    end
 
    def stopping?
-      self['quiet'] == 'true'
+      self["quiet"] == "true"
    end
 
    private
@@ -874,7 +861,6 @@ module Sidekiq
        end
      end
    end
-
  end
 
  ##
@@ -903,12 +889,12 @@ module Sidekiq
 
    def each
      Sidekiq.redis do |conn|
-        procs = sscan(conn, 'processes')
+        procs = sscan(conn, "processes")
        procs.sort.each do |key|
-          valid, workers = conn.pipelined do
-            conn.exists(key)
+          valid, workers = conn.pipelined {
+            conn.exists(key)
            conn.hgetall("#{key}:workers")
-          end
+          }
          next unless valid
          workers.each_pair do |tid, json|
            yield key, tid, Sidekiq.load_json(json)
@@ -925,18 +911,17 @@ module Sidekiq
    # which can easily get out of sync with crashy processes.
    def size
      Sidekiq.redis do |conn|
-        procs = sscan(conn, 'processes')
+        procs = sscan(conn, "processes")
        if procs.empty?
          0
        else
-          conn.pipelined do
+          conn.pipelined {
            procs.each do |key|
-              conn.hget(key, 'busy')
+              conn.hget(key, "busy")
            end
-          end.map(&:to_i).inject(:+)
+          }.map(&:to_i).inject(:+)
        end
      end
    end
  end
-
 end