sidekiq 6.5.12 → 7.0.6
Potentially problematic release.
- checksums.yaml +4 -4
- data/Changes.md +63 -22
- data/README.md +40 -32
- data/bin/sidekiq +3 -8
- data/bin/sidekiqload +186 -118
- data/bin/sidekiqmon +3 -0
- data/lib/sidekiq/api.rb +84 -121
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +55 -74
- data/lib/sidekiq/client.rb +29 -16
- data/lib/sidekiq/component.rb +3 -0
- data/lib/sidekiq/config.rb +270 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +11 -14
- data/lib/sidekiq/job.rb +375 -10
- data/lib/sidekiq/job_logger.rb +2 -2
- data/lib/sidekiq/job_retry.rb +9 -9
- data/lib/sidekiq/job_util.rb +48 -14
- data/lib/sidekiq/launcher.rb +64 -61
- data/lib/sidekiq/logger.rb +1 -26
- data/lib/sidekiq/manager.rb +9 -11
- data/lib/sidekiq/metrics/query.rb +2 -2
- data/lib/sidekiq/metrics/shared.rb +4 -3
- data/lib/sidekiq/metrics/tracking.rb +20 -18
- data/lib/sidekiq/middleware/chain.rb +19 -18
- data/lib/sidekiq/middleware/current_attributes.rb +8 -15
- data/lib/sidekiq/monitor.rb +16 -3
- data/lib/sidekiq/processor.rb +21 -27
- data/lib/sidekiq/rails.rb +13 -17
- data/lib/sidekiq/redis_client_adapter.rb +8 -47
- data/lib/sidekiq/redis_connection.rb +11 -111
- data/lib/sidekiq/scheduled.rb +20 -21
- data/lib/sidekiq/testing.rb +5 -33
- data/lib/sidekiq/transaction_aware_client.rb +4 -5
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/application.rb +21 -6
- data/lib/sidekiq/web/csrf_protection.rb +1 -1
- data/lib/sidekiq/web/helpers.rb +16 -15
- data/lib/sidekiq/web.rb +6 -17
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +76 -274
- data/sidekiq.gemspec +20 -10
- data/web/assets/javascripts/application.js +18 -1
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/dashboard-charts.js +166 -0
- data/web/assets/javascripts/dashboard.js +3 -223
- data/web/assets/javascripts/metrics.js +117 -115
- data/web/assets/stylesheets/application-dark.css +4 -0
- data/web/assets/stylesheets/application-rtl.css +2 -91
- data/web/assets/stylesheets/application.css +23 -298
- data/web/locales/ar.yml +70 -70
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -65
- data/web/locales/el.yml +2 -7
- data/web/locales/en.yml +76 -70
- data/web/locales/es.yml +68 -68
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +67 -67
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +64 -68
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +66 -66
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +59 -69
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +67 -66
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +67 -67
- data/web/locales/zh-cn.yml +20 -18
- data/web/locales/zh-tw.yml +10 -1
- data/web/views/_footer.erb +5 -2
- data/web/views/_job_info.erb +18 -2
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +1 -1
- data/web/views/busy.erb +37 -26
- data/web/views/dashboard.erb +36 -5
- data/web/views/metrics.erb +33 -20
- data/web/views/metrics_for_job.erb +22 -38
- data/web/views/morgue.erb +5 -9
- data/web/views/queue.erb +10 -14
- data/web/views/queues.erb +3 -1
- data/web/views/retries.erb +5 -9
- data/web/views/scheduled.erb +12 -13
- metadata +50 -40
- data/lib/sidekiq/delay.rb +0 -43
- data/lib/sidekiq/extensions/action_mailer.rb +0 -48
- data/lib/sidekiq/extensions/active_record.rb +0 -43
- data/lib/sidekiq/extensions/class_methods.rb +0 -43
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
- data/lib/sidekiq/metrics/deploy.rb +0 -47
- data/lib/sidekiq/worker.rb +0 -370
- data/web/assets/javascripts/graph.js +0 -16
- /data/{LICENSE → LICENSE.txt} +0 -0
data/lib/sidekiq/api.rb
CHANGED
@@ -6,10 +6,7 @@ require "zlib"
 require "set"
 require "base64"
 
-
-  require "sidekiq/metrics/deploy"
-  require "sidekiq/metrics/query"
-end
+require "sidekiq/metrics/query"
 
 #
 # Sidekiq's Data API provides a Ruby object model on top
@@ -70,7 +67,18 @@ module Sidekiq
     end
 
     def queues
-      Sidekiq
+      Sidekiq.redis do |conn|
+        queues = conn.sscan("queues").to_a
+
+        lengths = conn.pipelined { |pipeline|
+          queues.each do |queue|
+            pipeline.llen("queue:#{queue}")
+          end
+        }
+
+        array_of_arrays = queues.zip(lengths).sort_by { |_, size| -size }
+        array_of_arrays.to_h
+      end
     end
 
     # O(1) redis calls
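
`Sidekiq::Stats#queues` now does the queue/length lookup inline (the old `Sidekiq::Stats::Queues` helper is removed further down in this diff). A minimal usage sketch against a running Redis; the queue names in the comment are only illustrative:

```ruby
require "sidekiq/api"

stats = Sidekiq::Stats.new
# Hash of queue name => length, largest queue first,
# e.g. {"default" => 120, "mailers" => 3}
stats.queues.each { |name, size| puts "#{name}: #{size}" }
```
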
@@ -117,11 +125,11 @@ module Sidekiq
     # @api private
     def fetch_stats_slow!
       processes = Sidekiq.redis { |conn|
-        conn.
+        conn.sscan("processes").to_a
       }
 
       queues = Sidekiq.redis { |conn|
-        conn.
+        conn.sscan("queues").to_a
       }
 
       pipe2_res = Sidekiq.redis { |conn|
@@ -133,7 +141,7 @@ module Sidekiq
 
       s = processes.size
       workers_size = pipe2_res[0...s].sum(&:to_i)
-      enqueued = pipe2_res[s
+      enqueued = pipe2_res[s..].sum(&:to_i)
 
       @stats[:workers_size] = workers_size
       @stats[:enqueued] = enqueued
@@ -168,25 +176,8 @@ module Sidekiq
       @stats[s] || raise(ArgumentError, "Unknown stat #{s}")
     end
 
-    class Queues
-      def lengths
-        Sidekiq.redis do |conn|
-          queues = conn.sscan_each("queues").to_a
-
-          lengths = conn.pipelined { |pipeline|
-            queues.each do |queue|
-              pipeline.llen("queue:#{queue}")
-            end
-          }
-
-          array_of_arrays = queues.zip(lengths).sort_by { |_, size| -size }
-          array_of_arrays.to_h
-        end
-      end
-    end
-
     class History
-      def initialize(days_previous, start_date = nil)
+      def initialize(days_previous, start_date = nil, pool: nil)
         # we only store five years of data in Redis
         raise ArgumentError if days_previous < 1 || days_previous > (5 * 365)
         @days_previous = days_previous
@@ -211,15 +202,10 @@ module Sidekiq
 
         keys = dates.map { |datestr| "stat:#{stat}:#{datestr}" }
 
-
-
-
-            stat_hash[dates[idx]] = value ? value.to_i : 0
-          end
+        Sidekiq.redis do |conn|
+          conn.mget(keys).each_with_index do |value, idx|
+            stat_hash[dates[idx]] = value ? value.to_i : 0
           end
-      rescue RedisConnection.adapter::CommandError
-        # mget will trigger a CROSSSLOT error when run against a Cluster
-        # TODO Someone want to add Cluster support?
         end
 
         stat_hash
@@ -247,7 +233,7 @@ module Sidekiq
     #
     # @return [Array<Sidekiq::Queue>]
     def self.all
-      Sidekiq.redis { |c| c.
+      Sidekiq.redis { |c| c.sscan("queues").to_a }.sort.map { |q| Sidekiq::Queue.new(q) }
     end
 
     attr_reader :name
@@ -388,12 +374,7 @@ module Sidekiq
     def display_class
       # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
       @klass ||= self["display_class"] || begin
-
-        when /\ASidekiq::Extensions::Delayed/
-          safe_load(args[0], klass) do |target, method, _|
-            "#{target}.#{method}"
-          end
-        when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
+        if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
           job_class = @item["wrapped"] || args[0]
           if job_class == "ActionMailer::DeliveryJob" || job_class == "ActionMailer::MailDeliveryJob"
             # MailerClass#mailer_method
@@ -409,16 +390,7 @@ module Sidekiq
 
     def display_args
       # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
-      @display_args ||=
-        when /\ASidekiq::Extensions::Delayed/
-          safe_load(args[0], args) do |_, _, arg, kwarg|
-            if !kwarg || kwarg.empty?
-              arg
-            else
-              [arg, kwarg]
-            end
-          end
-        when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
+      @display_args ||= if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
         job_args = self["wrapped"] ? args[0]["arguments"] : []
         if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob"
           # remove MailerClass, mailer_method and 'deliver_now'
@@ -446,6 +418,10 @@ module Sidekiq
       self["jid"]
     end
 
+    def bid
+      self["bid"]
+    end
+
     def enqueued_at
       self["enqueued_at"] ? Time.at(self["enqueued_at"]).utc : nil
     end
@@ -491,31 +467,10 @@ module Sidekiq
 
     private
 
-    def safe_load(content, default)
-      yield(*YAML.load(content))
-    rescue => ex
-      # #1761 in dev mode, it's possible to have jobs enqueued which haven't been loaded into
-      # memory yet so the YAML can't be loaded.
-      # TODO is this still necessary? Zeitwerk reloader should handle?
-      Sidekiq.logger.warn "Unable to load YAML: #{ex.message}" unless Sidekiq.options[:environment] == "development"
-      default
-    end
-
     def uncompress_backtrace(backtrace)
-
-
-
-      else
-        decoded = Base64.decode64(backtrace)
-        uncompressed = Zlib::Inflate.inflate(decoded)
-        begin
-          Sidekiq.load_json(uncompressed)
-        rescue
-          # Handle old jobs with marshalled backtrace format
-          # TODO Remove in 7.x
-          Marshal.load(uncompressed)
-        end
-      end
+      decoded = Base64.decode64(backtrace)
+      uncompressed = Zlib::Inflate.inflate(decoded)
+      Sidekiq.load_json(uncompressed)
     end
   end
 
@@ -656,7 +611,7 @@ module Sidekiq
 
       match = "*#{match}*" unless match.include?("*")
       Sidekiq.redis do |conn|
-        conn.
+        conn.zscan(name, match: match, count: count) do |entry, score|
           yield SortedEntry.new(self, score, entry)
         end
       end
@@ -746,8 +701,8 @@ module Sidekiq
     # @return [SortedEntry] the record or nil
     def find_job(jid)
       Sidekiq.redis do |conn|
-        conn.
-          job =
+        conn.zscan(name, match: "*#{jid}*", count: 100) do |entry, score|
+          job = Sidekiq.load_json(entry)
           matched = job["jid"] == jid
           return SortedEntry.new(self, score, entry) if matched
         end
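
`JobSet#scan` and `#find_job` now iterate with `conn.zscan`; the calling API is unchanged. A small sketch, where the pattern and jid are placeholders:

```ruby
require "sidekiq/api"

retries = Sidekiq::RetrySet.new

# Pattern scan yields Sidekiq::SortedEntry objects
retries.scan("WelcomeEmailJob") { |entry| puts entry.jid }

# Exact lookup by jid; returns a SortedEntry or nil
entry = retries.find_job("0123456789abcdef01234567") # hypothetical jid
entry&.delete
```
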
@@ -792,12 +747,8 @@ module Sidekiq
   # example where I'm selecting jobs based on some complex logic
   # and deleting them from the scheduled set.
   #
-  #
-  #
-  #   scheduled.klass == 'Sidekiq::Extensions::DelayedClass' &&
-  #     scheduled.args[0] == 'User' &&
-  #     scheduled.args[1] == 'setup_new_subscriber'
-  #   end.map(&:delete)
+  # See the API wiki page for usage notes and examples.
+  #
   class ScheduledSet < JobSet
     def initialize
       super "schedule"
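
The deleted doc example referenced the removed `Sidekiq::Extensions::DelayedClass`; the set is still Enumerable, so the same select-and-delete pattern works against ordinary job classes. A hedged sketch, with `SomeJob` and the argument value as placeholders:

```ruby
require "sidekiq/api"

Sidekiq::ScheduledSet.new.select { |job|
  job.klass == "SomeJob" && job.args[0] == "User"
}.map(&:delete)
```
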
@@ -810,12 +761,8 @@ module Sidekiq
   # example where I'm selecting all jobs of a certain type
   # and deleting them from the retry queue.
   #
-  #
-  #
-  #   retri.klass == 'Sidekiq::Extensions::DelayedClass' &&
-  #     retri.args[0] == 'User' &&
-  #     retri.args[1] == 'setup_new_subscriber'
-  #   end.map(&:delete)
+  # See the API wiki page for usage notes and examples.
+  #
   class RetrySet < JobSet
     def initialize
       super "retry"
@@ -849,8 +796,8 @@ module Sidekiq
       Sidekiq.redis do |conn|
         conn.multi do |transaction|
           transaction.zadd(name, now.to_s, message)
-          transaction.zremrangebyscore(name, "-inf", now -
-          transaction.zremrangebyrank(name, 0, -
+          transaction.zremrangebyscore(name, "-inf", now - Sidekiq::Config::DEFAULTS[:dead_timeout_in_seconds])
+          transaction.zremrangebyrank(name, 0, - Sidekiq::Config::DEFAULTS[:dead_max_jobs])
         end
       end
 
@@ -858,7 +805,7 @@ module Sidekiq
       job = Sidekiq.load_json(message)
       r = RuntimeError.new("Job killed by API")
       r.set_backtrace(caller)
-      Sidekiq.death_handlers.each do |handle|
+      Sidekiq.default_configuration.death_handlers.each do |handle|
         handle.call(job, r)
       end
     end
@@ -869,18 +816,6 @@ module Sidekiq
     def retry_all
       each(&:retry) while size > 0
     end
-
-    # The maximum size of the Dead set. Older entries will be trimmed
-    # to stay within this limit. Default value is 10,000.
-    def self.max_jobs
-      Sidekiq[:dead_max_jobs]
-    end
-
-    # The time limit for entries within the Dead set. Older entries will be thrown away.
-    # Default value is six months.
-    def self.timeout
-      Sidekiq[:dead_timeout_in_seconds]
-    end
   end
 
   ##
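
`DeadSet.max_jobs` and `DeadSet.timeout` are gone; the trimming limits used by `kill` above now come from `Sidekiq::Config::DEFAULTS`. A small sketch of reading them; the values in the comments follow the removed docs (10,000 entries, roughly six months):

```ruby
require "sidekiq"

Sidekiq::Config::DEFAULTS[:dead_max_jobs]           # default 10_000 entries
Sidekiq::Config::DEFAULTS[:dead_timeout_in_seconds] # default ~6 months, in seconds
```
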
@@ -893,6 +828,24 @@ module Sidekiq
   class ProcessSet
     include Enumerable
 
+    def self.[](identity)
+      exists, (info, busy, beat, quiet, rss, rtt_us) = Sidekiq.redis { |conn|
+        conn.multi { |transaction|
+          transaction.sismember("processes", identity)
+          transaction.hmget(identity, "info", "busy", "beat", "quiet", "rss", "rtt_us")
+        }
+      }
+
+      return nil if exists == 0 || info.nil?
+
+      hash = Sidekiq.load_json(info)
+      Process.new(hash.merge("busy" => busy.to_i,
+        "beat" => beat.to_f,
+        "quiet" => quiet,
+        "rss" => rss.to_i,
+        "rtt_us" => rtt_us.to_i))
+    end
+
     # :nodoc:
     # @api private
     def initialize(clean_plz = true)
@@ -909,7 +862,7 @@ module Sidekiq
 
       count = 0
       Sidekiq.redis do |conn|
-        procs = conn.
+        procs = conn.sscan("processes").to_a
         heartbeats = conn.pipelined { |pipeline|
           procs.each do |key|
             pipeline.hget(key, "info")
@@ -929,7 +882,7 @@ module Sidekiq
 
     def each
       result = Sidekiq.redis { |conn|
-        procs = conn.
+        procs = conn.sscan("processes").to_a.sort
 
         # We're making a tradeoff here between consuming more memory instead of
         # making more roundtrips to Redis, but if you have hundreds or thousands of workers,
@@ -941,7 +894,7 @@ module Sidekiq
         end
       }
 
-      result.each do |info, busy,
+      result.each do |info, busy, beat, quiet, rss, rtt_us|
        # If a process is stopped between when we query Redis for `procs` and
        # when we query for `result`, we will have an item in `result` that is
        # composed of `nil` values.
@@ -949,10 +902,10 @@ module Sidekiq
 
        hash = Sidekiq.load_json(info)
        yield Process.new(hash.merge("busy" => busy.to_i,
-          "beat" =>
+          "beat" => beat.to_f,
          "quiet" => quiet,
          "rss" => rss.to_i,
-          "rtt_us" =>
+          "rtt_us" => rtt_us.to_i))
      end
    end
 
@@ -1008,6 +961,7 @@ module Sidekiq
   #   'busy' => 10,
   #   'beat' => <last heartbeat>,
   #   'identity' => <unique string identifying the process>,
+  #   'embedded' => true,
   # }
   class Process
     # :nodoc:
@@ -1021,7 +975,7 @@ module Sidekiq
     end
 
     def labels
-
+      self["labels"].to_a
     end
 
     def [](key)
@@ -1036,11 +990,25 @@ module Sidekiq
       self["queues"]
     end
 
+    def weights
+      self["weights"]
+    end
+
+    def version
+      self["version"]
+    end
+
+    def embedded?
+      self["embedded"]
+    end
+
     # Signal this process to stop processing new jobs.
     # It will continue to execute jobs it has already fetched.
     # This method is *asynchronous* and it can take 5-10
     # seconds for the process to quiet.
     def quiet!
+      raise "Can't quiet an embedded process" if embedded?
+
       signal("TSTP")
     end
 
@@ -1049,6 +1017,8 @@ module Sidekiq
     # This method is *asynchronous* and it can take 5-10
     # seconds for the process to start shutting down.
     def stop!
+      raise "Can't stop an embedded process" if embedded?
+
       signal("TERM")
     end
 
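
`quiet!` and `stop!` now refuse to signal embedded processes, since there is no standalone Sidekiq process to signal. A sketch that quiets every non-embedded process in the set:

```ruby
require "sidekiq/api"

Sidekiq::ProcessSet.new.each do |process|
  process.quiet! unless process.embedded?
end
```
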
@@ -1107,8 +1077,7 @@ module Sidekiq
       all_works = nil
 
       Sidekiq.redis do |conn|
-        procs = conn.
-
+        procs = conn.sscan("processes").to_a.sort
         all_works = conn.pipelined do |pipeline|
           procs.each do |key|
             pipeline.hgetall("#{key}:work")
@@ -1118,13 +1087,7 @@ module Sidekiq
 
       procs.zip(all_works).each do |key, workers|
         workers.each_pair do |tid, json|
-
-
-          hsh = Sidekiq.load_json(json)
-          p = hsh["payload"]
-          # avoid breaking API, this is a side effect of the JSON optimization in #4316
-          hsh["payload"] = Sidekiq.load_json(p) if p.is_a?(String)
-          results << [key, tid, hsh]
+          results << [key, tid, Sidekiq.load_json(json)] unless json.empty?
         end
       end
 
@@ -1139,7 +1102,7 @@ module Sidekiq
     # which can easily get out of sync with crashy processes.
     def size
       Sidekiq.redis do |conn|
-        procs = conn.
+        procs = conn.sscan("processes").to_a
         if procs.empty?
           0
         else
data/lib/sidekiq/capsule.rb
ADDED
@@ -0,0 +1,127 @@
+require "sidekiq/component"
+
+module Sidekiq
+  # A Sidekiq::Capsule is the set of resources necessary to
+  # process one or more queues with a given concurrency.
+  # One "default" Capsule is started but the user may declare additional
+  # Capsules in their initializer.
+  #
+  # This capsule will pull jobs from the "single" queue and process
+  # the jobs with one thread, meaning the jobs will be processed serially.
+  #
+  #   Sidekiq.configure_server do |config|
+  #     config.capsule("single-threaded") do |cap|
+  #       cap.concurrency = 1
+  #       cap.queues = %w(single)
+  #     end
+  #   end
+  class Capsule
+    include Sidekiq::Component
+
+    attr_reader :name
+    attr_reader :queues
+    attr_accessor :concurrency
+    attr_reader :mode
+    attr_reader :weights
+
+    def initialize(name, config)
+      @name = name
+      @config = config
+      @queues = ["default"]
+      @weights = {"default" => 0}
+      @concurrency = config[:concurrency]
+      @mode = :strict
+    end
+
+    def fetcher
+      @fetcher ||= begin
+        inst = (config[:fetch_class] || Sidekiq::BasicFetch).new(self)
+        inst.setup(config[:fetch_setup]) if inst.respond_to?(:setup)
+        inst
+      end
+    end
+
+    def stop
+      fetcher&.bulk_requeue([])
+    end
+
+    # Sidekiq checks queues in three modes:
+    # - :strict - all queues have 0 weight and are checked strictly in order
+    # - :weighted - queues have arbitrary weight between 1 and N
+    # - :random - all queues have weight of 1
+    def queues=(val)
+      @weights = {}
+      @queues = Array(val).each_with_object([]) do |qstr, memo|
+        arr = qstr
+        arr = qstr.split(",") if qstr.is_a?(String)
+        name, weight = arr
+        @weights[name] = weight.to_i
+        [weight.to_i, 1].max.times do
+          memo << name
+        end
+      end
+      @mode = if @weights.values.all?(&:zero?)
+        :strict
+      elsif @weights.values.all? { |x| x == 1 }
+        :random
+      else
+        :weighted
+      end
+    end
+
+    # Allow the middleware to be different per-capsule.
+    # Avoid if possible and add middleware globally so all
+    # capsules share the same chains. Easier to debug that way.
+    def client_middleware
+      @client_chain ||= config.client_middleware.copy_for(self)
+      yield @client_chain if block_given?
+      @client_chain
+    end
+
+    def server_middleware
+      @server_chain ||= config.server_middleware.copy_for(self)
+      yield @server_chain if block_given?
+      @server_chain
+    end
+
+    def redis_pool
+      Thread.current[:sidekiq_redis_pool] || local_redis_pool
+    end
+
+    def local_redis_pool
+      # connection pool is lazy, it will not create connections unless you actually need them
+      # so don't be skimpy!
+      @redis ||= config.new_redis_pool(@concurrency, name)
+    end
+
+    def redis
+      raise ArgumentError, "requires a block" unless block_given?
+      redis_pool.with do |conn|
+        retryable = true
+        begin
+          yield conn
+        rescue RedisClientAdapter::BaseError => ex
+          # 2550 Failover can cause the server to become a replica, need
+          # to disconnect and reopen the socket to get back to the primary.
+          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+          # 4985 Use the same logic when a blocking command is force-unblocked
+          # The same retry logic is also used in client.rb
+          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
+            conn.close
+            retryable = false
+            retry
+          end
+          raise
+        end
+      end
+    end
+
+    def lookup(name)
+      config.lookup(name)
+    end
+
+    def logger
+      config.logger
+    end
+  end
+end
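
Beyond the single-threaded example in the class comment, `queues=` accepts `"name,weight"` strings and derives the fetch mode from the weights. A sketch of a weighted capsule; the capsule name and queue names are placeholders:

```ruby
Sidekiq.configure_server do |config|
  config.capsule("bulk") do |cap|
    cap.concurrency = 5
    # Mixed weights => :weighted mode; "critical" is checked roughly
    # three times as often as "low".
    cap.queues = %w[critical,3 default,2 low,1]
  end
end
```
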