sidekiq 7.0.0 → 7.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Changes.md +261 -13
- data/README.md +34 -27
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiqload +204 -109
- data/bin/sidekiqmon +3 -0
- data/lib/sidekiq/api.rb +151 -23
- data/lib/sidekiq/capsule.rb +20 -0
- data/lib/sidekiq/cli.rb +9 -4
- data/lib/sidekiq/client.rb +40 -24
- data/lib/sidekiq/component.rb +3 -1
- data/lib/sidekiq/config.rb +32 -12
- data/lib/sidekiq/deploy.rb +5 -5
- data/lib/sidekiq/embedded.rb +3 -3
- data/lib/sidekiq/fetch.rb +3 -5
- data/lib/sidekiq/iterable_job.rb +53 -0
- data/lib/sidekiq/job/interrupt_handler.rb +22 -0
- data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
- data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
- data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
- data/lib/sidekiq/job/iterable.rb +231 -0
- data/lib/sidekiq/job.rb +17 -10
- data/lib/sidekiq/job_logger.rb +24 -11
- data/lib/sidekiq/job_retry.rb +34 -11
- data/lib/sidekiq/job_util.rb +51 -15
- data/lib/sidekiq/launcher.rb +38 -22
- data/lib/sidekiq/logger.rb +1 -1
- data/lib/sidekiq/metrics/query.rb +6 -3
- data/lib/sidekiq/metrics/shared.rb +4 -4
- data/lib/sidekiq/metrics/tracking.rb +9 -3
- data/lib/sidekiq/middleware/chain.rb +12 -9
- data/lib/sidekiq/middleware/current_attributes.rb +70 -17
- data/lib/sidekiq/monitor.rb +17 -4
- data/lib/sidekiq/paginator.rb +4 -4
- data/lib/sidekiq/processor.rb +41 -27
- data/lib/sidekiq/rails.rb +18 -8
- data/lib/sidekiq/redis_client_adapter.rb +31 -35
- data/lib/sidekiq/redis_connection.rb +29 -7
- data/lib/sidekiq/scheduled.rb +4 -4
- data/lib/sidekiq/testing.rb +27 -8
- data/lib/sidekiq/transaction_aware_client.rb +7 -0
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web/action.rb +10 -4
- data/lib/sidekiq/web/application.rb +113 -16
- data/lib/sidekiq/web/csrf_protection.rb +9 -6
- data/lib/sidekiq/web/helpers.rb +104 -33
- data/lib/sidekiq/web.rb +63 -2
- data/lib/sidekiq.rb +2 -1
- data/sidekiq.gemspec +8 -29
- data/web/assets/javascripts/application.js +45 -0
- data/web/assets/javascripts/dashboard-charts.js +38 -12
- data/web/assets/javascripts/dashboard.js +8 -10
- data/web/assets/javascripts/metrics.js +64 -2
- data/web/assets/stylesheets/application-dark.css +4 -0
- data/web/assets/stylesheets/application-rtl.css +10 -0
- data/web/assets/stylesheets/application.css +38 -4
- data/web/locales/da.yml +11 -4
- data/web/locales/en.yml +2 -0
- data/web/locales/fr.yml +14 -0
- data/web/locales/gd.yml +99 -0
- data/web/locales/ja.yml +3 -1
- data/web/locales/pt-br.yml +20 -0
- data/web/locales/tr.yml +101 -0
- data/web/locales/zh-cn.yml +20 -19
- data/web/views/_footer.erb +14 -2
- data/web/views/_job_info.erb +18 -2
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +1 -1
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +46 -35
- data/web/views/dashboard.erb +25 -35
- data/web/views/filtering.erb +7 -0
- data/web/views/layout.erb +6 -6
- data/web/views/metrics.erb +42 -31
- data/web/views/metrics_for_job.erb +41 -51
- data/web/views/morgue.erb +5 -9
- data/web/views/queue.erb +10 -14
- data/web/views/queues.erb +9 -3
- data/web/views/retries.erb +5 -9
- data/web/views/scheduled.erb +12 -13
- metadata +37 -32
data/lib/sidekiq/api.rb
CHANGED
@@ -4,7 +4,6 @@ require "sidekiq"
 
 require "zlib"
 require "set"
-require "base64"
 
 require "sidekiq/metrics/query"
 
@@ -92,11 +91,11 @@ module Sidekiq
           pipeline.zcard("retry")
           pipeline.zcard("dead")
           pipeline.scard("processes")
-          pipeline.
+          pipeline.lindex("queue:default", -1)
         end
       }
 
-      default_queue_latency = if (entry = pipe1_res[6]
+      default_queue_latency = if (entry = pipe1_res[6])
        job = begin
          Sidekiq.load_json(entry)
        rescue
@@ -264,8 +263,8 @@ module Sidekiq
     # @return [Float] in seconds
     def latency
       entry = Sidekiq.redis { |conn|
-        conn.
-      }
+        conn.lindex(@rname, -1)
+      }
       return 0 unless entry
       job = Sidekiq.load_json(entry)
       now = Time.now.to_f
@@ -391,13 +390,13 @@ module Sidekiq
     def display_args
       # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
       @display_args ||= if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
-        job_args = self["wrapped"] ? args[0]["arguments"] : []
+        job_args = self["wrapped"] ? deserialize_argument(args[0]["arguments"]) : []
         if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob"
           # remove MailerClass, mailer_method and 'deliver_now'
           job_args.drop(3)
         elsif (self["wrapped"] || args[0]) == "ActionMailer::MailDeliveryJob"
           # remove MailerClass, mailer_method and 'deliver_now'
-          job_args.drop(3).first
+          job_args.drop(3).first.values_at("params", "args")
         else
           job_args
         end
@@ -418,6 +417,10 @@ module Sidekiq
       self["jid"]
     end
 
+    def bid
+      self["bid"]
+    end
+
     def enqueued_at
       self["enqueued_at"] ? Time.at(self["enqueued_at"]).utc : nil
     end
@@ -463,9 +466,32 @@ module Sidekiq
 
     private
 
+    ACTIVE_JOB_PREFIX = "_aj_"
+    GLOBALID_KEY = "_aj_globalid"
+
+    def deserialize_argument(argument)
+      case argument
+      when Array
+        argument.map { |arg| deserialize_argument(arg) }
+      when Hash
+        if serialized_global_id?(argument)
+          argument[GLOBALID_KEY]
+        else
+          argument.transform_values { |v| deserialize_argument(v) }
+            .reject { |k, _| k.start_with?(ACTIVE_JOB_PREFIX) }
+        end
+      else
+        argument
+      end
+    end
+
+    def serialized_global_id?(hash)
+      hash.size == 1 && hash.include?(GLOBALID_KEY)
+    end
+
     def uncompress_backtrace(backtrace)
-
-      uncompressed = Zlib::Inflate.inflate(
+      strict_base64_decoded = backtrace.unpack1("m")
+      uncompressed = Zlib::Inflate.inflate(strict_base64_decoded)
       Sidekiq.load_json(uncompressed)
     end
   end
@@ -544,7 +570,7 @@ module Sidekiq
     def remove_job
       Sidekiq.redis do |conn|
         results = conn.multi { |transaction|
-          transaction.
+          transaction.zrange(parent.name, score, score, "BYSCORE")
           transaction.zremrangebyscore(parent.name, score, score)
         }.first
 
@@ -652,7 +678,7 @@ module Sidekiq
       range_start = page * page_size + offset_size
       range_end = range_start + page_size - 1
       elements = Sidekiq.redis { |conn|
-        conn.zrange name, range_start, range_end, withscores
+        conn.zrange name, range_start, range_end, "withscores"
       }
       break if elements.empty?
       page -= 1
@@ -679,7 +705,7 @@ module Sidekiq
       end
 
       elements = Sidekiq.redis { |conn|
-        conn.
+        conn.zrange(name, begin_score, end_score, "BYSCORE", "withscores")
      }
 
      elements.each_with_object([]) do |element, result|
@@ -698,7 +724,7 @@ module Sidekiq
     def find_job(jid)
       Sidekiq.redis do |conn|
         conn.zscan(name, match: "*#{jid}*", count: 100) do |entry, score|
-          job =
+          job = Sidekiq.load_json(entry)
           matched = job["jid"] == jid
           return SortedEntry.new(self, score, entry) if matched
         end
@@ -720,7 +746,7 @@ module Sidekiq
     # @api private
     def delete_by_jid(score, jid)
       Sidekiq.redis do |conn|
-        elements = conn.
+        elements = conn.zrange(name, score, score, "BYSCORE")
         elements.each do |element|
           if element.index(jid)
             message = Sidekiq.load_json(element)
@@ -747,7 +773,7 @@ module Sidekiq
   #
   class ScheduledSet < JobSet
     def initialize
-      super
+      super("schedule")
     end
   end
 
@@ -761,7 +787,7 @@ module Sidekiq
   #
   class RetrySet < JobSet
     def initialize
-      super
+      super("retry")
     end
 
     # Enqueues all jobs pending within the retry set.
@@ -782,7 +808,7 @@ module Sidekiq
   #
   class DeadSet < JobSet
     def initialize
-      super
+      super("dead")
     end
 
     # Add the given job to the Dead set.
@@ -824,6 +850,24 @@ module Sidekiq
   class ProcessSet
     include Enumerable
 
+    def self.[](identity)
+      exists, (info, busy, beat, quiet, rss, rtt_us) = Sidekiq.redis { |conn|
+        conn.multi { |transaction|
+          transaction.sismember("processes", identity)
+          transaction.hmget(identity, "info", "busy", "beat", "quiet", "rss", "rtt_us")
+        }
+      }
+
+      return nil if exists == 0 || info.nil?
+
+      hash = Sidekiq.load_json(info)
+      Process.new(hash.merge("busy" => busy.to_i,
+        "beat" => beat.to_f,
+        "quiet" => quiet,
+        "rss" => rss.to_i,
+        "rtt_us" => rtt_us.to_i))
+    end
+
     # :nodoc:
     # @api private
     def initialize(clean_plz = true)
@@ -836,7 +880,7 @@ module Sidekiq
     # @api private
     def cleanup
       # dont run cleanup more than once per minute
-      return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1",
+      return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", "NX", "EX", "60") }
 
       count = 0
       Sidekiq.redis do |conn|
@@ -872,7 +916,7 @@ module Sidekiq
         end
       }
 
-      result.each do |info, busy,
+      result.each do |info, busy, beat, quiet, rss, rtt_us|
        # If a process is stopped between when we query Redis for `procs` and
        # when we query for `result`, we will have an item in `result` that is
        # composed of `nil` values.
@@ -880,10 +924,10 @@ module Sidekiq
 
        hash = Sidekiq.load_json(info)
        yield Process.new(hash.merge("busy" => busy.to_i,
-          "beat" =>
+          "beat" => beat.to_f,
          "quiet" => quiet,
          "rss" => rss.to_i,
-          "rtt_us" =>
+          "rtt_us" => rtt_us.to_i))
      end
    end
 
@@ -939,6 +983,7 @@ module Sidekiq
   #   'busy' => 10,
   #   'beat' => <last heartbeat>,
   #   'identity' => <unique string identifying the process>,
+  #   'embedded' => true,
   # }
   class Process
     # :nodoc:
@@ -967,11 +1012,25 @@ module Sidekiq
       self["queues"]
     end
 
+    def weights
+      self["weights"]
+    end
+
+    def version
+      self["version"]
+    end
+
+    def embedded?
+      self["embedded"]
+    end
+
     # Signal this process to stop processing new jobs.
     # It will continue to execute jobs it has already fetched.
     # This method is *asynchronous* and it can take 5-10
     # seconds for the process to quiet.
     def quiet!
+      raise "Can't quiet an embedded process" if embedded?
+
       signal("TSTP")
     end
 
@@ -980,6 +1039,8 @@ module Sidekiq
     # This method is *asynchronous* and it can take 5-10
     # seconds for the process to start shutting down.
     def stop!
+      raise "Can't stop an embedded process" if embedded?
+
       signal("TERM")
     end
 
@@ -1048,11 +1109,11 @@ module Sidekiq
 
       procs.zip(all_works).each do |key, workers|
         workers.each_pair do |tid, json|
-          results << [key, tid, Sidekiq.load_json(json)] unless json.empty?
+          results << [key, tid, Sidekiq::Work.new(key, tid, Sidekiq.load_json(json))] unless json.empty?
         end
       end
 
-      results.sort_by { |(_, _, hsh)| hsh
+      results.sort_by { |(_, _, hsh)| hsh.raw("run_at") }.each(&block)
     end
 
     # Note that #size is only as accurate as Sidekiq's heartbeat,
@@ -1075,7 +1136,74 @@ module Sidekiq
         end
       end
     end
+
+    ##
+    # Find the work which represents a job with the given JID.
+    # *This is a slow O(n) operation*. Do not use for app logic.
+    #
+    # @param jid [String] the job identifier
+    # @return [Sidekiq::Work] the work or nil
+    def find_work_by_jid(jid)
+      each do |_process_id, _thread_id, work|
+        job = work.job
+        return work if job.jid == jid
+      end
+      nil
+    end
   end
+
+  # Sidekiq::Work represents a job which is currently executing.
+  class Work
+    attr_reader :process_id
+    attr_reader :thread_id
+
+    def initialize(pid, tid, hsh)
+      @process_id = pid
+      @thread_id = tid
+      @hsh = hsh
+      @job = nil
+    end
+
+    def queue
+      @hsh["queue"]
+    end
+
+    def run_at
+      Time.at(@hsh["run_at"])
+    end
+
+    def job
+      @job ||= Sidekiq::JobRecord.new(@hsh["payload"])
+    end
+
+    def payload
+      @hsh["payload"]
+    end
+
+    # deprecated
+    def [](key)
+      kwargs = {uplevel: 1}
+      kwargs[:category] = :deprecated if RUBY_VERSION > "3.0" # TODO
+      warn("Direct access to `Sidekiq::Work` attributes is deprecated, please use `#payload`, `#queue`, `#run_at` or `#job` instead", **kwargs)
+
+      @hsh[key]
+    end
+
+    # :nodoc:
+    # @api private
+    def raw(name)
+      @hsh[name]
+    end
+
+    def method_missing(*all)
+      @hsh.send(*all)
+    end
+
+    def respond_to_missing?(name, *args)
+      @hsh.respond_to?(name)
+    end
+  end
+
   # Since "worker" is a nebulous term, we've deprecated the use of this class name.
   # Is "worker" a process, a type of job, a thread? Undefined!
   # WorkSet better describes the data.
data/lib/sidekiq/capsule.rb
CHANGED
@@ -17,16 +17,23 @@ module Sidekiq
  # end
  class Capsule
    include Sidekiq::Component
+    extend Forwardable
 
    attr_reader :name
    attr_reader :queues
    attr_accessor :concurrency
+    attr_reader :mode
+    attr_reader :weights
+
+    def_delegators :@config, :[], :[]=, :fetch, :key?, :has_key?, :merge!, :dig
 
    def initialize(name, config)
      @name = name
      @config = config
      @queues = ["default"]
+      @weights = {"default" => 0}
      @concurrency = config[:concurrency]
+      @mode = :strict
    end
 
    def fetcher
@@ -41,15 +48,28 @@ module Sidekiq
      fetcher&.bulk_requeue([])
    end
 
+    # Sidekiq checks queues in three modes:
+    # - :strict - all queues have 0 weight and are checked strictly in order
+    # - :weighted - queues have arbitrary weight between 1 and N
+    # - :random - all queues have weight of 1
    def queues=(val)
+      @weights = {}
      @queues = Array(val).each_with_object([]) do |qstr, memo|
        arr = qstr
        arr = qstr.split(",") if qstr.is_a?(String)
        name, weight = arr
+        @weights[name] = weight.to_i
        [weight.to_i, 1].max.times do
          memo << name
        end
      end
+      @mode = if @weights.values.all?(&:zero?)
+        :strict
+      elsif @weights.values.all? { |x| x == 1 }
+        :random
+      else
+        :weighted
+      end
    end
 
    # Allow the middleware to be different per-capsule.
data/lib/sidekiq/cli.rb
CHANGED
@@ -38,7 +38,7 @@ module Sidekiq # :nodoc:
    # Code within this method is not tested because it alters
    # global process state irreversibly. PRs which improve the
    # test coverage of Sidekiq::CLI are welcomed.
-    def run(boot_app: true)
+    def run(boot_app: true, warmup: true)
      boot_application if boot_app
 
      if environment == "development" && $stdout.tty? && @config.logger.formatter.is_a?(Sidekiq::Logger::Formatters::Pretty)
@@ -77,13 +77,14 @@ module Sidekiq # :nodoc:
      raise "You are connecting to Redis #{ver}, Sidekiq requires Redis 6.2.0 or greater" if ver < Gem::Version.new("6.2.0")
 
      maxmemory_policy = info["maxmemory_policy"]
-      if maxmemory_policy != "noeviction"
+      if maxmemory_policy != "noeviction" && maxmemory_policy != ""
+        # Redis Enterprise Cloud returns "" for their policy 😳
        logger.warn <<~EOM
 
 
          WARNING: Your Redis instance will evict Sidekiq data under heavy load.
          The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
-          See: https://github.com/
+          See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory
 
        EOM
      end
@@ -100,6 +101,8 @@ module Sidekiq # :nodoc:
      # Touch middleware so it isn't lazy loaded by multiple threads, #3043
      @config.server_middleware
 
+      ::Process.warmup if warmup && ::Process.respond_to?(:warmup)
+
      # Before this point, the process is initializing with just the main thread.
      # Starting here the process will now have multiple threads running.
      fire_event(:startup, reverse: false, reraise: true)
@@ -229,6 +232,7 @@ module Sidekiq # :nodoc:
      # Both Sinatra 2.0+ and Sidekiq support this term.
      # RAILS_ENV and RACK_ENV are there for legacy support.
      @environment = cli_env || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
+      config[:environment] = @environment
    end
 
    def symbolize_keys_deep!(hash)
@@ -395,7 +399,7 @@ module Sidekiq # :nodoc:
    end
 
    def parse_config(path)
-      erb = ERB.new(File.read(path))
+      erb = ERB.new(File.read(path), trim_mode: "-")
      erb.filename = File.expand_path(path)
      opts = YAML.safe_load(erb.result, permitted_classes: [Symbol], aliases: true) || {}
 
@@ -419,3 +423,4 @@ end
 
require "sidekiq/systemd"
require "sidekiq/metrics/tracking"
+require "sidekiq/job/interrupt_handler"
data/lib/sidekiq/client.rb
CHANGED
@@ -66,6 +66,7 @@ module Sidekiq
  # args - an array of simple arguments to the perform method, must be JSON-serializable
  # at - timestamp to schedule the job (optional), must be Numeric (e.g. Time.now.to_f)
  # retry - whether to retry this job if it fails, default true or an integer number of retries
+  # retry_for - relative amount of time to retry this job if it fails, default nil
  # backtrace - whether to save any error backtrace, default false
  #
  # If class is set to the class name, the jobs' options will be based on Sidekiq's default
@@ -73,7 +74,7 @@ module Sidekiq
  #
  # Any options valid for a job class's sidekiq_options are also available here.
  #
-  # All
+  # All keys must be strings, not symbols. NB: because we are serializing to JSON, all
  # symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of
  # space in Redis; a large volume of failing jobs can start Redis swapping if you aren't careful.
  #
@@ -96,8 +97,9 @@ module Sidekiq
 
  ##
  # Push a large number of jobs to Redis. This method cuts out the redis
-  # network round trip latency.
-  # 1000
+  # network round trip latency. It pushes jobs in batches if more than
+  # `:batch_size` (1000 by default) of jobs are passed. I wouldn't recommend making `:batch_size`
+  # larger than 1000 but YMMV based on network quality, size of job args, etc.
  # A large number of jobs can cause a bit of Redis command processing latency.
  #
  # Takes the same arguments as #push except that args is expected to be
@@ -105,13 +107,15 @@ module Sidekiq
  # is run through the client middleware pipeline and each job gets its own Job ID
  # as normal.
  #
-  # Returns an array of the of pushed jobs' jids
-  #
+  # Returns an array of the of pushed jobs' jids, may contain nils if any client middleware
+  # prevented a job push.
+  #
+  # Example (pushing jobs in batches):
+  #   push_bulk('class' => MyJob, 'args' => (1..100_000).to_a, batch_size: 1_000)
+  #
  def push_bulk(items)
+    batch_size = items.delete(:batch_size) || items.delete("batch_size") || 1_000
    args = items["args"]
-    raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless args.is_a?(Array) && args.all?(Array)
-    return [] if args.empty? # no jobs to push
-
    at = items.delete("at")
    raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
    raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
@@ -120,18 +124,28 @@ module Sidekiq
    raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1
 
    normed = normalize_item(items)
-
-
-
-
-
-
-
-
-
+    slice_index = 0
+    result = args.each_slice(batch_size).flat_map do |slice|
+      raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless slice.is_a?(Array) && slice.all?(Array)
+      break [] if slice.empty? # no jobs to push
+
+      payloads = slice.map.with_index { |job_args, index|
+        copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
+        copy["at"] = (at.is_a?(Array) ? at[slice_index + index] : at) if at
+        result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
+          verify_json(copy)
+          copy
+        end
+        result || nil
+      }
+      slice_index += batch_size
+
+      to_push = payloads.compact
+      raw_push(to_push) unless to_push.empty?
+      payloads.map { |payload| payload&.[]("jid") }
+    end
 
-
-    payloads.collect { |payload| payload["jid"] }
+    result.is_a?(Enumerator::Lazy) ? result.force : result
  end
 
  # Allows sharding of jobs across any number of Redis instances. All jobs
@@ -160,8 +174,8 @@ module Sidekiq
      new.push(item)
    end
 
-    def push_bulk(
-      new.push_bulk(
+    def push_bulk(...)
+      new.push_bulk(...)
    end
 
    # Resque compatibility helpers. Note all helpers
@@ -189,7 +203,7 @@ module Sidekiq
    def enqueue_to_in(queue, interval, klass, *args)
      int = interval.to_f
      now = Time.now.to_f
-      ts = (int < 1_000_000_000 ? now + int : int)
+      ts = ((int < 1_000_000_000) ? now + int : int)
 
      item = {"class" => klass, "args" => args, "at" => ts, "queue" => queue}
      item.delete("at") if ts <= now
@@ -234,8 +248,10 @@ module Sidekiq
    def atomic_push(conn, payloads)
      if payloads.first.key?("at")
        conn.zadd("schedule", payloads.flat_map { |hash|
-          at = hash
-
+          at = hash["at"].to_s
+          # ActiveJob sets this but the job has not been enqueued yet
+          hash.delete("enqueued_at")
+          [at, Sidekiq.dump_json(hash.except("at"))]
        })
      else
        queue = payloads.first["queue"]
data/lib/sidekiq/component.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Sidekiq
   ##
   # Sidekiq::Component assumes a config instance is available at @config
@@ -13,7 +15,7 @@ module Sidekiq
 
    def safe_thread(name, &block)
      Thread.new do
-        Thread.current.name = name
+        Thread.current.name = "sidekiq.#{name}"
        watchdog(name, &block)
      end
    end