sidekiq 6.5.12 → 7.3.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Changes.md +340 -20
- data/README.md +43 -35
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiq +3 -8
- data/bin/sidekiqload +213 -118
- data/bin/sidekiqmon +3 -0
- data/lib/active_job/queue_adapters/sidekiq_adapter.rb +75 -0
- data/lib/generators/sidekiq/job_generator.rb +2 -0
- data/lib/sidekiq/api.rb +243 -162
- data/lib/sidekiq/capsule.rb +132 -0
- data/lib/sidekiq/cli.rb +60 -75
- data/lib/sidekiq/client.rb +87 -38
- data/lib/sidekiq/component.rb +26 -1
- data/lib/sidekiq/config.rb +311 -0
- data/lib/sidekiq/deploy.rb +64 -0
- data/lib/sidekiq/embedded.rb +63 -0
- data/lib/sidekiq/fetch.rb +11 -14
- data/lib/sidekiq/iterable_job.rb +55 -0
- data/lib/sidekiq/job/interrupt_handler.rb +24 -0
- data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
- data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
- data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
- data/lib/sidekiq/job/iterable.rb +294 -0
- data/lib/sidekiq/job.rb +382 -10
- data/lib/sidekiq/job_logger.rb +8 -7
- data/lib/sidekiq/job_retry.rb +42 -19
- data/lib/sidekiq/job_util.rb +53 -15
- data/lib/sidekiq/launcher.rb +71 -65
- data/lib/sidekiq/logger.rb +2 -27
- data/lib/sidekiq/manager.rb +9 -11
- data/lib/sidekiq/metrics/query.rb +9 -4
- data/lib/sidekiq/metrics/shared.rb +21 -9
- data/lib/sidekiq/metrics/tracking.rb +40 -26
- data/lib/sidekiq/middleware/chain.rb +19 -18
- data/lib/sidekiq/middleware/current_attributes.rb +85 -20
- data/lib/sidekiq/middleware/modules.rb +2 -0
- data/lib/sidekiq/monitor.rb +18 -4
- data/lib/sidekiq/paginator.rb +8 -2
- data/lib/sidekiq/processor.rb +62 -57
- data/lib/sidekiq/rails.rb +27 -10
- data/lib/sidekiq/redis_client_adapter.rb +31 -71
- data/lib/sidekiq/redis_connection.rb +44 -115
- data/lib/sidekiq/ring_buffer.rb +2 -0
- data/lib/sidekiq/scheduled.rb +22 -23
- data/lib/sidekiq/systemd.rb +2 -0
- data/lib/sidekiq/testing.rb +37 -46
- data/lib/sidekiq/transaction_aware_client.rb +11 -5
- data/lib/sidekiq/version.rb +6 -1
- data/lib/sidekiq/web/action.rb +29 -7
- data/lib/sidekiq/web/application.rb +82 -28
- data/lib/sidekiq/web/csrf_protection.rb +10 -7
- data/lib/sidekiq/web/helpers.rb +110 -49
- data/lib/sidekiq/web/router.rb +5 -2
- data/lib/sidekiq/web.rb +70 -17
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +78 -274
- data/sidekiq.gemspec +13 -10
- data/web/assets/javascripts/application.js +44 -0
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/dashboard-charts.js +194 -0
- data/web/assets/javascripts/dashboard.js +17 -233
- data/web/assets/javascripts/metrics.js +151 -115
- data/web/assets/stylesheets/application-dark.css +4 -0
- data/web/assets/stylesheets/application-rtl.css +10 -89
- data/web/assets/stylesheets/application.css +56 -296
- data/web/locales/ar.yml +70 -70
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -65
- data/web/locales/el.yml +2 -7
- data/web/locales/en.yml +81 -71
- data/web/locales/es.yml +68 -68
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +80 -67
- data/web/locales/gd.yml +98 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +85 -54
- data/web/locales/ja.yml +67 -70
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +66 -66
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +78 -69
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +67 -66
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/tr.yml +100 -0
- data/web/locales/uk.yml +85 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +67 -67
- data/web/locales/zh-cn.yml +20 -19
- data/web/locales/zh-tw.yml +10 -2
- data/web/views/_footer.erb +16 -2
- data/web/views/_job_info.erb +18 -2
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +1 -1
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +46 -35
- data/web/views/dashboard.erb +32 -8
- data/web/views/filtering.erb +6 -0
- data/web/views/layout.erb +6 -6
- data/web/views/metrics.erb +47 -26
- data/web/views/metrics_for_job.erb +43 -71
- data/web/views/morgue.erb +7 -11
- data/web/views/queue.erb +11 -15
- data/web/views/queues.erb +9 -3
- data/web/views/retries.erb +5 -9
- data/web/views/scheduled.erb +12 -13
- metadata +66 -41
- data/lib/sidekiq/delay.rb +0 -43
- data/lib/sidekiq/extensions/action_mailer.rb +0 -48
- data/lib/sidekiq/extensions/active_record.rb +0 -43
- data/lib/sidekiq/extensions/class_methods.rb +0 -43
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
- data/lib/sidekiq/metrics/deploy.rb +0 -47
- data/lib/sidekiq/worker.rb +0 -370
- data/web/assets/javascripts/graph.js +0 -16
- /data/{LICENSE → LICENSE.txt} +0 -0
data/lib/sidekiq/api.rb
CHANGED
@@ -4,12 +4,8 @@ require "sidekiq"
 
 require "zlib"
 require "set"
-require "base64"
 
-
-  require "sidekiq/metrics/deploy"
-  require "sidekiq/metrics/query"
-end
+require "sidekiq/metrics/query"
 
 #
 # Sidekiq's Data API provides a Ruby object model on top
@@ -70,7 +66,18 @@ module Sidekiq
     end
 
     def queues
-      Sidekiq
+      Sidekiq.redis do |conn|
+        queues = conn.sscan("queues").to_a
+
+        lengths = conn.pipelined { |pipeline|
+          queues.each do |queue|
+            pipeline.llen("queue:#{queue}")
+          end
+        }
+
+        array_of_arrays = queues.zip(lengths).sort_by { |_, size| -size }
+        array_of_arrays.to_h
+      end
    end

    # O(1) redis calls
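The rewritten `Stats#queues` above folds the old `Sidekiq::Stats::Queues#lengths` logic into `Stats` itself: one `SSCAN` over the `queues` set, a pipelined `LLEN` per queue, and the result sorted largest-first. A minimal usage sketch (queue names and counts are illustrative):

    require "sidekiq/api"

    stats = Sidekiq::Stats.new
    stats.queues                 # => {"default" => 120, "mailers" => 3}  (queue name => length, largest first)
    stats.default_queue_latency  # => seconds the oldest job in the "default" queue has been waiting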
@@ -84,11 +91,11 @@ module Sidekiq
           pipeline.zcard("retry")
           pipeline.zcard("dead")
           pipeline.scard("processes")
-          pipeline.
+          pipeline.lindex("queue:default", -1)
         end
       }
 
-      default_queue_latency = if (entry = pipe1_res[6]
+      default_queue_latency = if (entry = pipe1_res[6])
         job = begin
           Sidekiq.load_json(entry)
         rescue
@@ -117,11 +124,11 @@ module Sidekiq
     # @api private
     def fetch_stats_slow!
       processes = Sidekiq.redis { |conn|
-        conn.
+        conn.sscan("processes").to_a
       }
 
       queues = Sidekiq.redis { |conn|
-        conn.
+        conn.sscan("queues").to_a
       }
 
       pipe2_res = Sidekiq.redis { |conn|
@@ -133,7 +140,7 @@ module Sidekiq
 
       s = processes.size
       workers_size = pipe2_res[0...s].sum(&:to_i)
-      enqueued = pipe2_res[s
+      enqueued = pipe2_res[s..].sum(&:to_i)
 
       @stats[:workers_size] = workers_size
       @stats[:enqueued] = enqueued
@@ -168,25 +175,8 @@ module Sidekiq
       @stats[s] || raise(ArgumentError, "Unknown stat #{s}")
     end
 
-    class Queues
-      def lengths
-        Sidekiq.redis do |conn|
-          queues = conn.sscan_each("queues").to_a
-
-          lengths = conn.pipelined { |pipeline|
-            queues.each do |queue|
-              pipeline.llen("queue:#{queue}")
-            end
-          }
-
-          array_of_arrays = queues.zip(lengths).sort_by { |_, size| -size }
-          array_of_arrays.to_h
-        end
-      end
-    end
-
     class History
-      def initialize(days_previous, start_date = nil)
+      def initialize(days_previous, start_date = nil, pool: nil)
         # we only store five years of data in Redis
         raise ArgumentError if days_previous < 1 || days_previous > (5 * 365)
         @days_previous = days_previous
@@ -211,15 +201,10 @@ module Sidekiq
 
         keys = dates.map { |datestr| "stat:#{stat}:#{datestr}" }
 
-
-
-
-            stat_hash[dates[idx]] = value ? value.to_i : 0
-          end
+        Sidekiq.redis do |conn|
+          conn.mget(keys).each_with_index do |value, idx|
+            stat_hash[dates[idx]] = value ? value.to_i : 0
           end
-      rescue RedisConnection.adapter::CommandError
-        # mget will trigger a CROSSSLOT error when run against a Cluster
-        # TODO Someone want to add Cluster support?
         end
 
         stat_hash
@@ -247,7 +232,7 @@ module Sidekiq
     #
     # @return [Array<Sidekiq::Queue>]
     def self.all
-      Sidekiq.redis { |c| c.
+      Sidekiq.redis { |c| c.sscan("queues").to_a }.sort.map { |q| Sidekiq::Queue.new(q) }
     end
 
     attr_reader :name
@@ -278,8 +263,8 @@ module Sidekiq
     # @return [Float] in seconds
     def latency
       entry = Sidekiq.redis { |conn|
-        conn.
-      }
+        conn.lindex(@rname, -1)
+      }
       return 0 unless entry
       job = Sidekiq.load_json(entry)
       now = Time.now.to_f
@@ -388,12 +373,7 @@ module Sidekiq
     def display_class
       # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
       @klass ||= self["display_class"] || begin
-
-        when /\ASidekiq::Extensions::Delayed/
-          safe_load(args[0], klass) do |target, method, _|
-            "#{target}.#{method}"
-          end
-        when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
+        if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper" || klass == "Sidekiq::ActiveJob::Wrapper"
          job_class = @item["wrapped"] || args[0]
          if job_class == "ActionMailer::DeliveryJob" || job_class == "ActionMailer::MailDeliveryJob"
            # MailerClass#mailer_method
@@ -409,23 +389,14 @@ module Sidekiq
 
     def display_args
       # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
-      @display_args ||=
-
-          safe_load(args[0], args) do |_, _, arg, kwarg|
-            if !kwarg || kwarg.empty?
-              arg
-            else
-              [arg, kwarg]
-            end
-          end
-        when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
-          job_args = self["wrapped"] ? args[0]["arguments"] : []
+      @display_args ||= if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper" || klass == "Sidekiq::ActiveJob::Wrapper"
+        job_args = self["wrapped"] ? deserialize_argument(args[0]["arguments"]) : []
        if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob"
          # remove MailerClass, mailer_method and 'deliver_now'
          job_args.drop(3)
        elsif (self["wrapped"] || args[0]) == "ActionMailer::MailDeliveryJob"
          # remove MailerClass, mailer_method and 'deliver_now'
-          job_args.drop(3).first
+          job_args.drop(3).first.values_at("params", "args")
        else
          job_args
        end
@@ -446,6 +417,10 @@ module Sidekiq
       self["jid"]
     end
 
+    def bid
+      self["bid"]
+    end
+
     def enqueued_at
       self["enqueued_at"] ? Time.at(self["enqueued_at"]).utc : nil
     end
@@ -491,32 +466,34 @@ module Sidekiq
 
     private
 
-
-
-    rescue => ex
-      # #1761 in dev mode, it's possible to have jobs enqueued which haven't been loaded into
-      # memory yet so the YAML can't be loaded.
-      # TODO is this still necessary? Zeitwerk reloader should handle?
-      Sidekiq.logger.warn "Unable to load YAML: #{ex.message}" unless Sidekiq.options[:environment] == "development"
-      default
-    end
+    ACTIVE_JOB_PREFIX = "_aj_"
+    GLOBALID_KEY = "_aj_globalid"
 
-    def
-
-
-
-
-
-
-
-
-      # Handle old jobs with marshalled backtrace format
-      # TODO Remove in 7.x
-      Marshal.load(uncompressed)
+    def deserialize_argument(argument)
+      case argument
+      when Array
+        argument.map { |arg| deserialize_argument(arg) }
+      when Hash
+        if serialized_global_id?(argument)
+          argument[GLOBALID_KEY]
+        else
+          argument.transform_values { |v| deserialize_argument(v) }
+            .reject { |k, _| k.start_with?(ACTIVE_JOB_PREFIX) }
        end
+      else
+        argument
      end
    end
+
+    def serialized_global_id?(hash)
+      hash.size == 1 && hash.include?(GLOBALID_KEY)
+    end
+
+    def uncompress_backtrace(backtrace)
+      strict_base64_decoded = backtrace.unpack1("m")
+      uncompressed = Zlib::Inflate.inflate(strict_base64_decoded)
+      Sidekiq.load_json(uncompressed)
+    end
  end

  # Represents a job within a Redis sorted set where the score
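`display_args` above now runs ActiveJob argument hashes through the new private `deserialize_argument` helper, which collapses a one-key `_aj_globalid` hash to its GID string and strips the remaining `_aj_`-prefixed bookkeeping keys. A rough illustration of the transformation (the input values mimic ActiveJob's serialized argument format and are made up):

    deserialize_argument({"_aj_globalid" => "gid://app/User/42"})
    # => "gid://app/User/42"

    deserialize_argument([{"name" => "report", "_aj_symbol_keys" => ["name"]}, 5])
    # => [{"name" => "report"}, 5]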
@@ -593,7 +570,7 @@ module Sidekiq
     def remove_job
       Sidekiq.redis do |conn|
         results = conn.multi { |transaction|
-          transaction.
+          transaction.zrange(parent.name, score, score, "BYSCORE")
           transaction.zremrangebyscore(parent.name, score, score)
         }.first
 
@@ -656,7 +633,7 @@ module Sidekiq
 
      match = "*#{match}*" unless match.include?("*")
      Sidekiq.redis do |conn|
-        conn.
+        conn.zscan(name, match: match, count: count) do |entry, score|
          yield SortedEntry.new(self, score, entry)
        end
      end
@@ -691,6 +668,41 @@ module Sidekiq
      end
    end

+    def pop_each
+      Sidekiq.redis do |c|
+        size.times do
+          data, score = c.zpopmin(name, 1)&.first
+          break unless data
+          yield data, score
+        end
+      end
+    end
+
+    def retry_all
+      c = Sidekiq::Client.new
+      pop_each do |msg, _|
+        job = Sidekiq.load_json(msg)
+        # Manual retries should not count against the retry limit.
+        job["retry_count"] -= 1 if job["retry_count"]
+        c.push(job)
+      end
+    end
+
+    # Move all jobs from this Set to the Dead Set.
+    # See DeadSet#kill
+    def kill_all(notify_failure: false, ex: nil)
+      ds = DeadSet.new
+      opts = {notify_failure: notify_failure, ex: ex, trim: false}
+
+      begin
+        pop_each do |msg, _|
+          ds.kill(msg, opts)
+        end
+      ensure
+        ds.trim
+      end
+    end
+
    def each
      initial_size = @_size
      offset_size = 0
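`retry_all` and `kill_all` now live on `JobSet` (so `RetrySet`, `ScheduledSet` and `DeadSet` all inherit them), built on the new `pop_each`, which drains the sorted set via `ZPOPMIN`. A minimal sketch of the resulting API, using only the methods shown above:

    rs = Sidekiq::RetrySet.new
    rs.retry_all                        # re-enqueue every job in the retry set
    rs.kill_all(notify_failure: false)  # or move them all to the dead set without firing death handlers

    Sidekiq::ScheduledSet.new.pop_each do |json, score|
      # json is the raw job payload, score is the job's timestamp within the set
    end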
@@ -701,7 +713,7 @@ module Sidekiq
         range_start = page * page_size + offset_size
         range_end = range_start + page_size - 1
         elements = Sidekiq.redis { |conn|
-          conn.zrange name, range_start, range_end, withscores
+          conn.zrange name, range_start, range_end, "withscores"
         }
         break if elements.empty?
         page -= 1
@@ -728,7 +740,7 @@ module Sidekiq
      end

      elements = Sidekiq.redis { |conn|
-        conn.
+        conn.zrange(name, begin_score, end_score, "BYSCORE", "withscores")
      }

      elements.each_with_object([]) do |element, result|
@@ -746,8 +758,8 @@ module Sidekiq
    # @return [SortedEntry] the record or nil
    def find_job(jid)
      Sidekiq.redis do |conn|
-        conn.
-          job =
+        conn.zscan(name, match: "*#{jid}*", count: 100) do |entry, score|
+          job = Sidekiq.load_json(entry)
          matched = job["jid"] == jid
          return SortedEntry.new(self, score, entry) if matched
        end
@@ -769,7 +781,7 @@ module Sidekiq
    # @api private
    def delete_by_jid(score, jid)
      Sidekiq.redis do |conn|
-        elements = conn.
+        elements = conn.zrange(name, score, score, "BYSCORE")
        elements.each do |element|
          if element.index(jid)
            message = Sidekiq.load_json(element)
@@ -788,47 +800,21 @@ module Sidekiq
 
  ##
  # The set of scheduled jobs within Sidekiq.
-  #
-  # example where I'm selecting jobs based on some complex logic
-  # and deleting them from the scheduled set.
+  # See the API wiki page for usage notes and examples.
  #
-  #   r = Sidekiq::ScheduledSet.new
-  #   r.select do |scheduled|
-  #     scheduled.klass == 'Sidekiq::Extensions::DelayedClass' &&
-  #     scheduled.args[0] == 'User' &&
-  #     scheduled.args[1] == 'setup_new_subscriber'
-  #   end.map(&:delete)
  class ScheduledSet < JobSet
    def initialize
-      super
+      super("schedule")
    end
  end

  ##
  # The set of retries within Sidekiq.
-  #
-  # example where I'm selecting all jobs of a certain type
-  # and deleting them from the retry queue.
+  # See the API wiki page for usage notes and examples.
  #
-  #   r = Sidekiq::RetrySet.new
-  #   r.select do |retri|
-  #     retri.klass == 'Sidekiq::Extensions::DelayedClass' &&
-  #     retri.args[0] == 'User' &&
-  #     retri.args[1] == 'setup_new_subscriber'
-  #   end.map(&:delete)
  class RetrySet < JobSet
    def initialize
-      super
-    end
-
-    # Enqueues all jobs pending within the retry set.
-    def retry_all
-      each(&:retry) while size > 0
-    end
-
-    # Kills all jobs pending within the retry set.
-    def kill_all
-      each(&:kill) while size > 0
+      super("retry")
    end
  end

@@ -839,48 +825,48 @@ module Sidekiq
  #
  class DeadSet < JobSet
    def initialize
-      super
+      super("dead")
+    end
+
+    # Trim dead jobs which are over our storage limits
+    def trim
+      hash = Sidekiq.default_configuration
+      now = Time.now.to_f
+      Sidekiq.redis do |conn|
+        conn.multi do |transaction|
+          transaction.zremrangebyscore(name, "-inf", now - hash[:dead_timeout_in_seconds])
+          transaction.zremrangebyrank(name, 0, - hash[:dead_max_jobs])
+        end
+      end
    end

    # Add the given job to the Dead set.
    # @param message [String] the job data as JSON
+    # @option opts [Boolean] :notify_failure (true) Whether death handlers should be called
+    # @option opts [Boolean] :trim (true) Whether Sidekiq should trim the structure to keep it within configuration
+    # @option opts [Exception] :ex (RuntimeError) An exception to pass to the death handlers
    def kill(message, opts = {})
      now = Time.now.to_f
      Sidekiq.redis do |conn|
-        conn.
-          transaction.zadd(name, now.to_s, message)
-          transaction.zremrangebyscore(name, "-inf", now - self.class.timeout)
-          transaction.zremrangebyrank(name, 0, - self.class.max_jobs)
-        end
+        conn.zadd(name, now.to_s, message)
      end

+      trim if opts[:trim] != false
+
      if opts[:notify_failure] != false
        job = Sidekiq.load_json(message)
-
-
-
-
+        if opts[:ex]
+          ex = opts[:ex]
+        else
+          ex = RuntimeError.new("Job killed by API")
+          ex.set_backtrace(caller)
+        end
+        Sidekiq.default_configuration.death_handlers.each do |handle|
+          handle.call(job, ex)
        end
      end
      true
    end
-
-    # Enqueue all dead jobs
-    def retry_all
-      each(&:retry) while size > 0
-    end
-
-    # The maximum size of the Dead set. Older entries will be trimmed
-    # to stay within this limit. Default value is 10,000.
-    def self.max_jobs
-      Sidekiq[:dead_max_jobs]
-    end
-
-    # The time limit for entries within the Dead set. Older entries will be thrown away.
-    # Default value is six months.
-    def self.timeout
-      Sidekiq[:dead_timeout_in_seconds]
-    end
  end

  ##
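`DeadSet#kill` above is reduced to a plain `ZADD`, with pruning moved to the new `trim` and behavior controlled by the documented options. A hedged usage sketch (the job payload fields and the `my_error` exception are placeholders):

    ds = Sidekiq::DeadSet.new
    payload = Sidekiq.dump_json("class" => "SomeJob", "args" => [1], "jid" => "abc123")

    ds.kill(payload)                            # add to the dead set, fire death handlers, then trim
    ds.kill(payload, notify_failure: false)     # skip the death handlers
    ds.kill(payload, ex: my_error, trim: false) # pass a specific exception; call ds.trim yourself later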
@@ -893,6 +879,24 @@ module Sidekiq
  class ProcessSet
    include Enumerable

+    def self.[](identity)
+      exists, (info, busy, beat, quiet, rss, rtt_us) = Sidekiq.redis { |conn|
+        conn.multi { |transaction|
+          transaction.sismember("processes", identity)
+          transaction.hmget(identity, "info", "busy", "beat", "quiet", "rss", "rtt_us")
+        }
+      }
+
+      return nil if exists == 0 || info.nil?
+
+      hash = Sidekiq.load_json(info)
+      Process.new(hash.merge("busy" => busy.to_i,
+        "beat" => beat.to_f,
+        "quiet" => quiet,
+        "rss" => rss.to_i,
+        "rtt_us" => rtt_us.to_i))
+    end
+
    # :nodoc:
    # @api private
    def initialize(clean_plz = true)
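The new `ProcessSet.[]` looks up a single process record by its identity string with one `MULTI`, instead of scanning the whole set with `each`. A minimal sketch (the identity string is illustrative):

    process = Sidekiq::ProcessSet["myhost:12345:deadbeef"]   # => Sidekiq::Process or nil
    if process
      process["busy"]   # number of jobs currently executing (Integer)
      process["beat"]   # last heartbeat as a Float epoch timestamp
      process["rss"]    # resident set size (Integer)
    end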
@@ -905,11 +909,11 @@ module Sidekiq
    # @api private
    def cleanup
      # dont run cleanup more than once per minute
-      return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1",
+      return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", "NX", "EX", "60") }

      count = 0
      Sidekiq.redis do |conn|
-        procs = conn.
+        procs = conn.sscan("processes").to_a
        heartbeats = conn.pipelined { |pipeline|
          procs.each do |key|
            pipeline.hget(key, "info")
@@ -929,7 +933,7 @@ module Sidekiq
 
    def each
      result = Sidekiq.redis { |conn|
-        procs = conn.
+        procs = conn.sscan("processes").to_a.sort

        # We're making a tradeoff here between consuming more memory instead of
        # making more roundtrips to Redis, but if you have hundreds or thousands of workers,
@@ -941,7 +945,7 @@ module Sidekiq
        end
      }

-      result.each do |info, busy,
+      result.each do |info, busy, beat, quiet, rss, rtt_us|
        # If a process is stopped between when we query Redis for `procs` and
        # when we query for `result`, we will have an item in `result` that is
        # composed of `nil` values.
@@ -949,10 +953,10 @@ module Sidekiq
 
        hash = Sidekiq.load_json(info)
        yield Process.new(hash.merge("busy" => busy.to_i,
-          "beat" =>
+          "beat" => beat.to_f,
          "quiet" => quiet,
          "rss" => rss.to_i,
-          "rtt_us" =>
+          "rtt_us" => rtt_us.to_i))
      end
    end

@@ -1008,6 +1012,7 @@ module Sidekiq
  #   'busy' => 10,
  #   'beat' => <last heartbeat>,
  #   'identity' => <unique string identifying the process>,
+  #   'embedded' => true,
  # }
  class Process
    # :nodoc:
@@ -1021,7 +1026,7 @@ module Sidekiq
    end

    def labels
-
+      self["labels"].to_a
    end

    def [](key)
@@ -1036,11 +1041,25 @@ module Sidekiq
      self["queues"]
    end

+    def weights
+      self["weights"]
+    end
+
+    def version
+      self["version"]
+    end
+
+    def embedded?
+      self["embedded"]
+    end
+
    # Signal this process to stop processing new jobs.
    # It will continue to execute jobs it has already fetched.
    # This method is *asynchronous* and it can take 5-10
    # seconds for the process to quiet.
    def quiet!
+      raise "Can't quiet an embedded process" if embedded?
+
      signal("TSTP")
    end

@@ -1049,6 +1068,8 @@ module Sidekiq
    # This method is *asynchronous* and it can take 5-10
    # seconds for the process to start shutting down.
    def stop!
+      raise "Can't stop an embedded process" if embedded?
+
      signal("TERM")
    end

@@ -1107,8 +1128,7 @@ module Sidekiq
      all_works = nil

      Sidekiq.redis do |conn|
-        procs = conn.
-
+        procs = conn.sscan("processes").to_a.sort
        all_works = conn.pipelined do |pipeline|
          procs.each do |key|
            pipeline.hgetall("#{key}:work")
@@ -1118,17 +1138,11 @@ module Sidekiq
 
      procs.zip(all_works).each do |key, workers|
        workers.each_pair do |tid, json|
-
-
-          hsh = Sidekiq.load_json(json)
-          p = hsh["payload"]
-          # avoid breaking API, this is a side effect of the JSON optimization in #4316
-          hsh["payload"] = Sidekiq.load_json(p) if p.is_a?(String)
-          results << [key, tid, hsh]
+          results << [key, tid, Sidekiq::Work.new(key, tid, Sidekiq.load_json(json))] unless json.empty?
        end
      end

-      results.sort_by { |(_, _, hsh)| hsh
+      results.sort_by { |(_, _, hsh)| hsh.raw("run_at") }.each(&block)
    end

    # Note that #size is only as accurate as Sidekiq's heartbeat,
@@ -1139,7 +1153,7 @@ module Sidekiq
    # which can easily get out of sync with crashy processes.
    def size
      Sidekiq.redis do |conn|
-        procs = conn.
+        procs = conn.sscan("processes").to_a
        if procs.empty?
          0
        else
@@ -1151,7 +1165,74 @@ module Sidekiq
        end
      end
    end
+
+    ##
+    # Find the work which represents a job with the given JID.
+    # *This is a slow O(n) operation*. Do not use for app logic.
+    #
+    # @param jid [String] the job identifier
+    # @return [Sidekiq::Work] the work or nil
+    def find_work_by_jid(jid)
+      each do |_process_id, _thread_id, work|
+        job = work.job
+        return work if job.jid == jid
+      end
+      nil
+    end
+  end
+
+  # Sidekiq::Work represents a job which is currently executing.
+  class Work
+    attr_reader :process_id
+    attr_reader :thread_id
+
+    def initialize(pid, tid, hsh)
+      @process_id = pid
+      @thread_id = tid
+      @hsh = hsh
+      @job = nil
+    end
+
+    def queue
+      @hsh["queue"]
+    end
+
+    def run_at
+      Time.at(@hsh["run_at"])
+    end
+
+    def job
+      @job ||= Sidekiq::JobRecord.new(@hsh["payload"])
+    end
+
+    def payload
+      @hsh["payload"]
+    end
+
+    # deprecated
+    def [](key)
+      kwargs = {uplevel: 1}
+      kwargs[:category] = :deprecated if RUBY_VERSION > "3.0" # TODO
+      warn("Direct access to `Sidekiq::Work` attributes is deprecated, please use `#payload`, `#queue`, `#run_at` or `#job` instead", **kwargs)
+
+      @hsh[key]
+    end
+
+    # :nodoc:
+    # @api private
+    def raw(name)
+      @hsh[name]
+    end
+
+    def method_missing(*all)
+      @hsh.send(*all)
+    end
+
+    def respond_to_missing?(name, *args)
+      @hsh.respond_to?(name)
+    end
  end
+
  # Since "worker" is a nebulous term, we've deprecated the use of this class name.
  # Is "worker" a process, a type of job, a thread? Undefined!
  # WorkSet better describes the data.
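Running jobs are now wrapped in the new `Sidekiq::Work` value object rather than exposed as raw hashes, and direct hash-style access to them is deprecated in favor of its readers. A minimal sketch against the work set (assuming the surrounding class is exposed as `Sidekiq::WorkSet`, per the comment above; the jid is illustrative):

    work_set = Sidekiq::WorkSet.new
    work_set.each do |process_id, thread_id, work|
      work.queue    # queue the job was fetched from
      work.run_at   # Time the job started executing
      work.job      # Sidekiq::JobRecord wrapping the payload
    end

    work_set.find_work_by_jid("abc123")   # => Sidekiq::Work or nil; slow O(n) scan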