sidekiq 7.0.8 → 7.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Changes.md +59 -0
- data/bin/sidekiqload +21 -3
- data/lib/sidekiq/api.rb +32 -9
- data/lib/sidekiq/cli.rb +2 -1
- data/lib/sidekiq/client.rb +34 -20
- data/lib/sidekiq/component.rb +1 -1
- data/lib/sidekiq/config.rb +12 -4
- data/lib/sidekiq/fetch.rb +1 -1
- data/lib/sidekiq/job.rb +1 -5
- data/lib/sidekiq/job_retry.rb +21 -4
- data/lib/sidekiq/job_util.rb +4 -2
- data/lib/sidekiq/launcher.rb +1 -1
- data/lib/sidekiq/metrics/query.rb +1 -1
- data/lib/sidekiq/metrics/shared.rb +4 -4
- data/lib/sidekiq/middleware/current_attributes.rb +55 -16
- data/lib/sidekiq/paginator.rb +1 -1
- data/lib/sidekiq/processor.rb +27 -26
- data/lib/sidekiq/rails.rb +10 -0
- data/lib/sidekiq/redis_client_adapter.rb +5 -24
- data/lib/sidekiq/scheduled.rb +1 -1
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web/action.rb +3 -3
- data/lib/sidekiq/web/application.rb +5 -5
- data/lib/sidekiq/web/csrf_protection.rb +1 -1
- data/lib/sidekiq/web.rb +13 -1
- data/sidekiq.gemspec +1 -10
- data/web/assets/javascripts/application.js +1 -0
- data/web/assets/javascripts/dashboard-charts.js +3 -1
- data/web/locales/fr.yml +14 -0
- data/web/locales/gd.yml +99 -0
- data/web/locales/pt-br.yml +20 -0
- data/web/views/_job_info.erb +1 -1
- data/web/views/busy.erb +2 -2
- data/web/views/metrics_for_job.erb +3 -6
- metadata +6 -13
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 0a6064918f9c33be1d21f890b9cff080969fb44a916fe67ccb36958fe0a3b8f3
+  data.tar.gz: baf268f21f27e0dac2fc287f247910afe493e4dcaee21aa769ccebbed03e7699
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c8f4e3caaeab143f20fdd592dc7939f09a302642867bddb4fcd02cbbaa0e734d7685ee3ad02543d3bb3ba6cbedaf2cbab3577a849c03d2f351aa7918d6eab73f
+  data.tar.gz: 1cf71898600afb872ee717be6496da7a146aa8e2f528b6c73e3630a64f71869bc23bfa2d7130f021c7f5e12b473ed3b537cebb4a8fedcfbe517af48dde5bbea6
data/Changes.md
CHANGED
@@ -2,6 +2,60 @@

 [Sidekiq Changes](https://github.com/sidekiq/sidekiq/blob/main/Changes.md) | [Sidekiq Pro Changes](https://github.com/sidekiq/sidekiq/blob/main/Pro-Changes.md) | [Sidekiq Enterprise Changes](https://github.com/sidekiq/sidekiq/blob/main/Ent-Changes.md)

+7.1.4
+----------
+
+- Fix empty `retry_for` logic [#6035]
+
+7.1.3
+----------
+
+- Add `sidekiq_options retry_for: 48.hours` to allow time-based retry windows [#6029]
+- Support sidekiq_retry_in and sidekiq_retries_exhausted_block in ActiveJobs (#5994)
+- Lowercase all Rack headers for Rack 3.0 [#5951]
+- Validate Sidekiq::Web page refresh delay to avoid potential DoS,
+  CVE-2023-26141, thanks for reporting Keegan!
+
+7.1.2
+----------
+
+- Mark Web UI assets as private so CDNs won't cache them [#5936]
+- Fix stackoverflow when using Oj and the JSON log formatter [#5920]
+- Remove spurious `enqueued_at` from scheduled ActiveJobs [#5937]
+
+7.1.1
+----------
+
+- Support multiple CurrentAttributes [#5904]
+- Speed up latency fetch with large queues on Redis <7 [#5910]
+- Allow a larger default client pool [#5886]
+- Ensure Sidekiq.options[:environment] == RAILS_ENV [#5932]
+
+7.1.0
+----------
+
+- Improve display of ActiveJob arguments in Web UI [#5825, cover]
+- Update `push_bulk` to push `batch_size` jobs at a time and allow laziness [#5827, fatkodima]
+  This allows Sidekiq::Client to push unlimited jobs as long as it has enough memory for the batch_size.
+- Update `perform_bulk` to use `push_bulk` internally.
+- Change return value of `push_bulk` to map 1-to-1 with arguments.
+  If you call `push_bulk(args: [[1], [2], [3]])`, you will now always get
+  an array of 3 values as the result: `["jid1", nil, "jid3"]` where nil means
+  that particular job did not push successfully (possibly due to middleware
+  stopping it). Previously nil values were removed so it was impossible to tell
+  which jobs pushed successfully and which did not.
+- Migrate away from all deprecated Redis commands [#5788]
+  Sidekiq will now print a warning if you use one of those deprecated commands.
+- Prefix all Sidekiq thread names [#5872]
+
+7.0.9
+----------
+
+- Restore confirmation dialogs in Web UI [#5881, shevaun]
+- Increase fetch timeout to minimize ReadTimeoutError [#5874]
+- Reverse histogram tooltip ordering [#5868]
+- Add Scottish Gaelic (gd) locale [#5867, GunChleoc]
+
 7.0.8
 ----------

@@ -76,6 +130,11 @@ end
 - Job Execution metrics!!!
 - See `docs/7.0-Upgrade.md` for release notes

+6.5.9
+----------
+
+- Ensure Sidekiq.options[:environment] == RAILS_ENV [#5932]
+
 6.5.8
 ----------

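The 7.1.0 notes above describe the new `push_bulk` return contract; here is a brief illustrative sketch (the worker name and the particular nil outcome are hypothetical, not from this package):

    class MyJob
      include Sidekiq::Job
      def perform(n)
      end
    end

    jids = Sidekiq::Client.push_bulk("class" => MyJob, "args" => [[1], [2], [3]])
    # 7.1.0+: always three entries, one per argument array, e.g. ["jid1", nil, "jid3"];
    # a nil marks a job that a client middleware stopped from being pushed.
    # Before 7.1.0 the nils were dropped, so you could not tell which pushes succeeded.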
data/bin/sidekiqload
CHANGED
@@ -1,5 +1,23 @@
 #!/usr/bin/env ruby

+#
+# bin/sidekiqload is a helpful script to load test and
+# performance tune Sidekiq's core. It creates 500,000 no-op
+# jobs and executes them as fast as possible.
+# Example Usage:
+#
+# > RUBY_YJIT_ENABLE=1 LATENCY=0 THREADS=10 bin/sidekiqload
+# Result: Done, 500000 jobs in 20.264945 sec, 24673 jobs/sec
+#
+# Use LATENCY=1 to get a more real world network setup
+# but you'll need to setup and start toxiproxy as noted below.
+#
+# Use AJ=1 to test ActiveJob instead of plain old Sidekiq::Jobs so
+# you can see the runtime performance difference between the two APIs.
+#
+# None of this script is considered a public API and may change over time.
+#
+
 # Quiet some warnings we see when running in warning mode:
 # RUBYOPT=-w bundle exec sidekiq
 $TESTING = false
@@ -32,7 +50,7 @@ if ENV["AJ"]
   ActiveJob::Base.logger.level = Logger::WARN

   class LoadJob < ActiveJob::Base
-    def perform(idx, ts=nil)
+    def perform(idx, ts = nil)
       puts(Time.now.to_f - ts) if !ts.nil?
     end
   end
@@ -219,11 +237,11 @@ end
 ll = Loader.new
 ll.configure

-
+if ENV["WARM"]
   ll.setup
   ll.run("warmup")
 end

 ll.setup
-ll.run("
+ll.run("load")
 ll.done
data/lib/sidekiq/api.rb
CHANGED
@@ -92,11 +92,11 @@ module Sidekiq
         pipeline.zcard("retry")
         pipeline.zcard("dead")
         pipeline.scard("processes")
-        pipeline.
+        pipeline.lindex("queue:default", -1)
       end
     }

-    default_queue_latency = if (entry = pipe1_res[6]
+    default_queue_latency = if (entry = pipe1_res[6])
       job = begin
         Sidekiq.load_json(entry)
       rescue
@@ -264,8 +264,8 @@ module Sidekiq
     # @return [Float] in seconds
     def latency
       entry = Sidekiq.redis { |conn|
-        conn.
-      }
+        conn.lindex(@rname, -1)
+      }
       return 0 unless entry
       job = Sidekiq.load_json(entry)
       now = Time.now.to_f
@@ -391,13 +391,13 @@ module Sidekiq
     def display_args
       # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
       @display_args ||= if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
-        job_args = self["wrapped"] ? args[0]["arguments"] : []
+        job_args = self["wrapped"] ? deserialize_argument(args[0]["arguments"]) : []
         if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob"
           # remove MailerClass, mailer_method and 'deliver_now'
           job_args.drop(3)
         elsif (self["wrapped"] || args[0]) == "ActionMailer::MailDeliveryJob"
           # remove MailerClass, mailer_method and 'deliver_now'
-          job_args.drop(3).first
+          job_args.drop(3).first.values_at("params", "args")
         else
           job_args
         end
@@ -467,6 +467,29 @@ module Sidekiq

     private

+    ACTIVE_JOB_PREFIX = "_aj_"
+    GLOBALID_KEY = "_aj_globalid"
+
+    def deserialize_argument(argument)
+      case argument
+      when Array
+        argument.map { |arg| deserialize_argument(arg) }
+      when Hash
+        if serialized_global_id?(argument)
+          argument[GLOBALID_KEY]
+        else
+          argument.transform_values { |v| deserialize_argument(v) }
+            .reject { |k, _| k.start_with?(ACTIVE_JOB_PREFIX) }
+        end
+      else
+        argument
+      end
+    end
+
+    def serialized_global_id?(hash)
+      hash.size == 1 && hash.include?(GLOBALID_KEY)
+    end
+
     def uncompress_backtrace(backtrace)
       decoded = Base64.decode64(backtrace)
       uncompressed = Zlib::Inflate.inflate(decoded)
@@ -548,7 +571,7 @@ module Sidekiq
     def remove_job
       Sidekiq.redis do |conn|
         results = conn.multi { |transaction|
-          transaction.
+          transaction.zrange(parent.name, score, score, "BYSCORE")
           transaction.zremrangebyscore(parent.name, score, score)
         }.first

@@ -683,7 +706,7 @@ module Sidekiq
       end

       elements = Sidekiq.redis { |conn|
-        conn.
+        conn.zrange(name, begin_score, end_score, "BYSCORE", withscores: true)
       }

       elements.each_with_object([]) do |element, result|
@@ -724,7 +747,7 @@ module Sidekiq
     # @api private
     def delete_by_jid(score, jid)
       Sidekiq.redis do |conn|
-        elements = conn.
+        elements = conn.zrange(name, score, score, "BYSCORE")
         elements.each do |element|
           if element.index(jid)
             message = Sidekiq.load_json(element)
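The `deserialize_argument` helper added above is what makes ActiveJob arguments readable in the Web UI. A standalone illustration with made-up serialized data (not taken from this package):

    serialized = [
      {"_aj_globalid" => "gid://app/User/123"},
      {"name" => "report", "_aj_symbol_keys" => ["name"]}
    ]
    # After the unwrapping in display_args, the Web UI would show roughly:
    #   ["gid://app/User/123", {"name" => "report"}]
    # GlobalID hashes collapse to their gid string and the _aj_* bookkeeping keys are dropped.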
data/lib/sidekiq/cli.rb
CHANGED
@@ -230,6 +230,7 @@ module Sidekiq # :nodoc:
       # Both Sinatra 2.0+ and Sidekiq support this term.
       # RAILS_ENV and RACK_ENV are there for legacy support.
       @environment = cli_env || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
+      config[:environment] = @environment
     end

     def symbolize_keys_deep!(hash)
@@ -396,7 +397,7 @@ module Sidekiq # :nodoc:
     end

     def parse_config(path)
-      erb = ERB.new(File.read(path))
+      erb = ERB.new(File.read(path), trim_mode: "-")
       erb.filename = File.expand_path(path)
       opts = YAML.safe_load(erb.result, permitted_classes: [Symbol], aliases: true) || {}

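The `trim_mode: "-"` change to `parse_config` means ERB tags in sidekiq.yml can use `<%-`/`-%>` to avoid leaving blank lines behind. A minimal sketch with a hypothetical queue list:

    require "erb"

    template = <<~YAML
      :queues:
      <%- %w[critical default].each do |q| -%>
        - <%= q %>
      <%- end -%>
    YAML

    puts ERB.new(template, trim_mode: "-").result
    # :queues:
    #   - critical
    #   - default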
data/lib/sidekiq/client.rb
CHANGED
@@ -66,6 +66,7 @@ module Sidekiq
     # args - an array of simple arguments to the perform method, must be JSON-serializable
     # at - timestamp to schedule the job (optional), must be Numeric (e.g. Time.now.to_f)
     # retry - whether to retry this job if it fails, default true or an integer number of retries
+    # retry_for - relative amount of time to retry this job if it fails, default nil
     # backtrace - whether to save any error backtrace, default false
     #
     # If class is set to the class name, the jobs' options will be based on Sidekiq's default
@@ -96,8 +97,9 @@ module Sidekiq

     ##
     # Push a large number of jobs to Redis. This method cuts out the redis
-    # network round trip latency.
-    # 1000
+    # network round trip latency. It pushes jobs in batches if more than
+    # `:batch_size` (1000 by default) of jobs are passed. I wouldn't recommend making `:batch_size`
+    # larger than 1000 but YMMV based on network quality, size of job args, etc.
     # A large number of jobs can cause a bit of Redis command processing latency.
     #
     # Takes the same arguments as #push except that args is expected to be
@@ -105,13 +107,15 @@ module Sidekiq
     # is run through the client middleware pipeline and each job gets its own Job ID
     # as normal.
     #
-    # Returns an array of the of pushed jobs' jids
-    #
+    # Returns an array of the of pushed jobs' jids, may contain nils if any client middleware
+    # prevented a job push.
+    #
+    # Example (pushing jobs in batches):
+    #   push_bulk('class' => 'MyJob', 'args' => (1..100_000).to_a, batch_size: 1_000)
+    #
     def push_bulk(items)
+      batch_size = items.delete(:batch_size) || items.delete("batch_size") || 1_000
       args = items["args"]
-      raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless args.is_a?(Array) && args.all?(Array)
-      return [] if args.empty? # no jobs to push
-
       at = items.delete("at")
       raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
       raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
@@ -120,18 +124,26 @@ module Sidekiq
       raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1

       normed = normalize_item(items)
-
-
-
-
-
-        copy
-
-
-
+      result = args.each_slice(batch_size).flat_map do |slice|
+        raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless slice.is_a?(Array) && slice.all?(Array)
+        break [] if slice.empty? # no jobs to push
+
+        payloads = slice.map.with_index { |job_args, index|
+          copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
+          copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
+          result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
+            verify_json(copy)
+            copy
+          end
+          result || nil
+        }
+
+        to_push = payloads.compact
+        raw_push(to_push) unless to_push.empty?
+        payloads.map { |payload| payload&.[]("jid") }
+      end

-
-      payloads.collect { |payload| payload["jid"] }
+      result.is_a?(Enumerator::Lazy) ? result.force : result
     end

     # Allows sharding of jobs across any number of Redis instances. All jobs
@@ -160,8 +172,8 @@ module Sidekiq
       new.push(item)
     end

-    def push_bulk(
-      new.push_bulk(
+    def push_bulk(...)
+      new.push_bulk(...)
     end

     # Resque compatibility helpers. Note all helpers
@@ -235,6 +247,8 @@ module Sidekiq
         if payloads.first.key?("at")
           conn.zadd("schedule", payloads.flat_map { |hash|
             at = hash.delete("at").to_s
+            # ActiveJob sets this but the job has not been enqueued yet
+            hash.delete("enqueued_at")
             [at, Sidekiq.dump_json(hash)]
           })
         else
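A hedged usage sketch of the batching added above (the job class name is illustrative): `push_bulk` now slices `'args'` into `batch_size` groups and pushes each group in its own pipelined round trip, so large or even lazy argument lists no longer require building every payload up front:

    Sidekiq::Client.push_bulk(
      "class" => "HardJob",
      "args" => (1..100_000).lazy.map { |i| [i] },  # lazy enumerables are accepted as of 7.1.0
      "batch_size" => 1_000                         # the default; smaller batches trade throughput for latency
    )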
data/lib/sidekiq/component.rb
CHANGED
data/lib/sidekiq/config.rb
CHANGED
@@ -30,7 +30,8 @@ module Sidekiq
     },
     dead_max_jobs: 10_000,
     dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
-    reloader: proc { |&block| block.call }
+    reloader: proc { |&block| block.call },
+    backtrace_cleaner: ->(backtrace) { backtrace }
   }

   ERROR_HANDLER = ->(ex, ctx) {
@@ -38,7 +39,10 @@ module Sidekiq
     l = cfg.logger
     l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
     l.warn("#{ex.class.name}: #{ex.message}")
-
+    unless ex.backtrace.nil?
+      backtrace = cfg[:backtrace_cleaner].call(ex.backtrace)
+      l.warn(backtrace.join("\n"))
+    end
   }

   def initialize(options = {})
@@ -52,6 +56,10 @@ module Sidekiq
   def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
   attr_reader :capsules

+  def to_json(*)
+    Sidekiq.dump_json(@options)
+  end
+
   # LEGACY: edits the default capsule
   # config.concurrency = 5
   def concurrency=(val)
@@ -123,7 +131,7 @@ module Sidekiq
   private def local_redis_pool
     # this is our internal client/housekeeping pool. each capsule has its
     # own pool for executing threads.
-    @redis ||= new_redis_pool(
+    @redis ||= new_redis_pool(10, "internal")
   end

   def new_redis_pool(size, name = "unset")
@@ -259,7 +267,7 @@ module Sidekiq
     ctx[:_config] = self
     @options[:error_handlers].each do |handler|
       handler.call(ex, ctx)
-    rescue => e
+    rescue Exception => e
       l = logger
       l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
       l.error e
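The new `backtrace_cleaner` option defaults to the pass-through lambda shown above. A minimal sketch of setting it by hand, assuming a Rails app and that nothing else (such as the `rails.rb` change listed in this release) has already set it:

    Sidekiq.configure_server do |config|
      # Receives the raw backtrace (an Array of Strings) before it is logged
      # by the default error handler or compressed into the retry payload.
      config[:backtrace_cleaner] = ->(backtrace) { Rails.backtrace_cleaner.clean(backtrace) }
    end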
data/lib/sidekiq/fetch.rb
CHANGED
@@ -44,7 +44,7 @@ module Sidekiq # :nodoc:
         return nil
       end

-      queue, job = redis { |conn| conn.blocking_call(
+      queue, job = redis { |conn| conn.blocking_call(conn.read_timeout + TIMEOUT, "brpop", *qs, TIMEOUT) }
       UnitOfWork.new(queue, job, config) if queue
     end

data/lib/sidekiq/job.rb
CHANGED
@@ -239,11 +239,7 @@ module Sidekiq

       def perform_bulk(args, batch_size: 1_000)
         client = @klass.build_client
-
-          client.push_bulk(@opts.merge("class" => @klass, "args" => slice))
-        end
-
-        result.is_a?(Enumerator::Lazy) ? result.force : result
+        client.push_bulk(@opts.merge("class" => @klass, "args" => args, :batch_size => batch_size))
       end

       # +interval+ must be a timestamp, numeric or something that acts
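`perform_bulk` keeps its public signature and now simply forwards `batch_size` to `push_bulk`. Usage, with an illustrative job class and argument list:

    # One MyJob per argument array, pushed 1,000 jobs per Redis round trip by default.
    MyJob.perform_bulk([[1], [2], [3]])
    MyJob.perform_bulk(huge_list_of_args, batch_size: 500)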
data/lib/sidekiq/job_retry.rb
CHANGED
@@ -71,6 +71,7 @@ module Sidekiq
     def initialize(capsule)
       @config = @capsule = capsule
       @max_retries = Sidekiq.default_configuration[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
+      @backtrace_cleaner = Sidekiq.default_configuration[:backtrace_cleaner]
     end

     # The global retry handler requires only the barest of data.
@@ -159,18 +160,21 @@ module Sidekiq
       end

       if msg["backtrace"]
+        backtrace = @backtrace_cleaner.call(exception.backtrace)
         lines = if msg["backtrace"] == true
-
+          backtrace
         else
-
+          backtrace[0...msg["backtrace"].to_i]
         end

         msg["error_backtrace"] = compress_backtrace(lines)
       end

-      # Goodbye dear message, you (re)tried your best I'm sure.
       return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts

+      rf = msg["retry_for"]
+      return retries_exhausted(jobinst, msg, exception) if rf && ((msg["failed_at"] + rf) < Time.now.to_f)
+
       strategy, delay = delay_for(jobinst, count, exception, msg)
       case strategy
       when :discard
@@ -195,7 +199,14 @@ module Sidekiq
       # sidekiq_retry_in can return two different things:
       # 1. When to retry next, as an integer of seconds
      # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
-      jobinst&.sidekiq_retry_in_block
+      block = jobinst&.sidekiq_retry_in_block
+
+      # the sidekiq_retry_in_block can be defined in a wrapped class (ActiveJob for instance)
+      unless msg["wrapped"].nil?
+        wrapped = Object.const_get(msg["wrapped"])
+        block = wrapped.respond_to?(:sidekiq_retry_in_block) ? wrapped.sidekiq_retry_in_block : nil
+      end
+      block&.call(count, exception, msg)
     rescue Exception => e
       handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
       nil
@@ -217,6 +228,12 @@ module Sidekiq
     def retries_exhausted(jobinst, msg, exception)
       begin
         block = jobinst&.sidekiq_retries_exhausted_block
+
+        # the sidekiq_retries_exhausted_block can be defined in a wrapped class (ActiveJob for instance)
+        unless msg["wrapped"].nil?
+          wrapped = Object.const_get(msg["wrapped"])
+          block = wrapped.respond_to?(:sidekiq_retries_exhausted_block) ? wrapped.sidekiq_retries_exhausted_block : nil
+        end
         block&.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
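For orientation, these are the two class-level hooks this file consults, shown on a plain Sidekiq worker with illustrative names; per the changes above, when a payload carries a `"wrapped"` ActiveJob class, the same blocks are now looked up on that wrapped class as well:

    class BillingJob
      include Sidekiq::Job

      # Consulted by delay_for: return seconds, or :discard / :kill / :default.
      sidekiq_retry_in { |count, _exception, _msg| (count + 1) * 60 }

      # Consulted by retries_exhausted once the retry budget is spent.
      sidekiq_retries_exhausted do |msg, exception|
        Sidekiq.logger.warn("Giving up on #{msg["class"]} #{msg["jid"]}: #{exception&.message}")
      end

      def perform(invoice_id)
        # ...
      end
    end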
data/lib/sidekiq/job_util.rb
CHANGED
@@ -9,10 +9,11 @@ module Sidekiq

     def validate(item)
       raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
-      raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
+      raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array) || item["args"].is_a?(Enumerator::Lazy)
       raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
       raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
       raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+      raise(ArgumentError, "retry_for must be a relative amount of time, e.g. 48.hours `#{item}`") if item["retry_for"] && item["retry_for"] > 1_000_000_000
     end

     def verify_json(item)
@@ -24,7 +25,7 @@ module Sidekiq
       if (unsafe_item = json_unsafe?(args))
         msg = <<~EOM
           Job arguments to #{job_class} must be native JSON types, but #{unsafe_item.inspect} is a #{unsafe_item.class}.
-          See https://github.com/sidekiq/sidekiq/wiki/Best-Practices
+          See https://github.com/sidekiq/sidekiq/wiki/Best-Practices
           To disable this error, add `Sidekiq.strict_args!(false)` to your initializer.
         EOM

@@ -54,6 +55,7 @@ module Sidekiq
       item["jid"] ||= SecureRandom.hex(12)
       item["class"] = item["class"].to_s
       item["queue"] = item["queue"].to_s
+      item["retry_for"] = item["retry_for"].to_i if item["retry_for"]
       item["created_at"] ||= Time.now.to_f
       item
     end
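A sketch of the option this validation and normalization support (the job class is illustrative): `retry_for` is a relative window in seconds, coerced to an integer by `normalize_item`, and values above 1_000_000_000 are rejected, presumably to catch absolute epoch timestamps passed by mistake:

    class NightlySyncJob
      include Sidekiq::Job
      # Keep retrying for up to 48 hours after the first failure, then give up.
      # With ActiveSupport loaded this can be written as `retry_for: 48.hours`.
      sidekiq_options retry_for: 48 * 60 * 60

      def perform(account_id)
        # ...
      end
    end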
data/lib/sidekiq/launcher.rb
CHANGED
@@ -166,7 +166,7 @@ module Sidekiq
       conn.multi { |transaction|
         transaction.sadd("processes", [key])
         transaction.exists(key)
-        transaction.
+        transaction.hset(key, "info", to_json,
           "busy", curstate.size,
           "beat", Time.now.to_f,
           "rtt_us", rtt,
data/lib/sidekiq/metrics/query.rb
CHANGED
@@ -70,7 +70,7 @@ module Sidekiq
         result.job_results[klass].add_metric "ms", time, ms.to_i if ms
         result.job_results[klass].add_metric "p", time, p.to_i if p
         result.job_results[klass].add_metric "f", time, f.to_i if f
-        result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time)
+        result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time).reverse
         time -= 60
       end
     end
data/lib/sidekiq/metrics/shared.rb
CHANGED
@@ -29,8 +29,8 @@ module Sidekiq
       1100, 1700, 2500, 3800, 5750,
       8500, 13000, 20000, 30000, 45000,
       65000, 100000, 150000, 225000, 335000,
-
-    ]
+      1e20 # the "maybe your job is too long" bucket
+    ].freeze
     LABELS = [
       "20ms", "30ms", "45ms", "65ms", "100ms",
       "150ms", "225ms", "335ms", "500ms", "750ms",
@@ -38,7 +38,7 @@ module Sidekiq
       "8.5s", "13s", "20s", "30s", "45s",
       "65s", "100s", "150s", "225s", "335s",
       "Slow"
-    ]
+    ].freeze
     FETCH = "GET u16 #0 GET u16 #1 GET u16 #2 GET u16 #3 \
       GET u16 #4 GET u16 #5 GET u16 #6 GET u16 #7 \
       GET u16 #8 GET u16 #9 GET u16 #10 GET u16 #11 \
@@ -73,7 +73,7 @@ module Sidekiq
     def fetch(conn, now = Time.now)
       window = now.utc.strftime("%d-%H:%-M")
       key = "#{@klass}-#{window}"
-      conn.
+      conn.bitfield_ro(key, *FETCH)
     end

     def persist(conn, now = Time.now)
|