sidekiq 7.1.4 → 7.3.9
This diff shows the changes between two publicly released versions of the package, as they appear in the public registry, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/Changes.md +204 -0
- data/README.md +5 -5
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiqload +21 -12
- data/lib/active_job/queue_adapters/sidekiq_adapter.rb +75 -0
- data/lib/generators/sidekiq/job_generator.rb +2 -0
- data/lib/sidekiq/api.rb +139 -44
- data/lib/sidekiq/capsule.rb +8 -3
- data/lib/sidekiq/cli.rb +4 -1
- data/lib/sidekiq/client.rb +26 -4
- data/lib/sidekiq/component.rb +22 -0
- data/lib/sidekiq/config.rb +40 -7
- data/lib/sidekiq/deploy.rb +4 -2
- data/lib/sidekiq/embedded.rb +2 -0
- data/lib/sidekiq/fetch.rb +1 -1
- data/lib/sidekiq/iterable_job.rb +55 -0
- data/lib/sidekiq/job/interrupt_handler.rb +24 -0
- data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
- data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
- data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
- data/lib/sidekiq/job/iterable.rb +294 -0
- data/lib/sidekiq/job.rb +14 -3
- data/lib/sidekiq/job_logger.rb +7 -6
- data/lib/sidekiq/job_retry.rb +9 -4
- data/lib/sidekiq/job_util.rb +2 -0
- data/lib/sidekiq/launcher.rb +7 -5
- data/lib/sidekiq/logger.rb +1 -1
- data/lib/sidekiq/metrics/query.rb +6 -1
- data/lib/sidekiq/metrics/shared.rb +15 -4
- data/lib/sidekiq/metrics/tracking.rb +20 -8
- data/lib/sidekiq/middleware/current_attributes.rb +46 -13
- data/lib/sidekiq/middleware/modules.rb +2 -0
- data/lib/sidekiq/monitor.rb +2 -1
- data/lib/sidekiq/paginator.rb +8 -2
- data/lib/sidekiq/processor.rb +21 -11
- data/lib/sidekiq/rails.rb +24 -13
- data/lib/sidekiq/redis_client_adapter.rb +25 -7
- data/lib/sidekiq/redis_connection.rb +37 -8
- data/lib/sidekiq/ring_buffer.rb +2 -0
- data/lib/sidekiq/scheduled.rb +2 -2
- data/lib/sidekiq/systemd.rb +2 -0
- data/lib/sidekiq/testing.rb +32 -13
- data/lib/sidekiq/transaction_aware_client.rb +7 -0
- data/lib/sidekiq/version.rb +5 -1
- data/lib/sidekiq/web/action.rb +26 -4
- data/lib/sidekiq/web/application.rb +58 -19
- data/lib/sidekiq/web/csrf_protection.rb +8 -5
- data/lib/sidekiq/web/helpers.rb +95 -35
- data/lib/sidekiq/web/router.rb +5 -2
- data/lib/sidekiq/web.rb +54 -2
- data/lib/sidekiq.rb +5 -3
- data/sidekiq.gemspec +3 -2
- data/web/assets/javascripts/application.js +26 -0
- data/web/assets/javascripts/dashboard-charts.js +37 -11
- data/web/assets/javascripts/dashboard.js +14 -10
- data/web/assets/javascripts/metrics.js +34 -0
- data/web/assets/stylesheets/application-rtl.css +10 -0
- data/web/assets/stylesheets/application.css +38 -3
- data/web/locales/en.yml +5 -1
- data/web/locales/fr.yml +0 -1
- data/web/locales/gd.yml +0 -1
- data/web/locales/it.yml +32 -1
- data/web/locales/ja.yml +0 -1
- data/web/locales/pt-br.yml +1 -2
- data/web/locales/tr.yml +100 -0
- data/web/locales/uk.yml +24 -1
- data/web/locales/zh-cn.yml +0 -1
- data/web/locales/zh-tw.yml +0 -1
- data/web/views/_footer.erb +12 -1
- data/web/views/_metrics_period_select.erb +1 -1
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +7 -7
- data/web/views/dashboard.erb +29 -36
- data/web/views/filtering.erb +6 -0
- data/web/views/layout.erb +6 -6
- data/web/views/metrics.erb +38 -30
- data/web/views/metrics_for_job.erb +29 -38
- data/web/views/morgue.erb +2 -2
- data/web/views/queue.erb +1 -1
- data/web/views/queues.erb +6 -2
- metadata +34 -13
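Among the new files above, data/lib/sidekiq/iterable_job.rb and the data/lib/sidekiq/job/iterable/* enumerators introduce interruptible, resumable jobs. A minimal sketch of how such a job is declared, assuming the build_enumerator/each_iteration contract and the active_record_records_enumerator helper described in the Sidekiq 7.3 iterable-job documentation (CleanupUsersJob and User are placeholders):

# hypothetical app/sidekiq/cleanup_users_job.rb
require "sidekiq"

class CleanupUsersJob
  include Sidekiq::IterableJob

  # Called on every (re)start; the cursor is whatever the last completed
  # iteration recorded, so an interrupted job resumes where it left off.
  def build_enumerator(cursor:)
    active_record_records_enumerator(User.where(active: false), cursor: cursor)
  end

  # Called once per record yielded by the enumerator.
  def each_iteration(user)
    user.destroy
  end
end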
data/lib/sidekiq/logger.rb
CHANGED
data/lib/sidekiq/metrics/query.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "sidekiq"
 require "date"
 require "set"
@@ -20,7 +22,8 @@ module Sidekiq
       end

       # Get metric data for all jobs from the last hour
+      # +class_filter+: return only results for classes matching filter
+      def top_jobs(class_filter: nil, minutes: 60)
         result = Result.new

         time = @time
@@ -39,6 +42,7 @@ module Sidekiq
         redis_results.each do |hash|
           hash.each do |k, v|
             kls, metric = k.split("|")
+            next if class_filter && !class_filter.match?(kls)
             result.job_results[kls].add_metric metric, time, v.to_i
           end
           time -= 60
@@ -117,6 +121,7 @@ module Sidekiq

       def total_avg(metric = "ms")
         completed = totals["p"] - totals["f"]
+        return 0 if completed.zero?
         totals[metric].to_f / completed
       end
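The class_filter argument added to top_jobs above is applied with #match?, so a Regexp (or anything responding to match?) narrows the report to matching job classes. A rough sketch, assuming Sidekiq::Metrics::Query can be instantiated with its defaults and that /Billing/ is a placeholder pattern:

require "sidekiq/metrics/query"

query = Sidekiq::Metrics::Query.new
# Metrics for the last 30 minutes, restricted to classes matching /Billing/.
result = query.top_jobs(class_filter: /Billing/, minutes: 30)
result.job_results.each_key { |klass| puts klass }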
data/lib/sidekiq/metrics/shared.rb
CHANGED
@@ -1,10 +1,21 @@
+# frozen_string_literal: true

 module Sidekiq
   module Metrics
+    class Counter
+      def initialize
+        @value = 0
+        @lock = Mutex.new
+      end
+
+      def increment
+        @lock.synchronize { @value += 1 }
+      end
+
+      def value
+        @lock.synchronize { @value }
+      end
+    end

     # Implements space-efficient but statistically useful histogram storage.
     # A precise time histogram stores every time. Instead we break times into a set of
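The Counter added above replaces the previous concurrent-ruby based counter with a plain Mutex. It is internal to the metrics subsystem, but its behavior is easy to see in isolation:

require "sidekiq/metrics/shared"

counter = Sidekiq::Metrics::Counter.new
5.times { counter.increment } # each increment is synchronized on the Mutex
counter.value                 # => 5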
data/lib/sidekiq/metrics/tracking.rb
CHANGED
@@ -31,11 +31,11 @@ module Sidekiq
         # We don't track time for failed jobs as they can have very unpredictable
         # execution times. more important to know average time for successful jobs so we
         # can better recognize when a perf regression is introduced.
+        track_time(klass, time_ms)
+      rescue JobRetry::Skip
+        # This is raised when iterable job is interrupted.
+        track_time(klass, time_ms)
+        raise
       rescue Exception
         @lock.synchronize {
           @jobs["#{klass}|f"] += 1
@@ -100,15 +100,27 @@ module Sidekiq

      private

+     def track_time(klass, time_ms)
+       @lock.synchronize {
+         @grams[klass].record_time(time_ms)
+         @jobs["#{klass}|ms"] += time_ms
+         @totals["ms"] += time_ms
+       }
+     end
+
      def reset
        @lock.synchronize {
          array = [@totals, @jobs, @grams]
-         @jobs = Hash.new(0)
-         @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
+         reset_instance_variables
          array
        }
      end
+
+     def reset_instance_variables
+       @totals = Hash.new(0)
+       @jobs = Hash.new(0)
+       @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
+     end
    end

    class Middleware
data/lib/sidekiq/middleware/current_attributes.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "active_support/current_attributes"

 module Sidekiq
@@ -31,11 +33,26 @@ module Sidekiq
             attrs = strklass.constantize.attributes
             # Retries can push the job N times, we don't
             # want retries to reset cattr. #5692, #5090
+            if attrs.any?
+              # Older rails has a bug that `CurrentAttributes#attributes` always returns
+              # the same hash instance. We need to dup it to avoid being accidentally mutated.
+              job[key] = if returns_same_object?
+                attrs.dup
+              else
+                attrs
+              end
+            end
           end
         end
         yield
       end
+
+      private
+
+      def returns_same_object?
+        ActiveSupport::VERSION::MAJOR < 8 ||
+          (ActiveSupport::VERSION::MAJOR == 8 && ActiveSupport::VERSION::MINOR == 0)
+      end
     end

     class Load
@@ -46,22 +63,38 @@ module Sidekiq
       end

       def call(_, job, _, &block)
+        klass_attrs = {}

         @cattrs.each do |(key, strklass)|
-          constklass = strklass.constantize
-          cattrs_to_reset << constklass
+          next unless job.has_key?(key)

-            constklass.public_send("#{attribute}=", value)
-          end
-        end
+          klass_attrs[strklass.constantize] = job[key]
         end

+        wrap(klass_attrs.to_a, &block)
+      end
+
+      private
+
+      def wrap(klass_attrs, &block)
+        klass, attrs = klass_attrs.shift
+        return block.call unless klass
+
+        retried = false
+
+        begin
+          klass.set(attrs) do
+            wrap(klass_attrs, &block)
+          end
+        rescue NoMethodError
+          raise if retried
+
+          # It is possible that the `CurrentAttributes` definition
+          # was changed before the job started processing.
+          attrs = attrs.select { |attr| klass.respond_to?(attr) }
+          retried = true
+          retry
+        end
       end
     end

@@ -70,7 +103,7 @@ module Sidekiq
       cattrs = build_cattrs_hash(klass_or_array)

       config.client_middleware.add Save, cattrs
+      config.server_middleware.prepend Load, cattrs
     end

     private
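The Save and Load middleware above are normally installed through Sidekiq::CurrentAttributes.persist rather than registered by hand. A minimal sketch, assuming Myapp::Current is a placeholder ActiveSupport::CurrentAttributes subclass in the application:

# hypothetical config/initializers/sidekiq.rb
require "sidekiq/middleware/current_attributes"

# Save captures Myapp::Current.attributes when a job is pushed; Load (now
# prepended to the server chain) restores them around execution via klass.set.
Sidekiq::CurrentAttributes.persist("Myapp::Current")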
data/lib/sidekiq/monitor.rb
CHANGED
@@ -1,4 +1,5 @@
 #!/usr/bin/env ruby
+# frozen_string_literal: true

 require "fileutils"
 require "sidekiq/api"
@@ -98,7 +99,7 @@ class Sidekiq::Monitor
       pad = opts[:pad] || 0
       max_length = opts[:max_length] || (80 - pad)
       out = []
-      line = ""
+      line = +""
       values.each do |value|
         if (line.length + value.length) > max_length
           out << line
data/lib/sidekiq/paginator.rb
CHANGED
@@ -2,6 +2,12 @@

 module Sidekiq
   module Paginator
+    TYPE_CACHE = {
+      "dead" => "zset",
+      "retry" => "zset",
+      "schedule" => "zset"
+    }
+
     def page(key, pageidx = 1, page_size = 25, opts = nil)
       current_page = (pageidx.to_i < 1) ? 1 : pageidx.to_i
       pageidx = current_page - 1
@@ -19,9 +25,9 @@ module Sidekiq
         total_size, items = conn.multi { |transaction|
           transaction.zcard(key)
           if rev
+            transaction.zrange(key, starting, ending, "REV", "withscores")
           else
+            transaction.zrange(key, starting, ending, "withscores")
           end
         }
         [current_page, total_size, items]
data/lib/sidekiq/processor.rb
CHANGED
@@ -36,7 +36,7 @@ module Sidekiq
      @job = nil
      @thread = nil
      @reloader = Sidekiq.default_configuration[:reloader]
+     @job_logger = (capsule.config[:job_logger] || Sidekiq::JobLogger).new(capsule.config)
      @retrier = Sidekiq::JobRetry.new(capsule)
    end

@@ -58,6 +58,10 @@
      @thread.value if wait
    end

+   def stopping?
+     @done
+   end
+
    def start
      @thread ||= safe_thread("#{config.name}/processor", &method(:run))
    end
@@ -134,10 +138,11 @@
      # Effectively this block denotes a "unit of work" to Rails.
      @reloader.call do
        klass = Object.const_get(job_hash["class"])
+       instance = klass.new
+       instance.jid = job_hash["jid"]
+       instance._context = self
+       @retrier.local(instance, jobstr, queue) do
+         yield instance
        end
      end
    end
@@ -175,9 +180,9 @@
      ack = false
      Thread.handle_interrupt(IGNORE_SHUTDOWN_INTERRUPTS) do
        Thread.handle_interrupt(ALLOW_SHUTDOWN_INTERRUPTS) do
+         dispatch(job_hash, queue, jobstr) do |instance|
+           config.server_middleware.invoke(instance, job_hash, queue) do
+             execute_job(instance, job_hash["args"])
            end
          end
          ack = true
@@ -185,9 +190,14 @@
      # Had to force kill this job because it didn't finish
      # within the timeout. Don't acknowledge the work since
      # we didn't properly finish it.
+   rescue Sidekiq::JobRetry::Skip => s
+     # Skip means we handled this error elsewhere. We don't
+     # need to log or report the error.
+     ack = true
+     raise s
    rescue Sidekiq::JobRetry::Handled => h
      # this is the common case: job raised error and Sidekiq::JobRetry::Handled
+     # signals that we created a retry successfully. We can acknowledge the job.
      ack = true
      e = h.cause || h
      handle_exception(e, {context: "Job raised exception", job: job_hash})
@@ -206,8 +216,8 @@
      end
    end

+   def execute_job(instance, cloned_args)
+     instance.perform(*cloned_args)
    end

    # Ruby doesn't provide atomic counters out of the box so we'll
data/lib/sidekiq/rails.rb
CHANGED
@@ -4,6 +4,17 @@ require "sidekiq/job"
 require "rails"

 module Sidekiq
+  module ActiveJob
+    # @api private
+    class Wrapper
+      include Sidekiq::Job
+
+      def perform(job_data)
+        ::ActiveJob::Base.execute(job_data.merge("provider_job_id" => jid))
+      end
+    end
+  end
+
   class Rails < ::Rails::Engine
     class Reloader
       def initialize(app = ::Rails.application)
@@ -21,8 +32,8 @@
        "#<Sidekiq::Rails::Reloader @app=#{@app.class.name}>"
      end

+     def to_hash
+       {app: @app.class.name}
      end
    end

@@ -39,21 +50,11 @@
    # end
    initializer "sidekiq.active_job_integration" do
      ActiveSupport.on_load(:active_job) do
+       require_relative "../active_job/queue_adapters/sidekiq_adapter"
        include ::Sidekiq::Job::Options unless respond_to?(:sidekiq_options)
      end
    end

-   initializer "sidekiq.rails_logger" do
-     Sidekiq.configure_server do |config|
-       # This is the integration code necessary so that if a job uses `Rails.logger.info "Hello"`,
-       # it will appear in the Sidekiq console with all of the job context. See #5021 and
-       # https://github.com/rails/rails/blob/b5f2b550f69a99336482739000c58e4e04e033aa/railties/lib/rails/commands/server/server_command.rb#L82-L84
-       unless ::Rails.logger == config.logger || ::ActiveSupport::Logger.logger_outputs_to?(::Rails.logger, $stdout)
-         ::Rails.logger.extend(::ActiveSupport::Logger.broadcast(config.logger))
-       end
-     end
-   end
-
    initializer "sidekiq.backtrace_cleaner" do
      Sidekiq.configure_server do |config|
        config[:backtrace_cleaner] = ->(backtrace) { ::Rails.backtrace_cleaner.clean(backtrace) }
@@ -67,6 +68,16 @@
    config.after_initialize do
      Sidekiq.configure_server do |config|
        config[:reloader] = Sidekiq::Rails::Reloader.new
+
+       # This is the integration code necessary so that if a job uses `Rails.logger.info "Hello"`,
+       # it will appear in the Sidekiq console with all of the job context.
+       unless ::Rails.logger == config.logger || ::ActiveSupport::Logger.logger_outputs_to?(::Rails.logger, $stdout)
+         if ::Rails.logger.respond_to?(:broadcast_to)
+           ::Rails.logger.broadcast_to(config.logger)
+         else
+           ::Rails.logger.extend(::ActiveSupport::Logger.broadcast(config.logger))
+         end
+       end
      end
    end
  end
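With the Active Job adapter extracted to data/lib/active_job/queue_adapters/sidekiq_adapter.rb and wrapped by the private Sidekiq::ActiveJob::Wrapper job above, the Rails side is still configured the usual way; a sketch of the standard setting (Myapp is a placeholder application name):

# hypothetical config/application.rb
module Myapp
  class Application < Rails::Application
    # Route Active Job through Sidekiq; the adapter file shown in this diff
    # is what Rails loads for the :sidekiq symbol.
    config.active_job.queue_adapter = :sidekiq
  end
end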
data/lib/sidekiq/redis_client_adapter.rb
CHANGED
@@ -21,6 +21,22 @@ module Sidekiq
      @client.call("EVALSHA", sha, keys.size, *keys, *argv)
    end

+   # this is the set of Redis commands used by Sidekiq. Not guaranteed
+   # to be comprehensive, we use this as a performance enhancement to
+   # avoid calling method_missing on most commands
+   USED_COMMANDS = %w[bitfield bitfield_ro del exists expire flushdb
+     get hdel hget hgetall hincrby hlen hmget hset hsetnx incr incrby
+     lindex llen lmove lpop lpush lrange lrem mget mset ping pttl
+     publish rpop rpush sadd scard script set sismember smembers
+     srem ttl type unlink zadd zcard zincrby zrange zrem
+     zremrangebyrank zremrangebyscore]
+
+   USED_COMMANDS.each do |name|
+     define_method(name) do |*args, **kwargs|
+       @client.call(name, *args, **kwargs)
+     end
+   end
+
    private

    # this allows us to use methods like `conn.hmset(...)` instead of having to use
@@ -48,6 +64,13 @@
      opts = client_opts(options)
      @config = if opts.key?(:sentinels)
        RedisClient.sentinel(**opts)
+     elsif opts.key?(:nodes)
+       # Sidekiq does not support Redis clustering but Sidekiq Enterprise's
+       # rate limiters are cluster-safe so we can scale to millions
+       # of rate limiters using a Redis cluster. This requires the
+       # `redis-cluster-client` gem.
+       # Sidekiq::Limiter.redis = { nodes: [...] }
+       RedisClient.cluster(**opts)
      else
        RedisClient.config(**opts)
      end
@@ -63,8 +86,7 @@
      opts = options.dup

      if opts[:namespace]
+       raise ArgumentError, "Your Redis configuration uses the namespace '#{opts[:namespace]}' but this feature is no longer supported in Sidekiq 7+. See https://github.com/sidekiq/sidekiq/blob/main/docs/7.0-Upgrade.md#redis-namespace."
      end

      opts.delete(:size)
@@ -75,13 +97,9 @@
        opts.delete(:network_timeout)
      end

-     if opts[:driver]
-       opts[:driver] = opts[:driver].to_sym
-     end
-
      opts[:name] = opts.delete(:master_name) if opts.key?(:master_name)
      opts[:role] = opts[:role].to_sym if opts.key?(:role)
+     opts[:driver] = opts[:driver].to_sym if opts.key?(:driver)

      # Issue #3303, redis-rb will silently retry an operation.
      # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
data/lib/sidekiq/redis_connection.rb
CHANGED
@@ -8,16 +8,28 @@ module Sidekiq
   module RedisConnection
     class << self
       def create(options = {})
+        symbolized_options = deep_symbolize_keys(options)
         symbolized_options[:url] ||= determine_redis_provider

         logger = symbolized_options.delete(:logger)
         logger&.info { "Sidekiq #{Sidekiq::VERSION} connecting to Redis with options #{scrub(symbolized_options)}" }

+        raise "Sidekiq 7+ does not support Redis protocol 2" if symbolized_options[:protocol] == 2
+
+        safe = !!symbolized_options.delete(:cluster_safe)
+        raise ":nodes not allowed, Sidekiq is not safe to run on Redis Cluster" if !safe && symbolized_options.key?(:nodes)
+
         size = symbolized_options.delete(:size) || 5
         pool_timeout = symbolized_options.delete(:pool_timeout) || 1
         pool_name = symbolized_options.delete(:pool_name)

+        # Default timeout in redis-client is 1 second, which can be too aggressive
+        # if the Sidekiq process is CPU-bound. With 10-15 threads and a thread quantum of 100ms,
+        # it can be easy to get the occasional ReadTimeoutError. You can still provide
+        # a smaller timeout explicitly:
+        # config.redis = { url: "...", timeout: 1 }
+        symbolized_options[:timeout] ||= 3
+
         redis_config = Sidekiq::RedisClientAdapter.new(symbolized_options)
         ConnectionPool.new(timeout: pool_timeout, size: size, name: pool_name) do
           redis_config.new_client
@@ -26,6 +38,19 @@

       private

+      def deep_symbolize_keys(object)
+        case object
+        when Hash
+          object.each_with_object({}) do |(key, value), result|
+            result[key.to_sym] = deep_symbolize_keys(value)
+          end
+        when Array
+          object.map { |e| deep_symbolize_keys(e) }
+        else
+          object
+        end
+      end
+
       def scrub(options)
         redacted = "REDACTED"

@@ -38,11 +63,17 @@
           uri.password = redacted
           scrubbed_options[:url] = uri.to_s
         end
+        scrubbed_options[:password] = redacted if scrubbed_options[:password]
+        scrubbed_options[:sentinel_password] = redacted if scrubbed_options[:sentinel_password]
         scrubbed_options[:sentinels]&.each do |sentinel|
+          if sentinel.is_a?(String)
+            if (uri = URI(sentinel)) && uri.password
+              uri.password = redacted
+              sentinel.replace(uri.to_s)
+            end
+          elsif sentinel[:password]
+            sentinel[:password] = redacted
+          end
         end
         scrubbed_options
       end
@@ -66,9 +97,7 @@
           EOM
         end

-        ENV[
-          p || "REDIS_URL"
-        ]
+        ENV[p.to_s] || ENV["REDIS_URL"]
      end
    end
  end
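As the new comment in create notes, the redis-client timeout now defaults to 3 seconds; pass timeout explicitly to keep the old 1-second behavior. A sketch using the standard configuration hooks:

# hypothetical config/initializers/sidekiq.rb
Sidekiq.configure_server do |config|
  # Restore the more aggressive 1s timeout instead of the new 3s default.
  config.redis = {url: ENV.fetch("REDIS_URL", "redis://localhost:6379/0"), timeout: 1}
end

Sidekiq.configure_client do |config|
  config.redis = {url: ENV.fetch("REDIS_URL", "redis://localhost:6379/0"), timeout: 1}
end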
data/lib/sidekiq/ring_buffer.rb
CHANGED
data/lib/sidekiq/scheduled.rb
CHANGED
@@ -144,7 +144,7 @@
      # In the example above, each process should schedule every 10 seconds on average. We special
      # case smaller clusters to add 50% so they would sleep somewhere between 5 and 15 seconds.
      # As we run more processes, the scheduling interval average will approach an even spread
+     # between 0 and poll interval so we don't need this artificial boost.
      #
      count = process_count
      interval = poll_interval_average(count)
@@ -193,7 +193,7 @@
    # should never depend on sidekiq/api.
    def cleanup
      # dont run cleanup more than once per minute
+     return 0 unless redis { |conn| conn.set("process_cleanup", "1", "NX", "EX", "60") }

      count = 0
      redis do |conn|
data/lib/sidekiq/systemd.rb
CHANGED
data/lib/sidekiq/testing.rb
CHANGED
@@ -5,23 +5,42 @@ require "sidekiq"

 module Sidekiq
   class Testing
+    class TestModeAlreadySetError < RuntimeError; end
     class << self
+      attr_accessor :__global_test_mode

+      # Calling without a block sets the global test mode, affecting
+      # all threads. Calling with a block only affects the current Thread.
       def __set_test_mode(mode)
         if block_given?
+          # Reentrant testing modes will lead to a rat's nest of code which is
+          # hard to reason about. You can set the testing mode once globally and
+          # you can override that global setting once per-thread.
+          raise TestModeAlreadySetError, "Nesting test modes is not supported" if __local_test_mode
+
+          self.__local_test_mode = mode
           begin
-            self.__test_mode = mode
             yield
           ensure
+            self.__local_test_mode = nil
           end
         else
+          self.__global_test_mode = mode
         end
       end

+      def __test_mode
+        __local_test_mode || __global_test_mode
+      end
+
+      def __local_test_mode
+        Thread.current[:__sidekiq_test_mode]
+      end
+
+      def __local_test_mode=(value)
+        Thread.current[:__sidekiq_test_mode] = value
+      end
+
       def disable!(&block)
         __set_test_mode(:disable, &block)
       end
@@ -64,7 +83,7 @@
    class EmptyQueueError < RuntimeError; end

    module TestingClient
+     def atomic_push(conn, payloads)
        if Sidekiq::Testing.fake?
          payloads.each do |job|
            job = Sidekiq.load_json(Sidekiq.dump_json(job))
@@ -93,7 +112,7 @@
  # The Queues class is only for testing the fake queue implementation.
  # There are 2 data structures involved in tandem. This is due to the
  # Rspec syntax of change(HardJob.jobs, :size). It keeps a reference
+ # to the array. Because the array was derived from a filter of the total
  # jobs enqueued, it appeared as though the array didn't change.
  #
  # To solve this, we'll keep 2 hashes containing the jobs. One with keys based
@@ -259,16 +278,16 @@
    def perform_one
      raise(EmptyQueueError, "perform_one called with empty job queue") if jobs.empty?
      next_job = jobs.first
-     Queues.delete_for(next_job["jid"], queue, to_s)
+     Queues.delete_for(next_job["jid"], next_job["queue"], to_s)
      process_job(next_job)
    end

    def process_job(job)
+     instance = new
+     instance.jid = job["jid"]
+     instance.bid = job["bid"] if instance.respond_to?(:bid=)
+     Sidekiq::Testing.server_middleware.invoke(instance, job, job["queue"]) do
+       execute_job(instance, job["args"])
      end
    end

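The testing changes above make the block form of the mode helpers thread-local and reject nested blocks with TestModeAlreadySetError. A sketch (HardJob is a placeholder job defined for the example):

require "sidekiq"
require "sidekiq/testing"

class HardJob # placeholder job for the example
  include Sidekiq::Job

  def perform(n)
    n
  end
end

Sidekiq::Testing.fake! # global default for the whole suite

Sidekiq::Testing.inline! do
  # Overrides the mode for the current thread only; reset when the block exits.
  HardJob.perform_async(1)
end

# Nesting another block-scoped mode inside the block above would now raise
# Sidekiq::Testing::TestModeAlreadySetError.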
data/lib/sidekiq/transaction_aware_client.rb
CHANGED
@@ -9,7 +9,14 @@ module Sidekiq
      @redis_client = Client.new(pool: pool, config: config)
    end

+   def batching?
+     Thread.current[:sidekiq_batch]
+   end
+
    def push(item)
+     # 6160 we can't support both Sidekiq::Batch and transactions.
+     return @redis_client.push(item) if batching?
+
      # pre-allocate the JID so we can return it immediately and
      # save it to the database as part of the transaction.
      item["jid"] ||= SecureRandom.hex(12)