sidekiq 6.4.1 → 7.0.7
Potentially problematic release.
- checksums.yaml +4 -4
- data/Changes.md +172 -12
- data/README.md +41 -33
- data/bin/sidekiq +3 -8
- data/bin/sidekiqload +188 -114
- data/bin/sidekiqmon +3 -0
- data/lib/sidekiq/api.rb +275 -161
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +83 -88
- data/lib/sidekiq/client.rb +55 -43
- data/lib/sidekiq/component.rb +68 -0
- data/lib/sidekiq/config.rb +270 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +21 -22
- data/lib/sidekiq/job.rb +375 -10
- data/lib/sidekiq/job_logger.rb +2 -2
- data/lib/sidekiq/job_retry.rb +76 -54
- data/lib/sidekiq/job_util.rb +59 -19
- data/lib/sidekiq/launcher.rb +90 -82
- data/lib/sidekiq/logger.rb +6 -45
- data/lib/sidekiq/manager.rb +33 -32
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +136 -0
- data/lib/sidekiq/middleware/chain.rb +96 -51
- data/lib/sidekiq/middleware/current_attributes.rb +16 -17
- data/lib/sidekiq/middleware/i18n.rb +6 -4
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +17 -4
- data/lib/sidekiq/paginator.rb +11 -3
- data/lib/sidekiq/processor.rb +60 -60
- data/lib/sidekiq/rails.rb +12 -10
- data/lib/sidekiq/redis_client_adapter.rb +115 -0
- data/lib/sidekiq/redis_connection.rb +13 -82
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +65 -37
- data/lib/sidekiq/testing/inline.rb +4 -4
- data/lib/sidekiq/testing.rb +41 -68
- data/lib/sidekiq/transaction_aware_client.rb +44 -0
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/action.rb +3 -3
- data/lib/sidekiq/web/application.rb +40 -9
- data/lib/sidekiq/web/csrf_protection.rb +3 -3
- data/lib/sidekiq/web/helpers.rb +34 -20
- data/lib/sidekiq/web.rb +7 -14
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +84 -207
- data/sidekiq.gemspec +20 -10
- data/web/assets/javascripts/application.js +76 -26
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +166 -0
- data/web/assets/javascripts/dashboard.js +3 -240
- data/web/assets/javascripts/metrics.js +264 -0
- data/web/assets/stylesheets/application-dark.css +4 -0
- data/web/assets/stylesheets/application-rtl.css +2 -91
- data/web/assets/stylesheets/application.css +66 -299
- data/web/locales/ar.yml +70 -70
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -65
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +82 -69
- data/web/locales/es.yml +68 -68
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +67 -67
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +73 -68
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +66 -66
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +63 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +67 -66
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +67 -67
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +5 -2
- data/web/views/_job_info.erb +18 -2
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +1 -1
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +1 -1
- data/web/views/_summary.erb +1 -1
- data/web/views/busy.erb +42 -26
- data/web/views/dashboard.erb +36 -4
- data/web/views/metrics.erb +82 -0
- data/web/views/metrics_for_job.erb +71 -0
- data/web/views/morgue.erb +5 -9
- data/web/views/queue.erb +15 -15
- data/web/views/queues.erb +3 -1
- data/web/views/retries.erb +5 -9
- data/web/views/scheduled.erb +12 -13
- metadata +63 -28
- data/lib/sidekiq/delay.rb +0 -43
- data/lib/sidekiq/exception_handler.rb +0 -27
- data/lib/sidekiq/extensions/action_mailer.rb +0 -48
- data/lib/sidekiq/extensions/active_record.rb +0 -43
- data/lib/sidekiq/extensions/class_methods.rb +0 -43
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
- data/lib/sidekiq/util.rb +0 -108
- data/lib/sidekiq/worker.rb +0 -362
- /data/{LICENSE → LICENSE.txt} +0 -0
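
The removals above (`sidekiq/worker.rb`, `sidekiq/delay.rb`, the `sidekiq/extensions/*` proxies) and the new `sidekiq/job.rb` plus `sidekiq/worker_compatibility_alias.rb` reflect 7.0's rename of "Worker" to "Job". A minimal sketch of app code across the upgrade; `HardJob` is a hypothetical class:

```ruby
require "sidekiq"

# 7.x name; Sidekiq::Worker remains available as a compatibility alias
# (see data/lib/sidekiq/worker_compatibility_alias.rb in the list above).
class HardJob
  include Sidekiq::Job

  def perform(user_id)
    # do the work for user_id
  end
end

HardJob.perform_async(1234)       # enqueue immediately
HardJob.perform_in(5 * 60, 1234)  # enqueue for ~5 minutes from now
```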
data/lib/sidekiq/client.rb CHANGED

@@ -15,13 +15,12 @@ module Sidekiq
     #   client.middleware do |chain|
     #     chain.use MyClientMiddleware
     #   end
-    #   client.push('class' => '
+    #   client.push('class' => 'SomeJob', 'args' => [1,2,3])
     #
     # All client instances default to the globally-defined
     # Sidekiq.client_middleware but you can change as necessary.
     #
     def middleware(&block)
-      @chain ||= Sidekiq.client_middleware
       if block
         @chain = @chain.dup
         yield @chain
@@ -31,34 +30,48 @@ module Sidekiq
 
     attr_accessor :redis_pool
 
-    # Sidekiq::Client
-    #
-    # Sidekiq jobs across several Redis instances (for scalability
-    # reasons, e.g.)
+    # Sidekiq::Client is responsible for pushing job payloads to Redis.
+    # Requires the :pool or :config keyword argument.
     #
-    #   Sidekiq::Client.new(
+    #   Sidekiq::Client.new(pool: Sidekiq::RedisConnection.create)
     #
-    #
-    #
-    #
-
-
+    # Inside the Sidekiq process, you can reuse the configured resources:
+    #
+    #   Sidekiq::Client.new(config: config)
+    #
+    # @param pool [ConnectionPool] explicit Redis pool to use
+    # @param config [Sidekiq::Config] use the pool and middleware from the given Sidekiq container
+    # @param chain [Sidekiq::Middleware::Chain] use the given middleware chain
+    def initialize(*args, **kwargs)
+      if args.size == 1 && kwargs.size == 0
+        warn "Sidekiq::Client.new(pool) is deprecated, please use Sidekiq::Client.new(pool: pool), #{caller(0..3)}"
+        # old calling method, accept 1 pool argument
+        @redis_pool = args[0]
+        @chain = Sidekiq.default_configuration.client_middleware
+        @config = Sidekiq.default_configuration
+      else
+        # new calling method: keyword arguments
+        @config = kwargs[:config] || Sidekiq.default_configuration
+        @redis_pool = kwargs[:pool] || Thread.current[:sidekiq_redis_pool] || @config&.redis_pool
+        @chain = kwargs[:chain] || @config&.client_middleware
+        raise ArgumentError, "No Redis pool available for Sidekiq::Client" unless @redis_pool
+      end
     end
 
     ##
     # The main method used to push a job to Redis. Accepts a number of options:
     #
     #   queue - the named queue to use, default 'default'
-    #   class - the
+    #   class - the job class to call, required
     #   args - an array of simple arguments to the perform method, must be JSON-serializable
     #   at - timestamp to schedule the job (optional), must be Numeric (e.g. Time.now.to_f)
     #   retry - whether to retry this job if it fails, default true or an integer number of retries
     #   backtrace - whether to save any error backtrace, default false
     #
     # If class is set to the class name, the jobs' options will be based on Sidekiq's default
-    #
+    # job options. Otherwise, they will be based on the job class's options.
     #
-    # Any options valid for a
+    # Any options valid for a job class's sidekiq_options are also available here.
     #
     # All options must be strings, not symbols. NB: because we are serializing to JSON, all
     # symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of
@@ -67,13 +80,15 @@ module Sidekiq
     # Returns a unique Job ID. If middleware stops the job, nil will be returned instead.
     #
     # Example:
-    #   push('queue' => 'my_queue', 'class' =>
+    #   push('queue' => 'my_queue', 'class' => MyJob, 'args' => ['foo', 1, :bat => 'bar'])
     #
     def push(item)
       normed = normalize_item(item)
-      payload =
-
+      payload = middleware.invoke(item["class"], normed, normed["queue"], @redis_pool) do
+        normed
+      end
       if payload
+        verify_json(payload)
         raw_push([payload])
         payload["jid"]
       end
@@ -101,12 +116,17 @@ module Sidekiq
       raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
       raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
 
+      jid = items.delete("jid")
+      raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1
+
       normed = normalize_item(items)
       payloads = args.map.with_index { |job_args, index|
         copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
         copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
-
-
+        result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
+          verify_json(copy)
+          copy
+        end
         result || nil
       }.compact
 
@@ -119,8 +139,8 @@ module Sidekiq
     #
     #   pool = ConnectionPool.new { Redis.new }
     #   Sidekiq::Client.via(pool) do
-    #
-    #
+    #     SomeJob.perform_async(1,2,3)
+    #     SomeOtherJob.perform_async(1,2,3)
     #   end
     #
     # Generally this is only needed for very large Sidekiq installs processing
@@ -128,11 +148,11 @@ module Sidekiq
     # you cannot scale any other way (e.g. splitting your app into smaller apps).
     def self.via(pool)
       raise ArgumentError, "No pool given" if pool.nil?
-      current_sidekiq_pool = Thread.current[:
-      Thread.current[:
+      current_sidekiq_pool = Thread.current[:sidekiq_redis_pool]
+      Thread.current[:sidekiq_redis_pool] = pool
       yield
     ensure
-      Thread.current[:
+      Thread.current[:sidekiq_redis_pool] = current_sidekiq_pool
     end
 
     class << self
@@ -145,10 +165,10 @@ module Sidekiq
       end
 
       # Resque compatibility helpers. Note all helpers
-      # should go through
+      # should go through Sidekiq::Job#client_push.
      #
      # Example usage:
-      #   Sidekiq::Client.enqueue(
+      #   Sidekiq::Client.enqueue(MyJob, 'foo', 1, :bat => 'bar')
      #
      # Messages are enqueued to the 'default' queue.
      #
@@ -157,19 +177,19 @@ module Sidekiq
      end
 
      # Example usage:
-      #   Sidekiq::Client.enqueue_to(:queue_name,
+      #   Sidekiq::Client.enqueue_to(:queue_name, MyJob, 'foo', 1, :bat => 'bar')
      #
      def enqueue_to(queue, klass, *args)
        klass.client_push("queue" => queue, "class" => klass, "args" => args)
      end
 
      # Example usage:
-      #   Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes,
+      #   Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyJob, 'foo', 1, :bat => 'bar')
      #
      def enqueue_to_in(queue, interval, klass, *args)
        int = interval.to_f
        now = Time.now.to_f
-        ts = (int < 1_000_000_000 ? now + int : int)
+        ts = ((int < 1_000_000_000) ? now + int : int)
 
        item = {"class" => klass, "args" => args, "at" => ts, "queue" => queue}
        item.delete("at") if ts <= now
@@ -178,7 +198,7 @@ module Sidekiq
      end
 
      # Example usage:
-      #   Sidekiq::Client.enqueue_in(3.minutes,
+      #   Sidekiq::Client.enqueue_in(3.minutes, MyJob, 'foo', 1, :bat => 'bar')
      #
      def enqueue_in(interval, klass, *args)
        klass.perform_in(interval, *args)
@@ -194,14 +214,14 @@ module Sidekiq
          conn.pipelined do |pipeline|
            atomic_push(pipeline, payloads)
          end
-        rescue
+        rescue RedisClient::Error => ex
          # 2550 Failover can cause the server to become a replica, need
          # to disconnect and reopen the socket to get back to the primary.
          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
          # 4985 Use the same logic when a blocking command is force-unblocked
          # The retry logic is copied from sidekiq.rb
          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
-            conn.
+            conn.close
            retryable = false
            retry
          end
@@ -213,7 +233,7 @@ module Sidekiq
 
    def atomic_push(conn, payloads)
      if payloads.first.key?("at")
-        conn.zadd("schedule", payloads.
+        conn.zadd("schedule", payloads.flat_map { |hash|
          at = hash.delete("at").to_s
          [at, Sidekiq.dump_json(hash)]
        })
@@ -224,17 +244,9 @@ module Sidekiq
          entry["enqueued_at"] = now
          Sidekiq.dump_json(entry)
        }
-        conn.sadd("queues", queue)
+        conn.sadd("queues", [queue])
        conn.lpush("queue:#{queue}", to_push)
      end
    end
-
-    def process_single(worker_class, item)
-      queue = item["queue"]
-
-      middleware.invoke(worker_class, item, queue, @redis_pool) do
-        item
-      end
-    end
  end
end
data/lib/sidekiq/component.rb ADDED

@@ -0,0 +1,68 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  ##
+  # Sidekiq::Component assumes a config instance is available at @config
+  module Component # :nodoc:
+    attr_reader :config
+
+    def watchdog(last_words)
+      yield
+    rescue Exception => ex
+      handle_exception(ex, {context: last_words})
+      raise ex
+    end
+
+    def safe_thread(name, &block)
+      Thread.new do
+        Thread.current.name = name
+        watchdog(name, &block)
+      end
+    end
+
+    def logger
+      config.logger
+    end
+
+    def redis(&block)
+      config.redis(&block)
+    end
+
+    def tid
+      Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+    end
+
+    def hostname
+      ENV["DYNO"] || Socket.gethostname
+    end
+
+    def process_nonce
+      @@process_nonce ||= SecureRandom.hex(6)
+    end
+
+    def identity
+      @@identity ||= "#{hostname}:#{::Process.pid}:#{process_nonce}"
+    end
+
+    def handle_exception(ex, ctx = {})
+      config.handle_exception(ex, ctx)
+    end
+
+    def fire_event(event, options = {})
+      oneshot = options.fetch(:oneshot, true)
+      reverse = options[:reverse]
+      reraise = options[:reraise]
+      logger.debug("Firing #{event} event") if oneshot
+
+      arr = config[:lifecycle_events][event]
+      arr.reverse! if reverse
+      arr.each do |block|
+        block.call
+      rescue => ex
+        handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event})
+        raise ex if reraise
+      end
+      arr.clear if oneshot # once we've fired an event, we never fire it again
+    end
+  end
+end
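
Component is the mixin that replaces the removed `sidekiq/util.rb` and `sidekiq/exception_handler.rb`: any object holding a `Sidekiq::Config` in `@config` gets logging, Redis access, named threads and exception handling. A minimal sketch of the pattern; `Heartbeat` here is a hypothetical class, not part of the gem:

```ruby
require "sidekiq"

# Hypothetical internal-style component; @config must be a Sidekiq::Config.
class Heartbeat
  include Sidekiq::Component

  def initialize(config)
    @config = config
  end

  def start
    # safe_thread names the thread and reports any crash via handle_exception
    safe_thread("heartbeat") do
      redis { |conn| conn.set("#{identity}-beat", Time.now.to_f) }
    end
  end
end

Heartbeat.new(Sidekiq.default_configuration).start
```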
data/lib/sidekiq/config.rb ADDED

@@ -0,0 +1,270 @@
+require "forwardable"
+
+require "set"
+require "sidekiq/redis_connection"
+
+module Sidekiq
+  # Sidekiq::Config represents the global configuration for an instance of Sidekiq.
+  class Config
+    extend Forwardable
+
+    DEFAULTS = {
+      labels: Set.new,
+      require: ".",
+      environment: nil,
+      concurrency: 5,
+      timeout: 25,
+      poll_interval_average: nil,
+      average_scheduled_poll_interval: 5,
+      on_complex_arguments: :raise,
+      error_handlers: [],
+      death_handlers: [],
+      lifecycle_events: {
+        startup: [],
+        quiet: [],
+        shutdown: [],
+        # triggers when we fire the first heartbeat on startup OR repairing a network partition
+        heartbeat: [],
+        # triggers on EVERY heartbeat call, every 10 seconds
+        beat: []
+      },
+      dead_max_jobs: 10_000,
+      dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
+      reloader: proc { |&block| block.call }
+    }
+
+    ERROR_HANDLER = ->(ex, ctx) {
+      cfg = ctx[:_config] || Sidekiq.default_configuration
+      l = cfg.logger
+      l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
+      l.warn("#{ex.class.name}: #{ex.message}")
+      l.warn(ex.backtrace.join("\n")) unless ex.backtrace.nil?
+    }
+
+    def initialize(options = {})
+      @options = DEFAULTS.merge(options)
+      @options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
+      @directory = {}
+      @redis_config = {}
+      @capsules = {}
+    end
+
+    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
+    attr_reader :capsules
+
+    # LEGACY: edits the default capsule
+    # config.concurrency = 5
+    def concurrency=(val)
+      default_capsule.concurrency = Integer(val)
+    end
+
+    def concurrency
+      default_capsule.concurrency
+    end
+
+    def total_concurrency
+      capsules.each_value.sum(&:concurrency)
+    end
+
+    # Edit the default capsule.
+    # config.queues = %w( high default low ) # strict
+    # config.queues = %w( high,3 default,2 low,1 ) # weighted
+    # config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
+    #
+    # With weighted priority, queue will be checked first (weight / total) of the time.
+    # high will be checked first (3/6) or 50% of the time.
+    # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
+    # are ridiculous and unnecessarily expensive. You can get random queue ordering
+    # by explicitly setting all weights to 1.
+    def queues=(val)
+      default_capsule.queues = val
+    end
+
+    def queues
+      default_capsule.queues
+    end
+
+    def client_middleware
+      @client_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @client_chain if block_given?
+      @client_chain
+    end
+
+    def server_middleware
+      @server_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @server_chain if block_given?
+      @server_chain
+    end
+
+    def default_capsule(&block)
+      capsule("default", &block)
+    end
+
+    # register a new queue processing subsystem
+    def capsule(name)
+      nm = name.to_s
+      cap = @capsules.fetch(nm) do
+        cap = Sidekiq::Capsule.new(nm, self)
+        @capsules[nm] = cap
+      end
+      yield cap if block_given?
+      cap
+    end
+
+    # All capsules must use the same Redis configuration
+    def redis=(hash)
+      @redis_config = @redis_config.merge(hash)
+    end
+
+    def redis_pool
+      Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
+    end
+
+    private def local_redis_pool
+      # this is our internal client/housekeeping pool. each capsule has its
+      # own pool for executing threads.
+      @redis ||= new_redis_pool(5, "internal")
+    end
+
+    def new_redis_pool(size, name = "unset")
+      # connection pool is lazy, it will not create connections unless you actually need them
+      # so don't be skimpy!
+      RedisConnection.create({size: size, logger: logger, pool_name: name}.merge(@redis_config))
+    end
+
+    def redis_info
+      redis do |conn|
+        conn.call("INFO") { |i| i.lines(chomp: true).map { |l| l.split(":", 2) }.select { |l| l.size == 2 }.to_h }
+      rescue RedisClientAdapter::CommandError => ex
+        # 2850 return fake version when INFO command has (probably) been renamed
+        raise unless /unknown command/.match?(ex.message)
+        {
+          "redis_version" => "9.9.9",
+          "uptime_in_days" => "9999",
+          "connected_clients" => "9999",
+          "used_memory_human" => "9P",
+          "used_memory_peak_human" => "9P"
+        }.freeze
+      end
+    end
+
+    def redis
+      raise ArgumentError, "requires a block" unless block_given?
+      redis_pool.with do |conn|
+        retryable = true
+        begin
+          yield conn
+        rescue RedisClientAdapter::BaseError => ex
+          # 2550 Failover can cause the server to become a replica, need
+          # to disconnect and reopen the socket to get back to the primary.
+          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+          # 4985 Use the same logic when a blocking command is force-unblocked
+          # The same retry logic is also used in client.rb
+          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
+            conn.close
+            retryable = false
+            retry
+          end
+          raise
+        end
+      end
+    end
+
+    # register global singletons which can be accessed elsewhere
+    def register(name, instance)
+      @directory[name] = instance
+    end
+
+    # find a singleton
+    def lookup(name, default_class = nil)
+      # JNDI is just a fancy name for a hash lookup
+      @directory.fetch(name) do |key|
+        return nil unless default_class
+        @directory[key] = default_class.new(self)
+      end
+    end
+
+    ##
+    # Death handlers are called when all retries for a job have been exhausted and
+    # the job dies. It's the notification to your application
+    # that this job will not succeed without manual intervention.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.death_handlers << ->(job, ex) do
+    #     end
+    #   end
+    def death_handlers
+      @options[:death_handlers]
+    end
+
+    # How frequently Redis should be checked by a random Sidekiq process for
+    # scheduled and retriable jobs. Each individual process will take turns by
+    # waiting some multiple of this value.
+    #
+    # See sidekiq/scheduled.rb for an in-depth explanation of this value
+    def average_scheduled_poll_interval=(interval)
+      @options[:average_scheduled_poll_interval] = interval
+    end
+
+    # Register a proc to handle any error which occurs within the Sidekiq process.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
+    #   end
+    #
+    # The default error handler logs errors to @logger.
+    def error_handlers
+      @options[:error_handlers]
+    end
+
+    # Register a block to run at a point in the Sidekiq lifecycle.
+    # :startup, :quiet or :shutdown are valid events.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.on(:shutdown) do
+    #       puts "Goodbye cruel world!"
+    #     end
+    #   end
+    def on(event, &block)
+      raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
+      raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
+      @options[:lifecycle_events][event] << block
+    end
+
+    def logger
+      @logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
+        log.level = Logger::INFO
+        log.formatter = if ENV["DYNO"]
+          Sidekiq::Logger::Formatters::WithoutTimestamp.new
+        else
+          Sidekiq::Logger::Formatters::Pretty.new
+        end
+      end
+    end
+
+    def logger=(logger)
+      if logger.nil?
+        self.logger.level = Logger::FATAL
+        return
+      end
+
+      @logger = logger
+    end
+
+    # INTERNAL USE ONLY
+    def handle_exception(ex, ctx = {})
+      if @options[:error_handlers].size == 0
+        p ["!!!!!", ex]
+      end
+      ctx[:_config] = self
+      @options[:error_handlers].each do |handler|
+        handler.call(ex, ctx)
+      rescue => e
+        l = logger
+        l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
+        l.error e
+        l.error e.backtrace.join("\n") unless e.backtrace.nil?
+      end
+    end
+  end
+end
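
Config delegates `concurrency` and `queues` to the default capsule, so existing `configure_server` blocks keep working, while `capsule` registers additional processing subsystems. A hedged sketch; the "single" capsule name and queue names are illustrative:

```ruby
require "sidekiq"

Sidekiq.configure_server do |config|
  # Legacy accessors edit the "default" capsule, as noted above:
  config.concurrency = 10
  config.queues = %w[high,3 default,2 low,1] # weighted ordering

  # A second capsule with its own pool of worker threads:
  config.capsule("single") do |cap|
    cap.concurrency = 1
    cap.queues = %w[serial]
  end

  config.on(:shutdown) { config.logger.info "Goodbye cruel world!" }
end
```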
data/lib/sidekiq/deploy.rb ADDED

@@ -0,0 +1,62 @@
+require "sidekiq/redis_connection"
+require "time"
+
+# This file is designed to be required within the user's
+# deployment script; it should need a bare minimum of dependencies.
+# Usage:
+#
+#   require "sidekiq/deploy"
+#   Sidekiq::Deploy.mark!("Some change")
+#
+# If you do not pass a label, Sidekiq will try to use the latest
+# git commit info.
+#
+
+module Sidekiq
+  class Deploy
+    MARK_TTL = 90 * 24 * 60 * 60 # 90 days
+
+    LABEL_MAKER = -> {
+      `git log -1 --format="%h %s"`.strip
+    }
+
+    def self.mark!(label = nil)
+      Sidekiq::Deploy.new.mark!(label: label)
+    end
+
+    def initialize(pool = Sidekiq::RedisConnection.create)
+      @pool = pool
+    end
+
+    def mark!(at: Time.now, label: nil)
+      label ||= LABEL_MAKER.call
+      # we need to round the timestamp so that we gracefully
+      # handle a very common error in marking deploys:
+      # having every process mark its deploy, leading
+      # to N marks for each deploy. Instead we round the time
+      # to the minute so that multiple marks within that minute
+      # will all naturally rollup into one mark per minute.
+      whence = at.utc
+      floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
+      datecode = floor.strftime("%Y%m%d")
+      key = "#{datecode}-marks"
+      stamp = floor.iso8601
+
+      @pool.with do |c|
+        # only allow one deploy mark for a given label for the next minute
+        lock = c.set("deploylock-#{label}", stamp, nx: true, ex: 60)
+        if lock
+          c.multi do |pipe|
+            pipe.hsetnx(key, stamp, label)
+            pipe.expire(key, MARK_TTL)
+          end
+        end
+      end
+    end
+
+    def fetch(date = Time.now.utc.to_date)
+      datecode = date.strftime("%Y%m%d")
+      @pool.with { |c| c.hgetall("#{datecode}-marks") }
+    end
+  end
+end
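
Usage mirrors the file's own header comment: require it from a deploy script and mark the rollout. A small sketch; the label text is illustrative:

```ruby
# In a deployment script (Capistrano task, CI step, etc.):
require "sidekiq/deploy"

Sidekiq::Deploy.mark!("Deploy v7.0.7") # label optional; defaults to `git log -1 --format="%h %s"`

# Marks are stored per-day under "#{YYYYMMDD}-marks" and expire after 90 days:
Sidekiq::Deploy.new.fetch # => hash of ISO8601 minute => label for today's marks
```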
data/lib/sidekiq/embedded.rb ADDED

@@ -0,0 +1,61 @@
+require "sidekiq/component"
+require "sidekiq/launcher"
+require "sidekiq/metrics/tracking"
+
+module Sidekiq
+  class Embedded
+    include Sidekiq::Component
+
+    def initialize(config)
+      @config = config
+    end
+
+    def run
+      housekeeping
+      fire_event(:startup, reverse: false, reraise: true)
+      @launcher = Sidekiq::Launcher.new(@config, embedded: true)
+      @launcher.run
+      sleep 0.2 # pause to give threads time to spin up
+
+      logger.info "Sidekiq running embedded, total process thread count: #{Thread.list.size}"
+      logger.debug { Thread.list.map(&:name) }
+    end
+
+    def quiet
+      @launcher&.quiet
+    end
+
+    def stop
+      @launcher&.stop
+    end
+
+    private
+
+    def housekeeping
+      logger.info "Running in #{RUBY_DESCRIPTION}"
+      logger.info Sidekiq::LICENSE
+      logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
+
+      # touch the connection pool so it is created before we
+      # fire startup and start multithreading.
+      info = config.redis_info
+      ver = Gem::Version.new(info["redis_version"])
+      raise "You are connecting to Redis #{ver}, Sidekiq requires Redis 6.2.0 or greater" if ver < Gem::Version.new("6.2.0")
+
+      maxmemory_policy = info["maxmemory_policy"]
+      if maxmemory_policy != "noeviction"
+        logger.warn <<~EOM
+
+
+          WARNING: Your Redis instance will evict Sidekiq data under heavy load.
+          The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
+          See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory
+
+        EOM
+      end
+
+      logger.debug { "Client Middleware: #{@config.default_capsule.client_middleware.map(&:klass).join(", ")}" }
+      logger.debug { "Server Middleware: #{@config.default_capsule.server_middleware.map(&:klass).join(", ")}" }
+    end
+  end
+end
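
Embedded wraps a Launcher so Sidekiq can run inside another Ruby process. A hedged sketch, assuming `Sidekiq.configure_embed`, the entry point 7.x documents for embedded mode (it yields a config and returns an instance of this class); queue names and timings are illustrative:

```ruby
require "sidekiq"

embed = Sidekiq.configure_embed do |config|
  config.concurrency = 2
  config.queues = %w[critical default]
end

embed.run   # housekeeping + launcher; job threads run in this process
sleep 60    # the host process does its real work here
embed.quiet # stop picking up new jobs
embed.stop  # shut down
```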