sidekiq 5.2.7 → 8.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Changes.md +845 -8
- data/LICENSE.txt +9 -0
- data/README.md +54 -54
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiq +22 -3
- data/bin/sidekiqload +219 -112
- data/bin/sidekiqmon +11 -0
- data/bin/webload +69 -0
- data/lib/active_job/queue_adapters/sidekiq_adapter.rb +120 -0
- data/lib/generators/sidekiq/job_generator.rb +59 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +757 -373
- data/lib/sidekiq/capsule.rb +132 -0
- data/lib/sidekiq/cli.rb +210 -233
- data/lib/sidekiq/client.rb +145 -103
- data/lib/sidekiq/component.rb +128 -0
- data/lib/sidekiq/config.rb +315 -0
- data/lib/sidekiq/deploy.rb +64 -0
- data/lib/sidekiq/embedded.rb +64 -0
- data/lib/sidekiq/fetch.rb +49 -42
- data/lib/sidekiq/iterable_job.rb +56 -0
- data/lib/sidekiq/job/interrupt_handler.rb +24 -0
- data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
- data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
- data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
- data/lib/sidekiq/job/iterable.rb +306 -0
- data/lib/sidekiq/job.rb +385 -0
- data/lib/sidekiq/job_logger.rb +34 -7
- data/lib/sidekiq/job_retry.rb +164 -109
- data/lib/sidekiq/job_util.rb +113 -0
- data/lib/sidekiq/launcher.rb +208 -107
- data/lib/sidekiq/logger.rb +80 -0
- data/lib/sidekiq/manager.rb +42 -46
- data/lib/sidekiq/metrics/query.rb +184 -0
- data/lib/sidekiq/metrics/shared.rb +109 -0
- data/lib/sidekiq/metrics/tracking.rb +150 -0
- data/lib/sidekiq/middleware/chain.rb +113 -56
- data/lib/sidekiq/middleware/current_attributes.rb +119 -0
- data/lib/sidekiq/middleware/i18n.rb +7 -7
- data/lib/sidekiq/middleware/modules.rb +23 -0
- data/lib/sidekiq/monitor.rb +147 -0
- data/lib/sidekiq/paginator.rb +41 -16
- data/lib/sidekiq/processor.rb +146 -127
- data/lib/sidekiq/profiler.rb +72 -0
- data/lib/sidekiq/rails.rb +46 -43
- data/lib/sidekiq/redis_client_adapter.rb +113 -0
- data/lib/sidekiq/redis_connection.rb +79 -108
- data/lib/sidekiq/ring_buffer.rb +31 -0
- data/lib/sidekiq/scheduled.rb +112 -50
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +26 -0
- data/lib/sidekiq/testing/inline.rb +6 -5
- data/lib/sidekiq/testing.rb +91 -90
- data/lib/sidekiq/transaction_aware_client.rb +51 -0
- data/lib/sidekiq/version.rb +7 -1
- data/lib/sidekiq/web/action.rb +125 -60
- data/lib/sidekiq/web/application.rb +363 -259
- data/lib/sidekiq/web/config.rb +120 -0
- data/lib/sidekiq/web/csrf_protection.rb +183 -0
- data/lib/sidekiq/web/helpers.rb +241 -120
- data/lib/sidekiq/web/router.rb +62 -71
- data/lib/sidekiq/web.rb +69 -161
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +94 -182
- data/sidekiq.gemspec +26 -16
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/javascripts/application.js +150 -61
- data/web/assets/javascripts/base-charts.js +120 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-adapter-date-fns.min.js +7 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +194 -0
- data/web/assets/javascripts/dashboard.js +41 -293
- data/web/assets/javascripts/metrics.js +280 -0
- data/web/assets/stylesheets/style.css +766 -0
- data/web/locales/ar.yml +72 -65
- data/web/locales/cs.yml +63 -62
- data/web/locales/da.yml +61 -53
- data/web/locales/de.yml +66 -53
- data/web/locales/el.yml +44 -24
- data/web/locales/en.yml +94 -66
- data/web/locales/es.yml +92 -54
- data/web/locales/fa.yml +66 -65
- data/web/locales/fr.yml +83 -62
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +66 -64
- data/web/locales/hi.yml +60 -59
- data/web/locales/it.yml +93 -54
- data/web/locales/ja.yml +75 -64
- data/web/locales/ko.yml +53 -52
- data/web/locales/lt.yml +84 -0
- data/web/locales/nb.yml +62 -61
- data/web/locales/nl.yml +53 -52
- data/web/locales/pl.yml +46 -45
- data/web/locales/{pt-br.yml → pt-BR.yml} +84 -56
- data/web/locales/pt.yml +52 -51
- data/web/locales/ru.yml +69 -63
- data/web/locales/sv.yml +54 -53
- data/web/locales/ta.yml +61 -60
- data/web/locales/tr.yml +101 -0
- data/web/locales/uk.yml +86 -61
- data/web/locales/ur.yml +65 -64
- data/web/locales/vi.yml +84 -0
- data/web/locales/zh-CN.yml +106 -0
- data/web/locales/{zh-tw.yml → zh-TW.yml} +43 -9
- data/web/views/_footer.erb +31 -19
- data/web/views/_job_info.erb +94 -75
- data/web/views/_metrics_period_select.erb +15 -0
- data/web/views/_nav.erb +14 -21
- data/web/views/_paging.erb +23 -19
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +23 -23
- data/web/views/busy.erb +139 -87
- data/web/views/dashboard.erb +82 -53
- data/web/views/dead.erb +31 -27
- data/web/views/filtering.erb +6 -0
- data/web/views/layout.erb +15 -29
- data/web/views/metrics.erb +84 -0
- data/web/views/metrics_for_job.erb +58 -0
- data/web/views/morgue.erb +60 -70
- data/web/views/profiles.erb +43 -0
- data/web/views/queue.erb +50 -39
- data/web/views/queues.erb +45 -29
- data/web/views/retries.erb +65 -75
- data/web/views/retry.erb +32 -27
- data/web/views/scheduled.erb +58 -52
- data/web/views/scheduled_job_info.erb +1 -1
- metadata +96 -76
- data/.circleci/config.yml +0 -61
- data/.github/contributing.md +0 -32
- data/.github/issue_template.md +0 -11
- data/.gitignore +0 -15
- data/.travis.yml +0 -11
- data/3.0-Upgrade.md +0 -70
- data/4.0-Upgrade.md +0 -53
- data/5.0-Upgrade.md +0 -56
- data/COMM-LICENSE +0 -97
- data/Ent-Changes.md +0 -238
- data/Gemfile +0 -23
- data/LICENSE +0 -9
- data/Pro-2.0-Upgrade.md +0 -138
- data/Pro-3.0-Upgrade.md +0 -44
- data/Pro-4.0-Upgrade.md +0 -35
- data/Pro-Changes.md +0 -759
- data/Rakefile +0 -9
- data/bin/sidekiqctl +0 -20
- data/code_of_conduct.md +0 -50
- data/lib/generators/sidekiq/worker_generator.rb +0 -49
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/ctl.rb +0 -221
- data/lib/sidekiq/delay.rb +0 -42
- data/lib/sidekiq/exception_handler.rb +0 -29
- data/lib/sidekiq/extensions/action_mailer.rb +0 -57
- data/lib/sidekiq/extensions/active_record.rb +0 -40
- data/lib/sidekiq/extensions/class_methods.rb +0 -40
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
- data/lib/sidekiq/util.rb +0 -66
- data/lib/sidekiq/worker.rb +0 -220
- data/web/assets/stylesheets/application-rtl.css +0 -246
- data/web/assets/stylesheets/application.css +0 -1144
- data/web/assets/stylesheets/bootstrap-rtl.min.css +0 -9
- data/web/assets/stylesheets/bootstrap.css +0 -5
- data/web/locales/zh-cn.yml +0 -68
- data/web/views/_status.erb +0 -4

data/lib/sidekiq/config.rb
ADDED
@@ -0,0 +1,315 @@
+# frozen_string_literal: true
+
+require "forwardable"
+require "sidekiq/redis_connection"
+
+module Sidekiq
+  # Sidekiq::Config represents the global configuration for an instance of Sidekiq.
+  class Config
+    extend Forwardable
+
+    DEFAULTS = {
+      labels: Set.new,
+      require: ".",
+      environment: nil,
+      concurrency: 5,
+      timeout: 25,
+      poll_interval_average: nil,
+      average_scheduled_poll_interval: 5,
+      on_complex_arguments: :raise,
+      iteration: {
+        max_job_runtime: nil,
+        retry_backoff: 0
+      },
+      error_handlers: [],
+      death_handlers: [],
+      lifecycle_events: {
+        startup: [],
+        quiet: [],
+        shutdown: [],
+        exit: [],
+        # triggers when we fire the first heartbeat on startup OR repairing a network partition
+        heartbeat: [],
+        # triggers on EVERY heartbeat call, every 10 seconds
+        beat: []
+      },
+      dead_max_jobs: 10_000,
+      dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
+      reloader: proc { |&block| block.call },
+      backtrace_cleaner: ->(backtrace) { backtrace }
+    }
+
+    ERROR_HANDLER = ->(ex, ctx, cfg = Sidekiq.default_configuration) {
+      Sidekiq::Context.with(ctx) do
+        dev = cfg[:environment] == "development"
+        fancy = dev && $stdout.tty? # 🎩
+        # Weird logic here but we want to show the backtrace in local
+        # development or if verbose logging is enabled.
+        #
+        # `full_message` contains the error class, message and backtrace
+        # `detailed_message` contains the error class and message
+        #
+        # Absolutely terrible API names. Not useful at all to have two
+        # methods with similar but obscure names.
+        if dev || cfg.logger.debug?
+          cfg.logger.info { ex.full_message(highlight: fancy) }
+        else
+          cfg.logger.info { ex.detailed_message(highlight: fancy) }
+        end
+      end
+    }
+
+    def initialize(options = {})
+      @options = DEFAULTS.merge(options)
+      @options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
+      @directory = {}
+      @redis_config = {}
+      @capsules = {}
+    end
+
+    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!, :dig
+    attr_reader :capsules
+    attr_accessor :thread_priority
+
+    def inspect
+      "#<#{self.class.name} @options=#{
+        @options.except(:lifecycle_events, :reloader, :death_handlers, :error_handlers).inspect
+      }>"
+    end
+
+    def to_json(*)
+      Sidekiq.dump_json(@options)
+    end
+
+    # LEGACY: edits the default capsule
+    #   config.concurrency = 5
+    def concurrency=(val)
+      default_capsule.concurrency = Integer(val)
+    end
+
+    def concurrency
+      default_capsule.concurrency
+    end
+
+    def total_concurrency
+      capsules.each_value.sum(&:concurrency)
+    end
+
+    # Edit the default capsule.
+    #   config.queues = %w( high default low )                 # strict
+    #   config.queues = %w( high,3 default,2 low,1 )           # weighted
+    #   config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
+    #
+    # With weighted priority, queue will be checked first (weight / total) of the time.
+    # high will be checked first (3/6) or 50% of the time.
+    # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
+    # are ridiculous and unnecessarily expensive. You can get random queue ordering
+    # by explicitly setting all weights to 1.
+    def queues=(val)
+      default_capsule.queues = val
+    end
+
+    def queues
+      default_capsule.queues
+    end
+
+    def client_middleware
+      @client_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @client_chain if block_given?
+      @client_chain
+    end
+
+    def server_middleware
+      @server_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @server_chain if block_given?
+      @server_chain
+    end
+
+    def default_capsule(&block)
+      capsule("default", &block)
+    end
+
+    # register a new queue processing subsystem
+    def capsule(name)
+      nm = name.to_s
+      cap = @capsules.fetch(nm) do
+        cap = Sidekiq::Capsule.new(nm, self)
+        @capsules[nm] = cap
+      end
+      yield cap if block_given?
+      cap
+    end
+
+    # All capsules must use the same Redis configuration
+    def redis=(hash)
+      @redis_config = @redis_config.merge(hash)
+    end
+
+    def redis_pool
+      Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
+    end
+
+    private def local_redis_pool
+      # this is our internal client/housekeeping pool. each capsule has its
+      # own pool for executing threads.
+      @redis ||= new_redis_pool(10, "internal")
+    end
+
+    def new_redis_pool(size, name = "unset")
+      # connection pool is lazy, it will not create connections unless you actually need them
+      # so don't be skimpy!
+      RedisConnection.create({size: size, logger: logger, pool_name: name}.merge(@redis_config))
+    end
+
+    def redis_info
+      redis do |conn|
+        conn.call("INFO") { |i| i.lines(chomp: true).map { |l| l.split(":", 2) }.select { |l| l.size == 2 }.to_h }
+      rescue RedisClientAdapter::CommandError => ex
+        # 2850 return fake version when INFO command has (probably) been renamed
+        raise unless /unknown command/.match?(ex.message)
+        {
+          "redis_version" => "9.9.9",
+          "uptime_in_days" => "9999",
+          "connected_clients" => "9999",
+          "used_memory_human" => "9P",
+          "used_memory_peak_human" => "9P"
+        }.freeze
+      end
+    end
+
+    def redis
+      raise ArgumentError, "requires a block" unless block_given?
+      redis_pool.with do |conn|
+        retryable = true
+        begin
+          yield conn
+        rescue RedisClientAdapter::BaseError => ex
+          # 2550 Failover can cause the server to become a replica, need
+          # to disconnect and reopen the socket to get back to the primary.
+          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+          # 4985 Use the same logic when a blocking command is force-unblocked
+          # The same retry logic is also used in client.rb
+          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
+            conn.close
+            retryable = false
+            retry
+          end
+          raise
+        end
+      end
+    end
+
+    # register global singletons which can be accessed elsewhere
+    def register(name, instance)
+      # logger.debug("register[#{name}] = #{instance}")
+      # Sidekiq Enterprise lazy registers a few services so we
+      # can't lock down this hash completely.
+      hash = @directory.dup
+      hash[name] = instance
+      @directory = hash.freeze
+      instance
+    end
+
+    # find a singleton
+    def lookup(name, default_class = nil)
+      # JNDI is just a fancy name for a hash lookup
+      @directory.fetch(name) do |key|
+        return nil unless default_class
+        register(key, default_class.new(self))
+      end
+    end
+
+    def freeze!
+      @directory.freeze
+      @options.freeze
+      true
+    end
+
+    ##
+    # Death handlers are called when all retries for a job have been exhausted and
+    # the job dies. It's the notification to your application
+    # that this job will not succeed without manual intervention.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.death_handlers << ->(job, ex) do
+    #     end
+    #   end
+    def death_handlers
+      @options[:death_handlers]
+    end
+
+    # How frequently Redis should be checked by a random Sidekiq process for
+    # scheduled and retriable jobs. Each individual process will take turns by
+    # waiting some multiple of this value.
+    #
+    # See sidekiq/scheduled.rb for an in-depth explanation of this value
+    def average_scheduled_poll_interval=(interval)
+      @options[:average_scheduled_poll_interval] = interval
+    end
+
+    # Register a proc to handle any error which occurs within the Sidekiq process.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
+    #   end
+    #
+    # The default error handler logs errors to @logger.
+    def error_handlers
+      @options[:error_handlers]
+    end
+
+    # Register a block to run at a point in the Sidekiq lifecycle.
+    # :startup, :quiet, :shutdown, or :exit are valid events.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.on(:shutdown) do
+    #       puts "Goodbye cruel world!"
+    #     end
+    #   end
+    def on(event, &block)
+      raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
+      raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
+      @options[:lifecycle_events][event] << block
+    end
+
+    def logger
+      @logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
+        log.level = Logger::INFO
+        log.formatter = if ENV["DYNO"]
+          Sidekiq::Logger::Formatters::WithoutTimestamp.new
+        else
+          Sidekiq::Logger::Formatters::Pretty.new
+        end
+      end
+    end
+
+    def logger=(logger)
+      if logger.nil?
+        self.logger.level = Logger::FATAL
+        return
+      end
+
+      @logger = logger
+    end
+
+    private def parameter_size(handler)
+      target = handler.is_a?(Proc) ? handler : handler.method(:call)
+      target.parameters.size
+    end
+
+    # INTERNAL USE ONLY
+    def handle_exception(ex, ctx = {})
+      if @options[:error_handlers].size == 0
+        p ["!!!!!", ex]
+      end
+      @options[:error_handlers].each do |handler|
+        handler.call(ex, ctx, self)
+      rescue Exception => e
+        l = logger
+        l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
+        l.error e
+        l.error e.backtrace.join("\n") unless e.backtrace.nil?
+      end
+    end
+  end
+end
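
The new Config object is normally driven through `Sidekiq.configure_server`. Below is a minimal sketch exercising only the methods visible in this file (weighted queues, an extra capsule, error/death handlers, a lifecycle hook); the queue names, capsule name, and the `MyErrorService`/`DeadJobNotifier` classes are illustrative, not part of Sidekiq.

```ruby
# Sketch only: notifier classes and names below are hypothetical.
Sidekiq.configure_server do |config|
  config.concurrency = 10                      # edits the default capsule
  config.queues = %w[high,3 default,2 low,1]   # weighted: high polled first ~50% of the time

  # a second capsule: its own queue list and thread count, same Redis
  config.capsule("serial") do |cap|
    cap.queues = %w[single_file]
    cap.concurrency = 1
  end

  # handle_exception calls each handler as handler.call(ex, ctx, config)
  config.error_handlers << ->(ex, ctx, cfg) { MyErrorService.notify(ex, ctx) }

  # called when a job exhausts its retries and is moved to the dead set
  config.death_handlers << ->(job, ex) { DeadJobNotifier.call(job, ex) }

  config.on(:shutdown) { config.logger.info("Goodbye cruel world!") }
end
```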

data/lib/sidekiq/deploy.rb
ADDED
@@ -0,0 +1,64 @@
+# frozen_string_literal: true
+
+require "sidekiq/redis_connection"
+require "time"
+
+# This file is designed to be required within the user's
+# deployment script; it should need a bare minimum of dependencies.
+# Usage:
+#
+#   require "sidekiq/deploy"
+#   Sidekiq::Deploy.mark!("Some change")
+#
+# If you do not pass a label, Sidekiq will try to use the latest
+# git commit info.
+#
+
+module Sidekiq
+  class Deploy
+    MARK_TTL = 90 * 24 * 60 * 60 # 90 days
+
+    LABEL_MAKER = -> {
+      `git log -1 --format="%h %s"`.strip
+    }
+
+    def self.mark!(label = nil)
+      Sidekiq::Deploy.new.mark!(label: label)
+    end
+
+    def initialize(pool = Sidekiq::RedisConnection.create)
+      @pool = pool
+    end
+
+    def mark!(at: Time.now, label: nil)
+      label ||= LABEL_MAKER.call
+      # we need to round the timestamp so that we gracefully
+      # handle an very common error in marking deploys:
+      # having every process mark its deploy, leading
+      # to N marks for each deploy. Instead we round the time
+      # to the minute so that multiple marks within that minute
+      # will all naturally rollup into one mark per minute.
+      whence = at.utc
+      floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
+      datecode = floor.strftime("%Y%m%d")
+      key = "#{datecode}-marks"
+      stamp = floor.iso8601
+
+      @pool.with do |c|
+        # only allow one deploy mark for a given label for the next minute
+        lock = c.set("deploylock-#{label}", stamp, "nx", "ex", "60")
+        if lock
+          c.multi do |pipe|
+            pipe.hsetnx(key, stamp, label)
+            pipe.expire(key, MARK_TTL)
+          end
+        end
+      end
+    end
+
+    def fetch(date = Time.now.utc.to_date)
+      datecode = date.strftime("%Y%m%d")
+      @pool.with { |c| c.hgetall("#{datecode}-marks") }
+    end
+  end
+end
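
Usage follows the comment at the top of the file: call `mark!` from a deploy script or release task so the mark appears on the Web UI metrics charts. A short sketch; the label text is illustrative, and omitting it falls back to the last git commit via `LABEL_MAKER`.

```ruby
# e.g. at the end of a deploy script or release Rake task
require "sidekiq/deploy"

Sidekiq::Deploy.mark!("v8.0.5 rollout")   # or Sidekiq::Deploy.mark! to label with `git log -1`

# read back today's marks (ISO8601 stamp => label), as written by mark!
Sidekiq::Deploy.new.fetch.each do |stamp, label|
  puts "#{stamp} #{label}"
end
```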

data/lib/sidekiq/embedded.rb
ADDED
@@ -0,0 +1,64 @@
+# frozen_string_literal: true
+
+require "sidekiq/component"
+require "sidekiq/launcher"
+require "sidekiq/metrics/tracking"
+
+module Sidekiq
+  class Embedded
+    include Sidekiq::Component
+
+    def initialize(config)
+      @config = config
+    end
+
+    def run
+      housekeeping
+      fire_event(:startup, reverse: false, reraise: true)
+      @launcher = Sidekiq::Launcher.new(@config, embedded: true)
+      @launcher.run
+      sleep 0.2 # pause to give threads time to spin up
+
+      logger.info "Sidekiq running embedded, total process thread count: #{Thread.list.size}"
+      logger.debug { Thread.list.map(&:name) }
+    end
+
+    def quiet
+      @launcher&.quiet
+    end
+
+    def stop
+      @launcher&.stop
+    end
+
+    private
+
+    def housekeeping
+      @config[:tag] ||= default_tag
+      logger.info "Running in #{RUBY_DESCRIPTION}"
+      logger.info Sidekiq::LICENSE
+      logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
+
+      # touch the connection pool so it is created before we
+      # fire startup and start multithreading.
+      info = config.redis_info
+      ver = Gem::Version.new(info["redis_version"])
+      raise "You are connected to Redis #{ver}, Sidekiq requires Redis 7.0.0 or greater" if ver < Gem::Version.new("7.0.0")
+
+      maxmemory_policy = info["maxmemory_policy"]
+      if maxmemory_policy != "noeviction"
+        logger.warn <<~EOM


          WARNING: Your Redis instance will evict Sidekiq data under heavy load.
          The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
          See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory

        EOM
+      end
+
+      logger.debug { "Client Middleware: #{@config.default_capsule.client_middleware.map(&:klass).join(", ")}" }
+      logger.debug { "Server Middleware: #{@config.default_capsule.server_middleware.map(&:klass).join(", ")}" }
+    end
+  end
+end
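
A sketch of driving the embedded lifecycle above from a host process (a long-running script, a puma plugin, etc.). It assumes the `Sidekiq.configure_embed` entry point that ships alongside this class in Sidekiq 7.x+; the queue list and host-work placeholder are illustrative.

```ruby
# assumes Sidekiq.configure_embed, which builds a Config and returns a Sidekiq::Embedded
embedded = Sidekiq.configure_embed do |config|
  config.queues = %w[critical default low]
  config.concurrency = 2
end

embedded.run     # housekeeping + Launcher.run; worker threads keep running in this process
# ... the host process does its own work here ...
embedded.quiet   # stop fetching new jobs
embedded.stop    # push in-progress work back to Redis and shut down
```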

data/lib/sidekiq/fetch.rb
CHANGED
@@ -1,81 +1,88 @@
# frozen_string_literal: true
-require 'sidekiq'

-
+require "sidekiq"
+require "sidekiq/component"
+require "sidekiq/capsule"
+
+module Sidekiq # :nodoc:
  class BasicFetch
+    include Sidekiq::Component
    # We want the fetch operation to timeout every few seconds so the thread
    # can check if the process is shutting down.
    TIMEOUT = 2

-    UnitOfWork = Struct.new(:queue, :job)
+    UnitOfWork = Struct.new(:queue, :job, :config) {
      def acknowledge
        # nothing to do
      end

      def queue_name
-        queue.
+        queue.delete_prefix("queue:")
      end

      def requeue
-
-        conn.rpush(
+        config.redis do |conn|
+          conn.rpush(queue, job)
        end
      end
-
+    }

-    def initialize(
-
-      @
-
-
-
-    end
+    def initialize(cap)
+      raise ArgumentError, "missing queue list" unless cap.queues
+      @config = cap
+      @strictly_ordered_queues = cap.mode == :strict
+      @queues = config.queues.map { |q| "queue:#{q}" }
+      @queues.uniq! if @strictly_ordered_queues
    end

    def retrieve_work
-
-
-
-
-
-
-    # data from the first queue that has pending elements. We
-    # recreate the queue command each time we invoke Redis#brpop
-    # to honor weights and avoid queue starvation.
-    def queues_cmd
-      if @strictly_ordered_queues
-        @queues
-      else
-        queues = @queues.shuffle.uniq
-        queues << TIMEOUT
-        queues
+      qs = queues_cmd
+      # 4825 Sidekiq Pro with all queues paused will return an
+      # empty set of queues
+      if qs.size <= 0
+        sleep(TIMEOUT)
+        return nil
      end
-    end

+      queue, job = redis { |conn| conn.blocking_call(TIMEOUT, "brpop", *qs, TIMEOUT) }
+      UnitOfWork.new(queue, job, config) if queue
+    end

-
-    # an instance method will make it async to the Fetcher actor
-    def self.bulk_requeue(inprogress, options)
+    def bulk_requeue(inprogress)
      return if inprogress.empty?

-
+      logger.debug { "Re-queueing terminated jobs" }
      jobs_to_requeue = {}
      inprogress.each do |unit_of_work|
-        jobs_to_requeue[unit_of_work.
-        jobs_to_requeue[unit_of_work.
+        jobs_to_requeue[unit_of_work.queue] ||= []
+        jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
      end

-
-        conn.pipelined do
+      redis do |conn|
+        conn.pipelined do |pipeline|
          jobs_to_requeue.each do |queue, jobs|
-
+            pipeline.rpush(queue, jobs)
          end
        end
      end
-
+      logger.info("Pushed #{inprogress.size} jobs back to Redis")
    rescue => ex
-
+      logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
    end

+    # Creating the Redis#brpop command takes into account any
+    # configured queue weights. By default Redis#brpop returns
+    # data from the first queue that has pending elements. We
+    # recreate the queue command each time we invoke Redis#brpop
+    # to honor weights and avoid queue starvation.
+    def queues_cmd
+      if @strictly_ordered_queues
+        @queues
+      else
+        permute = @queues.shuffle
+        permute.uniq!
+        permute
+      end
+    end
  end
end

data/lib/sidekiq/iterable_job.rb
ADDED
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+require "sidekiq/job/iterable"
+
+# Iterable jobs are ones which provide a sequence to process using
+# `build_enumerator(*args, cursor: cursor)` and then process each
+# element of that sequence in `each_iteration(item, *args)`.
+#
+# The job is kicked off as normal:
+#
+#   ProcessUserSet.perform_async(123)
+#
+# but instead of calling `perform`, Sidekiq will call:
+#
+#   enum = ProcessUserSet#build_enumerator(123, cursor:nil)
+#
+# Your Enumerator must yield `(object, updated_cursor)` and
+# Sidekiq will call your `each_iteration` method:
+#
+#   ProcessUserSet#each_iteration(object, 123)
+#
+# After every iteration, Sidekiq will check for shutdown. If we are
+# stopping, the cursor will be saved to Redis and the job re-queued
+# to pick up the rest of the work upon restart. Your job will get
+# the updated_cursor so it can pick up right where it stopped.
+#
+#   enum = ProcessUserSet#build_enumerator(123, cursor: updated_cursor)
+#
+# The cursor object must be serializable to JSON.
+#
+# Note there are several APIs to help you build enumerators for
+# ActiveRecord Relations, CSV files, etc. See sidekiq/job/iterable/*.rb.
+module Sidekiq
+  module IterableJob
+    def self.included(base)
+      base.include Sidekiq::Job
+      base.include Sidekiq::Job::Iterable
+    end
+
+    # def build_enumerator(*args, cursor:)
+    # def each_iteration(item, *args)
+
+    # Your job can also define several callbacks during points
+    # in each job's lifecycle.
+    #
+    # def on_start
+    # def on_resume
+    # def on_stop
+    # def on_cancel
+    # def on_complete
+    # def around_iteration
+    #
+    # To keep things simple and compatible, this is the same
+    # API as the `sidekiq-iteration` gem.
+  end
+end
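
A sketch of the contract documented above: `build_enumerator` returns an enumerator yielding `[item, updated_cursor]` pairs and `each_iteration` handles one item with the original job arguments. The `UserSet`/`User` models are hypothetical; the helper enumerators for ActiveRecord relations and CSV files mentioned in the comment could replace the hand-rolled `Enumerator` here.

```ruby
class ProcessUserSet
  include Sidekiq::IterableJob

  def build_enumerator(set_id, cursor:)
    ids = UserSet.find(set_id).user_ids   # hypothetical model
    start = cursor || 0
    Enumerator.new do |yielder|
      ids[start..].each_with_index do |id, offset|
        yielder << [id, start + offset + 1]   # [item, updated_cursor]
      end
    end
  end

  def each_iteration(user_id, set_id)
    User.find(user_id).recalculate!          # hypothetical per-item work
  end

  def on_complete
    # optional lifecycle callback from the list above
  end
end

ProcessUserSet.perform_async(123)
```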

data/lib/sidekiq/job/interrupt_handler.rb
ADDED
@@ -0,0 +1,24 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  module Job
+    class InterruptHandler
+      include Sidekiq::ServerMiddleware
+
+      def call(instance, hash, queue)
+        yield
+      rescue Interrupted
+        logger.debug "Interrupted, re-queueing..."
+        c = Sidekiq::Client.new
+        c.push(hash)
+        raise Sidekiq::JobRetry::Skip
+      end
+    end
+  end
+end
+
+Sidekiq.configure_server do |config|
+  config.server_middleware do |chain|
+    chain.add Sidekiq::Job::InterruptHandler
+  end
+end