sidekiq 5.1.3 → 7.3.1
- checksums.yaml +5 -5
- data/Changes.md +756 -8
- data/LICENSE.txt +9 -0
- data/README.md +48 -51
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiq +22 -3
- data/bin/sidekiqload +213 -115
- data/bin/sidekiqmon +11 -0
- data/lib/generators/sidekiq/job_generator.rb +59 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +640 -330
- data/lib/sidekiq/capsule.rb +132 -0
- data/lib/sidekiq/cli.rb +244 -257
- data/lib/sidekiq/client.rb +132 -103
- data/lib/sidekiq/component.rb +68 -0
- data/lib/sidekiq/config.rb +293 -0
- data/lib/sidekiq/deploy.rb +64 -0
- data/lib/sidekiq/embedded.rb +63 -0
- data/lib/sidekiq/fetch.rb +49 -42
- data/lib/sidekiq/iterable_job.rb +55 -0
- data/lib/sidekiq/job/interrupt_handler.rb +24 -0
- data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
- data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
- data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
- data/lib/sidekiq/job/iterable.rb +231 -0
- data/lib/sidekiq/job.rb +385 -0
- data/lib/sidekiq/job_logger.rb +49 -12
- data/lib/sidekiq/job_retry.rb +167 -103
- data/lib/sidekiq/job_util.rb +109 -0
- data/lib/sidekiq/launcher.rb +209 -102
- data/lib/sidekiq/logger.rb +131 -0
- data/lib/sidekiq/manager.rb +43 -46
- data/lib/sidekiq/metrics/query.rb +158 -0
- data/lib/sidekiq/metrics/shared.rb +97 -0
- data/lib/sidekiq/metrics/tracking.rb +148 -0
- data/lib/sidekiq/middleware/chain.rb +113 -56
- data/lib/sidekiq/middleware/current_attributes.rb +113 -0
- data/lib/sidekiq/middleware/i18n.rb +7 -7
- data/lib/sidekiq/middleware/modules.rb +23 -0
- data/lib/sidekiq/monitor.rb +147 -0
- data/lib/sidekiq/paginator.rb +28 -16
- data/lib/sidekiq/processor.rb +175 -112
- data/lib/sidekiq/rails.rb +54 -39
- data/lib/sidekiq/redis_client_adapter.rb +114 -0
- data/lib/sidekiq/redis_connection.rb +65 -86
- data/lib/sidekiq/ring_buffer.rb +31 -0
- data/lib/sidekiq/scheduled.rb +139 -48
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +26 -0
- data/lib/sidekiq/testing/inline.rb +6 -5
- data/lib/sidekiq/testing.rb +95 -94
- data/lib/sidekiq/transaction_aware_client.rb +51 -0
- data/lib/sidekiq/version.rb +3 -1
- data/lib/sidekiq/web/action.rb +22 -12
- data/lib/sidekiq/web/application.rb +225 -76
- data/lib/sidekiq/web/csrf_protection.rb +183 -0
- data/lib/sidekiq/web/helpers.rb +215 -118
- data/lib/sidekiq/web/router.rb +23 -19
- data/lib/sidekiq/web.rb +114 -106
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +95 -182
- data/sidekiq.gemspec +26 -23
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/javascripts/application.js +157 -61
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +192 -0
- data/web/assets/javascripts/dashboard.js +35 -283
- data/web/assets/javascripts/metrics.js +298 -0
- data/web/assets/stylesheets/application-dark.css +147 -0
- data/web/assets/stylesheets/application-rtl.css +10 -93
- data/web/assets/stylesheets/application.css +169 -522
- data/web/assets/stylesheets/bootstrap.css +2 -2
- data/web/locales/ar.yml +71 -64
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -53
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +86 -65
- data/web/locales/es.yml +70 -54
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +83 -62
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +75 -64
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +83 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +68 -63
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/tr.yml +101 -0
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +18 -3
- data/web/views/_job_info.erb +21 -4
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +4 -18
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +79 -29
- data/web/views/dashboard.erb +49 -19
- data/web/views/dead.erb +3 -3
- data/web/views/filtering.erb +7 -0
- data/web/views/layout.erb +9 -7
- data/web/views/metrics.erb +91 -0
- data/web/views/metrics_for_job.erb +59 -0
- data/web/views/morgue.erb +14 -15
- data/web/views/queue.erb +33 -23
- data/web/views/queues.erb +19 -5
- data/web/views/retries.erb +19 -16
- data/web/views/retry.erb +3 -3
- data/web/views/scheduled.erb +17 -15
- metadata +84 -129
- data/.github/contributing.md +0 -32
- data/.github/issue_template.md +0 -11
- data/.gitignore +0 -13
- data/.travis.yml +0 -14
- data/3.0-Upgrade.md +0 -70
- data/4.0-Upgrade.md +0 -53
- data/5.0-Upgrade.md +0 -56
- data/COMM-LICENSE +0 -95
- data/Ent-Changes.md +0 -216
- data/Gemfile +0 -8
- data/LICENSE +0 -9
- data/Pro-2.0-Upgrade.md +0 -138
- data/Pro-3.0-Upgrade.md +0 -44
- data/Pro-4.0-Upgrade.md +0 -35
- data/Pro-Changes.md +0 -729
- data/Rakefile +0 -8
- data/bin/sidekiqctl +0 -99
- data/code_of_conduct.md +0 -50
- data/lib/generators/sidekiq/worker_generator.rb +0 -49
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/delay.rb +0 -42
- data/lib/sidekiq/exception_handler.rb +0 -29
- data/lib/sidekiq/extensions/action_mailer.rb +0 -57
- data/lib/sidekiq/extensions/active_record.rb +0 -40
- data/lib/sidekiq/extensions/class_methods.rb +0 -40
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
- data/lib/sidekiq/util.rb +0 -66
- data/lib/sidekiq/worker.rb +0 -204
data/lib/sidekiq/config.rb
ADDED
@@ -0,0 +1,293 @@
+# frozen_string_literal: true
+
+require "forwardable"
+
+require "set"
+require "sidekiq/redis_connection"
+
+module Sidekiq
+  # Sidekiq::Config represents the global configuration for an instance of Sidekiq.
+  class Config
+    extend Forwardable
+
+    DEFAULTS = {
+      labels: Set.new,
+      require: ".",
+      environment: nil,
+      concurrency: 5,
+      timeout: 25,
+      poll_interval_average: nil,
+      average_scheduled_poll_interval: 5,
+      on_complex_arguments: :raise,
+      iteration: {
+        max_job_runtime: nil,
+        retry_backoff: 0
+      },
+      error_handlers: [],
+      death_handlers: [],
+      lifecycle_events: {
+        startup: [],
+        quiet: [],
+        shutdown: [],
+        # triggers when we fire the first heartbeat on startup OR repairing a network partition
+        heartbeat: [],
+        # triggers on EVERY heartbeat call, every 10 seconds
+        beat: []
+      },
+      dead_max_jobs: 10_000,
+      dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
+      reloader: proc { |&block| block.call },
+      backtrace_cleaner: ->(backtrace) { backtrace }
+    }
+
+    ERROR_HANDLER = ->(ex, ctx, cfg = Sidekiq.default_configuration) {
+      l = cfg.logger
+      l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
+      l.warn("#{ex.class.name}: #{ex.message}")
+      unless ex.backtrace.nil?
+        backtrace = cfg[:backtrace_cleaner].call(ex.backtrace)
+        l.warn(backtrace.join("\n"))
+      end
+    }
+
+    def initialize(options = {})
+      @options = DEFAULTS.merge(options)
+      @options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
+      @directory = {}
+      @redis_config = {}
+      @capsules = {}
+    end
+
+    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!, :dig
+    attr_reader :capsules
+
+    def to_json(*)
+      Sidekiq.dump_json(@options)
+    end
+
+    # LEGACY: edits the default capsule
+    # config.concurrency = 5
+    def concurrency=(val)
+      default_capsule.concurrency = Integer(val)
+    end
+
+    def concurrency
+      default_capsule.concurrency
+    end
+
+    def total_concurrency
+      capsules.each_value.sum(&:concurrency)
+    end
+
+    # Edit the default capsule.
+    # config.queues = %w( high default low )                 # strict
+    # config.queues = %w( high,3 default,2 low,1 )           # weighted
+    # config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
+    #
+    # With weighted priority, queue will be checked first (weight / total) of the time.
+    # high will be checked first (3/6) or 50% of the time.
+    # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
+    # are ridiculous and unnecessarily expensive. You can get random queue ordering
+    # by explicitly setting all weights to 1.
+    def queues=(val)
+      default_capsule.queues = val
+    end
+
+    def queues
+      default_capsule.queues
+    end
+
+    def client_middleware
+      @client_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @client_chain if block_given?
+      @client_chain
+    end
+
+    def server_middleware
+      @server_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @server_chain if block_given?
+      @server_chain
+    end
+
+    def default_capsule(&block)
+      capsule("default", &block)
+    end
+
+    # register a new queue processing subsystem
+    def capsule(name)
+      nm = name.to_s
+      cap = @capsules.fetch(nm) do
+        cap = Sidekiq::Capsule.new(nm, self)
+        @capsules[nm] = cap
+      end
+      yield cap if block_given?
+      cap
+    end
+
+    # All capsules must use the same Redis configuration
+    def redis=(hash)
+      @redis_config = @redis_config.merge(hash)
+    end
+
+    def redis_pool
+      Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
+    end
+
+    private def local_redis_pool
+      # this is our internal client/housekeeping pool. each capsule has its
+      # own pool for executing threads.
+      @redis ||= new_redis_pool(10, "internal")
+    end
+
+    def new_redis_pool(size, name = "unset")
+      # connection pool is lazy, it will not create connections unless you actually need them
+      # so don't be skimpy!
+      RedisConnection.create({size: size, logger: logger, pool_name: name}.merge(@redis_config))
+    end
+
+    def redis_info
+      redis do |conn|
+        conn.call("INFO") { |i| i.lines(chomp: true).map { |l| l.split(":", 2) }.select { |l| l.size == 2 }.to_h }
+      rescue RedisClientAdapter::CommandError => ex
+        # 2850 return fake version when INFO command has (probably) been renamed
+        raise unless /unknown command/.match?(ex.message)
+        {
+          "redis_version" => "9.9.9",
+          "uptime_in_days" => "9999",
+          "connected_clients" => "9999",
+          "used_memory_human" => "9P",
+          "used_memory_peak_human" => "9P"
+        }.freeze
+      end
+    end
+
+    def redis
+      raise ArgumentError, "requires a block" unless block_given?
+      redis_pool.with do |conn|
+        retryable = true
+        begin
+          yield conn
+        rescue RedisClientAdapter::BaseError => ex
+          # 2550 Failover can cause the server to become a replica, need
+          # to disconnect and reopen the socket to get back to the primary.
+          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+          # 4985 Use the same logic when a blocking command is force-unblocked
+          # The same retry logic is also used in client.rb
+          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
+            conn.close
+            retryable = false
+            retry
+          end
+          raise
+        end
+      end
+    end
+
+    # register global singletons which can be accessed elsewhere
+    def register(name, instance)
+      @directory[name] = instance
+    end
+
+    # find a singleton
+    def lookup(name, default_class = nil)
+      # JNDI is just a fancy name for a hash lookup
+      @directory.fetch(name) do |key|
+        return nil unless default_class
+        @directory[key] = default_class.new(self)
+      end
+    end
+
+    ##
+    # Death handlers are called when all retries for a job have been exhausted and
+    # the job dies. It's the notification to your application
+    # that this job will not succeed without manual intervention.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.death_handlers << ->(job, ex) do
+    #     end
+    #   end
+    def death_handlers
+      @options[:death_handlers]
+    end
+
+    # How frequently Redis should be checked by a random Sidekiq process for
+    # scheduled and retriable jobs. Each individual process will take turns by
+    # waiting some multiple of this value.
+    #
+    # See sidekiq/scheduled.rb for an in-depth explanation of this value
+    def average_scheduled_poll_interval=(interval)
+      @options[:average_scheduled_poll_interval] = interval
+    end
+
+    # Register a proc to handle any error which occurs within the Sidekiq process.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
+    #   end
+    #
+    # The default error handler logs errors to @logger.
+    def error_handlers
+      @options[:error_handlers]
+    end
+
+    # Register a block to run at a point in the Sidekiq lifecycle.
+    # :startup, :quiet or :shutdown are valid events.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.on(:shutdown) do
+    #       puts "Goodbye cruel world!"
+    #     end
+    #   end
+    def on(event, &block)
+      raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
+      raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
+      @options[:lifecycle_events][event] << block
+    end
+
+    def logger
+      @logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
+        log.level = Logger::INFO
+        log.formatter = if ENV["DYNO"]
+          Sidekiq::Logger::Formatters::WithoutTimestamp.new
+        else
+          Sidekiq::Logger::Formatters::Pretty.new
+        end
+      end
+    end
+
+    def logger=(logger)
+      if logger.nil?
+        self.logger.level = Logger::FATAL
+        return
+      end
+
+      @logger = logger
+    end
+
+    private def parameter_size(handler)
+      target = handler.is_a?(Proc) ? handler : handler.method(:call)
+      target.parameters.size
+    end
+
+    # INTERNAL USE ONLY
+    def handle_exception(ex, ctx = {})
+      if @options[:error_handlers].size == 0
+        p ["!!!!!", ex]
+      end
+      @options[:error_handlers].each do |handler|
+        if parameter_size(handler) == 2
+          # TODO Remove in 8.0
+          logger.info { "DEPRECATION: Sidekiq exception handlers now take three arguments, see #{handler}" }
+          handler.call(ex, {_config: self}.merge(ctx))
+        else
+          handler.call(ex, ctx, self)
+        end
+      rescue Exception => e
+        l = logger
+        l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
+        l.error e
+        l.error e.backtrace.join("\n") unless e.backtrace.nil?
+      end
+    end
+  end
+end
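For context, a minimal sketch of how this new Config object is typically reached from an application initializer; Sidekiq.configure_server yields the server-side configuration, and MyErrorService is an illustrative placeholder borrowed from the file's own comments, not part of Sidekiq.

# Hedged sketch only; values are examples, not recommendations.
Sidekiq.configure_server do |config|
  config.concurrency = 10                            # edits the default capsule
  config.queues = %w[high,3 default,2 low,1]         # weighted queue fetching
  config.redis = {url: ENV.fetch("REDIS_URL", "redis://localhost:6379/0")}

  # New three-argument handler signature (ex, ctx, config); two-argument
  # handlers still work but log a deprecation warning per the TODO above.
  config.error_handlers << ->(ex, ctx, cfg) { MyErrorService.notify(ex, ctx) }

  config.on(:shutdown) { config.logger.info("Goodbye cruel world!") }
end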
data/lib/sidekiq/deploy.rb
ADDED
@@ -0,0 +1,64 @@
+# frozen_string_literal: true
+
+require "sidekiq/redis_connection"
+require "time"
+
+# This file is designed to be required within the user's
+# deployment script; it should need a bare minimum of dependencies.
+# Usage:
+#
+#   require "sidekiq/deploy"
+#   Sidekiq::Deploy.mark!("Some change")
+#
+# If you do not pass a label, Sidekiq will try to use the latest
+# git commit info.
+#
+
+module Sidekiq
+  class Deploy
+    MARK_TTL = 90 * 24 * 60 * 60 # 90 days
+
+    LABEL_MAKER = -> {
+      `git log -1 --format="%h %s"`.strip
+    }
+
+    def self.mark!(label = nil)
+      Sidekiq::Deploy.new.mark!(label: label)
+    end
+
+    def initialize(pool = Sidekiq::RedisConnection.create)
+      @pool = pool
+    end
+
+    def mark!(at: Time.now, label: nil)
+      label ||= LABEL_MAKER.call
+      # we need to round the timestamp so that we gracefully
+      # handle an very common error in marking deploys:
+      # having every process mark its deploy, leading
+      # to N marks for each deploy. Instead we round the time
+      # to the minute so that multiple marks within that minute
+      # will all naturally rollup into one mark per minute.
+      whence = at.utc
+      floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
+      datecode = floor.strftime("%Y%m%d")
+      key = "#{datecode}-marks"
+      stamp = floor.iso8601
+
+      @pool.with do |c|
+        # only allow one deploy mark for a given label for the next minute
+        lock = c.set("deploylock-#{label}", stamp, "nx", "ex", "60")
+        if lock
+          c.multi do |pipe|
+            pipe.hsetnx(key, stamp, label)
+            pipe.expire(key, MARK_TTL)
+          end
+        end
+      end
+    end
+
+    def fetch(date = Time.now.utc.to_date)
+      datecode = date.strftime("%Y%m%d")
+      @pool.with { |c| c.hgetall("#{datecode}-marks") }
+    end
+  end
+end
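The usage comment at the top of the file is essentially the whole API; here is a short, hedged example of marking and reading deploys from a deploy script (the label text and the sample output are illustrative only).

require "sidekiq/deploy"

Sidekiq::Deploy.mark!("v1.2.3 - enable new billing flow")  # explicit label
Sidekiq::Deploy.mark!                                      # no label: uses the latest git commit info

# Reading back today's marks returns a hash of minute-rounded ISO8601 stamps to labels:
Sidekiq::Deploy.new.fetch
# => {"2024-08-01T10:37:00Z" => "v1.2.3 - enable new billing flow"}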
data/lib/sidekiq/embedded.rb
ADDED
@@ -0,0 +1,63 @@
+# frozen_string_literal: true
+
+require "sidekiq/component"
+require "sidekiq/launcher"
+require "sidekiq/metrics/tracking"
+
+module Sidekiq
+  class Embedded
+    include Sidekiq::Component
+
+    def initialize(config)
+      @config = config
+    end
+
+    def run
+      housekeeping
+      fire_event(:startup, reverse: false, reraise: true)
+      @launcher = Sidekiq::Launcher.new(@config, embedded: true)
+      @launcher.run
+      sleep 0.2 # pause to give threads time to spin up
+
+      logger.info "Sidekiq running embedded, total process thread count: #{Thread.list.size}"
+      logger.debug { Thread.list.map(&:name) }
+    end
+
+    def quiet
+      @launcher&.quiet
+    end
+
+    def stop
+      @launcher&.stop
+    end
+
+    private
+
+    def housekeeping
+      logger.info "Running in #{RUBY_DESCRIPTION}"
+      logger.info Sidekiq::LICENSE
+      logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
+
+      # touch the connection pool so it is created before we
+      # fire startup and start multithreading.
+      info = config.redis_info
+      ver = Gem::Version.new(info["redis_version"])
+      raise "You are connecting to Redis #{ver}, Sidekiq requires Redis 6.2.0 or greater" if ver < Gem::Version.new("6.2.0")
+
+      maxmemory_policy = info["maxmemory_policy"]
+      if maxmemory_policy != "noeviction"
+        logger.warn <<~EOM
+
+
+          WARNING: Your Redis instance will evict Sidekiq data under heavy load.
+          The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
+          See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory
+
+        EOM
+      end
+
+      logger.debug { "Client Middleware: #{@config.default_capsule.client_middleware.map(&:klass).join(", ")}" }
+      logger.debug { "Server Middleware: #{@config.default_capsule.server_middleware.map(&:klass).join(", ")}" }
+    end
+  end
+end
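A hedged sketch of driving this class from another Ruby process. Sidekiq 7 also exposes a Sidekiq.configure_embed helper for embedding; the direct wiring below uses only the API shown above and should be read as illustrative rather than canonical.

# Illustrative only: run Sidekiq inside an existing Ruby process.
embed = Sidekiq::Embedded.new(Sidekiq.default_configuration)
embed.run     # housekeeping checks, fires :startup, boots the Launcher threads

# ... the host process does its own work ...

embed.quiet   # stop fetching new jobs
embed.stop    # shut down, letting in-flight jobs finish within the timeout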
data/lib/sidekiq/fetch.rb
CHANGED
@@ -1,81 +1,88 @@
 # frozen_string_literal: true
-require 'sidekiq'
 
-
+require "sidekiq"
+require "sidekiq/component"
+require "sidekiq/capsule"
+
+module Sidekiq # :nodoc:
   class BasicFetch
+    include Sidekiq::Component
     # We want the fetch operation to timeout every few seconds so the thread
     # can check if the process is shutting down.
     TIMEOUT = 2
 
-    UnitOfWork = Struct.new(:queue, :job)
+    UnitOfWork = Struct.new(:queue, :job, :config) {
       def acknowledge
        # nothing to do
       end
 
       def queue_name
-        queue.
+        queue.delete_prefix("queue:")
       end
 
       def requeue
-
-        conn.rpush(
+        config.redis do |conn|
+          conn.rpush(queue, job)
         end
       end
-
+    }
 
-    def initialize(
-
-      @
-
-
-
-    end
+    def initialize(cap)
+      raise ArgumentError, "missing queue list" unless cap.queues
+      @config = cap
+      @strictly_ordered_queues = cap.mode == :strict
+      @queues = config.queues.map { |q| "queue:#{q}" }
+      @queues.uniq! if @strictly_ordered_queues
    end
 
    def retrieve_work
-
-
-
-
-
-
-      # data from the first queue that has pending elements. We
-      # recreate the queue command each time we invoke Redis#brpop
-      # to honor weights and avoid queue starvation.
-    def queues_cmd
-      if @strictly_ordered_queues
-        @queues
-      else
-        queues = @queues.shuffle.uniq
-        queues << TIMEOUT
-        queues
+      qs = queues_cmd
+      # 4825 Sidekiq Pro with all queues paused will return an
+      # empty set of queues
+      if qs.size <= 0
+        sleep(TIMEOUT)
+        return nil
       end
-    end
 
+      queue, job = redis { |conn| conn.blocking_call(TIMEOUT, "brpop", *qs, TIMEOUT) }
+      UnitOfWork.new(queue, job, config) if queue
+    end
 
-
-    # an instance method will make it async to the Fetcher actor
-    def self.bulk_requeue(inprogress, options)
+    def bulk_requeue(inprogress)
       return if inprogress.empty?
 
-
+      logger.debug { "Re-queueing terminated jobs" }
       jobs_to_requeue = {}
       inprogress.each do |unit_of_work|
-        jobs_to_requeue[unit_of_work.
-        jobs_to_requeue[unit_of_work.
+        jobs_to_requeue[unit_of_work.queue] ||= []
+        jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
       end
 
-
-        conn.pipelined do
+      redis do |conn|
+        conn.pipelined do |pipeline|
           jobs_to_requeue.each do |queue, jobs|
-
+            pipeline.rpush(queue, jobs)
           end
         end
       end
-
+      logger.info("Pushed #{inprogress.size} jobs back to Redis")
     rescue => ex
-
+      logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
     end
 
+    # Creating the Redis#brpop command takes into account any
+    # configured queue weights. By default Redis#brpop returns
+    # data from the first queue that has pending elements. We
+    # recreate the queue command each time we invoke Redis#brpop
+    # to honor weights and avoid queue starvation.
+    def queues_cmd
+      if @strictly_ordered_queues
+        @queues
+      else
+        permute = @queues.shuffle
+        permute.uniq!
+        permute
+      end
+    end
   end
 end
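The rewritten queues_cmd still implements weighted fetching by shuffling the expanded queue list and deduplicating it; below is a small illustration of why a heavier queue tends to be polled first. Queue names and weights are made up for the example.

# "high,3 default,2 low,1" expands to repeated entries, one per weight unit.
queues = %w[queue:high queue:high queue:high queue:default queue:default queue:low]

order = queues.shuffle
order.uniq!
p order  # e.g. ["queue:high", "queue:default", "queue:low"]

# Over many fetches, "queue:high" lands first roughly weight/total (3/6) of the
# time, matching the Config#queues= documentation, while BRPOP still falls
# through to the later queues whenever the first one is empty.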
data/lib/sidekiq/iterable_job.rb
ADDED
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+require "sidekiq/job/iterable"
+
+# Iterable jobs are ones which provide a sequence to process using
+# `build_enumerator(*args, cursor: cursor)` and then process each
+# element of that sequence in `each_iteration(item, *args)`.
+#
+# The job is kicked off as normal:
+#
+#   ProcessUserSet.perform_async(123)
+#
+# but instead of calling `perform`, Sidekiq will call:
+#
+#   enum = ProcessUserSet#build_enumerator(123, cursor:nil)
+#
+# Your Enumerator must yield `(object, updated_cursor)` and
+# Sidekiq will call your `each_iteration` method:
+#
+#   ProcessUserSet#each_iteration(object, 123)
+#
+# After every iteration, Sidekiq will check for shutdown. If we are
+# stopping, the cursor will be saved to Redis and the job re-queued
+# to pick up the rest of the work upon restart. Your job will get
+# the updated_cursor so it can pick up right where it stopped.
+#
+#   enum = ProcessUserSet#build_enumerator(123, cursor: updated_cursor)
+#
+# The cursor object must be serializable to JSON.
+#
+# Note there are several APIs to help you build enumerators for
+# ActiveRecord Relations, CSV files, etc. See sidekiq/job/iterable/*.rb.
+module Sidekiq
+  module IterableJob
+    def self.included(base)
+      base.include Sidekiq::Job
+      base.include Sidekiq::Job::Iterable
+    end
+
+    # def build_enumerator(*args, cursor:)
+    # def each_iteration(item, *args)
+
+    # Your job can also define several callbacks during points
+    # in each job's lifecycle.
+    #
+    # def on_start
+    # def on_resume
+    # def on_stop
+    # def on_complete
+    # def around_iteration
+    #
+    # To keep things simple and compatible, this is the same
+    # API as the `sidekiq-iteration` gem.
+  end
+end
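A minimal sketch of a job using this module, following the contract documented above. ProcessUserSet and the User model are illustrative names, and the active_record_records_enumerator helper is assumed to come from sidekiq/job/iterable/enumerators.rb, which also ships in this release.

class ProcessUserSet
  include Sidekiq::IterableJob

  def build_enumerator(set_id, cursor:)
    # yields (record, updated_cursor) pairs; the cursor must be JSON-serializable
    active_record_records_enumerator(User.where(set_id: set_id), cursor: cursor)
  end

  def each_iteration(user, set_id)
    # called once per yielded record; keep it idempotent so an interrupted
    # job can resume from the saved cursor without double-processing
    user.touch
  end
end

ProcessUserSet.perform_async(123)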
data/lib/sidekiq/job/interrupt_handler.rb
ADDED
@@ -0,0 +1,24 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  module Job
+    class InterruptHandler
+      include Sidekiq::ServerMiddleware
+
+      def call(instance, hash, queue)
+        yield
+      rescue Interrupted
+        logger.debug "Interrupted, re-queueing..."
+        c = Sidekiq::Client.new
+        c.push(hash)
+        raise Sidekiq::JobRetry::Skip
+      end
+    end
+  end
+end
+
+Sidekiq.configure_server do |config|
+  config.server_middleware do |chain|
+    chain.add Sidekiq::Job::InterruptHandler
+  end
+end
data/lib/sidekiq/job/iterable/active_record_enumerator.rb
ADDED
@@ -0,0 +1,53 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  module Job
+    module Iterable
+      # @api private
+      class ActiveRecordEnumerator
+        def initialize(relation, cursor: nil, **options)
+          @relation = relation
+          @cursor = cursor
+          @options = options
+        end
+
+        def records
+          Enumerator.new(-> { @relation.count }) do |yielder|
+            @relation.find_each(**@options, start: @cursor) do |record|
+              yielder.yield(record, record.id)
+            end
+          end
+        end
+
+        def batches
+          Enumerator.new(-> { @relation.count }) do |yielder|
+            @relation.find_in_batches(**@options, start: @cursor) do |batch|
+              yielder.yield(batch, batch.last.id)
+            end
+          end
+        end
+
+        def relations
+          Enumerator.new(-> { relations_size }) do |yielder|
+            # Convenience to use :batch_size for all the
+            # ActiveRecord batching methods.
+            options = @options.dup
+            options[:of] ||= options.delete(:batch_size)
+
+            @relation.in_batches(**options, start: @cursor) do |relation|
+              last_record = relation.last
+              yielder.yield(relation, last_record.id)
+            end
+          end
+        end
+
+        private
+
+        def relations_size
+          batch_size = @options[:batch_size] || 1000
+          (@relation.count + batch_size - 1) / batch_size # ceiling division
+        end
+      end
+    end
+  end
+end
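This class is marked @api private, so the enumerator helpers in sidekiq/job/iterable/enumerators.rb are the intended entry point; still, a hedged sketch of what an iterable job gets back from it may help (User is an illustrative model).

# Illustrative only: build a batch enumerator directly from the private API.
enum = Sidekiq::Job::Iterable::ActiveRecordEnumerator
  .new(User.where(active: true), cursor: nil, batch_size: 500)
  .batches

# Each step yields [batch_of_records, cursor], where the cursor is the id of
# the last record in the batch; passing that id back as cursor: resumes
# find_in_batches from that point after an interruption.
batch, cursor = enum.first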