sidekiq 3.5.4 → 7.2.0
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of sidekiq might be problematic. Click here for more details.
- checksums.yaml +5 -5
- data/Changes.md +992 -6
- data/LICENSE.txt +9 -0
- data/README.md +52 -43
- data/bin/sidekiq +22 -4
- data/bin/sidekiqload +209 -115
- data/bin/sidekiqmon +11 -0
- data/lib/generators/sidekiq/job_generator.rb +57 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/job_spec.rb.erb +6 -0
- data/lib/generators/sidekiq/templates/job_test.rb.erb +8 -0
- data/lib/sidekiq/api.rb +633 -295
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +270 -248
- data/lib/sidekiq/client.rb +139 -108
- data/lib/sidekiq/component.rb +68 -0
- data/lib/sidekiq/config.rb +287 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +53 -121
- data/lib/sidekiq/job.rb +374 -0
- data/lib/sidekiq/job_logger.rb +51 -0
- data/lib/sidekiq/job_retry.rb +301 -0
- data/lib/sidekiq/job_util.rb +107 -0
- data/lib/sidekiq/launcher.rb +241 -69
- data/lib/sidekiq/logger.rb +131 -0
- data/lib/sidekiq/manager.rb +88 -190
- data/lib/sidekiq/metrics/query.rb +155 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +136 -0
- data/lib/sidekiq/middleware/chain.rb +114 -56
- data/lib/sidekiq/middleware/current_attributes.rb +95 -0
- data/lib/sidekiq/middleware/i18n.rb +8 -7
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +146 -0
- data/lib/sidekiq/paginator.rb +29 -16
- data/lib/sidekiq/processor.rb +238 -118
- data/lib/sidekiq/rails.rb +57 -27
- data/lib/sidekiq/redis_client_adapter.rb +111 -0
- data/lib/sidekiq/redis_connection.rb +49 -50
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +173 -52
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing/inline.rb +7 -5
- data/lib/sidekiq/testing.rb +197 -65
- data/lib/sidekiq/transaction_aware_client.rb +44 -0
- data/lib/sidekiq/version.rb +4 -1
- data/lib/sidekiq/web/action.rb +93 -0
- data/lib/sidekiq/web/application.rb +463 -0
- data/lib/sidekiq/web/csrf_protection.rb +180 -0
- data/lib/sidekiq/web/helpers.rb +364 -0
- data/lib/sidekiq/web/router.rb +104 -0
- data/lib/sidekiq/web.rb +113 -216
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +99 -142
- data/sidekiq.gemspec +26 -23
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/javascripts/application.js +163 -74
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +182 -0
- data/web/assets/javascripts/dashboard.js +37 -280
- data/web/assets/javascripts/metrics.js +298 -0
- data/web/assets/stylesheets/application-dark.css +147 -0
- data/web/assets/stylesheets/application-rtl.css +153 -0
- data/web/assets/stylesheets/application.css +181 -198
- data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
- data/web/assets/stylesheets/bootstrap.css +4 -8
- data/web/locales/ar.yml +87 -0
- data/web/locales/cs.yml +62 -52
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -53
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +86 -62
- data/web/locales/es.yml +70 -53
- data/web/locales/fa.yml +80 -0
- data/web/locales/fr.yml +86 -56
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +80 -0
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +78 -56
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +83 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +68 -60
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +80 -0
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +10 -9
- data/web/views/_job_info.erb +26 -5
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +6 -20
- data/web/views/_paging.erb +3 -1
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +87 -28
- data/web/views/dashboard.erb +51 -21
- data/web/views/dead.erb +4 -4
- data/web/views/filtering.erb +7 -0
- data/web/views/layout.erb +15 -5
- data/web/views/metrics.erb +91 -0
- data/web/views/metrics_for_job.erb +59 -0
- data/web/views/morgue.erb +25 -22
- data/web/views/queue.erb +35 -25
- data/web/views/queues.erb +23 -7
- data/web/views/retries.erb +28 -23
- data/web/views/retry.erb +5 -5
- data/web/views/scheduled.erb +19 -17
- data/web/views/scheduled_job_info.erb +1 -1
- metadata +86 -268
- data/.gitignore +0 -12
- data/.travis.yml +0 -16
- data/3.0-Upgrade.md +0 -70
- data/COMM-LICENSE +0 -95
- data/Contributing.md +0 -32
- data/Ent-Changes.md +0 -39
- data/Gemfile +0 -27
- data/LICENSE +0 -9
- data/Pro-2.0-Upgrade.md +0 -138
- data/Pro-Changes.md +0 -454
- data/Rakefile +0 -9
- data/bin/sidekiqctl +0 -93
- data/lib/generators/sidekiq/templates/worker_spec.rb.erb +0 -6
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +0 -8
- data/lib/generators/sidekiq/worker_generator.rb +0 -49
- data/lib/sidekiq/actor.rb +0 -39
- data/lib/sidekiq/core_ext.rb +0 -105
- data/lib/sidekiq/exception_handler.rb +0 -30
- data/lib/sidekiq/extensions/action_mailer.rb +0 -56
- data/lib/sidekiq/extensions/active_record.rb +0 -39
- data/lib/sidekiq/extensions/class_methods.rb +0 -39
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -24
- data/lib/sidekiq/logging.rb +0 -104
- data/lib/sidekiq/middleware/server/active_record.rb +0 -13
- data/lib/sidekiq/middleware/server/logging.rb +0 -40
- data/lib/sidekiq/middleware/server/retry_jobs.rb +0 -206
- data/lib/sidekiq/util.rb +0 -68
- data/lib/sidekiq/web_helpers.rb +0 -249
- data/lib/sidekiq/worker.rb +0 -103
- data/test/config.yml +0 -9
- data/test/env_based_config.yml +0 -11
- data/test/fake_env.rb +0 -0
- data/test/fixtures/en.yml +0 -2
- data/test/helper.rb +0 -49
- data/test/test_api.rb +0 -493
- data/test/test_cli.rb +0 -335
- data/test/test_client.rb +0 -194
- data/test/test_exception_handler.rb +0 -55
- data/test/test_extensions.rb +0 -126
- data/test/test_fetch.rb +0 -104
- data/test/test_logging.rb +0 -34
- data/test/test_manager.rb +0 -168
- data/test/test_middleware.rb +0 -159
- data/test/test_processor.rb +0 -237
- data/test/test_rails.rb +0 -21
- data/test/test_redis_connection.rb +0 -126
- data/test/test_retry.rb +0 -325
- data/test/test_scheduled.rb +0 -114
- data/test/test_scheduling.rb +0 -49
- data/test/test_sidekiq.rb +0 -99
- data/test/test_testing.rb +0 -142
- data/test/test_testing_fake.rb +0 -268
- data/test/test_testing_inline.rb +0 -93
- data/test/test_util.rb +0 -16
- data/test/test_web.rb +0 -608
- data/test/test_web_helpers.rb +0 -53
- data/web/assets/images/bootstrap/glyphicons-halflings-white.png +0 -0
- data/web/assets/images/bootstrap/glyphicons-halflings.png +0 -0
- data/web/assets/images/status/active.png +0 -0
- data/web/assets/images/status/idle.png +0 -0
- data/web/assets/javascripts/locales/README.md +0 -27
- data/web/assets/javascripts/locales/jquery.timeago.ar.js +0 -96
- data/web/assets/javascripts/locales/jquery.timeago.bg.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.bs.js +0 -49
- data/web/assets/javascripts/locales/jquery.timeago.ca.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.cs.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.cy.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.da.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.de.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.el.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.en-short.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.en.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.es.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.et.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.fa.js +0 -22
- data/web/assets/javascripts/locales/jquery.timeago.fi.js +0 -28
- data/web/assets/javascripts/locales/jquery.timeago.fr-short.js +0 -16
- data/web/assets/javascripts/locales/jquery.timeago.fr.js +0 -17
- data/web/assets/javascripts/locales/jquery.timeago.he.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.hr.js +0 -49
- data/web/assets/javascripts/locales/jquery.timeago.hu.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.hy.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.id.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.it.js +0 -16
- data/web/assets/javascripts/locales/jquery.timeago.ja.js +0 -19
- data/web/assets/javascripts/locales/jquery.timeago.ko.js +0 -17
- data/web/assets/javascripts/locales/jquery.timeago.lt.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.mk.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.nl.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.no.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.pl.js +0 -31
- data/web/assets/javascripts/locales/jquery.timeago.pt-br.js +0 -16
- data/web/assets/javascripts/locales/jquery.timeago.pt.js +0 -16
- data/web/assets/javascripts/locales/jquery.timeago.ro.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.rs.js +0 -49
- data/web/assets/javascripts/locales/jquery.timeago.ru.js +0 -34
- data/web/assets/javascripts/locales/jquery.timeago.sk.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.sl.js +0 -44
- data/web/assets/javascripts/locales/jquery.timeago.sv.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.th.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.tr.js +0 -16
- data/web/assets/javascripts/locales/jquery.timeago.uk.js +0 -34
- data/web/assets/javascripts/locales/jquery.timeago.uz.js +0 -19
- data/web/assets/javascripts/locales/jquery.timeago.zh-cn.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.zh-tw.js +0 -20
- data/web/views/_poll_js.erb +0 -5
- /data/web/assets/images/{status-sd8051fd480.png → status.png} +0 -0
require "forwardable"

require "set"
require "sidekiq/redis_connection"

module Sidekiq
  # Sidekiq::Config represents the global configuration for an instance of Sidekiq.
  class Config
    extend Forwardable

    # Baseline options; a copy is merged with user options in #initialize.
    DEFAULTS = {
      labels: Set.new,
      require: ".",
      environment: nil,
      concurrency: 5,
      timeout: 25,
      poll_interval_average: nil,
      average_scheduled_poll_interval: 5,
      on_complex_arguments: :raise,
      error_handlers: [],
      death_handlers: [],
      lifecycle_events: {
        startup: [],
        quiet: [],
        shutdown: [],
        # triggers when we fire the first heartbeat on startup OR repairing a network partition
        heartbeat: [],
        # triggers on EVERY heartbeat call, every 10 seconds
        beat: []
      },
      dead_max_jobs: 10_000,
      dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
      reloader: proc { |&block| block.call },
      backtrace_cleaner: ->(backtrace) { backtrace }
    }

    # Default error handler: logs the context hash (if any), the exception
    # class/message, and the (cleaned) backtrace via the config's logger.
    ERROR_HANDLER = ->(ex, ctx, cfg = Sidekiq.default_configuration) {
      l = cfg.logger
      l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
      l.warn("#{ex.class.name}: #{ex.message}")
      unless ex.backtrace.nil?
        backtrace = cfg[:backtrace_cleaner].call(ex.backtrace)
        l.warn(backtrace.join("\n"))
      end
    }

    def initialize(options = {})
      @options = DEFAULTS.merge(options)
      # ensure there is always at least one error handler installed
      @options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
      @directory = {}
      @redis_config = {}
      @capsules = {}
    end

    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
    attr_reader :capsules

    def to_json(*)
      Sidekiq.dump_json(@options)
    end

    # LEGACY: edits the default capsule
    # config.concurrency = 5
    def concurrency=(val)
      default_capsule.concurrency = Integer(val)
    end

    def concurrency
      default_capsule.concurrency
    end

    # Sum of the concurrency of every registered capsule.
    def total_concurrency
      capsules.each_value.sum(&:concurrency)
    end

    # Edit the default capsule.
    # config.queues = %w( high default low ) # strict
    # config.queues = %w( high,3 default,2 low,1 ) # weighted
    # config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
    #
    # With weighted priority, queue will be checked first (weight / total) of the time.
    # high will be checked first (3/6) or 50% of the time.
    # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
    # are ridiculous and unnecessarily expensive. You can get random queue ordering
    # by explicitly setting all weights to 1.
    def queues=(val)
      default_capsule.queues = val
    end

    def queues
      default_capsule.queues
    end

    # Yields and returns the global client middleware chain, lazily created.
    def client_middleware
      @client_chain ||= Sidekiq::Middleware::Chain.new(self)
      yield @client_chain if block_given?
      @client_chain
    end

    # Yields and returns the global server middleware chain, lazily created.
    def server_middleware
      @server_chain ||= Sidekiq::Middleware::Chain.new(self)
      yield @server_chain if block_given?
      @server_chain
    end

    def default_capsule(&block)
      capsule("default", &block)
    end

    # register a new queue processing subsystem
    def capsule(name)
      nm = name.to_s
      cap = @capsules.fetch(nm) do
        cap = Sidekiq::Capsule.new(nm, self)
        @capsules[nm] = cap
      end
      yield cap if block_given?
      cap
    end

    # All capsules must use the same Redis configuration
    def redis=(hash)
      @redis_config = @redis_config.merge(hash)
    end

    # Pool resolution order: explicit per-thread pool, the current capsule's
    # pool, then the process-local housekeeping pool.
    def redis_pool
      Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
    end

    private def local_redis_pool
      # this is our internal client/housekeeping pool. each capsule has its
      # own pool for executing threads.
      @redis ||= new_redis_pool(10, "internal")
    end

    def new_redis_pool(size, name = "unset")
      # connection pool is lazy, it will not create connections unless you actually need them
      # so don't be skimpy!
      RedisConnection.create({size: size, logger: logger, pool_name: name}.merge(@redis_config))
    end

    # Returns Redis INFO as a Hash of String => String.
    def redis_info
      redis do |conn|
        conn.call("INFO") { |i| i.lines(chomp: true).map { |l| l.split(":", 2) }.select { |l| l.size == 2 }.to_h }
      rescue RedisClientAdapter::CommandError => ex
        # 2850 return fake version when INFO command has (probably) been renamed
        raise unless /unknown command/.match?(ex.message)
        {
          "redis_version" => "9.9.9",
          "uptime_in_days" => "9999",
          "connected_clients" => "9999",
          "used_memory_human" => "9P",
          "used_memory_peak_human" => "9P"
        }.freeze
      end
    end

    # Checks out a connection and yields it, retrying once on failover-style
    # errors. Requires a block.
    def redis
      raise ArgumentError, "requires a block" unless block_given?
      redis_pool.with do |conn|
        retryable = true
        begin
          yield conn
        rescue RedisClientAdapter::BaseError => ex
          # 2550 Failover can cause the server to become a replica, need
          # to disconnect and reopen the socket to get back to the primary.
          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
          # 4985 Use the same logic when a blocking command is force-unblocked
          # The same retry logic is also used in client.rb
          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
            conn.close
            retryable = false
            retry
          end
          raise
        end
      end
    end

    # register global singletons which can be accessed elsewhere
    def register(name, instance)
      @directory[name] = instance
    end

    # find a singleton
    def lookup(name, default_class = nil)
      # JNDI is just a fancy name for a hash lookup
      @directory.fetch(name) do |key|
        return nil unless default_class
        @directory[key] = default_class.new(self)
      end
    end

    ##
    # Death handlers are called when all retries for a job have been exhausted and
    # the job dies. It's the notification to your application
    # that this job will not succeed without manual intervention.
    #
    #   Sidekiq.configure_server do |config|
    #     config.death_handlers << ->(job, ex) do
    #     end
    #   end
    def death_handlers
      @options[:death_handlers]
    end

    # How frequently Redis should be checked by a random Sidekiq process for
    # scheduled and retriable jobs. Each individual process will take turns by
    # waiting some multiple of this value.
    #
    # See sidekiq/scheduled.rb for an in-depth explanation of this value
    def average_scheduled_poll_interval=(interval)
      @options[:average_scheduled_poll_interval] = interval
    end

    # Register a proc to handle any error which occurs within the Sidekiq process.
    #
    #   Sidekiq.configure_server do |config|
    #     config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
    #   end
    #
    # The default error handler logs errors to @logger.
    def error_handlers
      @options[:error_handlers]
    end

    # Register a block to run at a point in the Sidekiq lifecycle.
    # :startup, :quiet or :shutdown are valid events.
    #
    #   Sidekiq.configure_server do |config|
    #     config.on(:shutdown) do
    #       puts "Goodbye cruel world!"
    #     end
    #   end
    def on(event, &block)
      raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
      raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
      @options[:lifecycle_events][event] << block
    end

    def logger
      @logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
        log.level = Logger::INFO
        log.formatter = if ENV["DYNO"]
          Sidekiq::Logger::Formatters::WithoutTimestamp.new
        else
          Sidekiq::Logger::Formatters::Pretty.new
        end
      end
    end

    def logger=(logger)
      if logger.nil?
        # passing nil silences logging rather than removing the logger
        self.logger.level = Logger::FATAL
        return
      end

      @logger = logger
    end

    # Number of arguments the handler's call interface accepts; used to
    # detect legacy two-argument error handlers.
    private def parameter_size(handler)
      target = handler.is_a?(Proc) ? handler : handler.method(:call)
      target.parameters.size
    end

    # INTERNAL USE ONLY
    def handle_exception(ex, ctx = {})
      if @options[:error_handlers].size == 0
        p ["!!!!!", ex]
      end
      @options[:error_handlers].each do |handler|
        if parameter_size(handler) == 2
          # TODO Remove in 8.0
          logger.info { "DEPRECATION: Sidekiq exception handlers now take three arguments, see #{handler}" }
          handler.call(ex, {_config: self}.merge(ctx))
        else
          handler.call(ex, ctx, self)
        end
      rescue Exception => e
        # deliberate rescue of Exception: a broken error handler must never
        # take down the process's error-reporting loop
        l = logger
        l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
        l.error e
        l.error e.backtrace.join("\n") unless e.backtrace.nil?
      end
    end
  end
end
require "sidekiq/redis_connection"
require "time"

# This file is designed to be required within the user's
# deployment script; it should need a bare minimum of dependencies.
# Usage:
#
#   require "sidekiq/deploy"
#   Sidekiq::Deploy.mark!("Some change")
#
# If you do not pass a label, Sidekiq will try to use the latest
# git commit info.
#

module Sidekiq
  class Deploy
    MARK_TTL = 90 * 24 * 60 * 60 # 90 days

    # Default label: abbreviated hash + subject of the latest git commit.
    LABEL_MAKER = -> {
      `git log -1 --format="%h %s"`.strip
    }

    def self.mark!(label = nil)
      Sidekiq::Deploy.new.mark!(label: label)
    end

    def initialize(pool = Sidekiq::RedisConnection.create)
      @pool = pool
    end

    # Record a deploy mark in Redis at the given time with the given label.
    def mark!(at: Time.now, label: nil)
      label ||= LABEL_MAKER.call
      # we need to round the timestamp so that we gracefully
      # handle a very common error in marking deploys:
      # having every process mark its deploy, leading
      # to N marks for each deploy. Instead we round the time
      # to the minute so that multiple marks within that minute
      # will all naturally rollup into one mark per minute.
      whence = at.utc
      floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
      datecode = floor.strftime("%Y%m%d")
      key = "#{datecode}-marks"
      stamp = floor.iso8601

      @pool.with do |c|
        # only allow one deploy mark for a given label for the next minute
        lock = c.set("deploylock-#{label}", stamp, "nx", "ex", "60")
        if lock
          c.multi do |pipe|
            pipe.hsetnx(key, stamp, label)
            pipe.expire(key, MARK_TTL)
          end
        end
      end
    end

    # Returns the Hash of iso8601 timestamp => label marks for the given date.
    def fetch(date = Time.now.utc.to_date)
      datecode = date.strftime("%Y%m%d")
      @pool.with { |c| c.hgetall("#{datecode}-marks") }
    end
  end
end
require "sidekiq/component"
require "sidekiq/launcher"
require "sidekiq/metrics/tracking"

module Sidekiq
  # Runs the Sidekiq job-processing machinery inside an existing process
  # (e.g. a puma web worker) rather than as a standalone sidekiq process.
  class Embedded
    include Sidekiq::Component

    def initialize(config)
      @config = config
    end

    # Start processing jobs: run sanity checks, fire :startup hooks, then
    # boot the Launcher with embedded mode enabled.
    def run
      housekeeping
      fire_event(:startup, reverse: false, reraise: true)
      @launcher = Sidekiq::Launcher.new(@config, embedded: true)
      @launcher.run
      sleep 0.2 # pause to give threads time to spin up

      logger.info "Sidekiq running embedded, total process thread count: #{Thread.list.size}"
      logger.debug { Thread.list.map(&:name) }
    end

    # Stop fetching new jobs but let in-progress jobs finish.
    def quiet
      @launcher&.quiet
    end

    def stop
      @launcher&.stop
    end

    private

    # Startup logging plus Redis version/eviction-policy sanity checks.
    def housekeeping
      logger.info "Running in #{RUBY_DESCRIPTION}"
      logger.info Sidekiq::LICENSE
      logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)

      # touch the connection pool so it is created before we
      # fire startup and start multithreading.
      info = config.redis_info
      ver = Gem::Version.new(info["redis_version"])
      raise "You are connecting to Redis #{ver}, Sidekiq requires Redis 6.2.0 or greater" if ver < Gem::Version.new("6.2.0")

      maxmemory_policy = info["maxmemory_policy"]
      if maxmemory_policy != "noeviction"
        logger.warn <<~EOM


          WARNING: Your Redis instance will evict Sidekiq data under heavy load.
          The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
          See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory

        EOM
      end

      logger.debug { "Client Middleware: #{@config.default_capsule.client_middleware.map(&:klass).join(", ")}" }
      logger.debug { "Server Middleware: #{@config.default_capsule.server_middleware.map(&:klass).join(", ")}" }
    end
  end
end
# frozen_string_literal: true

require "sidekiq"
require "sidekiq/component"
require "sidekiq/capsule"

module Sidekiq # :nodoc:
  class BasicFetch
    include Sidekiq::Component
    # We want the fetch operation to timeout every few seconds so the thread
    # can check if the process is shutting down.
    TIMEOUT = 2

    UnitOfWork = Struct.new(:queue, :job, :config) {
      def acknowledge
        # nothing to do
      end

      def queue_name
        queue.delete_prefix("queue:")
      end

      def requeue
        config.redis do |conn|
          conn.rpush(queue, job)
        end
      end
    }

    def initialize(cap)
      raise ArgumentError, "missing queue list" unless cap.queues
      @config = cap
      @strictly_ordered_queues = cap.mode == :strict
      @queues = config.queues.map { |q| "queue:#{q}" }
      # in strict mode the queue list is fixed, so dedupe once up front
      @queues.uniq! if @strictly_ordered_queues
    end

    # Blocking-pop one job from the configured queues; returns a UnitOfWork
    # or nil if nothing arrived within TIMEOUT seconds.
    def retrieve_work
      qs = queues_cmd
      # 4825 Sidekiq Pro with all queues paused will return an
      # empty set of queues
      if qs.size <= 0
        sleep(TIMEOUT)
        return nil
      end

      queue, job = redis { |conn| conn.blocking_call(conn.read_timeout + TIMEOUT, "brpop", *qs, TIMEOUT) }
      UnitOfWork.new(queue, job, config) if queue
    end

    # Push jobs that were in progress at shutdown back onto their queues.
    def bulk_requeue(inprogress)
      return if inprogress.empty?

      logger.debug { "Re-queueing terminated jobs" }
      jobs_to_requeue = {}
      inprogress.each do |unit_of_work|
        jobs_to_requeue[unit_of_work.queue] ||= []
        jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
      end

      redis do |conn|
        conn.pipelined do |pipeline|
          jobs_to_requeue.each do |queue, jobs|
            pipeline.rpush(queue, jobs)
          end
        end
      end
      logger.info("Pushed #{inprogress.size} jobs back to Redis")
    rescue => ex
      logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
    end

    # Creating the Redis#brpop command takes into account any
    # configured queue weights. (NOTE(review): two context lines of this
    # comment fall in a diff-hunk gap and are not visible here.) We
    # recreate the queue command each time we invoke Redis#brpop
    # to honor weights and avoid queue starvation.
    def queues_cmd
      if @strictly_ordered_queues
        @queues
      else
        permute = @queues.shuffle
        permute.uniq!
        permute
      end
    end
  end
end