sidekiq 5.2.6 → 7.1.0
Potentially problematic release: this version of sidekiq might be problematic.
- checksums.yaml +4 -4
- data/Changes.md +537 -8
- data/LICENSE.txt +9 -0
- data/README.md +47 -50
- data/bin/sidekiq +22 -3
- data/bin/sidekiqload +213 -115
- data/bin/sidekiqmon +11 -0
- data/lib/generators/sidekiq/job_generator.rb +57 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +556 -351
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +203 -226
- data/lib/sidekiq/client.rb +121 -101
- data/lib/sidekiq/component.rb +68 -0
- data/lib/sidekiq/config.rb +274 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +49 -42
- data/lib/sidekiq/job.rb +374 -0
- data/lib/sidekiq/job_logger.rb +33 -7
- data/lib/sidekiq/job_retry.rb +131 -108
- data/lib/sidekiq/job_util.rb +105 -0
- data/lib/sidekiq/launcher.rb +203 -105
- data/lib/sidekiq/logger.rb +131 -0
- data/lib/sidekiq/manager.rb +43 -46
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +136 -0
- data/lib/sidekiq/middleware/chain.rb +113 -56
- data/lib/sidekiq/middleware/current_attributes.rb +56 -0
- data/lib/sidekiq/middleware/i18n.rb +7 -7
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +146 -0
- data/lib/sidekiq/paginator.rb +28 -16
- data/lib/sidekiq/processor.rb +108 -107
- data/lib/sidekiq/rails.rb +49 -38
- data/lib/sidekiq/redis_client_adapter.rb +96 -0
- data/lib/sidekiq/redis_connection.rb +38 -107
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +111 -49
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing/inline.rb +6 -5
- data/lib/sidekiq/testing.rb +66 -84
- data/lib/sidekiq/transaction_aware_client.rb +44 -0
- data/lib/sidekiq/version.rb +3 -1
- data/lib/sidekiq/web/action.rb +15 -11
- data/lib/sidekiq/web/application.rb +123 -79
- data/lib/sidekiq/web/csrf_protection.rb +180 -0
- data/lib/sidekiq/web/helpers.rb +137 -106
- data/lib/sidekiq/web/router.rb +23 -19
- data/lib/sidekiq/web.rb +56 -107
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +92 -182
- data/sidekiq.gemspec +25 -16
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/javascripts/application.js +130 -61
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +166 -0
- data/web/assets/javascripts/dashboard.js +36 -292
- data/web/assets/javascripts/metrics.js +264 -0
- data/web/assets/stylesheets/application-dark.css +147 -0
- data/web/assets/stylesheets/application-rtl.css +2 -95
- data/web/assets/stylesheets/application.css +102 -522
- data/web/locales/ar.yml +71 -65
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -53
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +84 -66
- data/web/locales/es.yml +70 -54
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +83 -62
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +75 -64
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +63 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +68 -63
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +6 -3
- data/web/views/_job_info.erb +21 -4
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +1 -1
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +75 -25
- data/web/views/dashboard.erb +58 -18
- data/web/views/dead.erb +3 -3
- data/web/views/layout.erb +3 -1
- data/web/views/metrics.erb +82 -0
- data/web/views/metrics_for_job.erb +68 -0
- data/web/views/morgue.erb +14 -15
- data/web/views/queue.erb +33 -24
- data/web/views/queues.erb +13 -3
- data/web/views/retries.erb +16 -17
- data/web/views/retry.erb +3 -3
- data/web/views/scheduled.erb +17 -15
- metadata +69 -69
- data/.github/contributing.md +0 -32
- data/.github/issue_template.md +0 -11
- data/.gitignore +0 -15
- data/.travis.yml +0 -11
- data/3.0-Upgrade.md +0 -70
- data/4.0-Upgrade.md +0 -53
- data/5.0-Upgrade.md +0 -56
- data/COMM-LICENSE +0 -97
- data/Ent-Changes.md +0 -238
- data/Gemfile +0 -23
- data/LICENSE +0 -9
- data/Pro-2.0-Upgrade.md +0 -138
- data/Pro-3.0-Upgrade.md +0 -44
- data/Pro-4.0-Upgrade.md +0 -35
- data/Pro-Changes.md +0 -759
- data/Rakefile +0 -9
- data/bin/sidekiqctl +0 -20
- data/code_of_conduct.md +0 -50
- data/lib/generators/sidekiq/worker_generator.rb +0 -49
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/ctl.rb +0 -221
- data/lib/sidekiq/delay.rb +0 -42
- data/lib/sidekiq/exception_handler.rb +0 -29
- data/lib/sidekiq/extensions/action_mailer.rb +0 -57
- data/lib/sidekiq/extensions/active_record.rb +0 -40
- data/lib/sidekiq/extensions/class_methods.rb +0 -40
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
- data/lib/sidekiq/util.rb +0 -66
- data/lib/sidekiq/worker.rb +0 -220
data/lib/sidekiq/rails.rb
CHANGED
@@ -1,45 +1,18 @@
 # frozen_string_literal: true
 
+require "sidekiq/job"
+require "rails"
+
 module Sidekiq
   class Rails < ::Rails::Engine
-    # We need to setup this up before any application configuration which might
-    # change Sidekiq middleware.
-    #
-    # This hook happens after `Rails::Application` is inherited within
-    # config/application.rb and before config is touched, usually within the
-    # class block. Definitely before config/environments/*.rb and
-    # config/initializers/*.rb.
-    config.before_configuration do
-      if ::Rails::VERSION::MAJOR < 5 && defined?(::ActiveRecord)
-        Sidekiq.server_middleware do |chain|
-          require 'sidekiq/middleware/server/active_record'
-          chain.add Sidekiq::Middleware::Server::ActiveRecord
-        end
-      end
-    end
-
-    config.after_initialize do
-      # This hook happens after all initializers are run, just before returning
-      # from config/environment.rb back to sidekiq/cli.rb.
-      # We have to add the reloader after initialize to see if cache_classes has
-      # been turned on.
-      #
-      # None of this matters on the client-side, only within the Sidekiq process itself.
-      #
-      Sidekiq.configure_server do |_|
-        if ::Rails::VERSION::MAJOR >= 5
-          Sidekiq.options[:reloader] = Sidekiq::Rails::Reloader.new
-        end
-      end
-    end
-
     class Reloader
       def initialize(app = ::Rails.application)
         @app = app
       end
 
       def call
-        @app.reloader.wrap do
+        params = (::Rails::VERSION::STRING >= "7.1") ? {source: "job.sidekiq"} : {}
+        @app.reloader.wrap(**params) do
           yield
         end
       end
@@ -48,11 +21,49 @@ module Sidekiq
         "#<Sidekiq::Rails::Reloader @app=#{@app.class.name}>"
       end
     end
-  end if defined?(::Rails)
-end
 
-
-
-
-
+    # By including the Options module, we allow AJs to directly control sidekiq features
+    # via the *sidekiq_options* class method and, for instance, not use AJ's retry system.
+    # AJ retries don't show up in the Sidekiq UI Retries tab, don't save any error data, can't be
+    # manually retried, don't automatically die, etc.
+    #
+    #   class SomeJob < ActiveJob::Base
+    #     queue_as :default
+    #     sidekiq_options retry: 3, backtrace: 10
+    #     def perform
+    #     end
+    #   end
+    initializer "sidekiq.active_job_integration" do
+      ActiveSupport.on_load(:active_job) do
+        include ::Sidekiq::Job::Options unless respond_to?(:sidekiq_options)
+      end
+    end
+
+    initializer "sidekiq.rails_logger" do
+      Sidekiq.configure_server do |config|
+        # This is the integration code necessary so that if a job uses `Rails.logger.info "Hello"`,
+        # it will appear in the Sidekiq console with all of the job context. See #5021 and
+        # https://github.com/rails/rails/blob/b5f2b550f69a99336482739000c58e4e04e033aa/railties/lib/rails/commands/server/server_command.rb#L82-L84
+        unless ::Rails.logger == config.logger || ::ActiveSupport::Logger.logger_outputs_to?(::Rails.logger, $stdout)
+          ::Rails.logger.extend(::ActiveSupport::Logger.broadcast(config.logger))
+        end
+      end
+    end
+
+    initializer "sidekiq.backtrace_cleaner" do
+      Sidekiq.configure_server do |config|
+        config[:backtrace_cleaner] = ->(backtrace) { ::Rails.backtrace_cleaner.clean(backtrace) }
+      end
+    end
+
+    # This hook happens after all initializers are run, just before returning
+    # from config/environment.rb back to sidekiq/cli.rb.
+    #
+    # None of this matters on the client-side, only within the Sidekiq process itself.
+    config.after_initialize do
+      Sidekiq.configure_server do |config|
+        config[:reloader] = Sidekiq::Rails::Reloader.new
+      end
+    end
   end
 end
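The "sidekiq.active_job_integration" initializer above is what lets an ActiveJob class call sidekiq_options directly, as its inline comment describes. A minimal sketch of how an application might use it after this upgrade (the class name and option values are illustrative, and it assumes the app's ActiveJob adapter is Sidekiq):

# Hypothetical application code; relies on the initializer above having
# mixed Sidekiq::Job::Options into ActiveJob::Base.
class SomeJob < ActiveJob::Base
  queue_as :default
  # Use Sidekiq's retry set instead of ActiveJob's retry handling.
  sidekiq_options retry: 3, backtrace: 10

  def perform(user_id)
    # work goes here
  end
end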
data/lib/sidekiq/redis_client_adapter.rb
ADDED
@@ -0,0 +1,96 @@
+# frozen_string_literal: true
+
+require "set"
+require "redis_client"
+require "redis_client/decorator"
+
+module Sidekiq
+  class RedisClientAdapter
+    BaseError = RedisClient::Error
+    CommandError = RedisClient::CommandError
+
+    # You can add/remove items or clear the whole thing if you don't want deprecation warnings.
+    DEPRECATED_COMMANDS = %i[rpoplpush zrangebyscore zrevrange zrevrangebyscore getset hmset setex setnx].to_set
+
+    module CompatMethods
+      def info
+        @client.call("INFO") { |i| i.lines(chomp: true).map { |l| l.split(":", 2) }.select { |l| l.size == 2 }.to_h }
+      end
+
+      def evalsha(sha, keys, argv)
+        @client.call("EVALSHA", sha, keys.size, *keys, *argv)
+      end
+
+      private
+
+      # this allows us to use methods like `conn.hmset(...)` instead of having to use
+      # redis-client's native `conn.call("hmset", ...)`
+      def method_missing(*args, &block)
+        warn("[sidekiq#5788] Redis has deprecated the `#{args.first}`command, called at #{caller(1..1)}") if DEPRECATED_COMMANDS.include?(args.first)
+        @client.call(*args, *block)
+      end
+      ruby2_keywords :method_missing if respond_to?(:ruby2_keywords, true)
+
+      def respond_to_missing?(name, include_private = false)
+        super # Appease the linter. We can't tell what is a valid command.
+      end
+    end
+
+    CompatClient = RedisClient::Decorator.create(CompatMethods)
+
+    class CompatClient
+      def config
+        @client.config
+      end
+    end
+
+    def initialize(options)
+      opts = client_opts(options)
+      @config = if opts.key?(:sentinels)
+        RedisClient.sentinel(**opts)
+      else
+        RedisClient.config(**opts)
+      end
+    end
+
+    def new_client
+      CompatClient.new(@config.new_client)
+    end
+
+    private
+
+    def client_opts(options)
+      opts = options.dup
+
+      if opts[:namespace]
+        raise ArgumentError, "Your Redis configuration uses the namespace '#{opts[:namespace]}' but this feature isn't supported by redis-client. " \
+          "Either use the redis adapter or remove the namespace."
+      end
+
+      opts.delete(:size)
+      opts.delete(:pool_timeout)
+
+      if opts[:network_timeout]
+        opts[:timeout] = opts[:network_timeout]
+        opts.delete(:network_timeout)
+      end
+
+      if opts[:driver]
+        opts[:driver] = opts[:driver].to_sym
+      end
+
+      opts[:name] = opts.delete(:master_name) if opts.key?(:master_name)
+      opts[:role] = opts[:role].to_sym if opts.key?(:role)
+      opts.delete(:url) if opts.key?(:sentinels)
+
+      # Issue #3303, redis-rb will silently retry an operation.
+      # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
+      # is performed twice but I believe this is much, much rarer
+      # than the reconnect silently fixing a problem; we keep it
+      # on by default.
+      opts[:reconnect_attempts] ||= 1
+
+      opts
+    end
+  end
+end
data/lib/sidekiq/redis_connection.rb
CHANGED
@@ -1,99 +1,39 @@
 # frozen_string_literal: true
-
-require
-require
+
+require "connection_pool"
+require "uri"
+require "sidekiq/redis_client_adapter"
 
 module Sidekiq
-
+  module RedisConnection
     class << self
+      def create(options = {})
+        symbolized_options = options.transform_keys(&:to_sym)
+        symbolized_options[:url] ||= determine_redis_provider
 
-
-        options
-          options[key.to_sym] = options.delete(key)
-        end
-
-        options[:id] = "Sidekiq-#{Sidekiq.server? ? "server" : "client"}-PID-#{$$}" if !options.has_key?(:id)
-        options[:url] ||= determine_redis_provider
-
-        size = if options[:size]
-          options[:size]
-        elsif Sidekiq.server?
-          Sidekiq.options[:concurrency] + 5
-        elsif ENV['RAILS_MAX_THREADS']
-          Integer(ENV['RAILS_MAX_THREADS'])
-        else
-          5
-        end
+        logger = symbolized_options.delete(:logger)
+        logger&.info { "Sidekiq #{Sidekiq::VERSION} connecting to Redis with options #{scrub(symbolized_options)}" }
 
-
+        size = symbolized_options.delete(:size) || 5
+        pool_timeout = symbolized_options.delete(:pool_timeout) || 1
+        pool_name = symbolized_options.delete(:pool_name)
 
-
-
-
-        ConnectionPool.new(:timeout => pool_timeout, :size => size) do
-          build_client(options)
+        redis_config = Sidekiq::RedisClientAdapter.new(symbolized_options)
+        ConnectionPool.new(timeout: pool_timeout, size: size, name: pool_name) do
+          redis_config.new_client
        end
      end
 
      private
 
-
-      #
-      # We need a connection for each Processor.
-      # We need a connection for Pro's real-time change listener
-      # We need a connection to various features to call Redis every few seconds:
-      #   - the process heartbeat.
-      #   - enterprise's leader election
-      #   - enterprise's cron support
-      def verify_sizing(size, concurrency)
-        raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size <= concurrency
-      end
-
-      def build_client(options)
-        namespace = options[:namespace]
-
-        client = Redis.new client_opts(options)
-        if namespace
-          begin
-            require 'redis/namespace'
-            Redis::Namespace.new(namespace, :redis => client)
-          rescue LoadError
-            Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile." \
-              "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.")
-            exit(-127)
-          end
-        else
-          client
-        end
-      end
-
-      def client_opts(options)
-        opts = options.dup
-        if opts[:namespace]
-          opts.delete(:namespace)
-        end
-
-        if opts[:network_timeout]
-          opts[:timeout] = opts[:network_timeout]
-          opts.delete(:network_timeout)
-        end
-
-        opts[:driver] ||= Redis::Connection.drivers.last || 'ruby'
-
-        # Issue #3303, redis-rb will silently retry an operation.
-        # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
-        # is performed twice but I believe this is much, much rarer
-        # than the reconnect silently fixing a problem; we keep it
-        # on by default.
-        opts[:reconnect_attempts] ||= 1
-
-        opts
-      end
-
-      def log_info(options)
-        # Don't log Redis AUTH password
+      def scrub(options)
        redacted = "REDACTED"
-
+
+        # Deep clone so we can muck with these options all we want and exclude
+        # params from dump-and-load that may contain objects that Marshal is
+        # unable to safely dump.
+        keys = options.keys - [:logger, :ssl_params]
+        scrubbed_options = Marshal.load(Marshal.dump(options.slice(*keys)))
        if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
          uri.password = redacted
          scrubbed_options[:url] = uri.to_s
@@ -101,11 +41,10 @@ module Sidekiq
        if scrubbed_options[:password]
          scrubbed_options[:password] = redacted
        end
-
-
-        else
-          Sidekiq.logger.debug("#{Sidekiq::NAME} client with redis options #{scrubbed_options}")
+        scrubbed_options[:sentinels]&.each do |sentinel|
+          sentinel[:password] = redacted if sentinel[:password]
        end
+        scrubbed_options
      end
 
      def determine_redis_provider
@@ -115,30 +54,22 @@ module Sidekiq
        # REDIS_PROVIDER=MY_REDIS_URL
        # and Sidekiq will find your custom URL variable with no custom
        # initialization code at all.
-
-
-
-
-
-
-
-
-
-
-          REDISTOGO_URL=redis://somehost.example.com:6379/4
-
-          Use REDIS_URL if you wish to point Sidekiq to a URL directly.
-
-          This configuration error will crash starting in Sidekiq 5.3.
-
-          #################################################################################
-          EOM
+        #
+        p = ENV["REDIS_PROVIDER"]
+        if p && p =~ /:/
+          raise <<~EOM
+            REDIS_PROVIDER should be set to the name of the variable which contains the Redis URL, not a URL itself.
+            Platforms like Heroku will sell addons that publish a *_URL variable. You need to tell Sidekiq with REDIS_PROVIDER, e.g.:
+
+            REDISTOGO_URL=redis://somehost.example.com:6379/4
+            REDIS_PROVIDER=REDISTOGO_URL
+          EOM
        end
+
        ENV[
-
+          p || "REDIS_URL"
        ]
      end
-
    end
  end
end
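The rewritten create method above symbolizes the option keys, strips the pool-related options (:size, :pool_timeout, :pool_name), and hands the rest to RedisClientAdapter. A hedged sketch of calling this internal API directly (values are illustrative; applications normally configure Redis via Sidekiq.configure_server/configure_client rather than this class):

require "sidekiq/redis_connection"

pool = Sidekiq::RedisConnection.create(
  url: "redis://localhost:6379/0",
  size: 10,          # connection_pool size, stripped before reaching redis-client
  pool_timeout: 2,   # seconds to wait for a free connection from the pool
  pool_name: "internal"
)

pool.with { |conn| conn.call("PING") } # => "PONG"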
data/lib/sidekiq/ring_buffer.rb
ADDED
@@ -0,0 +1,29 @@
+require "forwardable"
+
+module Sidekiq
+  class RingBuffer
+    include Enumerable
+    extend Forwardable
+    def_delegators :@buf, :[], :each, :size
+
+    def initialize(size, default = 0)
+      @size = size
+      @buf = Array.new(size, default)
+      @index = 0
+    end
+
+    def <<(element)
+      @buf[@index % @size] = element
+      @index += 1
+      element
+    end
+
+    def buffer
+      @buf
+    end
+
+    def reset(default = 0)
+      @buf.fill(default)
+    end
+  end
+end
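Since the whole class is shown above, a quick usage sketch of this new internal helper (the values are illustrative):

require "sidekiq/ring_buffer"

buf = Sidekiq::RingBuffer.new(3)
buf << 10
buf << 20
buf << 30
buf << 40        # wraps around and overwrites the oldest slot
buf.buffer       # => [40, 20, 30]
buf.size         # => 3 (delegated to the underlying Array)
buf.reset        # fills every slot with the default (0)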
data/lib/sidekiq/scheduled.rb
CHANGED
@@ -1,35 +1,66 @@
 # frozen_string_literal: true
-
-require
-require
+
+require "sidekiq"
+require "sidekiq/component"
 
 module Sidekiq
   module Scheduled
-    SETS = %w
+    SETS = %w[retry schedule]
 
     class Enq
-
+      include Sidekiq::Component
+
+      LUA_ZPOPBYSCORE = <<~LUA
+        local key, now = KEYS[1], ARGV[1]
+        local jobs = redis.call("zrange", key, "-inf", now, "byscore", "limit", 0, 1)
+        if jobs[1] then
+          redis.call("zrem", key, jobs[1])
+          return jobs[1]
+        end
+      LUA
+
+      def initialize(container)
+        @config = container
+        @client = Sidekiq::Client.new(config: container)
+        @done = false
+        @lua_zpopbyscore_sha = nil
+      end
+
+      def enqueue_jobs(sorted_sets = SETS)
        # A job's "score" in Redis is the time at which it should be processed.
        # Just check Redis for the set of jobs with a timestamp before now.
-
+        redis do |conn|
          sorted_sets.each do |sorted_set|
-            # Get
+            # Get next item in the queue with score (time to execute) <= now.
            # We need to go through the list one at a time to reduce the risk of something
            # going wrong between the time jobs are popped from the scheduled queue and when
            # they are pushed onto a work queue and losing the jobs.
-            while job = conn
-
-
-            # the queue, it's because another process already popped it so we can move on to the
-            # next one.
-              if conn.zrem(sorted_set, job)
-                Sidekiq::Client.push(Sidekiq.load_json(job))
-                Sidekiq::Logging.logger.debug { "enqueued #{sorted_set}: #{job}" }
-              end
+            while !@done && (job = zpopbyscore(conn, keys: [sorted_set], argv: [Time.now.to_f.to_s]))
+              @client.push(Sidekiq.load_json(job))
+              logger.debug { "enqueued #{sorted_set}: #{job}" }
            end
          end
        end
      end
+
+      def terminate
+        @done = true
+      end
+
+      private
+
+      def zpopbyscore(conn, keys: nil, argv: nil)
+        if @lua_zpopbyscore_sha.nil?
+          @lua_zpopbyscore_sha = conn.script(:load, LUA_ZPOPBYSCORE)
+        end
+
+        conn.call("EVALSHA", @lua_zpopbyscore_sha, keys.size, *keys, *argv)
+      rescue RedisClient::CommandError => e
+        raise unless e.message.start_with?("NOSCRIPT")
+
+        @lua_zpopbyscore_sha = nil
+        retry
+      end
    end
 
    ##
@@ -38,49 +69,47 @@ module Sidekiq
    # just pops the job back onto its original queue so the
    # workers can pick it up like any other job.
    class Poller
-      include
+      include Sidekiq::Component
 
      INITIAL_WAIT = 10
 
-      def initialize
-        @
+      def initialize(config)
+        @config = config
+        @enq = (config[:scheduled_enq] || Sidekiq::Scheduled::Enq).new(config)
        @sleeper = ConnectionPool::TimedStack.new
        @done = false
        @thread = nil
+        @count_calls = 0
      end
 
      # Shut down this instance, will pause until the thread is dead.
      def terminate
        @done = true
-
-
-
-
-        t.value
-      end
+        @enq.terminate
+
+        @sleeper << 0
+        @thread&.value
      end
 
      def start
-        @thread ||= safe_thread("scheduler")
+        @thread ||= safe_thread("scheduler") {
          initial_wait
 
-
+          until @done
            enqueue
            wait
          end
-
-
+          logger.info("Scheduler exiting...")
+        }
      end
 
      def enqueue
-
-
-
-
-
-
-        handle_exception(ex)
-      end
+        @enq.enqueue_jobs
+      rescue => ex
+        # Most likely a problem with redis networking.
+        # Punt and try again at the next interval
+        logger.error ex.message
+        handle_exception(ex)
      end
 
      private
@@ -117,13 +146,16 @@ module Sidekiq
      # As we run more processes, the scheduling interval average will approach an even spread
      # between 0 and poll interval so we don't need this artifical boost.
      #
-
+        count = process_count
+        interval = poll_interval_average(count)
+
+        if count < 10
          # For small clusters, calculate a random interval that is ±50% the desired average.
-
+          interval * rand + interval.to_f / 2
        else
          # With 10+ processes, we should have enough randomness to get decent polling
          # across the entire timespan
-
+          interval * rand
        end
      end
 
@@ -140,35 +172,65 @@ module Sidekiq
      # the same time: the thundering herd problem.
      #
      # We only do this if poll_interval_average is unset (the default).
-      def poll_interval_average
-
+      def poll_interval_average(count)
+        @config[:poll_interval_average] || scaled_poll_interval(count)
      end
 
      # Calculates an average poll interval based on the number of known Sidekiq processes.
      # This minimizes a single point of failure by dispersing check-ins but without taxing
      # Redis if you run many Sidekiq processes.
-      def scaled_poll_interval
-        process_count *
+      def scaled_poll_interval(process_count)
+        process_count * @config[:average_scheduled_poll_interval]
      end
 
      def process_count
-        pcount = Sidekiq
+        pcount = Sidekiq.redis { |conn| conn.scard("processes") }
        pcount = 1 if pcount == 0
        pcount
      end
 
+      # A copy of Sidekiq::ProcessSet#cleanup because server
+      # should never depend on sidekiq/api.
+      def cleanup
+        # dont run cleanup more than once per minute
+        return 0 unless redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) }
+
+        count = 0
+        redis do |conn|
+          procs = conn.sscan("processes").to_a
+          heartbeats = conn.pipelined { |pipeline|
+            procs.each do |key|
+              pipeline.hget(key, "info")
+            end
+          }
+
+          # the hash named key has an expiry of 60 seconds.
+          # if it's not found, that means the process has not reported
+          # in to Redis and probably died.
+          to_prune = procs.select.with_index { |proc, i|
+            heartbeats[i].nil?
+          }
+          count = conn.srem("processes", to_prune) unless to_prune.empty?
+        end
+        count
+      end
+
      def initial_wait
-        # Have all processes sleep between 5-15 seconds.
-        #
+        # Have all processes sleep between 5-15 seconds. 10 seconds to give time for
+        # the heartbeat to register (if the poll interval is going to be calculated by the number
        # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time.
        total = 0
-        total += INITIAL_WAIT unless
+        total += INITIAL_WAIT unless @config[:poll_interval_average]
        total += (5 * rand)
 
        @sleeper.pop(total)
      rescue Timeout::Error
+      ensure
+        # periodically clean out the `processes` set in Redis which can collect
+        # references to dead processes over time. The process count affects how
+        # often we scan for scheduled jobs.
+        cleanup
      end
-
    end
  end
end
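To make the polling math above concrete: each Poller sleeps a random interval whose average is process_count * average_scheduled_poll_interval. The default of 5 seconds for that setting is an assumption not shown in this diff. A rough illustration mirroring random_poll_interval and scaled_poll_interval:

# Illustrative only; mirrors the interval math in the diff above.
average_scheduled_poll_interval = 5   # assumed default, configurable via the config hash
process_count = 3                     # e.g. SCARD "processes" returned 3

interval = process_count * average_scheduled_poll_interval   # => 15
sleep_for = interval * rand + interval.to_f / 2              # uniform in 7.5...22.5 seconds
# With 10 or more processes the poller would instead use `interval * rand`.
puts sleep_for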