sidekiq 6.1.0
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of Sidekiq has been flagged as potentially problematic. Click here for more details.
- checksums.yaml +7 -0
- data/.circleci/config.yml +71 -0
- data/.github/contributing.md +32 -0
- data/.github/issue_template.md +11 -0
- data/.gitignore +13 -0
- data/.standard.yml +20 -0
- data/3.0-Upgrade.md +70 -0
- data/4.0-Upgrade.md +53 -0
- data/5.0-Upgrade.md +56 -0
- data/6.0-Upgrade.md +72 -0
- data/COMM-LICENSE +97 -0
- data/Changes.md +1718 -0
- data/Ent-2.0-Upgrade.md +37 -0
- data/Ent-Changes.md +269 -0
- data/Gemfile +24 -0
- data/Gemfile.lock +208 -0
- data/LICENSE +9 -0
- data/Pro-2.0-Upgrade.md +138 -0
- data/Pro-3.0-Upgrade.md +44 -0
- data/Pro-4.0-Upgrade.md +35 -0
- data/Pro-5.0-Upgrade.md +25 -0
- data/Pro-Changes.md +790 -0
- data/README.md +94 -0
- data/Rakefile +10 -0
- data/bin/sidekiq +42 -0
- data/bin/sidekiqload +157 -0
- data/bin/sidekiqmon +8 -0
- data/code_of_conduct.md +50 -0
- data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
- data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
- data/lib/generators/sidekiq/worker_generator.rb +57 -0
- data/lib/sidekiq.rb +262 -0
- data/lib/sidekiq/api.rb +960 -0
- data/lib/sidekiq/cli.rb +401 -0
- data/lib/sidekiq/client.rb +263 -0
- data/lib/sidekiq/delay.rb +41 -0
- data/lib/sidekiq/exception_handler.rb +27 -0
- data/lib/sidekiq/extensions/action_mailer.rb +47 -0
- data/lib/sidekiq/extensions/active_record.rb +43 -0
- data/lib/sidekiq/extensions/class_methods.rb +43 -0
- data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
- data/lib/sidekiq/fetch.rb +82 -0
- data/lib/sidekiq/job_logger.rb +63 -0
- data/lib/sidekiq/job_retry.rb +262 -0
- data/lib/sidekiq/launcher.rb +206 -0
- data/lib/sidekiq/logger.rb +165 -0
- data/lib/sidekiq/manager.rb +135 -0
- data/lib/sidekiq/middleware/chain.rb +160 -0
- data/lib/sidekiq/middleware/i18n.rb +40 -0
- data/lib/sidekiq/monitor.rb +133 -0
- data/lib/sidekiq/paginator.rb +47 -0
- data/lib/sidekiq/processor.rb +280 -0
- data/lib/sidekiq/rails.rb +50 -0
- data/lib/sidekiq/redis_connection.rb +146 -0
- data/lib/sidekiq/scheduled.rb +173 -0
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing.rb +344 -0
- data/lib/sidekiq/testing/inline.rb +30 -0
- data/lib/sidekiq/util.rb +67 -0
- data/lib/sidekiq/version.rb +5 -0
- data/lib/sidekiq/web.rb +213 -0
- data/lib/sidekiq/web/action.rb +93 -0
- data/lib/sidekiq/web/application.rb +357 -0
- data/lib/sidekiq/web/csrf_protection.rb +153 -0
- data/lib/sidekiq/web/helpers.rb +333 -0
- data/lib/sidekiq/web/router.rb +101 -0
- data/lib/sidekiq/worker.rb +244 -0
- data/sidekiq.gemspec +20 -0
- data/web/assets/images/favicon.ico +0 -0
- data/web/assets/images/logo.png +0 -0
- data/web/assets/images/status.png +0 -0
- data/web/assets/javascripts/application.js +95 -0
- data/web/assets/javascripts/dashboard.js +296 -0
- data/web/assets/stylesheets/application-dark.css +133 -0
- data/web/assets/stylesheets/application-rtl.css +246 -0
- data/web/assets/stylesheets/application.css +1158 -0
- data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
- data/web/assets/stylesheets/bootstrap.css +5 -0
- data/web/locales/ar.yml +81 -0
- data/web/locales/cs.yml +78 -0
- data/web/locales/da.yml +68 -0
- data/web/locales/de.yml +81 -0
- data/web/locales/el.yml +68 -0
- data/web/locales/en.yml +83 -0
- data/web/locales/es.yml +70 -0
- data/web/locales/fa.yml +80 -0
- data/web/locales/fr.yml +78 -0
- data/web/locales/he.yml +79 -0
- data/web/locales/hi.yml +75 -0
- data/web/locales/it.yml +69 -0
- data/web/locales/ja.yml +83 -0
- data/web/locales/ko.yml +68 -0
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +77 -0
- data/web/locales/nl.yml +68 -0
- data/web/locales/pl.yml +59 -0
- data/web/locales/pt-br.yml +68 -0
- data/web/locales/pt.yml +67 -0
- data/web/locales/ru.yml +78 -0
- data/web/locales/sv.yml +68 -0
- data/web/locales/ta.yml +75 -0
- data/web/locales/uk.yml +76 -0
- data/web/locales/ur.yml +80 -0
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +68 -0
- data/web/locales/zh-tw.yml +68 -0
- data/web/views/_footer.erb +20 -0
- data/web/views/_job_info.erb +89 -0
- data/web/views/_nav.erb +52 -0
- data/web/views/_paging.erb +23 -0
- data/web/views/_poll_link.erb +7 -0
- data/web/views/_status.erb +4 -0
- data/web/views/_summary.erb +40 -0
- data/web/views/busy.erb +101 -0
- data/web/views/dashboard.erb +75 -0
- data/web/views/dead.erb +34 -0
- data/web/views/layout.erb +41 -0
- data/web/views/morgue.erb +78 -0
- data/web/views/queue.erb +55 -0
- data/web/views/queues.erb +38 -0
- data/web/views/retries.erb +83 -0
- data/web/views/retry.erb +34 -0
- data/web/views/scheduled.erb +57 -0
- data/web/views/scheduled_job_info.erb +8 -0
- metadata +212 -0
@@ -0,0 +1,50 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "sidekiq/worker"
|
4
|
+
|
5
|
+
module Sidekiq
  # Rails engine that wires Sidekiq into a Rails app: code reloading for
  # job execution and ActiveJob integration.
  class Rails < ::Rails::Engine
    # Wraps each unit of job work in the Rails application reloader so the
    # server picks up freshly-edited application code (mainly development).
    class Reloader
      # app defaults to the booted Rails application instance.
      def initialize(app = ::Rails.application)
        @app = app
      end

      # Executes the given block inside the app's reloader.
      def call
        @app.reloader.wrap { yield }
      end

      # Keep inspect output terse; dumping the entire app object is noisy
      # in logs and error reports.
      def inspect
        "#<Sidekiq::Rails::Reloader @app=#{@app.class.name}>"
      end
    end

    # By including the Options module, we allow AJs to directly control sidekiq features
    # via the *sidekiq_options* class method and, for instance, not use AJ's retry system.
    # AJ retries don't show up in the Sidekiq UI Retries tab, save any error data, can't be
    # manually retried, don't automatically die, etc.
    #
    #   class SomeJob < ActiveJob::Base
    #     queue_as :default
    #     sidekiq_options retry: 3, backtrace: 10
    #     def perform
    #     end
    #   end
    initializer "sidekiq.active_job_integration" do
      ActiveSupport.on_load(:active_job) do
        include ::Sidekiq::Worker::Options unless respond_to?(:sidekiq_options)
      end
    end

    # This hook happens after all initializers are run, just before returning
    # from config/environment.rb back to sidekiq/cli.rb.
    #
    # None of this matters on the client-side, only within the Sidekiq process itself.
    config.after_initialize do
      Sidekiq.configure_server do |_config|
        Sidekiq.options[:reloader] = Sidekiq::Rails::Reloader.new
      end
    end
  end
end
|
@@ -0,0 +1,146 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "connection_pool"
|
4
|
+
require "redis"
|
5
|
+
require "uri"
|
6
|
+
|
7
|
+
module Sidekiq
  # Builds the ConnectionPool of Redis clients Sidekiq uses for all of its
  # Redis traffic, deriving pool size, URL and client options from the
  # caller-supplied options hash and the environment.
  class RedisConnection
    class << self
      # Create a lazy ConnectionPool of Redis clients.
      #
      # options - Hash with String or Symbol keys, e.g. :url, :size,
      #           :pool_timeout, :namespace, :network_timeout plus anything
      #           redis-rb accepts.
      #
      # Returns a ConnectionPool; connections are created on first checkout.
      def create(options = {})
        symbolized_options = options.transform_keys(&:to_sym)

        # Fall back to a URL discovered via REDIS_PROVIDER / REDIS_URL.
        if !symbolized_options[:url] && (u = determine_redis_provider)
          symbolized_options[:url] = u
        end

        size = if symbolized_options[:size]
          symbolized_options[:size]
        elsif Sidekiq.server?
          # Give ourselves plenty of connections. pool is lazy
          # so we won't create them until we need them.
          Sidekiq.options[:concurrency] + 5
        elsif ENV["RAILS_MAX_THREADS"]
          Integer(ENV["RAILS_MAX_THREADS"])
        else
          5
        end

        verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server?

        pool_timeout = symbolized_options[:pool_timeout] || 1
        log_info(symbolized_options)

        ConnectionPool.new(timeout: pool_timeout, size: size) do
          build_client(symbolized_options)
        end
      end

      private

      # Sidekiq needs a lot of concurrent Redis connections.
      #
      # We need a connection for each Processor.
      # We need a connection for Pro's real-time change listener
      # We need a connection to various features to call Redis every few seconds:
      #   - the process heartbeat.
      #   - enterprise's leader election
      #   - enterprise's cron support
      def verify_sizing(size, concurrency)
        raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
      end

      # Instantiate one Redis client, optionally wrapped in Redis::Namespace
      # when a :namespace option is present.
      def build_client(options)
        namespace = options[:namespace]

        client = Redis.new client_opts(options)
        if namespace
          begin
            require "redis/namespace"
            Redis::Namespace.new(namespace, redis: client)
          rescue LoadError
            # FIX: the two literals previously concatenated without a space,
            # producing "...Gemfile.Add the gem...".
            Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile. " \
              "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.")
            exit(-127)
          end
        else
          client
        end
      end

      # Translate Sidekiq-level options into the hash redis-rb expects,
      # stripping keys Redis.new doesn't understand.
      def client_opts(options)
        opts = options.dup
        if opts[:namespace]
          opts.delete(:namespace)
        end

        # redis-rb calls this :timeout.
        if opts[:network_timeout]
          opts[:timeout] = opts[:network_timeout]
          opts.delete(:network_timeout)
        end

        # Prefer a loaded high-performance driver (e.g. hiredis) if present.
        opts[:driver] ||= Redis::Connection.drivers.last || "ruby"

        # Issue #3303, redis-rb will silently retry an operation.
        # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
        # is performed twice but I believe this is much, much rarer
        # than the reconnect silently fixing a problem; we keep it
        # on by default.
        opts[:reconnect_attempts] ||= 1

        opts
      end

      # Log the connection options at boot with all credentials redacted.
      def log_info(options)
        redacted = "REDACTED"

        # deep clone so we can muck with these options all we want
        #
        # exclude SSL params from dump-and-load because some information isn't
        # safely dumpable in current Rubies
        keys = options.keys
        keys.delete(:ssl_params)
        scrubbed_options = Marshal.load(Marshal.dump(options.slice(*keys)))
        if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
          uri.password = redacted
          scrubbed_options[:url] = uri.to_s
        end
        if scrubbed_options[:password]
          scrubbed_options[:password] = redacted
        end
        scrubbed_options[:sentinels]&.each do |sentinel|
          sentinel[:password] = redacted if sentinel[:password]
        end
        if Sidekiq.server?
          Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with redis options #{scrubbed_options}")
        else
          Sidekiq.logger.debug("#{Sidekiq::NAME} client with redis options #{scrubbed_options}")
        end
      end

      # Resolve the Redis URL from the environment.
      #
      # If you have this in your environment:
      #   MY_REDIS_URL=redis://hostname.example.com:1238/4
      # then set:
      #   REDIS_PROVIDER=MY_REDIS_URL
      # and Sidekiq will find your custom URL variable with no custom
      # initialization code at all.
      #
      # Raises RuntimeError when REDIS_PROVIDER itself looks like a URL.
      def determine_redis_provider
        p = ENV["REDIS_PROVIDER"]
        if p && p =~ /:/
          raise <<~EOM
            REDIS_PROVIDER should be set to the name of the variable which contains the Redis URL, not a URL itself.
            Platforms like Heroku will sell addons that publish a *_URL variable.  You need to tell Sidekiq with REDIS_PROVIDER, e.g.:

            REDISTOGO_URL=redis://somehost.example.com:6379/4
            REDIS_PROVIDER=REDISTOGO_URL
          EOM
        end

        ENV[p || "REDIS_URL"]
      end
    end
  end
end
|
@@ -0,0 +1,173 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "sidekiq"
|
4
|
+
require "sidekiq/util"
|
5
|
+
require "sidekiq/api"
|
6
|
+
|
7
|
+
module Sidekiq
  module Scheduled
    SETS = %w[retry schedule]

    # Drains the retry/schedule sorted sets, pushing every due job onto
    # its destination work queue.
    class Enq
      # now         - String float score; jobs scored at or below it are due.
      # sorted_sets - the Redis sorted sets to scan.
      def enqueue_jobs(now = Time.now.to_f.to_s, sorted_sets = SETS)
        # A job's "score" in Redis is the time at which it should be processed.
        # Just check Redis for the set of jobs with a timestamp before now.
        Sidekiq.redis do |conn|
          sorted_sets.each do |sorted_set|
            # Pull batches of items whose score (time to execute) is <= now,
            # looping until no due jobs remain in this set.
            until (jobs = conn.zrangebyscore(sorted_set, "-inf", now, limit: [0, 100])).empty?
              # Move jobs one at a time to shrink the window between popping
              # a job off the scheduled set and pushing it onto a work queue,
              # so a crash in between loses at most the job in flight.
              jobs.each do |job|
                # ZREM returns false when another process already claimed this
                # job; in that case simply skip to the next one.
                next unless conn.zrem(sorted_set, job)

                Sidekiq::Client.push(Sidekiq.load_json(job))
                Sidekiq.logger.debug { "enqueued #{sorted_set}: #{job}" }
              end
            end
          end
        end
      end
    end

    ##
    # The Poller checks Redis every N seconds for jobs in the retry or scheduled
    # set have passed their timestamp and should be enqueued.  If so, it
    # just pops the job back onto its original queue so the
    # workers can pick it up like any other job.
    class Poller
      include Util

      INITIAL_WAIT = 10

      def initialize
        @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
        @sleeper = ConnectionPool::TimedStack.new
        @done = false
        @thread = nil
      end

      # Shut down this instance, will pause until the thread is dead.
      def terminate
        @done = true
        return unless @thread

        waiter = @thread
        @thread = nil
        @sleeper << 0   # wake the sleeping poll loop immediately
        waiter.value    # join and propagate any terminal error
      end

      # Spawn the background polling thread (idempotent).
      def start
        @thread ||= safe_thread("scheduler") do
          initial_wait

          until @done
            enqueue
            wait
          end
          Sidekiq.logger.info("Scheduler exiting...")
        end
      end

      # Run one enqueue pass, swallowing errors so the loop survives.
      def enqueue
        @enq.enqueue_jobs
      rescue => ex
        # Most likely a problem with redis networking.
        # Punt and try again at the next interval
        logger.error ex.message
        handle_exception(ex)
      end

      private

      # Sleep until the next poll, or until terminate pushes a wakeup token.
      def wait
        @sleeper.pop(random_poll_interval)
      rescue Timeout::Error
        # expected
      rescue => ex
        # if poll_interval_average hasn't been calculated yet, we can
        # raise an error trying to reach Redis.
        logger.error ex.message
        handle_exception(ex)
        sleep 5
      end

      # We want one Sidekiq process to schedule jobs every N seconds. We have M processes
      # and **don't** want to coordinate.
      #
      # So in N*M second timespan, we want each process to schedule once. The basic loop is:
      #
      # * sleep a random amount within that N*M timespan
      # * wake up and schedule
      #
      # We want to avoid one edge case: imagine a set of 2 processes, scheduling every 5 seconds,
      # so N*M = 10. Each process decides to randomly sleep 8 seconds, now we've failed to meet
      # that 5 second average. Thankfully each schedule cycle will sleep randomly so the next
      # iteration could see each process sleep for 1 second, undercutting our average.
      #
      # So below 10 processes, we special case and ensure the processes sleep closer to the average.
      # In the example above, each process should schedule every 10 seconds on average. We special
      # case smaller clusters to add 50% so they would sleep somewhere between 5 and 15 seconds.
      # As we run more processes, the scheduling interval average will approach an even spread
      # between 0 and poll interval so we don't need this artifical boost.
      def random_poll_interval
        if process_count < 10
          # For small clusters, calculate a random interval that is ±50% the desired average.
          poll_interval_average * rand + poll_interval_average.to_f / 2
        else
          # With 10+ processes, we should have enough randomness to get decent polling
          # across the entire timespan
          poll_interval_average * rand
        end
      end

      # We do our best to tune the poll interval to the size of the active Sidekiq
      # cluster. If you have 30 processes and poll every 15 seconds, that means one
      # Sidekiq is checking Redis every 0.5 seconds - way too often for most people
      # and really bad if the retry or scheduled sets are large.
      #
      # Instead try to avoid polling more than once every 15 seconds. If you have
      # 30 Sidekiq processes, we'll poll every 30 * 15 or 450 seconds.
      # To keep things statistically random, we'll sleep a random amount between
      # 225 and 675 seconds for each poll or 450 seconds on average. Otherwise restarting
      # all your Sidekiq processes at the same time will lead to them all polling at
      # the same time: the thundering herd problem.
      #
      # We only do this if poll_interval_average is unset (the default).
      def poll_interval_average
        Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval
      end

      # Calculates an average poll interval based on the number of known Sidekiq processes.
      # This minimizes a single point of failure by dispersing check-ins but without taxing
      # Redis if you run many Sidekiq processes.
      def scaled_poll_interval
        process_count * Sidekiq.options[:average_scheduled_poll_interval]
      end

      # Number of live Sidekiq processes, never less than 1.
      def process_count
        count = Sidekiq::ProcessSet.new.size
        count.zero? ? 1 : count
      end

      # Have all processes sleep between 5-15 seconds. 10 seconds to give
      # time for the heartbeat to register (if the poll interval is going to
      # be calculated by the number of workers), and 5 random seconds to
      # ensure they don't all hit Redis at the same time.
      def initial_wait
        total = 0
        total += INITIAL_WAIT unless Sidekiq.options[:poll_interval_average]
        total += (5 * rand)

        @sleeper.pop(total)
      rescue Timeout::Error
        # expected: nothing woke us early, proceed to the poll loop
      end
    end
  end
end
|
@@ -0,0 +1,149 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
# The MIT License
|
4
|
+
#
|
5
|
+
# Copyright (c) 2017, 2018, 2019, 2020 Agis Anastasopoulos
|
6
|
+
#
|
7
|
+
# Permission is hereby granted, free of charge, to any person obtaining a copy of
|
8
|
+
# this software and associated documentation files (the "Software"), to deal in
|
9
|
+
# the Software without restriction, including without limitation the rights to
|
10
|
+
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
11
|
+
# the Software, and to permit persons to whom the Software is furnished to do so,
|
12
|
+
# subject to the following conditions:
|
13
|
+
#
|
14
|
+
# The above copyright notice and this permission notice shall be included in all
|
15
|
+
# copies or substantial portions of the Software.
|
16
|
+
#
|
17
|
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
18
|
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
19
|
+
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
20
|
+
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
21
|
+
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
22
|
+
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
23
|
+
|
24
|
+
# This is a copy of https://github.com/agis/ruby-sdnotify as of commit a7d52ee
|
25
|
+
# The only changes made was "rehoming" it within the Sidekiq module to avoid
|
26
|
+
# namespace collisions and applying standard's code formatting style.
|
27
|
+
|
28
|
+
require "socket"
|
29
|
+
|
30
|
+
# SdNotify is a pure-Ruby implementation of sd_notify(3). It can be used to
|
31
|
+
# notify systemd about state changes. Methods of this package are no-op on
|
32
|
+
# non-systemd systems (eg. Darwin).
|
33
|
+
#
|
34
|
+
# The API maps closely to the original implementation of sd_notify(3),
|
35
|
+
# therefore be sure to check the official man pages prior to using SdNotify.
|
36
|
+
#
|
37
|
+
# @see https://www.freedesktop.org/software/systemd/man/sd_notify.html
|
38
|
+
module Sidekiq
  # Pure-Ruby sd_notify(3): reports service state changes to systemd over
  # the datagram socket named in $NOTIFY_SOCKET. Every method is a no-op
  # when that variable is absent (e.g. non-systemd platforms like Darwin).
  #
  # @see https://www.freedesktop.org/software/systemd/man/sd_notify.html
  module SdNotify
    # Exception raised when there's an error writing to the notification socket
    class NotifyError < RuntimeError; end

    READY = "READY=1"
    RELOADING = "RELOADING=1"
    STOPPING = "STOPPING=1"
    STATUS = "STATUS="
    ERRNO = "ERRNO="
    MAINPID = "MAINPID="
    WATCHDOG = "WATCHDOG=1"
    FDSTORE = "FDSTORE=1"

    class << self
      # Tell systemd startup has finished.
      def ready(unset_env = false)
        notify(READY, unset_env)
      end

      # Tell systemd a reload is beginning.
      def reloading(unset_env = false)
        notify(RELOADING, unset_env)
      end

      # Tell systemd shutdown has begun.
      def stopping(unset_env = false)
        notify(STOPPING, unset_env)
      end

      # @param status [String] a custom status string that describes the current
      #   state of the service
      def status(status, unset_env = false)
        notify("#{STATUS}#{status}", unset_env)
      end

      # @param errno [Integer]
      def errno(errno, unset_env = false)
        notify("#{ERRNO}#{errno}", unset_env)
      end

      # @param pid [Integer]
      def mainpid(pid, unset_env = false)
        notify("#{MAINPID}#{pid}", unset_env)
      end

      # Send a watchdog keep-alive ping.
      def watchdog(unset_env = false)
        notify(WATCHDOG, unset_env)
      end

      # Ask systemd to store the service's file descriptors.
      def fdstore(unset_env = false)
        notify(FDSTORE, unset_env)
      end

      # @return [Boolean] true if the service manager expects watchdog keep-alive
      #   notification messages to be sent from this process: $WATCHDOG_USEC is a
      #   positive integer and $WATCHDOG_PID is unset or equals the current PID.
      #
      # @note Unlike sd_watchdog_enabled(3), this method does not mutate the
      #   environment.
      def watchdog?
        usec = ENV["WATCHDOG_USEC"]
        return false unless usec

        usec = begin
          Integer(usec)
        rescue
          # non-numeric WATCHDOG_USEC means the watchdog is not armed
          return false
        end
        return false if usec <= 0

        pid = ENV["WATCHDOG_PID"]
        !pid || pid == $$.to_s
      end

      # Notify systemd with the provided state, via the notification socket, if
      # any. Generally used indirectly through the other methods of the library.
      #
      # @param state [String]
      # @param unset_env [Boolean] delete $NOTIFY_SOCKET so children don't notify
      #
      # @return [Fixnum, nil] the number of bytes written to the notification
      #   socket or nil if there was no socket to report to (eg. the program
      #   wasn't started by systemd)
      #
      # @raise [NotifyError] if there was an error communicating with the systemd
      #   socket
      def notify(state, unset_env = false)
        socket_path = ENV["NOTIFY_SOCKET"]
        return nil unless socket_path

        ENV.delete("NOTIFY_SOCKET") if unset_env

        Addrinfo.unix(socket_path, :DGRAM).connect do |sock|
          sock.close_on_exec = true
          sock.write(state)
        end
      rescue => e
        # Re-raise under our own error class, preserving the original backtrace.
        raise NotifyError, "#{e.class}: #{e.message}", e.backtrace
      end
    end
  end
end
|