sidekiq 0.10.0 → 7.1.6
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/Changes.md +2047 -0
- data/LICENSE.txt +9 -0
- data/README.md +73 -27
- data/bin/sidekiq +25 -9
- data/bin/sidekiqload +247 -0
- data/bin/sidekiqmon +11 -0
- data/lib/generators/sidekiq/job_generator.rb +57 -0
- data/lib/generators/sidekiq/templates/job.rb.erb +9 -0
- data/lib/generators/sidekiq/templates/job_spec.rb.erb +6 -0
- data/lib/generators/sidekiq/templates/job_test.rb.erb +8 -0
- data/lib/sidekiq/api.rb +1145 -0
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +348 -109
- data/lib/sidekiq/client.rb +241 -41
- data/lib/sidekiq/component.rb +68 -0
- data/lib/sidekiq/config.rb +287 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +88 -0
- data/lib/sidekiq/job.rb +374 -0
- data/lib/sidekiq/job_logger.rb +51 -0
- data/lib/sidekiq/job_retry.rb +300 -0
- data/lib/sidekiq/job_util.rb +107 -0
- data/lib/sidekiq/launcher.rb +271 -0
- data/lib/sidekiq/logger.rb +131 -0
- data/lib/sidekiq/manager.rb +96 -103
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +136 -0
- data/lib/sidekiq/middleware/chain.rb +149 -38
- data/lib/sidekiq/middleware/current_attributes.rb +95 -0
- data/lib/sidekiq/middleware/i18n.rb +42 -0
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +146 -0
- data/lib/sidekiq/paginator.rb +55 -0
- data/lib/sidekiq/processor.rb +246 -61
- data/lib/sidekiq/rails.rb +60 -13
- data/lib/sidekiq/redis_client_adapter.rb +95 -0
- data/lib/sidekiq/redis_connection.rb +68 -15
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +236 -0
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing/inline.rb +30 -0
- data/lib/sidekiq/testing.rb +304 -10
- data/lib/sidekiq/transaction_aware_client.rb +44 -0
- data/lib/sidekiq/version.rb +4 -1
- data/lib/sidekiq/web/action.rb +93 -0
- data/lib/sidekiq/web/application.rb +447 -0
- data/lib/sidekiq/web/csrf_protection.rb +180 -0
- data/lib/sidekiq/web/helpers.rb +370 -0
- data/lib/sidekiq/web/router.rb +104 -0
- data/lib/sidekiq/web.rb +143 -74
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +120 -73
- data/sidekiq.gemspec +26 -23
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/images/favicon.ico +0 -0
- data/web/assets/images/logo.png +0 -0
- data/web/assets/images/status.png +0 -0
- data/web/assets/javascripts/application.js +162 -3
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +168 -0
- data/web/assets/javascripts/dashboard.js +59 -0
- data/web/assets/javascripts/metrics.js +264 -0
- data/web/assets/stylesheets/application-dark.css +147 -0
- data/web/assets/stylesheets/application-rtl.css +153 -0
- data/web/assets/stylesheets/application.css +720 -7
- data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
- data/web/assets/stylesheets/bootstrap.css +5 -0
- data/web/locales/ar.yml +87 -0
- data/web/locales/cs.yml +78 -0
- data/web/locales/da.yml +75 -0
- data/web/locales/de.yml +81 -0
- data/web/locales/el.yml +87 -0
- data/web/locales/en.yml +101 -0
- data/web/locales/es.yml +86 -0
- data/web/locales/fa.yml +80 -0
- data/web/locales/fr.yml +99 -0
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +80 -0
- data/web/locales/hi.yml +75 -0
- data/web/locales/it.yml +69 -0
- data/web/locales/ja.yml +91 -0
- data/web/locales/ko.yml +68 -0
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +77 -0
- data/web/locales/nl.yml +68 -0
- data/web/locales/pl.yml +59 -0
- data/web/locales/pt-br.yml +96 -0
- data/web/locales/pt.yml +67 -0
- data/web/locales/ru.yml +83 -0
- data/web/locales/sv.yml +68 -0
- data/web/locales/ta.yml +75 -0
- data/web/locales/uk.yml +77 -0
- data/web/locales/ur.yml +80 -0
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +95 -0
- data/web/locales/zh-tw.yml +102 -0
- data/web/views/_footer.erb +23 -0
- data/web/views/_job_info.erb +105 -0
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +52 -0
- data/web/views/_paging.erb +25 -0
- data/web/views/_poll_link.erb +4 -0
- data/web/views/_status.erb +4 -0
- data/web/views/_summary.erb +40 -0
- data/web/views/busy.erb +148 -0
- data/web/views/dashboard.erb +115 -0
- data/web/views/dead.erb +34 -0
- data/web/views/filtering.erb +7 -0
- data/web/views/layout.erb +42 -0
- data/web/views/metrics.erb +82 -0
- data/web/views/metrics_for_job.erb +68 -0
- data/web/views/morgue.erb +74 -0
- data/web/views/queue.erb +55 -0
- data/web/views/queues.erb +40 -0
- data/web/views/retries.erb +79 -0
- data/web/views/retry.erb +34 -0
- data/web/views/scheduled.erb +56 -0
- data/web/views/scheduled_job_info.erb +8 -0
- metadata +159 -237
- data/.gitignore +0 -6
- data/.rvmrc +0 -4
- data/COMM-LICENSE +0 -75
- data/Gemfile +0 -10
- data/LICENSE +0 -22
- data/Rakefile +0 -9
- data/TODO.md +0 -1
- data/bin/client +0 -7
- data/bin/sidekiqctl +0 -43
- data/config.ru +0 -8
- data/examples/chef/cookbooks/sidekiq/README.rdoc +0 -11
- data/examples/chef/cookbooks/sidekiq/recipes/default.rb +0 -55
- data/examples/chef/cookbooks/sidekiq/templates/default/monitrc.conf.erb +0 -8
- data/examples/chef/cookbooks/sidekiq/templates/default/sidekiq.erb +0 -219
- data/examples/chef/cookbooks/sidekiq/templates/default/sidekiq.yml.erb +0 -22
- data/examples/config.yml +0 -9
- data/examples/monitrc.conf +0 -6
- data/examples/por.rb +0 -27
- data/examples/scheduling.rb +0 -37
- data/examples/sinkiq.rb +0 -57
- data/examples/web-ui.png +0 -0
- data/lib/sidekiq/capistrano.rb +0 -32
- data/lib/sidekiq/extensions/action_mailer.rb +0 -26
- data/lib/sidekiq/extensions/active_record.rb +0 -27
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -21
- data/lib/sidekiq/middleware/client/unique_jobs.rb +0 -32
- data/lib/sidekiq/middleware/server/active_record.rb +0 -13
- data/lib/sidekiq/middleware/server/exception_handler.rb +0 -38
- data/lib/sidekiq/middleware/server/failure_jobs.rb +0 -24
- data/lib/sidekiq/middleware/server/logging.rb +0 -27
- data/lib/sidekiq/middleware/server/retry_jobs.rb +0 -59
- data/lib/sidekiq/middleware/server/unique_jobs.rb +0 -15
- data/lib/sidekiq/retry.rb +0 -57
- data/lib/sidekiq/util.rb +0 -61
- data/lib/sidekiq/worker.rb +0 -37
- data/myapp/.gitignore +0 -15
- data/myapp/Capfile +0 -5
- data/myapp/Gemfile +0 -19
- data/myapp/Gemfile.lock +0 -143
- data/myapp/Rakefile +0 -7
- data/myapp/app/controllers/application_controller.rb +0 -3
- data/myapp/app/controllers/work_controller.rb +0 -38
- data/myapp/app/helpers/application_helper.rb +0 -2
- data/myapp/app/mailers/.gitkeep +0 -0
- data/myapp/app/mailers/user_mailer.rb +0 -9
- data/myapp/app/models/.gitkeep +0 -0
- data/myapp/app/models/post.rb +0 -5
- data/myapp/app/views/layouts/application.html.erb +0 -14
- data/myapp/app/views/user_mailer/greetings.html.erb +0 -3
- data/myapp/app/views/work/index.html.erb +0 -1
- data/myapp/app/workers/hard_worker.rb +0 -9
- data/myapp/config/application.rb +0 -59
- data/myapp/config/boot.rb +0 -6
- data/myapp/config/database.yml +0 -25
- data/myapp/config/deploy.rb +0 -15
- data/myapp/config/environment.rb +0 -5
- data/myapp/config/environments/development.rb +0 -38
- data/myapp/config/environments/production.rb +0 -67
- data/myapp/config/environments/test.rb +0 -37
- data/myapp/config/initializers/backtrace_silencers.rb +0 -7
- data/myapp/config/initializers/inflections.rb +0 -15
- data/myapp/config/initializers/mime_types.rb +0 -5
- data/myapp/config/initializers/secret_token.rb +0 -7
- data/myapp/config/initializers/session_store.rb +0 -8
- data/myapp/config/initializers/sidekiq.rb +0 -6
- data/myapp/config/initializers/wrap_parameters.rb +0 -14
- data/myapp/config/locales/en.yml +0 -5
- data/myapp/config/routes.rb +0 -10
- data/myapp/config.ru +0 -4
- data/myapp/db/migrate/20120123214055_create_posts.rb +0 -10
- data/myapp/db/seeds.rb +0 -7
- data/myapp/lib/assets/.gitkeep +0 -0
- data/myapp/lib/tasks/.gitkeep +0 -0
- data/myapp/log/.gitkeep +0 -0
- data/myapp/script/rails +0 -6
- data/test/config.yml +0 -9
- data/test/fake_env.rb +0 -0
- data/test/helper.rb +0 -15
- data/test/test_cli.rb +0 -168
- data/test/test_client.rb +0 -105
- data/test/test_extensions.rb +0 -68
- data/test/test_manager.rb +0 -43
- data/test/test_middleware.rb +0 -92
- data/test/test_processor.rb +0 -32
- data/test/test_retry.rb +0 -83
- data/test/test_stats.rb +0 -78
- data/test/test_testing.rb +0 -65
- data/test/test_web.rb +0 -61
- data/web/assets/images/bootstrap/glyphicons-halflings-white.png +0 -0
- data/web/assets/images/bootstrap/glyphicons-halflings.png +0 -0
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-alert.js +0 -91
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-button.js +0 -98
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-carousel.js +0 -154
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-collapse.js +0 -136
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-dropdown.js +0 -92
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-modal.js +0 -210
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-popover.js +0 -95
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-scrollspy.js +0 -125
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-tab.js +0 -130
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-tooltip.js +0 -270
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-transition.js +0 -51
- data/web/assets/javascripts/vendor/bootstrap/bootstrap-typeahead.js +0 -271
- data/web/assets/javascripts/vendor/bootstrap.js +0 -12
- data/web/assets/javascripts/vendor/jquery.js +0 -9266
- data/web/assets/stylesheets/vendor/bootstrap-responsive.css +0 -567
- data/web/assets/stylesheets/vendor/bootstrap.css +0 -3365
- data/web/views/index.slim +0 -62
- data/web/views/layout.slim +0 -24
- data/web/views/queue.slim +0 -11
@@ -0,0 +1,29 @@
|
|
1
|
+
require "forwardable"

module Sidekiq
  # A fixed-capacity circular buffer. Once the ring is full, each append
  # overwrites the oldest slot. Read access (+[]+, +each+, +size+) is
  # delegated to the underlying Array, and Enumerable is mixed in so the
  # usual collection methods (sum, min, to_a, ...) work directly.
  class RingBuffer
    include Enumerable
    extend Forwardable
    def_delegators :@slots, :[], :each, :size

    # size:: number of slots in the ring
    # default:: value pre-filled into every slot
    def initialize(size, default = 0)
      @size = size
      @slots = Array.new(size, default)
      @cursor = 0
    end

    # Append +element+, overwriting the oldest slot once the ring has
    # wrapped. Returns the element.
    def <<(element)
      slot = @cursor % @size
      @cursor += 1
      @slots[slot] = element
    end

    # The raw backing Array, in slot order (not insertion order).
    def buffer
      @slots
    end

    # Refill every slot with +default+. The write position is untouched.
    def reset(default = 0)
      @slots.fill(default)
    end
  end
end
|
@@ -0,0 +1,236 @@
|
|
1
|
+
# frozen_string_literal: true

require "sidekiq"
require "sidekiq/component"

module Sidekiq
  module Scheduled
    # Names of the two Redis sorted sets which hold time-deferred jobs.
    SETS = %w[retry schedule]

    # Enq atomically moves jobs whose scheduled time has arrived from the
    # retry/schedule sorted sets onto their destination queues.
    class Enq
      include Sidekiq::Component

      # Pop the single lowest-scored member with score <= now.
      # Doing the zrange+zrem server-side in one script keeps the
      # pop atomic so two processes cannot grab the same job.
      LUA_ZPOPBYSCORE = <<~LUA
        local key, now = KEYS[1], ARGV[1]
        local jobs = redis.call("zrange", key, "-inf", now, "byscore", "limit", 0, 1)
        if jobs[1] then
          redis.call("zrem", key, jobs[1])
          return jobs[1]
        end
      LUA

      # container - the Sidekiq config object, used for Redis access and
      # to build the Client which pushes jobs onto their queues.
      def initialize(container)
        @config = container
        @client = Sidekiq::Client.new(config: container)
        @done = false
        # SHA of the loaded Lua script; lazily populated on first use.
        @lua_zpopbyscore_sha = nil
      end

      # Drain each sorted set of every job due as of now, pushing each
      # job onto its queue. Stops early once #terminate has been called.
      def enqueue_jobs(sorted_sets = SETS)
        # A job's "score" in Redis is the time at which it should be processed.
        # Just check Redis for the set of jobs with a timestamp before now.
        redis do |conn|
          sorted_sets.each do |sorted_set|
            # Get next item in the queue with score (time to execute) <= now.
            # We need to go through the list one at a time to reduce the risk of something
            # going wrong between the time jobs are popped from the scheduled queue and when
            # they are pushed onto a work queue and losing the jobs.
            while !@done && (job = zpopbyscore(conn, keys: [sorted_set], argv: [Time.now.to_f.to_s]))
              @client.push(Sidekiq.load_json(job))
              logger.debug { "enqueued #{sorted_set}: #{job}" }
            end
          end
        end
      end

      # Signal #enqueue_jobs to stop popping after the current job.
      def terminate
        @done = true
      end

      private

      # Run LUA_ZPOPBYSCORE via EVALSHA, loading the script on first use
      # and transparently re-loading it when Redis no longer knows the
      # SHA (NOSCRIPT, e.g. after a Redis restart or SCRIPT FLUSH).
      def zpopbyscore(conn, keys: nil, argv: nil)
        if @lua_zpopbyscore_sha.nil?
          @lua_zpopbyscore_sha = conn.script(:load, LUA_ZPOPBYSCORE)
        end

        conn.call("EVALSHA", @lua_zpopbyscore_sha, keys.size, *keys, *argv)
      rescue RedisClient::CommandError => e
        raise unless e.message.start_with?("NOSCRIPT")

        @lua_zpopbyscore_sha = nil
        retry
      end
    end

    ##
    # The Poller checks Redis every N seconds for jobs in the retry or scheduled
    # set have passed their timestamp and should be enqueued. If so, it
    # just pops the job back onto its original queue so the
    # workers can pick it up like any other job.
    class Poller
      include Sidekiq::Component

      # Base seconds slept before the very first poll, giving the process
      # heartbeat time to register; see #initial_wait.
      INITIAL_WAIT = 10

      def initialize(config)
        @config = config
        # The enqueuer implementation is pluggable via the :scheduled_enq option.
        @enq = (config[:scheduled_enq] || Sidekiq::Scheduled::Enq).new(config)
        # TimedStack doubles as an interruptible sleep: pushing an item wakes the poller.
        @sleeper = ConnectionPool::TimedStack.new
        @done = false
        @thread = nil
        @count_calls = 0
      end

      # Shut down this instance, will pause until the thread is dead.
      def terminate
        @done = true
        @enq.terminate

        # Wake the poller immediately rather than waiting out its sleep.
        @sleeper << 0
        @thread&.value
      end

      # Start the background scheduler thread. Idempotent.
      def start
        @thread ||= safe_thread("scheduler") {
          initial_wait

          until @done
            enqueue
            wait
          end
          logger.info("Scheduler exiting...")
        }
      end

      # One poll cycle: move every due job onto its queue. Errors are
      # logged and swallowed so the polling loop keeps running.
      def enqueue
        @enq.enqueue_jobs
      rescue => ex
        # Most likely a problem with redis networking.
        # Punt and try again at the next interval
        logger.error ex.message
        handle_exception(ex)
      end

      private

      # Sleep for the randomized poll interval; any push onto @sleeper
      # (e.g. from #terminate) wakes us early.
      def wait
        @sleeper.pop(random_poll_interval)
      rescue Timeout::Error
        # expected
      rescue => ex
        # if poll_interval_average hasn't been calculated yet, we can
        # raise an error trying to reach Redis.
        logger.error ex.message
        handle_exception(ex)
        sleep 5
      end

      def random_poll_interval
        # We want one Sidekiq process to schedule jobs every N seconds. We have M processes
        # and **don't** want to coordinate.
        #
        # So in N*M second timespan, we want each process to schedule once. The basic loop is:
        #
        # * sleep a random amount within that N*M timespan
        # * wake up and schedule
        #
        # We want to avoid one edge case: imagine a set of 2 processes, scheduling every 5 seconds,
        # so N*M = 10. Each process decides to randomly sleep 8 seconds, now we've failed to meet
        # that 5 second average. Thankfully each schedule cycle will sleep randomly so the next
        # iteration could see each process sleep for 1 second, undercutting our average.
        #
        # So below 10 processes, we special case and ensure the processes sleep closer to the average.
        # In the example above, each process should schedule every 10 seconds on average. We special
        # case smaller clusters to add 50% so they would sleep somewhere between 5 and 15 seconds.
        # As we run more processes, the scheduling interval average will approach an even spread
        # between 0 and poll interval so we don't need this artificial boost.
        #
        count = process_count
        interval = poll_interval_average(count)

        if count < 10
          # For small clusters, calculate a random interval that is ±50% the desired average.
          interval * rand + interval.to_f / 2
        else
          # With 10+ processes, we should have enough randomness to get decent polling
          # across the entire timespan
          interval * rand
        end
      end

      # We do our best to tune the poll interval to the size of the active Sidekiq
      # cluster. If you have 30 processes and poll every 15 seconds, that means one
      # Sidekiq is checking Redis every 0.5 seconds - way too often for most people
      # and really bad if the retry or scheduled sets are large.
      #
      # Instead try to avoid polling more than once every 15 seconds. If you have
      # 30 Sidekiq processes, we'll poll every 30 * 15 or 450 seconds.
      # To keep things statistically random, we'll sleep a random amount between
      # 225 and 675 seconds for each poll or 450 seconds on average. Otherwise restarting
      # all your Sidekiq processes at the same time will lead to them all polling at
      # the same time: the thundering herd problem.
      #
      # We only do this if poll_interval_average is unset (the default).
      def poll_interval_average(count)
        @config[:poll_interval_average] || scaled_poll_interval(count)
      end

      # Calculates an average poll interval based on the number of known Sidekiq processes.
      # This minimizes a single point of failure by dispersing check-ins but without taxing
      # Redis if you run many Sidekiq processes.
      def scaled_poll_interval(process_count)
        process_count * @config[:average_scheduled_poll_interval]
      end

      # Number of live Sidekiq processes per the `processes` set in Redis,
      # never less than 1 so interval math stays sane.
      def process_count
        pcount = Sidekiq.redis { |conn| conn.scard("processes") }
        pcount = 1 if pcount == 0
        pcount
      end

      # A copy of Sidekiq::ProcessSet#cleanup because server
      # should never depend on sidekiq/api.
      #
      # Prunes `processes` entries whose heartbeat hash has expired and
      # returns the number of entries removed.
      def cleanup
        # dont run cleanup more than once per minute
        return 0 unless redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) }

        count = 0
        redis do |conn|
          procs = conn.sscan("processes").to_a
          heartbeats = conn.pipelined { |pipeline|
            procs.each do |key|
              pipeline.hget(key, "info")
            end
          }

          # the hash named key has an expiry of 60 seconds.
          # if it's not found, that means the process has not reported
          # in to Redis and probably died.
          to_prune = procs.select.with_index { |proc, i|
            heartbeats[i].nil?
          }
          count = conn.srem("processes", to_prune) unless to_prune.empty?
        end
        count
      end

      def initial_wait
        # Have all processes sleep between 5-15 seconds. 10 seconds to give time for
        # the heartbeat to register (if the poll interval is going to be calculated by the number
        # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time.
        total = 0
        total += INITIAL_WAIT unless @config[:poll_interval_average]
        total += (5 * rand)

        @sleeper.pop(total)
      rescue Timeout::Error
      ensure
        # periodically clean out the `processes` set in Redis which can collect
        # references to dead processes over time. The process count affects how
        # often we scan for scheduled jobs.
        cleanup
      end
    end
  end
end
|
@@ -0,0 +1,149 @@
|
|
1
|
+
# frozen_string_literal: true

# The MIT License
#
# Copyright (c) 2017, 2018, 2019, 2020 Agis Anastasopoulos
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

# This is a copy of https://github.com/agis/ruby-sdnotify as of commit a7d52ee
# The only changes made was "rehoming" it within the Sidekiq module to avoid
# namespace collisions and applying standard's code formatting style.

require "socket"

# SdNotify is a pure-Ruby implementation of sd_notify(3). It can be used to
# notify systemd about state changes. Methods of this package are no-op on
# non-systemd systems (eg. Darwin).
#
# The API maps closely to the original implementation of sd_notify(3),
# therefore be sure to check the official man pages prior to using SdNotify.
#
# @see https://www.freedesktop.org/software/systemd/man/sd_notify.html
module Sidekiq
  module SdNotify
    # Exception raised when there's an error writing to the notification socket
    class NotifyError < RuntimeError; end

    # Well-known state strings from the sd_notify(3) protocol.
    READY = "READY=1"
    RELOADING = "RELOADING=1"
    STOPPING = "STOPPING=1"
    STATUS = "STATUS="
    ERRNO = "ERRNO="
    MAINPID = "MAINPID="
    WATCHDOG = "WATCHDOG=1"
    FDSTORE = "FDSTORE=1"

    # Tell the service manager that startup is finished.
    def self.ready(unset_env = false)
      notify(READY, unset_env)
    end

    # Tell the service manager that the service is reloading its configuration.
    def self.reloading(unset_env = false)
      notify(RELOADING, unset_env)
    end

    # Tell the service manager that the service is beginning its shutdown.
    def self.stopping(unset_env = false)
      notify(STOPPING, unset_env)
    end

    # @param status [String] a custom status string that describes the current
    # state of the service
    def self.status(status, unset_env = false)
      notify("#{STATUS}#{status}", unset_env)
    end

    # @param errno [Integer]
    def self.errno(errno, unset_env = false)
      notify("#{ERRNO}#{errno}", unset_env)
    end

    # @param pid [Integer]
    def self.mainpid(pid, unset_env = false)
      notify("#{MAINPID}#{pid}", unset_env)
    end

    # Send a watchdog keep-alive ping to the service manager.
    def self.watchdog(unset_env = false)
      notify(WATCHDOG, unset_env)
    end

    # Ask the service manager to store the file descriptors sent along.
    def self.fdstore(unset_env = false)
      notify(FDSTORE, unset_env)
    end

    # @return [Boolean] true if the service manager expects watchdog keep-alive
    # notification messages to be sent from this process.
    #
    # If the $WATCHDOG_USEC environment variable is set,
    # and the $WATCHDOG_PID variable is unset or set to the PID of the current
    # process
    #
    # @note Unlike sd_watchdog_enabled(3), this method does not mutate the
    # environment.
    def self.watchdog?
      wd_usec = ENV["WATCHDOG_USEC"]
      wd_pid = ENV["WATCHDOG_PID"]

      return false unless wd_usec

      begin
        wd_usec = Integer(wd_usec)
      rescue
        # Non-numeric WATCHDOG_USEC means the watchdog is not usable.
        return false
      end

      return false if wd_usec <= 0
      # $$ is the current process id; a matching (or absent) WATCHDOG_PID
      # means the watchdog is meant for this process.
      return true if !wd_pid || wd_pid == $$.to_s

      false
    end

    # Notify systemd with the provided state, via the notification socket, if
    # any.
    #
    # Generally this method will be used indirectly through the other methods
    # of the library.
    #
    # @param state [String]
    # @param unset_env [Boolean]
    #
    # @return [Fixnum, nil] the number of bytes written to the notification
    # socket or nil if there was no socket to report to (eg. the program wasn't
    # started by systemd)
    #
    # @raise [NotifyError] if there was an error communicating with the systemd
    # socket
    #
    # @see https://www.freedesktop.org/software/systemd/man/sd_notify.html
    def self.notify(state, unset_env = false)
      sock = ENV["NOTIFY_SOCKET"]

      return nil unless sock

      # Unsetting prevents children (and later calls) from notifying again.
      ENV.delete("NOTIFY_SOCKET") if unset_env

      begin
        # The notification socket is a datagram UNIX socket.
        Addrinfo.unix(sock, :DGRAM).connect do |s|
          s.close_on_exec = true
          s.write(state)
        end
      rescue => e
        raise NotifyError, "#{e.class}: #{e.message}", e.backtrace
      end
    end
  end
end
|
@@ -0,0 +1,24 @@
|
|
1
|
+
#
# Sidekiq's systemd integration allows Sidekiq to inform systemd:
# 1. when it has successfully started
# 2. when it is starting shutdown
# 3. periodically for a liveness check with a watchdog thread
#
module Sidekiq
  # Spawn a background thread which pings the systemd watchdog at half the
  # interval configured via $WATCHDOG_USEC, as recommended by
  # sd_watchdog_enabled(3).
  #
  # Assumes $WATCHDOG_USEC is set to an integer; callers should verify the
  # watchdog is enabled (e.g. Sidekiq::SdNotify.watchdog?) before calling.
  #
  # Returns the pinger Thread, or logs an error and returns early when the
  # configured interval is under one second.
  def self.start_watchdog
    usec = Integer(ENV["WATCHDOG_USEC"])
    # BUGFIX: the original built this message with `"..." + usec`, which
    # raises TypeError (String + Integer) instead of logging. Interpolate.
    return Sidekiq.logger.error("systemd Watchdog too fast: #{usec}") if usec < 1_000_000

    sec_f = usec / 1_000_000.0
    # "It is recommended that a daemon sends a keep-alive notification message
    # to the service manager every half of the time returned here."
    ping_f = sec_f / 2
    Sidekiq.logger.info "Pinging systemd watchdog every #{ping_f.round(1)} sec"
    Thread.new do
      loop do
        sleep ping_f
        Sidekiq::SdNotify.watchdog
      end
    end
  end
end
|
@@ -0,0 +1,30 @@
|
|
1
|
+
# frozen_string_literal: true

require "sidekiq/testing"

##
# The Sidekiq inline infrastructure overrides perform_async so that it
# actually calls perform instead. This allows jobs to be run inline in a
# testing environment.
#
# This is similar to `Resque.inline = true` functionality.
#
# Example:
#
#   require 'sidekiq/testing/inline'
#
#   $external_variable = 0
#
#   class ExternalJob
#     include Sidekiq::Job
#
#     def perform
#       $external_variable = 1
#     end
#   end
#
#   assert_equal 0, $external_variable
#   ExternalJob.perform_async
#   assert_equal 1, $external_variable
#
# Requiring this file flips the global testing mode to inline; it affects
# every job class process-wide.
Sidekiq::Testing.inline!
|