sidekiq 5.2.7 → 8.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Changes.md +845 -8
- data/LICENSE.txt +9 -0
- data/README.md +54 -54
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiq +22 -3
- data/bin/sidekiqload +219 -112
- data/bin/sidekiqmon +11 -0
- data/bin/webload +69 -0
- data/lib/active_job/queue_adapters/sidekiq_adapter.rb +120 -0
- data/lib/generators/sidekiq/job_generator.rb +59 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +757 -373
- data/lib/sidekiq/capsule.rb +132 -0
- data/lib/sidekiq/cli.rb +210 -233
- data/lib/sidekiq/client.rb +145 -103
- data/lib/sidekiq/component.rb +128 -0
- data/lib/sidekiq/config.rb +315 -0
- data/lib/sidekiq/deploy.rb +64 -0
- data/lib/sidekiq/embedded.rb +64 -0
- data/lib/sidekiq/fetch.rb +49 -42
- data/lib/sidekiq/iterable_job.rb +56 -0
- data/lib/sidekiq/job/interrupt_handler.rb +24 -0
- data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
- data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
- data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
- data/lib/sidekiq/job/iterable.rb +306 -0
- data/lib/sidekiq/job.rb +385 -0
- data/lib/sidekiq/job_logger.rb +34 -7
- data/lib/sidekiq/job_retry.rb +164 -109
- data/lib/sidekiq/job_util.rb +113 -0
- data/lib/sidekiq/launcher.rb +208 -107
- data/lib/sidekiq/logger.rb +80 -0
- data/lib/sidekiq/manager.rb +42 -46
- data/lib/sidekiq/metrics/query.rb +184 -0
- data/lib/sidekiq/metrics/shared.rb +109 -0
- data/lib/sidekiq/metrics/tracking.rb +150 -0
- data/lib/sidekiq/middleware/chain.rb +113 -56
- data/lib/sidekiq/middleware/current_attributes.rb +119 -0
- data/lib/sidekiq/middleware/i18n.rb +7 -7
- data/lib/sidekiq/middleware/modules.rb +23 -0
- data/lib/sidekiq/monitor.rb +147 -0
- data/lib/sidekiq/paginator.rb +41 -16
- data/lib/sidekiq/processor.rb +146 -127
- data/lib/sidekiq/profiler.rb +72 -0
- data/lib/sidekiq/rails.rb +46 -43
- data/lib/sidekiq/redis_client_adapter.rb +113 -0
- data/lib/sidekiq/redis_connection.rb +79 -108
- data/lib/sidekiq/ring_buffer.rb +31 -0
- data/lib/sidekiq/scheduled.rb +112 -50
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +26 -0
- data/lib/sidekiq/testing/inline.rb +6 -5
- data/lib/sidekiq/testing.rb +91 -90
- data/lib/sidekiq/transaction_aware_client.rb +51 -0
- data/lib/sidekiq/version.rb +7 -1
- data/lib/sidekiq/web/action.rb +125 -60
- data/lib/sidekiq/web/application.rb +363 -259
- data/lib/sidekiq/web/config.rb +120 -0
- data/lib/sidekiq/web/csrf_protection.rb +183 -0
- data/lib/sidekiq/web/helpers.rb +241 -120
- data/lib/sidekiq/web/router.rb +62 -71
- data/lib/sidekiq/web.rb +69 -161
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +94 -182
- data/sidekiq.gemspec +26 -16
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/javascripts/application.js +150 -61
- data/web/assets/javascripts/base-charts.js +120 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-adapter-date-fns.min.js +7 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +194 -0
- data/web/assets/javascripts/dashboard.js +41 -293
- data/web/assets/javascripts/metrics.js +280 -0
- data/web/assets/stylesheets/style.css +766 -0
- data/web/locales/ar.yml +72 -65
- data/web/locales/cs.yml +63 -62
- data/web/locales/da.yml +61 -53
- data/web/locales/de.yml +66 -53
- data/web/locales/el.yml +44 -24
- data/web/locales/en.yml +94 -66
- data/web/locales/es.yml +92 -54
- data/web/locales/fa.yml +66 -65
- data/web/locales/fr.yml +83 -62
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +66 -64
- data/web/locales/hi.yml +60 -59
- data/web/locales/it.yml +93 -54
- data/web/locales/ja.yml +75 -64
- data/web/locales/ko.yml +53 -52
- data/web/locales/lt.yml +84 -0
- data/web/locales/nb.yml +62 -61
- data/web/locales/nl.yml +53 -52
- data/web/locales/pl.yml +46 -45
- data/web/locales/{pt-br.yml → pt-BR.yml} +84 -56
- data/web/locales/pt.yml +52 -51
- data/web/locales/ru.yml +69 -63
- data/web/locales/sv.yml +54 -53
- data/web/locales/ta.yml +61 -60
- data/web/locales/tr.yml +101 -0
- data/web/locales/uk.yml +86 -61
- data/web/locales/ur.yml +65 -64
- data/web/locales/vi.yml +84 -0
- data/web/locales/zh-CN.yml +106 -0
- data/web/locales/{zh-tw.yml → zh-TW.yml} +43 -9
- data/web/views/_footer.erb +31 -19
- data/web/views/_job_info.erb +94 -75
- data/web/views/_metrics_period_select.erb +15 -0
- data/web/views/_nav.erb +14 -21
- data/web/views/_paging.erb +23 -19
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +23 -23
- data/web/views/busy.erb +139 -87
- data/web/views/dashboard.erb +82 -53
- data/web/views/dead.erb +31 -27
- data/web/views/filtering.erb +6 -0
- data/web/views/layout.erb +15 -29
- data/web/views/metrics.erb +84 -0
- data/web/views/metrics_for_job.erb +58 -0
- data/web/views/morgue.erb +60 -70
- data/web/views/profiles.erb +43 -0
- data/web/views/queue.erb +50 -39
- data/web/views/queues.erb +45 -29
- data/web/views/retries.erb +65 -75
- data/web/views/retry.erb +32 -27
- data/web/views/scheduled.erb +58 -52
- data/web/views/scheduled_job_info.erb +1 -1
- metadata +96 -76
- data/.circleci/config.yml +0 -61
- data/.github/contributing.md +0 -32
- data/.github/issue_template.md +0 -11
- data/.gitignore +0 -15
- data/.travis.yml +0 -11
- data/3.0-Upgrade.md +0 -70
- data/4.0-Upgrade.md +0 -53
- data/5.0-Upgrade.md +0 -56
- data/COMM-LICENSE +0 -97
- data/Ent-Changes.md +0 -238
- data/Gemfile +0 -23
- data/LICENSE +0 -9
- data/Pro-2.0-Upgrade.md +0 -138
- data/Pro-3.0-Upgrade.md +0 -44
- data/Pro-4.0-Upgrade.md +0 -35
- data/Pro-Changes.md +0 -759
- data/Rakefile +0 -9
- data/bin/sidekiqctl +0 -20
- data/code_of_conduct.md +0 -50
- data/lib/generators/sidekiq/worker_generator.rb +0 -49
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/ctl.rb +0 -221
- data/lib/sidekiq/delay.rb +0 -42
- data/lib/sidekiq/exception_handler.rb +0 -29
- data/lib/sidekiq/extensions/action_mailer.rb +0 -57
- data/lib/sidekiq/extensions/active_record.rb +0 -40
- data/lib/sidekiq/extensions/class_methods.rb +0 -40
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
- data/lib/sidekiq/util.rb +0 -66
- data/lib/sidekiq/worker.rb +0 -220
- data/web/assets/stylesheets/application-rtl.css +0 -246
- data/web/assets/stylesheets/application.css +0 -1144
- data/web/assets/stylesheets/bootstrap-rtl.min.css +0 -9
- data/web/assets/stylesheets/bootstrap.css +0 -5
- data/web/locales/zh-cn.yml +0 -68
- data/web/views/_status.erb +0 -4
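
The listing above shows the generators and templates renamed from "worker" to "job" (job_generator.rb, job.rb.erb, and friends) alongside a new worker_compatibility_alias.rb shim. As a purely illustrative sketch (HardJob is a made-up class, not part of this diff), a generated class under the 8.x naming looks like this:

    # Illustrative only: generated jobs include Sidekiq::Job; the old
    # Sidekiq::Worker constant is kept as a compatibility alias, per the
    # worker_compatibility_alias.rb file listed above.
    class HardJob
      include Sidekiq::Job

      def perform(name, count)
        # do the work
      end
    end

    HardJob.perform_async("bob", 5)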
data/lib/sidekiq/processor.rb
CHANGED
@@ -1,9 +1,9 @@
 # frozen_string_literal: true
-require
-require
-require
-require
+
+require "sidekiq/fetch"
+require "sidekiq/job_logger"
+require "sidekiq/job_retry"
+require "sidekiq/profiler"

 module Sidekiq
   ##
@@ -11,45 +11,45 @@ module Sidekiq
   #
   # 1. fetches a job from Redis
   # 2. executes the job
-  #   a. instantiate the
+  #   a. instantiate the job class
   #   b. run the middleware chain
   #   c. call #perform
   #
-  # A Processor can exit due to shutdown
-  #
+  # A Processor can exit due to shutdown or due to
+  # an error during job execution.
   #
   # If an error occurs in the job execution, the
   # Processor calls the Manager to create a new one
   # to replace itself and exits.
   #
   class Processor
-    include Util
+    include Sidekiq::Component

     attr_reader :thread
     attr_reader :job
+    attr_reader :capsule

-    def initialize(
-      @
+    def initialize(capsule, &block)
+      @config = @capsule = capsule
+      @callback = block
       @down = false
       @done = false
       @job = nil
       @thread = nil
-      @
-      @
-      @
-      @retrier = Sidekiq::JobRetry.new
+      @reloader = Sidekiq.default_configuration[:reloader]
+      @job_logger = (capsule.config[:job_logger] || Sidekiq::JobLogger).new(capsule.config)
+      @retrier = Sidekiq::JobRetry.new(capsule)
     end

-    def terminate(wait=false)
+    def terminate(wait = false)
       @done = true
-      return
+      return unless @thread
       @thread.value if wait
     end

-    def kill(wait=false)
+    def kill(wait = false)
       @done = true
-      return
+      return unless @thread
       # unlike the other actors, terminate does not wait
       # for the thread to finish because we don't know how
       # long the job will take to finish. Instead we
@@ -59,40 +59,45 @@ module Sidekiq
       @thread.value if wait
     end

+    def stopping?
+      @done
+    end
+
     def start
-      @thread ||= safe_thread("processor", &method(:run))
+      @thread ||= safe_thread("#{config.name}/processor", &method(:run))
     end

-    private
+    private

     def run
+      # By setting this thread-local, Sidekiq.redis will access +Sidekiq::Capsule#redis_pool+
+      # instead of the global pool in +Sidekiq::Config#redis_pool+.
+      Thread.current[:sidekiq_capsule] = @capsule
+
+      process_one until @done
+      @callback.call(self)
+    rescue Sidekiq::Shutdown
+      @callback.call(self)
+    rescue Exception => ex
+      @callback.call(self, ex)
     end

-    def process_one
+    def process_one(&block)
       @job = fetch
       process(@job) if @job
       @job = nil
     end

     def get_one
-    rescue Sidekiq::Shutdown
-    rescue => ex
-      handle_fetch_exception(ex)
+      uow = capsule.fetcher.retrieve_work
+      if @down
+        logger.info { "Redis is online, #{::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @down} sec downtime" }
+        @down = nil
       end
+      uow
+    rescue Sidekiq::Shutdown
+    rescue => ex
+      handle_fetch_exception(ex)
     end

     def fetch
@@ -106,35 +111,45 @@ module Sidekiq
     end

     def handle_fetch_exception(ex)
+      unless @down
         @down = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-        logger.error("Error fetching job: #{ex}")
         handle_exception(ex)
       end
       sleep(1)
       nil
     end

-    def
+    def profile(job, &block)
+      return yield unless job["profile"]
+      Sidekiq::Profiler.new(config).call(job, &block)
+    end
+
+    def dispatch(job_hash, queue, jobstr)
       # since middleware can mutate the job hash
-      # we clone
+      # we need to clone it to report the original
       # job structure to the Web UI
-        @
+      # or to push back to redis when retrying.
+      # To avoid costly and, most of the time, useless cloning here,
+      # we pass original String of JSON to respected methods
+      # to re-parse it there if we need access to the original, untouched job
+
+      @job_logger.prepare(job_hash) do
+        @retrier.global(jobstr, queue) do
+          @job_logger.call(job_hash, queue) do
+            stats(jobstr, queue) do
+              profile(job_hash) do
+                # Rails 5 requires a Reloader to wrap code execution. In order to
+                # constantize the worker and instantiate an instance, we have to call
+                # the Reloader. It handles code loading, db connection management, etc.
+                # Effectively this block denotes a "unit of work" to Rails.
+                @reloader.call do
+                  klass = Object.const_get(job_hash["class"])
+                  instance = klass.new
+                  instance.jid = job_hash["jid"]
+                  instance._context = self
+                  @retrier.local(instance, jobstr, queue) do
+                    yield instance
+                  end
                 end
               end
             end
@@ -143,53 +158,73 @@ module Sidekiq
       end
     end

+    IGNORE_SHUTDOWN_INTERRUPTS = {Sidekiq::Shutdown => :never}
+    private_constant :IGNORE_SHUTDOWN_INTERRUPTS
+    ALLOW_SHUTDOWN_INTERRUPTS = {Sidekiq::Shutdown => :immediate}
+    private_constant :ALLOW_SHUTDOWN_INTERRUPTS
+
+    def process(uow)
+      jobstr = uow.job
+      queue = uow.queue_name

       # Treat malformed JSON as a special case: job goes straight to the morgue.
       job_hash = nil
       begin
         job_hash = Sidekiq.load_json(jobstr)
       rescue => ex
+        now = Time.now.to_f
+        redis do |conn|
+          conn.multi do |xa|
+            xa.zadd("dead", now.to_s, jobstr)
+            xa.zremrangebyscore("dead", "-inf", now - @capsule.config[:dead_timeout_in_seconds])
+            xa.zremrangebyrank("dead", 0, - @capsule.config[:dead_max_jobs])
+          end
+        end
+        handle_exception(ex, {context: "Invalid JSON for job", jobstr: jobstr})
+        return uow.acknowledge
       end

-      ack =
+      ack = false
+      Thread.handle_interrupt(IGNORE_SHUTDOWN_INTERRUPTS) do
+        Thread.handle_interrupt(ALLOW_SHUTDOWN_INTERRUPTS) do
+          dispatch(job_hash, queue, jobstr) do |instance|
+            config.server_middleware.invoke(instance, job_hash, queue) do
+              execute_job(instance, job_hash["args"])
+            end
           end
+          ack = true
+        rescue Sidekiq::Shutdown
+          # Had to force kill this job because it didn't finish
+          # within the timeout. Don't acknowledge the work since
+          # we didn't properly finish it.
+        rescue Sidekiq::JobRetry::Skip => s
+          # Skip means we handled this error elsewhere. We don't
+          # need to log or report the error.
+          ack = true
+          raise s
+        rescue Sidekiq::JobRetry::Handled => h
+          # this is the common case: job raised error and Sidekiq::JobRetry::Handled
+          # signals that we created a retry successfully. We can acknowledge the job.
+          ack = true
+          e = h.cause || h
+          handle_exception(e, {context: "Job raised exception", job: job_hash})
+          raise e
+        rescue Exception => ex
+          # Unexpected error! This is very bad and indicates an exception that got past
+          # the retry subsystem (e.g. network partition). We won't acknowledge the job
+          # so it can be rescued when using Sidekiq Pro.
+          handle_exception(ex, {context: "Internal exception!", job: job_hash, jobstr: jobstr})
+          raise ex
         end
-    rescue Sidekiq::Shutdown
-      # Had to force kill this job because it didn't finish
-      # within the timeout. Don't acknowledge the work since
-      # we didn't properly finish it.
-      ack = false
-    rescue Sidekiq::JobRetry::Handled => h
-      # this is the common case: job raised error and Sidekiq::JobRetry::Handled
-      # signals that we created a retry successfully. We can acknowlege the job.
-      e = h.cause ? h.cause : h
-      handle_exception(e, { :context => "Job raised exception", :job => job_hash, :jobstr => jobstr })
-      raise e
-    rescue Exception => ex
-      # Unexpected error! This is very bad and indicates an exception that got past
-      # the retry subsystem (e.g. network partition). We won't acknowledge the job
-      # so it can be rescued when using Sidekiq Pro.
-      ack = false
-      handle_exception(ex, { :context => "Internal exception!", :job => job_hash, :jobstr => jobstr })
-      raise e
       ensure
+        if ack
+          uow.acknowledge
+        end
       end
     end

-    def execute_job(
+    def execute_job(instance, cloned_args)
+      instance.perform(*cloned_args)
     end

     # Ruby doesn't provide atomic counters out of the box so we'll
@@ -201,50 +236,53 @@ module Sidekiq
         @lock = Mutex.new
       end

-      def incr(amount=1)
-        @lock.synchronize { @value
+      def incr(amount = 1)
+        @lock.synchronize { @value += amount }
       end

       def reset
-        @lock.synchronize {
+        @lock.synchronize {
+          val = @value
+          @value = 0
+          val
+        }
       end
     end

     # jruby's Hash implementation is not threadsafe, so we wrap it in a mutex here
-    class
+    class SharedWorkState
       def initialize
-        @
+        @work_state = {}
         @lock = Mutex.new
       end

       def set(tid, hash)
-        @lock.synchronize { @
+        @lock.synchronize { @work_state[tid] = hash }
       end

       def delete(tid)
-        @lock.synchronize { @
+        @lock.synchronize { @work_state.delete(tid) }
       end

       def dup
-        @lock.synchronize { @
+        @lock.synchronize { @work_state.dup }
       end

       def size
-        @lock.synchronize { @
+        @lock.synchronize { @work_state.size }
       end

       def clear
-        @lock.synchronize { @
+        @lock.synchronize { @work_state.clear }
       end
     end

     PROCESSED = Counter.new
     FAILURE = Counter.new
+    WORK_STATE = SharedWorkState.new

-    def stats(
-      tid
-      WORKER_STATE.set(tid, {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i })
+    def stats(jobstr, queue)
+      WORK_STATE.set(tid, {queue: queue, payload: jobstr, run_at: Time.now.to_i})

       begin
         yield
@@ -252,28 +290,9 @@ module Sidekiq
         FAILURE.incr
         raise
       ensure
+        WORK_STATE.delete(tid)
         PROCESSED.incr
       end
     end
-
-    # Deep clone the arguments passed to the worker so that if
-    # the job fails, what is pushed back onto Redis hasn't
-    # been mutated by the worker.
-    def cloned(thing)
-      Marshal.load(Marshal.dump(thing))
-    end
-
-    def constantize(str)
-      names = str.split('::')
-      names.shift if names.empty? || names.first.empty?
-
-      names.inject(Object) do |constant, name|
-        # the false flag limits search for name to under the constant namespace
-        # which mimics Rails' behaviour
-        constant.const_defined?(name, false) ? constant.const_get(name, false) : constant.const_missing(name)
-      end
-    end
-
   end
 end
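
The rewritten #process wraps job execution in nested Thread.handle_interrupt calls so that a hard shutdown (Sidekiq::Shutdown is an Interrupt subclass) can only fire while the job itself is running, never while the acknowledgement bookkeeping runs. A minimal standalone sketch of that Ruby pattern follows; it is not Sidekiq code, and ShutdownSignal merely stands in for Sidekiq::Shutdown.

    # Standalone sketch of the nested handle_interrupt pattern used by #process.
    class ShutdownSignal < Interrupt; end

    worker = Thread.new do
      Thread.handle_interrupt(ShutdownSignal => :never) do
        Thread.handle_interrupt(ShutdownSignal => :immediate) do
          begin
            sleep 5 # the "job": interrupts are allowed to fire here
          rescue ShutdownSignal
            puts "job interrupted"
          end
        end
        puts "post-job bookkeeping runs with interrupts deferred"
      end
    end

    sleep 0.1                      # let the worker reach its "job"
    worker.raise(ShutdownSignal)   # simulate a forced shutdown
    worker.join

The outer :never scope guarantees the interrupt cannot land between finishing the job and recording the result, which is the same guarantee #process relies on before acknowledging the unit of work.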
data/lib/sidekiq/profiler.rb
ADDED
@@ -0,0 +1,72 @@
+require "fileutils"
+require "sidekiq/component"
+
+module Sidekiq
+  # Allows the user to profile jobs running in production.
+  # See details in the Profiling wiki page.
+  class Profiler
+    EXPIRY = 86400 # 1 day
+    DEFAULT_OPTIONS = {
+      mode: :wall
+    }
+
+    include Sidekiq::Component
+    def initialize(config)
+      @config = config
+      @vernier_output_dir = ENV.fetch("VERNIER_OUTPUT_DIR") { Dir.tmpdir }
+    end
+
+    def call(job, &block)
+      return yield unless job["profile"]
+
+      token = job["profile"]
+      type = job["class"]
+      jid = job["jid"]
+      started_at = Time.now
+
+      rundata = {
+        started_at: started_at.to_i,
+        token: token,
+        type: type,
+        jid: jid,
+        # .gz extension tells Vernier to compress the data
+        filename: File.join(
+          @vernier_output_dir,
+          "#{token}-#{type}-#{jid}-#{started_at.strftime("%Y%m%d-%H%M%S")}.json.gz"
+        )
+      }
+      profiler_options = profiler_options(job, rundata)
+
+      require "vernier"
+      begin
+        a = Time.now
+        rc = Vernier.profile(**profiler_options, &block)
+        b = Time.now
+
+        # Failed jobs will raise an exception on previous line and skip this
+        # block. Only successful jobs will persist profile data to Redis.
+        key = "#{token}-#{jid}"
+        data = File.read(rundata[:filename])
+        redis do |conn|
+          conn.multi do |m|
+            m.zadd("profiles", Time.now.to_f + EXPIRY, key)
+            m.hset(key, rundata.merge(elapsed: (b - a), data: data, size: data.bytesize))
+            m.expire(key, EXPIRY)
+          end
+        end
+        rc
+      ensure
+        FileUtils.rm_f(rundata[:filename])
+      end
+    end
+
+    private
+
+    def profiler_options(job, rundata)
+      profiler_options = (job["profiler_options"] || {}).transform_keys(&:to_sym)
+      profiler_options[:mode] = profiler_options[:mode].to_sym if profiler_options[:mode]
+
+      DEFAULT_OPTIONS.merge(profiler_options, {out: rundata[:filename]})
+    end
+  end
+end
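
Profiler#call only runs when the job payload carries a "profile" key, which it reuses as the token in the output filename and the Redis key. The enqueue side is not part of this diff; as an assumption, one way to get the key into the payload is Sidekiq's generic .set options, sketched below with a hypothetical ImportJob and token format.

    # Hypothetical example; ImportJob and the token format are made up.
    class ImportJob
      include Sidekiq::Job

      def perform(account_id)
        # heavy work, profiled with Vernier when "profile" is present
      end
    end

    # Assumption: .set merges custom keys such as "profile" into the job hash
    # that Processor#profile and Sidekiq::Profiler then read.
    ImportJob.set("profile" => "import-#{Time.now.to_i}").perform_async(42)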
data/lib/sidekiq/rails.rb
CHANGED
@@ -1,58 +1,61 @@
 # frozen_string_literal: true

 module Sidekiq
-    Sidekiq.server_middleware do |chain|
-      require 'sidekiq/middleware/server/active_record'
-      chain.add Sidekiq::Middleware::Server::ActiveRecord
+  begin
+    gem "railties", ">= 7.0"
+    require "rails"
+    require "sidekiq/job"
+    require_relative "../active_job/queue_adapters/sidekiq_adapter"
+
+    class Rails < ::Rails::Engine
+      class Reloader
+        def initialize(app = ::Rails.application)
+          @app = app
         end
-    end
-  end

-  #
-  # None of this matters on the client-side, only within the Sidekiq process itself.
-  #
-  Sidekiq.configure_server do |_|
-    if ::Rails::VERSION::MAJOR >= 5
-      Sidekiq.options[:reloader] = Sidekiq::Rails::Reloader.new
+        def call
+          params = (::Rails::VERSION::STRING >= "7.1") ? {source: "job.sidekiq"} : {}
+          @app.reloader.wrap(**params) do
+            yield
+          end
         end
-    end
-  end

+        def inspect
+          "#<Sidekiq::Rails::Reloader @app=#{@app.class.name}>"
+        end
+
+        def to_hash
+          {app: @app.class.name}
+        end
       end

+      initializer "sidekiq.backtrace_cleaner" do
+        Sidekiq.configure_server do |config|
+          config[:backtrace_cleaner] = ->(backtrace) { ::Rails.backtrace_cleaner.clean(backtrace) }
         end
       end

+      # This hook happens after all initializers are run, just before returning
+      # from config/environment.rb back to sidekiq/cli.rb.
+      #
+      # None of this matters on the client-side, only within the Sidekiq process itself.
+      config.after_initialize do
+        Sidekiq.configure_server do |config|
+          config[:reloader] = Sidekiq::Rails::Reloader.new
+
+          # This is the integration code necessary so that if a job uses `Rails.logger.info "Hello"`,
+          # it will appear in the Sidekiq console with all of the job context.
+          unless ::Rails.logger == config.logger || ::ActiveSupport::Logger.logger_outputs_to?(::Rails.logger, $stdout)
+            if ::Rails.logger.respond_to?(:broadcast_to)
+              ::Rails.logger.broadcast_to(config.logger)
+            else
+              ::Rails.logger.extend(::ActiveSupport::Logger.broadcast(config.logger))
+            end
+          end
+        end
       end
     end
-
-if defined?(::Rails) && ::Rails::VERSION::MAJOR < 4
-  $stderr.puts("**************************************************")
-  $stderr.puts("⛔️ WARNING: Sidekiq server is no longer supported by Rails 3.2 - please ensure your server/workers are updated")
-  $stderr.puts("**************************************************")
+  rescue Gem::LoadError
+    # Rails not available or version requirement not met
+  end
 end