sidekiq 5.2.4 → 6.0.1
Potentially problematic release: this version of sidekiq might be problematic.
- checksums.yaml +4 -4
- data/.circleci/config.yml +82 -0
- data/.gitignore +0 -2
- data/.standard.yml +20 -0
- data/6.0-Upgrade.md +72 -0
- data/COMM-LICENSE +11 -9
- data/Changes.md +129 -0
- data/Ent-2.0-Upgrade.md +37 -0
- data/Ent-Changes.md +32 -1
- data/Gemfile +12 -17
- data/Gemfile.lock +196 -0
- data/Pro-5.0-Upgrade.md +25 -0
- data/Pro-Changes.md +26 -2
- data/README.md +18 -31
- data/Rakefile +5 -4
- data/bin/sidekiqload +33 -25
- data/bin/sidekiqmon +8 -0
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
- data/lib/generators/sidekiq/worker_generator.rb +20 -12
- data/lib/sidekiq.rb +62 -43
- data/lib/sidekiq/api.rb +196 -175
- data/lib/sidekiq/cli.rb +118 -178
- data/lib/sidekiq/client.rb +51 -46
- data/lib/sidekiq/delay.rb +5 -6
- data/lib/sidekiq/exception_handler.rb +10 -12
- data/lib/sidekiq/extensions/action_mailer.rb +10 -20
- data/lib/sidekiq/extensions/active_record.rb +9 -7
- data/lib/sidekiq/extensions/class_methods.rb +9 -7
- data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
- data/lib/sidekiq/fetch.rb +11 -12
- data/lib/sidekiq/job_logger.rb +45 -7
- data/lib/sidekiq/job_retry.rb +67 -58
- data/lib/sidekiq/launcher.rb +57 -51
- data/lib/sidekiq/logger.rb +165 -0
- data/lib/sidekiq/manager.rb +7 -9
- data/lib/sidekiq/middleware/chain.rb +14 -4
- data/lib/sidekiq/middleware/i18n.rb +5 -7
- data/lib/sidekiq/monitor.rb +148 -0
- data/lib/sidekiq/paginator.rb +18 -14
- data/lib/sidekiq/processor.rb +96 -66
- data/lib/sidekiq/rails.rb +23 -29
- data/lib/sidekiq/redis_connection.rb +31 -37
- data/lib/sidekiq/scheduled.rb +28 -29
- data/lib/sidekiq/testing.rb +34 -23
- data/lib/sidekiq/testing/inline.rb +2 -1
- data/lib/sidekiq/util.rb +17 -14
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web.rb +41 -49
- data/lib/sidekiq/web/action.rb +14 -10
- data/lib/sidekiq/web/application.rb +63 -64
- data/lib/sidekiq/web/helpers.rb +92 -68
- data/lib/sidekiq/web/router.rb +17 -14
- data/lib/sidekiq/worker.rb +129 -97
- data/sidekiq.gemspec +16 -16
- data/web/assets/javascripts/dashboard.js +4 -23
- data/web/assets/stylesheets/application-dark.css +125 -0
- data/web/assets/stylesheets/application.css +9 -0
- data/web/assets/stylesheets/bootstrap.css +1 -1
- data/web/locales/ja.yml +2 -1
- data/web/views/_job_info.erb +2 -1
- data/web/views/busy.erb +4 -1
- data/web/views/dead.erb +2 -2
- data/web/views/layout.erb +1 -0
- data/web/views/morgue.erb +4 -1
- data/web/views/queue.erb +10 -1
- data/web/views/queues.erb +1 -1
- data/web/views/retries.erb +4 -1
- data/web/views/retry.erb +2 -2
- data/web/views/scheduled.erb +4 -1
- metadata +20 -30
- data/.travis.yml +0 -17
- data/Appraisals +0 -9
- data/bin/sidekiqctl +0 -237
- data/gemfiles/rails_4.gemfile +0 -31
- data/gemfiles/rails_5.gemfile +0 -31
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
data/lib/sidekiq/job_logger.rb
CHANGED

The full file as of 6.0.1:

```ruby
# frozen_string_literal: true

module Sidekiq
  class JobLogger
    def initialize(logger = Sidekiq.logger)
      @logger = logger
    end

    def call(item, queue)
      start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      @logger.info("start")

      yield

      with_elapsed_time_context(start) do
        @logger.info("done")
      end
    rescue Exception
      with_elapsed_time_context(start) do
        @logger.info("fail")
      end

      raise
    end

    def prepare(job_hash, &block)
      level = job_hash["log_level"]
      if level
        @logger.log_at(level) do
          Sidekiq::Context.with(job_hash_context(job_hash), &block)
        end
      else
        Sidekiq::Context.with(job_hash_context(job_hash), &block)
      end
    end

    def job_hash_context(job_hash)
      # If we're using a wrapper class, like ActiveJob, use the "wrapped"
      # attribute to expose the underlying thing.
      h = {
        class: job_hash["wrapped"] || job_hash["class"],
        jid: job_hash["jid"],
      }
      h[:bid] = job_hash["bid"] if job_hash["bid"]
      h[:tags] = job_hash["tags"] if job_hash["tags"]
      h
    end

    def with_elapsed_time_context(start, &block)
      Sidekiq::Context.with(elapsed_time_context(start), &block)
    end

    def elapsed_time_context(start)
      {elapsed: elapsed(start).to_s}
    end

    private

    def elapsed(start)
      (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
    end
  end
end
```
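The new `prepare` hook is what lets a single job run at its own log level while every line still carries the job context. A minimal sketch of how it behaves, assuming a job payload that already carries a `"log_level"` key (how that key gets set on the worker is outside this file; the payload values here are made up):

```ruby
require "sidekiq"
require "sidekiq/job_logger"

job_logger = Sidekiq::JobLogger.new(Sidekiq.logger)

# Hypothetical payload; only "class", "jid" and "log_level" matter for this sketch.
job_hash = {"class" => "HardWorker", "jid" => "b4a577edbccf1d805744efa9", "log_level" => "debug"}

job_logger.prepare(job_hash) do
  # Inside the block the logger runs at DEBUG and Sidekiq::Context carries
  # class/jid, so this line is emitted and tagged with the job context.
  Sidekiq.logger.debug("only visible while this job runs")
end
```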
data/lib/sidekiq/job_retry.rb
CHANGED

Changed regions, as they read in 6.0.1 (hunk positions from the diff kept as comments):

```ruby
# @@ -1,6 +1,10 @@
# frozen_string_literal: true

require "sidekiq/scheduled"
require "sidekiq/api"

require "zlib"
require "base64"

module Sidekiq
  ##

# @@ -56,7 +60,8 @@
  # end
  #
  class JobRetry
    class Handled < ::RuntimeError; end
    class Skip < Handled; end

    include Sidekiq::Util

# @@ -71,7 +76,7 @@
    # require the worker to be instantiated.
    def global(msg, queue)
      yield
    rescue Handled => ex
      raise ex
    rescue Sidekiq::Shutdown => ey
      # ignore, will be pushed back onto queue during hard_shutdown

# @@ -80,22 +85,19 @@
      # ignore, will be pushed back onto queue during hard_shutdown
      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)

      if msg["retry"]
        attempt_retry(nil, msg, queue, e)
      else
        Sidekiq.death_handlers.each do |handler|
          handler.call(msg, e)
        rescue => handler_ex
          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
        end
      end

      raise Handled
    end

    # The local retry support means that any errors that occur within
    # this block can be associated with the given worker instance.
    # This is required to support the `sidekiq_retries_exhausted` block.

# @@ -106,7 +108,7 @@
    # calling the handle_exception handlers.
    def local(worker, msg, queue)
      yield
    rescue Handled => ex
      raise ex
    rescue Sidekiq::Shutdown => ey
      # ignore, will be pushed back onto queue during hard_shutdown

# @@ -115,11 +117,11 @@
      # ignore, will be pushed back onto queue during hard_shutdown
      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)

      if msg["retry"].nil?
        msg["retry"] = worker.class.get_sidekiq_options["retry"]
      end

      raise e unless msg["retry"]
      attempt_retry(worker, msg, queue, e)
      # We've handled this error associated with this job, don't
      # need to handle it at the global level

# @@ -132,48 +134,44 @@
    # instantiate the worker instance. All access must be guarded and
    # best effort.
    def attempt_retry(worker, msg, queue, exception)
      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)

      msg["queue"] = (msg["retry_queue"] || queue)

      m = exception_message(exception)
      if m.respond_to?(:scrub!)
        m.force_encoding("utf-8")
        m.scrub!
      end

      msg["error_message"] = m
      msg["error_class"] = exception.class.name
      count = if msg["retry_count"]
        msg["retried_at"] = Time.now.to_f
        msg["retry_count"] += 1
      else
        msg["failed_at"] = Time.now.to_f
        msg["retry_count"] = 0
      end

      if msg["backtrace"]
        lines = if msg["backtrace"] == true
          exception.backtrace
        else
          exception.backtrace[0...msg["backtrace"].to_i]
        end

        msg["error_backtrace"] = compress_backtrace(lines)
      end

      if count < max_retry_attempts
        delay = delay_for(worker, count, exception)
        # Logging here can break retries if the logging device raises ENOSPC #3979
        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
        retry_at = Time.now.to_f + delay
        payload = Sidekiq.dump_json(msg)
        Sidekiq.redis do |conn|
          conn.zadd("retry", retry_at.to_s, payload)
        end
      else
        # Goodbye dear message, you (re)tried your best I'm sure.

# @@ -183,25 +181,23 @@

    def retries_exhausted(worker, msg, exception)
      begin
        block = worker&.sidekiq_retries_exhausted_block
        block&.call(msg, exception)
      rescue => e
        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
      end

      Sidekiq.death_handlers.each do |handler|
        handler.call(msg, exception)
      rescue => e
        handle_exception(e, {context: "Error calling death handler", job: msg})
      end

      send_to_morgue(msg) unless msg["dead"] == false
    end

    def send_to_morgue(msg)
      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
      payload = Sidekiq.dump_json(msg)
      DeadSet.new.kill(payload, notify_failure: false)
    end

# @@ -215,7 +211,7 @@
    end

    def delay_for(worker, count, exception)
      if worker&.sidekiq_retry_in_block
        custom_retry_in = retry_in(worker, count, exception).to_i
        return custom_retry_in if custom_retry_in > 0
      end

# @@ -224,16 +220,14 @@

    # delayed_job uses the same basic formula
    def seconds_to_delay(count)
      (count**4) + 15 + (rand(30) * (count + 1))
    end

    def retry_in(worker, count, exception)
      worker.sidekiq_retry_in_block.call(count, exception)
    rescue Exception => e
      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
      nil
    end

    def exception_caused_by_shutdown?(e, checked_causes = [])

# @@ -247,5 +241,20 @@
      exception_caused_by_shutdown?(e.cause, checked_causes)
    end

    # Extract message from exception.
    # Set a default if the message raises an error
    def exception_message(exception)
      # App code can stuff all sorts of crazy binary data into the error message
      # that won't convert to JSON.
      exception.message.to_s[0, 10_000]
    rescue
      +"!!! ERROR MESSAGE THREW AN ERROR !!!"
    end

    def compress_backtrace(backtrace)
      serialized = Marshal.dump(backtrace)
      compressed = Zlib::Deflate.deflate(serialized)
      Base64.encode64(compressed)
    end
  end
end
```
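Two details worth noting in the retry changes: `seconds_to_delay` keeps the polynomial backoff (retry 0 waits roughly 15 to 45 seconds, retry 10 already exceeds 10,000 seconds), and stored backtraces are no longer plain JSON arrays but Marshal-dumped, zlib-deflated and Base64-encoded strings, which keeps long backtraces compact in the Redis payload. A small round-trip sketch of that encoding; the decoding half is simply the inverse of `compress_backtrace` and not an API this file exposes:

```ruby
require "zlib"
require "base64"

backtrace = [
  "app/workers/hard_worker.rb:10:in `perform'",
  "lib/jobs/runner.rb:5:in `run'",
]

# Same steps as JobRetry#compress_backtrace.
encoded = Base64.encode64(Zlib::Deflate.deflate(Marshal.dump(backtrace)))

# Inverting each step recovers the original array, e.g. when inspecting
# the "error_backtrace" field of a retried or dead job by hand.
decoded = Marshal.load(Zlib::Inflate.inflate(Base64.decode64(encoded)))
decoded == backtrace # => true
```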
data/lib/sidekiq/launcher.rb
CHANGED

Changed regions, as they read in 6.0.1 (hunk positions from the diff kept as comments):

```ruby
# @@ -1,19 +1,25 @@
# frozen_string_literal: true

require "sidekiq/manager"
require "sidekiq/fetch"
require "sidekiq/scheduled"

module Sidekiq
  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
  class Launcher
    include Util

    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years

    PROCTITLES = [
      proc { "sidekiq" },
      proc { Sidekiq::VERSION },
      proc { |me, data| data["tag"] },
      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
      proc { |me, data| "stopping" if me.stopping? },
    ]

    attr_accessor :manager, :poller, :fetcher

    def initialize(options)
      @manager = Sidekiq::Manager.new(options)

# @@ -62,10 +68,30 @@

    private unless $TESTING

    def start_heartbeat
      loop do
        heartbeat
        sleep 5
      end
      Sidekiq.logger.info("Heartbeat stopping...")
    end

    def clear_heartbeat
      # Remove record from Redis since we are shutting down.
      # Note we don't stop the heartbeat thread; if the process
      # doesn't actually exit, it'll reappear in the Web UI.
      Sidekiq.redis do |conn|
        conn.pipelined do
          conn.srem("processes", identity)
          conn.del("#{identity}:workers")
        end
      end
    rescue
      # best effort, ignore network errors
    end

    def heartbeat
      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")

      ❤
    end

# @@ -73,6 +99,7 @@
    def ❤
      key = identity
      fails = procd = 0

      begin
        fails = Processor::FAILURE.reset
        procd = Processor::PROCESSED.reset

# @@ -80,6 +107,7 @@

        workers_key = "#{key}:workers"
        nowdate = Time.now.utc.strftime("%Y-%m-%d")

        Sidekiq.redis do |conn|
          conn.multi do
            conn.incrby("stat:processed", procd)

# @@ -97,24 +125,25 @@
            conn.expire(workers_key, 60)
          end
        end

        fails = procd = 0

        _, exists, _, _, msg = Sidekiq.redis { |conn|
          conn.multi {
            conn.sadd("processes", key)
            conn.exists(key)
            conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
            conn.expire(key, 60)
            conn.rpop("#{key}-signals")
          }
        }

        # first heartbeat or recovering from an outage and need to reestablish our heartbeat
        fire_event(:heartbeat) unless exists

        return unless msg

        ::Process.kill(msg, ::Process.pid)
      rescue => e
        # ignore all redis/network issues
        logger.error("heartbeat: #{e.message}")

# @@ -124,25 +153,17 @@
      end
    end

    def to_data
      @data ||= begin
        {
          "hostname" => hostname,
          "started_at" => Time.now.to_f,
          "pid" => ::Process.pid,
          "tag" => @options[:tag] || "",
          "concurrency" => @options[:concurrency],
          "queues" => @options[:queues].uniq,
          "labels" => @options[:labels],
          "identity" => identity,
        }
      end
    end

# @@ -154,20 +175,5 @@
        Sidekiq.dump_json(to_data)
      end
    end
  end
end
```
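The process title is now assembled from the `PROCTITLES` array on each heartbeat, which makes it easy to see what ends up in `$0`. A hedged sketch that evaluates the segments outside a running Sidekiq process; `FakeLauncher` and the `data` hash are stand-ins for the launcher and its `to_data` payload:

```ruby
require "sidekiq"
require "sidekiq/launcher"

# Only #stopping? is needed by the PROCTITLES procs.
FakeLauncher = Struct.new(:stopping) do
  def stopping?
    stopping
  end
end

me = FakeLauncher.new(false)
data = {"tag" => "myapp", "concurrency" => 10}

title = Sidekiq::Launcher::PROCTITLES.map { |p| p.call(me, data) }.compact.join(" ")
# => something like "sidekiq 6.0.1 myapp [0 of 10 busy]"
```

Since `PROCTITLES` is a plain Array constant, additional segments can in principle be appended before the launcher starts, though that is an observation about the data structure rather than a documented extension point.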