sidekiq 5.2.9 → 6.1.2

Potentially problematic release.

Files changed (89)
  1. checksums.yaml +4 -4
  2. data/.github/ISSUE_TEMPLATE/bug_report.md +20 -0
  3. data/.github/workflows/ci.yml +41 -0
  4. data/.gitignore +0 -2
  5. data/.standard.yml +20 -0
  6. data/6.0-Upgrade.md +72 -0
  7. data/Changes.md +184 -0
  8. data/Ent-2.0-Upgrade.md +37 -0
  9. data/Ent-Changes.md +44 -1
  10. data/Gemfile +12 -11
  11. data/Gemfile.lock +192 -0
  12. data/Pro-5.0-Upgrade.md +25 -0
  13. data/Pro-Changes.md +48 -2
  14. data/README.md +18 -34
  15. data/Rakefile +5 -4
  16. data/bin/sidekiq +26 -2
  17. data/bin/sidekiqload +32 -24
  18. data/bin/sidekiqmon +8 -0
  19. data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
  20. data/lib/generators/sidekiq/worker_generator.rb +21 -13
  21. data/lib/sidekiq/api.rb +242 -219
  22. data/lib/sidekiq/cli.rb +131 -180
  23. data/lib/sidekiq/client.rb +67 -47
  24. data/lib/sidekiq/delay.rb +5 -6
  25. data/lib/sidekiq/exception_handler.rb +10 -12
  26. data/lib/sidekiq/extensions/action_mailer.rb +13 -22
  27. data/lib/sidekiq/extensions/active_record.rb +13 -10
  28. data/lib/sidekiq/extensions/class_methods.rb +14 -11
  29. data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
  30. data/lib/sidekiq/fetch.rb +29 -30
  31. data/lib/sidekiq/job_logger.rb +45 -7
  32. data/lib/sidekiq/job_retry.rb +61 -61
  33. data/lib/sidekiq/launcher.rb +88 -55
  34. data/lib/sidekiq/logger.rb +165 -0
  35. data/lib/sidekiq/manager.rb +11 -13
  36. data/lib/sidekiq/middleware/chain.rb +15 -5
  37. data/lib/sidekiq/middleware/i18n.rb +5 -7
  38. data/lib/sidekiq/monitor.rb +133 -0
  39. data/lib/sidekiq/paginator.rb +18 -14
  40. data/lib/sidekiq/processor.rb +71 -70
  41. data/lib/sidekiq/rails.rb +29 -37
  42. data/lib/sidekiq/redis_connection.rb +50 -48
  43. data/lib/sidekiq/scheduled.rb +28 -29
  44. data/lib/sidekiq/sd_notify.rb +149 -0
  45. data/lib/sidekiq/systemd.rb +24 -0
  46. data/lib/sidekiq/testing/inline.rb +2 -1
  47. data/lib/sidekiq/testing.rb +35 -24
  48. data/lib/sidekiq/util.rb +17 -16
  49. data/lib/sidekiq/version.rb +2 -1
  50. data/lib/sidekiq/web/action.rb +14 -10
  51. data/lib/sidekiq/web/application.rb +73 -71
  52. data/lib/sidekiq/web/csrf_protection.rb +158 -0
  53. data/lib/sidekiq/web/helpers.rb +85 -77
  54. data/lib/sidekiq/web/router.rb +18 -17
  55. data/lib/sidekiq/web.rb +53 -53
  56. data/lib/sidekiq/worker.rb +126 -102
  57. data/lib/sidekiq.rb +69 -44
  58. data/sidekiq.gemspec +15 -16
  59. data/web/assets/javascripts/application.js +25 -27
  60. data/web/assets/javascripts/dashboard.js +4 -23
  61. data/web/assets/stylesheets/application-dark.css +143 -0
  62. data/web/assets/stylesheets/application.css +16 -6
  63. data/web/locales/de.yml +14 -2
  64. data/web/locales/en.yml +2 -0
  65. data/web/locales/fr.yml +2 -2
  66. data/web/locales/ja.yml +4 -1
  67. data/web/locales/lt.yml +83 -0
  68. data/web/locales/pl.yml +4 -4
  69. data/web/locales/ru.yml +4 -0
  70. data/web/locales/vi.yml +83 -0
  71. data/web/views/_job_info.erb +2 -1
  72. data/web/views/busy.erb +6 -3
  73. data/web/views/dead.erb +2 -2
  74. data/web/views/layout.erb +1 -0
  75. data/web/views/morgue.erb +5 -2
  76. data/web/views/queue.erb +10 -1
  77. data/web/views/queues.erb +9 -1
  78. data/web/views/retries.erb +5 -2
  79. data/web/views/retry.erb +2 -2
  80. data/web/views/scheduled.erb +5 -2
  81. metadata +25 -43
  82. data/.circleci/config.yml +0 -61
  83. data/.github/issue_template.md +0 -11
  84. data/.travis.yml +0 -11
  85. data/bin/sidekiqctl +0 -20
  86. data/lib/sidekiq/core_ext.rb +0 -1
  87. data/lib/sidekiq/ctl.rb +0 -221
  88. data/lib/sidekiq/logging.rb +0 -122
  89. data/lib/sidekiq/middleware/server/active_record.rb +0 -23

data/lib/sidekiq/job_retry.rb

@@ -1,6 +1,10 @@
 # frozen_string_literal: true
-require 'sidekiq/scheduled'
-require 'sidekiq/api'
+
+require "sidekiq/scheduled"
+require "sidekiq/api"
+
+require "zlib"
+require "base64"
 
 module Sidekiq
   ##
@@ -70,7 +74,7 @@ module Sidekiq
     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
     # require the worker to be instantiated.
-    def global(msg, queue)
+    def global(jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -81,22 +85,20 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry']
+      msg = Sidekiq.load_json(jobstr)
+      if msg["retry"]
         attempt_retry(nil, msg, queue, e)
       else
         Sidekiq.death_handlers.each do |handler|
-          begin
-            handler.call(msg, e)
-          rescue => handler_ex
-            handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
-          end
+          handler.call(msg, e)
+        rescue => handler_ex
+          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
         end
       end
 
       raise Handled
     end
 
-
     # The local retry support means that any errors that occur within
     # this block can be associated with the given worker instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
@@ -105,7 +107,7 @@ module Sidekiq
     # exception so the global block does not reprocess the error. The
     # Skip exception is unwrapped within Sidekiq::Processor#process before
     # calling the handle_exception handlers.
-    def local(worker, msg, queue)
+    def local(worker, jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -116,11 +118,12 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry'] == nil
-        msg['retry'] = worker.class.get_sidekiq_options['retry']
+      msg = Sidekiq.load_json(jobstr)
+      if msg["retry"].nil?
+        msg["retry"] = worker.class.get_sidekiq_options["retry"]
       end
 
-      raise e unless msg['retry']
+      raise e unless msg["retry"]
       attempt_retry(worker, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
@@ -133,13 +136,9 @@ module Sidekiq
     # instantiate the worker instance. All access must be guarded and
     # best effort.
     def attempt_retry(worker, msg, queue, exception)
-      max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
-      msg['queue'] = if msg['retry_queue']
-        msg['retry_queue']
-      else
-        queue
-      end
+      msg["queue"] = (msg["retry_queue"] || queue)
 
       m = exception_message(exception)
       if m.respond_to?(:scrub!)
@@ -147,32 +146,34 @@ module Sidekiq
         m.scrub!
       end
 
-      msg['error_message'] = m
-      msg['error_class'] = exception.class.name
-      count = if msg['retry_count']
-        msg['retried_at'] = Time.now.to_f
-        msg['retry_count'] += 1
+      msg["error_message"] = m
+      msg["error_class"] = exception.class.name
+      count = if msg["retry_count"]
+        msg["retried_at"] = Time.now.to_f
+        msg["retry_count"] += 1
       else
-        msg['failed_at'] = Time.now.to_f
-        msg['retry_count'] = 0
+        msg["failed_at"] = Time.now.to_f
+        msg["retry_count"] = 0
       end
 
-      if msg['backtrace'] == true
-        msg['error_backtrace'] = exception.backtrace
-      elsif !msg['backtrace']
-        # do nothing
-      elsif msg['backtrace'].to_i != 0
-        msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+      if msg["backtrace"]
+        lines = if msg["backtrace"] == true
+          exception.backtrace
+        else
+          exception.backtrace[0...msg["backtrace"].to_i]
+        end
+
+        msg["error_backtrace"] = compress_backtrace(lines)
       end
 
       if count < max_retry_attempts
         delay = delay_for(worker, count, exception)
         # Logging here can break retries if the logging device raises ENOSPC #3979
-        #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
         retry_at = Time.now.to_f + delay
         payload = Sidekiq.dump_json(msg)
         Sidekiq.redis do |conn|
-          conn.zadd('retry', retry_at.to_s, payload)
+          conn.zadd("retry", retry_at.to_s, payload)
         end
       else
         # Goodbye dear message, you (re)tried your best I'm sure.
@@ -182,25 +183,23 @@ module Sidekiq
 
     def retries_exhausted(worker, msg, exception)
       begin
-        block = worker && worker.sidekiq_retries_exhausted_block
-        block.call(msg, exception) if block
+        block = worker&.sidekiq_retries_exhausted_block
+        block&.call(msg, exception)
       rescue => e
-        handle_exception(e, { context: "Error calling retries_exhausted", job: msg })
+        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
       end
 
+      send_to_morgue(msg) unless msg["dead"] == false
+
       Sidekiq.death_handlers.each do |handler|
-        begin
-          handler.call(msg, exception)
-        rescue => e
-          handle_exception(e, { context: "Error calling death handler", job: msg })
-        end
+        handler.call(msg, exception)
+      rescue => e
+        handle_exception(e, {context: "Error calling death handler", job: msg})
       end
-
-      send_to_morgue(msg) unless msg['dead'] == false
     end
 
     def send_to_morgue(msg)
-      logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
       DeadSet.new.kill(payload, notify_failure: false)
    end
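
One behavioral change hides in `retries_exhausted` above: the job is now sent to the morgue before the death handlers fire (it used to happen after), and the per-handler `begin/rescue` collapses into a block-level `rescue`. For reference, a death handler is registered like this (a minimal sketch; the handler body is illustrative):

    Sidekiq.configure_server do |config|
      config.death_handlers << ->(job, ex) do
        # runs once retries are exhausted, unless the job sets dead: false
        puts "#{job["class"]} #{job["jid"]} died: #{ex.message}"
      end
    end
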
@@ -214,7 +213,7 @@ module Sidekiq
     end
 
     def delay_for(worker, count, exception)
-      if worker && worker.sidekiq_retry_in_block
+      if worker&.sidekiq_retry_in_block
         custom_retry_in = retry_in(worker, count, exception).to_i
         return custom_retry_in if custom_retry_in > 0
       end
@@ -223,16 +222,14 @@ module Sidekiq
 
     # delayed_job uses the same basic formula
     def seconds_to_delay(count)
-      (count ** 4) + 15 + (rand(30)*(count+1))
+      (count**4) + 15 + (rand(30) * (count + 1))
     end
 
     def retry_in(worker, count, exception)
-      begin
-        worker.sidekiq_retry_in_block.call(count, exception)
-      rescue Exception => e
-        handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-        nil
-      end
+      worker.sidekiq_retry_in_block.call(count, exception)
+    rescue Exception => e
+      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
+      nil
     end
 
     def exception_caused_by_shutdown?(e, checked_causes = [])
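
The retry backoff formula itself is unchanged, only reformatted for standardrb: delay = count**4 + 15 + rand(30) * (count + 1) seconds. Ignoring the jitter term, the schedule grows quickly (a quick sketch):

    # Base delay per retry, without the rand(30) * (count + 1) jitter:
    (0..5).each { |count| puts "retry #{count}: #{(count**4) + 15}s" }
    # retry 0: 15s, retry 1: 16s, retry 2: 31s,
    # retry 3: 96s, retry 4: 271s, retry 5: 640s
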
@@ -249,14 +246,17 @@ module Sidekiq
     # Extract message from exception.
     # Set a default if the message raises an error
     def exception_message(exception)
-      begin
-        # App code can stuff all sorts of crazy binary data into the error message
-        # that won't convert to JSON.
-        exception.message.to_s[0, 10_000]
-      rescue
-        "!!! ERROR MESSAGE THREW AN ERROR !!!".dup
-      end
+      # App code can stuff all sorts of crazy binary data into the error message
+      # that won't convert to JSON.
+      exception.message.to_s[0, 10_000]
+    rescue
+      +"!!! ERROR MESSAGE THREW AN ERROR !!!"
     end
 
+    def compress_backtrace(backtrace)
+      serialized = Sidekiq.dump_json(backtrace)
+      compressed = Zlib::Deflate.deflate(serialized)
+      Base64.encode64(compressed)
+    end
   end
 end
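
The new `compress_backtrace` helper explains the `zlib`/`base64` requires at the top of the file: `error_backtrace` is now stored in Redis as a Base64-wrapped, deflated JSON array instead of a plain array of strings. Reading one back is just the inverse pipeline (a sketch; `decompress_backtrace` is a hypothetical helper, not part of the gem's API):

    require "zlib"
    require "base64"
    require "json"

    # Inverse of compress_backtrace: Base64 -> Zlib inflate -> JSON array of frames
    def decompress_backtrace(encoded)
      JSON.parse(Zlib::Inflate.inflate(Base64.decode64(encoded)))
    end
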

data/lib/sidekiq/launcher.rb

@@ -1,21 +1,28 @@
 # frozen_string_literal: true
-require 'sidekiq/manager'
-require 'sidekiq/fetch'
-require 'sidekiq/scheduled'
+
+require "sidekiq/manager"
+require "sidekiq/fetch"
+require "sidekiq/scheduled"
 
 module Sidekiq
-  # The Launcher is a very simple Actor whose job is to
-  # start, monitor and stop the core Actors in Sidekiq.
-  # If any of these actors die, the Sidekiq process exits
-  # immediately.
+  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
  class Launcher
    include Util
 
-    attr_accessor :manager, :poller, :fetcher
+    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
-    STATS_TTL = 5*365*24*60*60
+    PROCTITLES = [
+      proc { "sidekiq" },
+      proc { Sidekiq::VERSION },
+      proc { |me, data| data["tag"] },
+      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+      proc { |me, data| "stopping" if me.stopping? }
+    ]
+
+    attr_accessor :manager, :poller, :fetcher
 
     def initialize(options)
+      options[:fetch] ||= BasicFetch.new(options)
       @manager = Sidekiq::Manager.new(options)
       @poller = Sidekiq::Scheduled::Poller.new
       @done = false
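
Two things to note in this hunk: `PROCTITLES` moves from `Sidekiq::CLI` into the Launcher, and the fetch strategy is instantiated eagerly (`options[:fetch] ||= BasicFetch.new(options)`) instead of being resolved from a class at each call site. Every heartbeat joins the non-nil proc results into the process title, so `ps` output looks roughly like this (values illustrative):

    # $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
    # => "sidekiq 6.1.2 myapp [3 of 10 busy]"           while running
    # => "sidekiq 6.1.2 myapp [3 of 10 busy] stopping"  after quiet/TERM
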
@@ -50,7 +57,7 @@ module Sidekiq
 
       # Requeue everything in case there was a worker who grabbed work while stopped
       # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-      strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+      strategy = @options[:fetch]
       strategy.bulk_requeue([], @options)
 
       clear_heartbeat
@@ -62,17 +69,64 @@ module Sidekiq
 
     private unless $TESTING
 
+    def start_heartbeat
+      loop do
+        heartbeat
+        sleep 5
+      end
+      Sidekiq.logger.info("Heartbeat stopping...")
+    end
+
+    def clear_heartbeat
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      Sidekiq.redis do |conn|
+        conn.pipelined do
+          conn.srem("processes", identity)
+          conn.unlink("#{identity}:workers")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
     def heartbeat
-      results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
-      results.compact!
-      $0 = results.join(' ')
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
 
       ❤
     end
 
+    def self.flush_stats
+      fails = Processor::FAILURE.reset
+      procd = Processor::PROCESSED.reset
+      return if fails + procd == 0
+
+      nowdate = Time.now.utc.strftime("%Y-%m-%d")
+      begin
+        Sidekiq.redis do |conn|
+          conn.pipelined do
+            conn.incrby("stat:processed", procd)
+            conn.incrby("stat:processed:#{nowdate}", procd)
+            conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+            conn.incrby("stat:failed", fails)
+            conn.incrby("stat:failed:#{nowdate}", fails)
+            conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+          end
+        end
+      rescue => ex
+        # we're exiting the process, things might be shut down so don't
+        # try to handle the exception
+        Sidekiq.logger.warn("Unable to flush stats: #{ex}")
+      end
+    end
+    at_exit(&method(:flush_stats))
+
     def ❤
       key = identity
       fails = procd = 0
+
       begin
         fails = Processor::FAILURE.reset
         procd = Processor::PROCESSED.reset
@@ -80,6 +134,7 @@ module Sidekiq
 
         workers_key = "#{key}:workers"
         nowdate = Time.now.utc.strftime("%Y-%m-%d")
+
         Sidekiq.redis do |conn|
           conn.multi do
             conn.incrby("stat:processed", procd)
@@ -90,59 +145,52 @@ module Sidekiq
           conn.incrby("stat:failed:#{nowdate}", fails)
           conn.expire("stat:failed:#{nowdate}", STATS_TTL)
 
-          conn.del(workers_key)
+          conn.unlink(workers_key)
           curstate.each_pair do |tid, hash|
             conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
           end
           conn.expire(workers_key, 60)
         end
       end
+
       fails = procd = 0
 
-      _, exists, _, _, msg = Sidekiq.redis do |conn|
-        conn.multi do
-          conn.sadd('processes', key)
-          conn.exists(key)
-          conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
+      _, exists, _, _, msg = Sidekiq.redis { |conn|
+        conn.multi {
+          conn.sadd("processes", key)
+          conn.exists?(key)
+          conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
           conn.expire(key, 60)
           conn.rpop("#{key}-signals")
-        end
-      end
+        }
+      }
 
       # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-      fire_event(:heartbeat) if !exists
+      fire_event(:heartbeat) unless exists
 
       return unless msg
 
-      ::Process.kill(msg, $$)
+      ::Process.kill(msg, ::Process.pid)
      rescue => e
        # ignore all redis/network issues
-        logger.error("heartbeat: #{e.message}")
+        logger.error("heartbeat: #{e}")
        # don't lose the counts if there was a network issue
        Processor::PROCESSED.incr(procd)
        Processor::FAILURE.incr(fails)
      end
    end
 
-    def start_heartbeat
-      while true
-        heartbeat
-        sleep 5
-      end
-      Sidekiq.logger.info("Heartbeat stopping...")
-    end
-
    def to_data
      @data ||= begin
        {
-          'hostname' => hostname,
-          'started_at' => Time.now.to_f,
-          'pid' => $$,
-          'tag' => @options[:tag] || '',
-          'concurrency' => @options[:concurrency],
-          'queues' => @options[:queues].uniq,
-          'labels' => @options[:labels],
-          'identity' => identity,
+          "hostname" => hostname,
+          "started_at" => Time.now.to_f,
+          "pid" => ::Process.pid,
+          "tag" => @options[:tag] || "",
+          "concurrency" => @options[:concurrency],
+          "queues" => @options[:queues].uniq,
+          "labels" => @options[:labels],
+          "identity" => identity
        }
      end
    end
@@ -154,20 +202,5 @@ module Sidekiq
        Sidekiq.dump_json(to_data)
      end
    end
-
-    def clear_heartbeat
-      # Remove record from Redis since we are shutting down.
-      # Note we don't stop the heartbeat thread; if the process
-      # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
-        conn.pipelined do
-          conn.srem('processes', identity)
-          conn.del("#{identity}:workers")
-        end
-      end
-    rescue
-      # best effort, ignore network errors
-    end
-
   end
 end
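
Because `@options[:fetch]` now always holds an instance, custom fetch strategies must be passed as constructed objects rather than classes. A hedged sketch (`MyFetch` is hypothetical; it needs to respond to `#retrieve_work` and `#bulk_requeue` like `BasicFetch`):

    Sidekiq.configure_server do |config|
      # 5.x accepted a class: config.options[:fetch] = MyFetch
      # 6.x expects an instance; Launcher defaults to BasicFetch.new(options)
      config.options[:fetch] = MyFetch.new(config.options)
    end
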

data/lib/sidekiq/logger.rb (new)

@@ -0,0 +1,165 @@
+# frozen_string_literal: true
+
+require "logger"
+require "time"
+
+module Sidekiq
+  module Context
+    def self.with(hash)
+      current.merge!(hash)
+      yield
+    ensure
+      hash.each_key { |key| current.delete(key) }
+    end
+
+    def self.current
+      Thread.current[:sidekiq_context] ||= {}
+    end
+  end
+
+  module LoggingUtils
+    LEVELS = {
+      "debug" => 0,
+      "info" => 1,
+      "warn" => 2,
+      "error" => 3,
+      "fatal" => 4
+    }
+    LEVELS.default_proc = proc do |_, level|
+      Sidekiq.logger.warn("Invalid log level: #{level.inspect}")
+      nil
+    end
+
+    def debug?
+      level <= 0
+    end
+
+    def info?
+      level <= 1
+    end
+
+    def warn?
+      level <= 2
+    end
+
+    def error?
+      level <= 3
+    end
+
+    def fatal?
+      level <= 4
+    end
+
+    def local_level
+      Thread.current[:sidekiq_log_level]
+    end
+
+    def local_level=(level)
+      case level
+      when Integer
+        Thread.current[:sidekiq_log_level] = level
+      when Symbol, String
+        Thread.current[:sidekiq_log_level] = LEVELS[level.to_s]
+      when nil
+        Thread.current[:sidekiq_log_level] = nil
+      else
+        raise ArgumentError, "Invalid log level: #{level.inspect}"
+      end
+    end
+
+    def level
+      local_level || super
+    end
+
+    # Change the thread-local level for the duration of the given block.
+    def log_at(level)
+      old_local_level = local_level
+      self.local_level = level
+      yield
+    ensure
+      self.local_level = old_local_level
+    end
+
+    # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+.
+    # FIXME: Remove when the minimum Ruby version supports overriding Logger#level.
+    def add(severity, message = nil, progname = nil, &block)
+      severity ||= ::Logger::UNKNOWN
+      progname ||= @progname
+
+      return true if @logdev.nil? || severity < level
+
+      if message.nil?
+        if block_given?
+          message = yield
+        else
+          message = progname
+          progname = @progname
+        end
+      end
+
+      @logdev.write format_message(format_severity(severity), Time.now, progname, message)
+    end
+  end
+
+  class Logger < ::Logger
+    include LoggingUtils
+
+    def initialize(*args, **kwargs)
+      super
+      self.formatter = Sidekiq.log_formatter
+    end
+
+    module Formatters
+      class Base < ::Logger::Formatter
+        def tid
+          Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+        end
+
+        def ctx
+          Sidekiq::Context.current
+        end
+
+        def format_context
+          if ctx.any?
+            " " + ctx.compact.map { |k, v|
+              case v
+              when Array
+                "#{k}=#{v.join(",")}"
+              else
+                "#{k}=#{v}"
+              end
+            }.join(" ")
+          end
+        end
+      end
+
+      class Pretty < Base
+        def call(severity, time, program_name, message)
+          "#{time.utc.iso8601(3)} pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+        end
+      end
+
+      class WithoutTimestamp < Pretty
+        def call(severity, time, program_name, message)
+          "pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+        end
+      end
+
+      class JSON < Base
+        def call(severity, time, program_name, message)
+          hash = {
+            ts: time.utc.iso8601(3),
+            pid: ::Process.pid,
+            tid: tid,
+            lvl: severity,
+            msg: message
+          }
+          c = ctx
+          hash["ctx"] = c unless c.empty?
+
+          Sidekiq.dump_json(hash) << "\n"
+        end
+      end
+    end
+  end
+end
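
This new file replaces the removed `lib/sidekiq/logging.rb`: job metadata travels in a thread-local `Sidekiq::Context` hash that every formatter appends, and `log_at` scopes the log level to a block. Typical usage (a sketch assuming the gem is loaded; output values are illustrative):

    require "sidekiq"

    logger = Sidekiq::Logger.new($stdout)

    Sidekiq::Context.with(class: "HardWorker", jid: "b4a577edbccf1d805744") do
      logger.info("start")
      # => 2020-12-01T12:00:00.000Z pid=4242 tid=oq6 class=HardWorker jid=b4a577edbccf1d805744 INFO: start
    end
    logger.info("done") # context keys are removed when the block exits

    logger.log_at(:debug) do
      logger.debug("visible even if the global level is :info")
    end
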

data/lib/sidekiq/manager.rb

@@ -1,12 +1,11 @@
 # frozen_string_literal: true
-require 'sidekiq/util'
-require 'sidekiq/processor'
-require 'sidekiq/fetch'
-require 'thread'
-require 'set'
 
-module Sidekiq
+require "sidekiq/util"
+require "sidekiq/processor"
+require "sidekiq/fetch"
+require "set"
 
+module Sidekiq
   ##
   # The Manager is the central coordination point in Sidekiq, controlling
   # the lifecycle of the Processors.
@@ -27,7 +26,7 @@ module Sidekiq
     attr_reader :workers
     attr_reader :options
 
-    def initialize(options={})
+    def initialize(options = {})
       logger.debug { options.inspect }
       @options = options
       @count = options[:concurrency] || 10
@@ -36,7 +35,7 @@ module Sidekiq
       @done = false
       @workers = Set.new
       @count.times do
-        @workers << Processor.new(self)
+        @workers << Processor.new(self, options)
       end
       @plock = Mutex.new
     end
@@ -57,7 +56,7 @@ module Sidekiq
     end
 
     # hack for quicker development / testing environment #2774
-    PAUSE_TIME = STDOUT.tty? ? 0.1 : 0.5
+    PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
 
     def stop(deadline)
       quiet
@@ -91,7 +90,7 @@ module Sidekiq
       @plock.synchronize do
         @workers.delete(processor)
         unless @done
-          p = Processor.new(self)
+          p = Processor.new(self, options)
           @workers << p
           p.start
         end
@@ -113,7 +112,7 @@ module Sidekiq
       end
 
       if cleanup.size > 0
-        jobs = cleanup.map {|p| p.job }.compact
+        jobs = cleanup.map { |p| p.job }.compact
 
         logger.warn { "Terminating #{cleanup.size} busy worker threads" }
         logger.warn { "Work still in progress #{jobs.inspect}" }
@@ -124,7 +123,7 @@ module Sidekiq
        # contract says that jobs are run AT LEAST once. Process termination
        # is delayed until we're certain the jobs are back in Redis because
        # it is worse to lose a job than to run it twice.
-        strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+        strategy = @options[:fetch]
        strategy.bulk_requeue(jobs, @options)
      end
 
@@ -132,6 +131,5 @@ module Sidekiq
        processor.kill
      end
    end
-
  end
 end