sidekiq 5.2.8 → 6.1.3


Potentially problematic release.

Files changed (90)
  1. checksums.yaml +4 -4
  2. data/.github/ISSUE_TEMPLATE/bug_report.md +20 -0
  3. data/.github/workflows/ci.yml +41 -0
  4. data/.gitignore +0 -2
  5. data/.standard.yml +20 -0
  6. data/5.0-Upgrade.md +1 -1
  7. data/6.0-Upgrade.md +72 -0
  8. data/Changes.md +196 -0
  9. data/Ent-2.0-Upgrade.md +37 -0
  10. data/Ent-Changes.md +72 -1
  11. data/Gemfile +12 -11
  12. data/Gemfile.lock +193 -0
  13. data/Pro-5.0-Upgrade.md +25 -0
  14. data/Pro-Changes.md +56 -2
  15. data/README.md +18 -34
  16. data/Rakefile +5 -4
  17. data/bin/sidekiq +26 -2
  18. data/bin/sidekiqload +32 -24
  19. data/bin/sidekiqmon +8 -0
  20. data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
  21. data/lib/generators/sidekiq/worker_generator.rb +21 -13
  22. data/lib/sidekiq/api.rb +245 -219
  23. data/lib/sidekiq/cli.rb +144 -180
  24. data/lib/sidekiq/client.rb +68 -48
  25. data/lib/sidekiq/delay.rb +5 -6
  26. data/lib/sidekiq/exception_handler.rb +10 -12
  27. data/lib/sidekiq/extensions/action_mailer.rb +13 -22
  28. data/lib/sidekiq/extensions/active_record.rb +13 -10
  29. data/lib/sidekiq/extensions/class_methods.rb +14 -11
  30. data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
  31. data/lib/sidekiq/fetch.rb +29 -30
  32. data/lib/sidekiq/job_logger.rb +45 -7
  33. data/lib/sidekiq/job_retry.rb +62 -61
  34. data/lib/sidekiq/launcher.rb +112 -54
  35. data/lib/sidekiq/logger.rb +166 -0
  36. data/lib/sidekiq/manager.rb +11 -13
  37. data/lib/sidekiq/middleware/chain.rb +15 -5
  38. data/lib/sidekiq/middleware/i18n.rb +5 -7
  39. data/lib/sidekiq/monitor.rb +133 -0
  40. data/lib/sidekiq/paginator.rb +18 -14
  41. data/lib/sidekiq/processor.rb +71 -70
  42. data/lib/sidekiq/rails.rb +29 -37
  43. data/lib/sidekiq/redis_connection.rb +50 -48
  44. data/lib/sidekiq/scheduled.rb +28 -29
  45. data/lib/sidekiq/sd_notify.rb +149 -0
  46. data/lib/sidekiq/systemd.rb +24 -0
  47. data/lib/sidekiq/testing/inline.rb +2 -1
  48. data/lib/sidekiq/testing.rb +35 -24
  49. data/lib/sidekiq/util.rb +17 -16
  50. data/lib/sidekiq/version.rb +2 -1
  51. data/lib/sidekiq/web/action.rb +14 -10
  52. data/lib/sidekiq/web/application.rb +74 -72
  53. data/lib/sidekiq/web/csrf_protection.rb +156 -0
  54. data/lib/sidekiq/web/helpers.rb +97 -77
  55. data/lib/sidekiq/web/router.rb +18 -17
  56. data/lib/sidekiq/web.rb +53 -53
  57. data/lib/sidekiq/worker.rb +126 -102
  58. data/lib/sidekiq.rb +69 -44
  59. data/sidekiq.gemspec +15 -16
  60. data/web/assets/javascripts/application.js +25 -27
  61. data/web/assets/javascripts/dashboard.js +4 -23
  62. data/web/assets/stylesheets/application-dark.css +149 -0
  63. data/web/assets/stylesheets/application.css +28 -6
  64. data/web/locales/de.yml +14 -2
  65. data/web/locales/en.yml +2 -0
  66. data/web/locales/fr.yml +3 -3
  67. data/web/locales/ja.yml +4 -1
  68. data/web/locales/lt.yml +83 -0
  69. data/web/locales/pl.yml +4 -4
  70. data/web/locales/ru.yml +4 -0
  71. data/web/locales/vi.yml +83 -0
  72. data/web/views/_job_info.erb +2 -1
  73. data/web/views/busy.erb +8 -3
  74. data/web/views/dead.erb +2 -2
  75. data/web/views/layout.erb +1 -0
  76. data/web/views/morgue.erb +5 -2
  77. data/web/views/queue.erb +10 -1
  78. data/web/views/queues.erb +9 -1
  79. data/web/views/retries.erb +5 -2
  80. data/web/views/retry.erb +2 -2
  81. data/web/views/scheduled.erb +5 -2
  82. metadata +31 -49
  83. data/.circleci/config.yml +0 -61
  84. data/.github/issue_template.md +0 -11
  85. data/.travis.yml +0 -11
  86. data/bin/sidekiqctl +0 -20
  87. data/lib/sidekiq/core_ext.rb +0 -1
  88. data/lib/sidekiq/ctl.rb +0 -221
  89. data/lib/sidekiq/logging.rb +0 -122
  90. data/lib/sidekiq/middleware/server/active_record.rb +0 -23
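This is a major-version jump, so the upgrade notes added here (data/6.0-Upgrade.md) are worth reading before bumping; note also that bin/sidekiqctl is removed while bin/sidekiqmon is added. Taking the upgrade itself is a one-line Gemfile change; the version constraint below is only illustrative:

# Gemfile -- illustrative constraint for this upgrade
gem "sidekiq", "~> 6.1"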
data/lib/sidekiq/job_retry.rb

@@ -1,6 +1,10 @@
 # frozen_string_literal: true
-require 'sidekiq/scheduled'
-require 'sidekiq/api'
+
+require "sidekiq/scheduled"
+require "sidekiq/api"
+
+require "zlib"
+require "base64"
 
 module Sidekiq
   ##
@@ -57,6 +61,7 @@ module Sidekiq
   #
   class JobRetry
     class Handled < ::RuntimeError; end
+
     class Skip < Handled; end
 
     include Sidekiq::Util
@@ -70,7 +75,7 @@ module Sidekiq
     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
     # require the worker to be instantiated.
-    def global(msg, queue)
+    def global(jobstr, queue)
      yield
    rescue Handled => ex
      raise ex
@@ -81,22 +86,20 @@ module Sidekiq
      # ignore, will be pushed back onto queue during hard_shutdown
      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry']
+      msg = Sidekiq.load_json(jobstr)
+      if msg["retry"]
        attempt_retry(nil, msg, queue, e)
      else
        Sidekiq.death_handlers.each do |handler|
-          begin
-            handler.call(msg, e)
-          rescue => handler_ex
-            handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
-          end
+          handler.call(msg, e)
+        rescue => handler_ex
+          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
        end
      end
 
      raise Handled
    end
 
-
    # The local retry support means that any errors that occur within
    # this block can be associated with the given worker instance.
    # This is required to support the `sidekiq_retries_exhausted` block.
@@ -105,7 +108,7 @@ module Sidekiq
    # exception so the global block does not reprocess the error. The
    # Skip exception is unwrapped within Sidekiq::Processor#process before
    # calling the handle_exception handlers.
-    def local(worker, msg, queue)
+    def local(worker, jobstr, queue)
      yield
    rescue Handled => ex
      raise ex
@@ -116,11 +119,12 @@ module Sidekiq
      # ignore, will be pushed back onto queue during hard_shutdown
      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry'] == nil
-        msg['retry'] = worker.class.get_sidekiq_options['retry']
+      msg = Sidekiq.load_json(jobstr)
+      if msg["retry"].nil?
+        msg["retry"] = worker.class.get_sidekiq_options["retry"]
      end
 
-      raise e unless msg['retry']
+      raise e unless msg["retry"]
      attempt_retry(worker, msg, queue, e)
      # We've handled this error associated with this job, don't
      # need to handle it at the global level
@@ -133,13 +137,9 @@ module Sidekiq
    # instantiate the worker instance. All access must be guarded and
    # best effort.
    def attempt_retry(worker, msg, queue, exception)
-      max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
-      msg['queue'] = if msg['retry_queue']
-        msg['retry_queue']
-      else
-        queue
-      end
+      msg["queue"] = (msg["retry_queue"] || queue)
 
      m = exception_message(exception)
      if m.respond_to?(:scrub!)
@@ -147,32 +147,34 @@ module Sidekiq
        m.scrub!
      end
 
-      msg['error_message'] = m
-      msg['error_class'] = exception.class.name
-      count = if msg['retry_count']
-        msg['retried_at'] = Time.now.to_f
-        msg['retry_count'] += 1
+      msg["error_message"] = m
+      msg["error_class"] = exception.class.name
+      count = if msg["retry_count"]
+        msg["retried_at"] = Time.now.to_f
+        msg["retry_count"] += 1
      else
-        msg['failed_at'] = Time.now.to_f
-        msg['retry_count'] = 0
+        msg["failed_at"] = Time.now.to_f
+        msg["retry_count"] = 0
      end
 
-      if msg['backtrace'] == true
-        msg['error_backtrace'] = exception.backtrace
-      elsif !msg['backtrace']
-        # do nothing
-      elsif msg['backtrace'].to_i != 0
-        msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+      if msg["backtrace"]
+        lines = if msg["backtrace"] == true
+          exception.backtrace
+        else
+          exception.backtrace[0...msg["backtrace"].to_i]
+        end
+
+        msg["error_backtrace"] = compress_backtrace(lines)
      end
 
      if count < max_retry_attempts
        delay = delay_for(worker, count, exception)
        # Logging here can break retries if the logging device raises ENOSPC #3979
-        #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
        retry_at = Time.now.to_f + delay
        payload = Sidekiq.dump_json(msg)
        Sidekiq.redis do |conn|
-          conn.zadd('retry', retry_at.to_s, payload)
+          conn.zadd("retry", retry_at.to_s, payload)
        end
      else
        # Goodbye dear message, you (re)tried your best I'm sure.
@@ -182,25 +184,23 @@ module Sidekiq
 
    def retries_exhausted(worker, msg, exception)
      begin
-        block = worker && worker.sidekiq_retries_exhausted_block
-        block.call(msg, exception) if block
+        block = worker&.sidekiq_retries_exhausted_block
+        block&.call(msg, exception)
      rescue => e
-        handle_exception(e, { context: "Error calling retries_exhausted", job: msg })
+        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
      end
 
+      send_to_morgue(msg) unless msg["dead"] == false
+
      Sidekiq.death_handlers.each do |handler|
-        begin
-          handler.call(msg, exception)
-        rescue => e
-          handle_exception(e, { context: "Error calling death handler", job: msg })
-        end
+        handler.call(msg, exception)
+      rescue => e
+        handle_exception(e, {context: "Error calling death handler", job: msg})
      end
-
-      send_to_morgue(msg) unless msg['dead'] == false
    end
 
    def send_to_morgue(msg)
-      logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
      payload = Sidekiq.dump_json(msg)
      DeadSet.new.kill(payload, notify_failure: false)
    end
@@ -214,7 +214,7 @@ module Sidekiq
    end
 
    def delay_for(worker, count, exception)
-      if worker && worker.sidekiq_retry_in_block
+      if worker&.sidekiq_retry_in_block
        custom_retry_in = retry_in(worker, count, exception).to_i
        return custom_retry_in if custom_retry_in > 0
      end
@@ -223,16 +223,14 @@ module Sidekiq
 
    # delayed_job uses the same basic formula
    def seconds_to_delay(count)
-      (count ** 4) + 15 + (rand(30)*(count+1))
+      (count**4) + 15 + (rand(30) * (count + 1))
    end
 
    def retry_in(worker, count, exception)
-      begin
-        worker.sidekiq_retry_in_block.call(count, exception)
-      rescue Exception => e
-        handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-        nil
-      end
+      worker.sidekiq_retry_in_block.call(count, exception)
+    rescue Exception => e
+      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
+      nil
    end
 
    def exception_caused_by_shutdown?(e, checked_causes = [])
@@ -249,14 +247,17 @@ module Sidekiq
    # Extract message from exception.
    # Set a default if the message raises an error
    def exception_message(exception)
-      begin
-        # App code can stuff all sorts of crazy binary data into the error message
-        # that won't convert to JSON.
-        exception.message.to_s[0, 10_000]
-      rescue
-        "!!! ERROR MESSAGE THREW AN ERROR !!!".dup
-      end
+      # App code can stuff all sorts of crazy binary data into the error message
+      # that won't convert to JSON.
+      exception.message.to_s[0, 10_000]
+    rescue
+      +"!!! ERROR MESSAGE THREW AN ERROR !!!"
    end
 
+    def compress_backtrace(backtrace)
+      serialized = Sidekiq.dump_json(backtrace)
+      compressed = Zlib::Deflate.deflate(serialized)
+      Base64.encode64(compressed)
+    end
  end
end
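The main behavioral change above is that error backtraces are now stored compressed: compress_backtrace serializes the backtrace array to JSON, deflates it with Zlib, and Base64-encodes the result before it is written to the job's error_backtrace field. Below is a minimal sketch of the reverse operation, using a hypothetical decompress_backtrace helper that is not part of this diff (Sidekiq's own API/Web code does the equivalent when it displays a retried or dead job). Unrelated to compression, the retry delay formula itself is unchanged apart from formatting: (count**4) + 15 + (rand(30) * (count + 1)) seconds, so the first retry lands roughly 15-45 seconds after the failure.

require "base64"
require "json"
require "zlib"

# Hypothetical helper (not part of this changeset): reverses compress_backtrace
# by Base64-decoding, inflating, and parsing the JSON array of frames.
def decompress_backtrace(encoded)
  compressed = Base64.decode64(encoded)
  serialized = Zlib::Inflate.inflate(compressed)
  JSON.parse(serialized) # => ["app/jobs/foo.rb:10:in `perform'", ...]
end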
data/lib/sidekiq/launcher.rb

@@ -1,21 +1,28 @@
 # frozen_string_literal: true
-require 'sidekiq/manager'
-require 'sidekiq/fetch'
-require 'sidekiq/scheduled'
+
+require "sidekiq/manager"
+require "sidekiq/fetch"
+require "sidekiq/scheduled"
 
 module Sidekiq
-  # The Launcher is a very simple Actor whose job is to
-  # start, monitor and stop the core Actors in Sidekiq.
-  # If any of these actors die, the Sidekiq process exits
-  # immediately.
+  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
  class Launcher
    include Util
 
-    attr_accessor :manager, :poller, :fetcher
+    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
-    STATS_TTL = 5*365*24*60*60
+    PROCTITLES = [
+      proc { "sidekiq" },
+      proc { Sidekiq::VERSION },
+      proc { |me, data| data["tag"] },
+      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+      proc { |me, data| "stopping" if me.stopping? }
+    ]
+
+    attr_accessor :manager, :poller, :fetcher
 
    def initialize(options)
+      options[:fetch] ||= BasicFetch.new(options)
      @manager = Sidekiq::Manager.new(options)
      @poller = Sidekiq::Scheduled::Poller.new
      @done = false
@@ -50,7 +57,7 @@ module Sidekiq
 
      # Requeue everything in case there was a worker who grabbed work while stopped
      # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-      strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+      strategy = @options[:fetch]
      strategy.bulk_requeue([], @options)
 
      clear_heartbeat
@@ -62,17 +69,64 @@ module Sidekiq
 
    private unless $TESTING
 
+    def start_heartbeat
+      loop do
+        heartbeat
+        sleep 5
+      end
+      Sidekiq.logger.info("Heartbeat stopping...")
+    end
+
+    def clear_heartbeat
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      Sidekiq.redis do |conn|
+        conn.pipelined do
+          conn.srem("processes", identity)
+          conn.unlink("#{identity}:workers")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
    def heartbeat
-      results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
-      results.compact!
-      $0 = results.join(' ')
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
 
      ❤
    end
 
+    def self.flush_stats
+      fails = Processor::FAILURE.reset
+      procd = Processor::PROCESSED.reset
+      return if fails + procd == 0
+
+      nowdate = Time.now.utc.strftime("%Y-%m-%d")
+      begin
+        Sidekiq.redis do |conn|
+          conn.pipelined do
+            conn.incrby("stat:processed", procd)
+            conn.incrby("stat:processed:#{nowdate}", procd)
+            conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+            conn.incrby("stat:failed", fails)
+            conn.incrby("stat:failed:#{nowdate}", fails)
+            conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+          end
+        end
+      rescue => ex
+        # we're exiting the process, things might be shut down so don't
+        # try to handle the exception
+        Sidekiq.logger.warn("Unable to flush stats: #{ex}")
+      end
+    end
+    at_exit(&method(:flush_stats))
+
    def ❤
      key = identity
      fails = procd = 0
+
      begin
        fails = Processor::FAILURE.reset
        procd = Processor::PROCESSED.reset
@@ -80,6 +134,7 @@ module Sidekiq
 
        workers_key = "#{key}:workers"
        nowdate = Time.now.utc.strftime("%Y-%m-%d")
+
        Sidekiq.redis do |conn|
          conn.multi do
            conn.incrby("stat:processed", procd)
@@ -90,59 +145,77 @@ module Sidekiq
            conn.incrby("stat:failed:#{nowdate}", fails)
            conn.expire("stat:failed:#{nowdate}", STATS_TTL)
 
-            conn.del(workers_key)
+            conn.unlink(workers_key)
            curstate.each_pair do |tid, hash|
              conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
            end
            conn.expire(workers_key, 60)
          end
        end
-        fails = procd = 0
 
-        _, exists, _, _, msg = Sidekiq.redis do |conn|
-          conn.multi do
-            conn.sadd('processes', key)
-            conn.exists(key)
-            conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
+        fails = procd = 0
+        kb = memory_usage(::Process.pid)
+
+        _, exists, _, _, msg = Sidekiq.redis { |conn|
+          conn.multi {
+            conn.sadd("processes", key)
+            conn.exists?(key)
+            conn.hmset(key, "info", to_json,
+              "busy", curstate.size,
+              "beat", Time.now.to_f,
+              "quiet", @done,
+              "rss", kb)
            conn.expire(key, 60)
            conn.rpop("#{key}-signals")
-          end
-        end
+          }
+        }
 
        # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-        fire_event(:heartbeat) if !exists
+        fire_event(:heartbeat) unless exists
 
        return unless msg
 
-        ::Process.kill(msg, $$)
+        ::Process.kill(msg, ::Process.pid)
      rescue => e
        # ignore all redis/network issues
-        logger.error("heartbeat: #{e.message}")
+        logger.error("heartbeat: #{e}")
        # don't lose the counts if there was a network issue
        Processor::PROCESSED.incr(procd)
        Processor::FAILURE.incr(fails)
      end
    end
 
-    def start_heartbeat
-      while true
-        heartbeat
-        sleep 5
-      end
-      Sidekiq.logger.info("Heartbeat stopping...")
+    MEMORY_GRABBER = case RUBY_PLATFORM
+    when /linux/
+      ->(pid) {
+        IO.readlines("/proc/#{$$}/status").each do |line|
+          next unless line.start_with?("VmRSS:")
+          break line.split[1].to_i
+        end
+      }
+    when /darwin|bsd/
+      ->(pid) {
+        `ps -o pid,rss -p #{pid}`.lines.last.split.last.to_i
+      }
+    else
+      ->(pid) { 0 }
+    end
+
+    def memory_usage(pid)
+      MEMORY_GRABBER.call(pid)
    end
 
    def to_data
      @data ||= begin
        {
-          'hostname' => hostname,
-          'started_at' => Time.now.to_f,
-          'pid' => $$,
-          'tag' => @options[:tag] || '',
-          'concurrency' => @options[:concurrency],
-          'queues' => @options[:queues].uniq,
-          'labels' => @options[:labels],
-          'identity' => identity,
+          "hostname" => hostname,
+          "started_at" => Time.now.to_f,
+          "pid" => ::Process.pid,
+          "tag" => @options[:tag] || "",
+          "concurrency" => @options[:concurrency],
+          "queues" => @options[:queues].uniq,
+          "labels" => @options[:labels],
+          "identity" => identity
        }
      end
    end
@@ -154,20 +227,5 @@ module Sidekiq
      Sidekiq.dump_json(to_data)
    end
  end
-
-    def clear_heartbeat
-      # Remove record from Redis since we are shutting down.
-      # Note we don't stop the heartbeat thread; if the process
-      # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
-        conn.pipelined do
-          conn.srem('processes', identity)
-          conn.del("#{identity}:workers")
-        end
-      end
-    rescue
-      # best effort, ignore network errors
-    end
-
  end
end
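Two things stand out in the Launcher changes: each heartbeat now reports the process RSS (via the platform-specific MEMORY_GRABBER lambda) in a new "rss" field of the process hash, and the processed/failed counters are flushed to Redis through an at_exit hook (flush_stats) so they are not lost on shutdown. A rough sketch of reading the heartbeat data back through the existing Sidekiq::ProcessSet API follows; the "rss" value is in kilobytes, and processes running older Sidekiq versions simply won't report it.

require "sidekiq/api"

# Print each live Sidekiq process with its busy thread count and reported RSS.
Sidekiq::ProcessSet.new.each do |process|
  puts format("%s busy=%d rss=%s KB",
    process["identity"], process["busy"], process["rss"] || "n/a")
end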
data/lib/sidekiq/logger.rb

@@ -0,0 +1,166 @@
+# frozen_string_literal: true
+
+require "logger"
+require "time"
+
+module Sidekiq
+  module Context
+    def self.with(hash)
+      orig_context = current.dup
+      current.merge!(hash)
+      yield
+    ensure
+      Thread.current[:sidekiq_context] = orig_context
+    end
+
+    def self.current
+      Thread.current[:sidekiq_context] ||= {}
+    end
+  end
+
+  module LoggingUtils
+    LEVELS = {
+      "debug" => 0,
+      "info" => 1,
+      "warn" => 2,
+      "error" => 3,
+      "fatal" => 4
+    }
+    LEVELS.default_proc = proc do |_, level|
+      Sidekiq.logger.warn("Invalid log level: #{level.inspect}")
+      nil
+    end
+
+    def debug?
+      level <= 0
+    end
+
+    def info?
+      level <= 1
+    end
+
+    def warn?
+      level <= 2
+    end
+
+    def error?
+      level <= 3
+    end
+
+    def fatal?
+      level <= 4
+    end
+
+    def local_level
+      Thread.current[:sidekiq_log_level]
+    end
+
+    def local_level=(level)
+      case level
+      when Integer
+        Thread.current[:sidekiq_log_level] = level
+      when Symbol, String
+        Thread.current[:sidekiq_log_level] = LEVELS[level.to_s]
+      when nil
+        Thread.current[:sidekiq_log_level] = nil
+      else
+        raise ArgumentError, "Invalid log level: #{level.inspect}"
+      end
+    end
+
+    def level
+      local_level || super
+    end
+
+    # Change the thread-local level for the duration of the given block.
+    def log_at(level)
+      old_local_level = local_level
+      self.local_level = level
+      yield
+    ensure
+      self.local_level = old_local_level
+    end
+
+    # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+.
+    # FIXME: Remove when the minimum Ruby version supports overriding Logger#level.
+    def add(severity, message = nil, progname = nil, &block)
+      severity ||= ::Logger::UNKNOWN
+      progname ||= @progname
+
+      return true if @logdev.nil? || severity < level
+
+      if message.nil?
+        if block
+          message = yield
+        else
+          message = progname
+          progname = @progname
+        end
+      end
+
+      @logdev.write format_message(format_severity(severity), Time.now, progname, message)
+    end
+  end
+
+  class Logger < ::Logger
+    include LoggingUtils
+
+    def initialize(*args, **kwargs)
+      super
+      self.formatter = Sidekiq.log_formatter
+    end
+
+    module Formatters
+      class Base < ::Logger::Formatter
+        def tid
+          Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+        end
+
+        def ctx
+          Sidekiq::Context.current
+        end
+
+        def format_context
+          if ctx.any?
+            " " + ctx.compact.map { |k, v|
+              case v
+              when Array
+                "#{k}=#{v.join(",")}"
+              else
+                "#{k}=#{v}"
+              end
+            }.join(" ")
+          end
+        end
+      end
+
+      class Pretty < Base
+        def call(severity, time, program_name, message)
+          "#{time.utc.iso8601(3)} pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+        end
+      end
+
+      class WithoutTimestamp < Pretty
+        def call(severity, time, program_name, message)
+          "pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+        end
+      end
+
+      class JSON < Base
+        def call(severity, time, program_name, message)
+          hash = {
+            ts: time.utc.iso8601(3),
+            pid: ::Process.pid,
+            tid: tid,
+            lvl: severity,
+            msg: message
+          }
+          c = ctx
+          hash["ctx"] = c unless c.empty?
+
+          Sidekiq.dump_json(hash) << "\n"
+        end
+      end
+    end
+  end
+end
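The new Sidekiq::Logger keeps per-thread context (Sidekiq::Context) and a per-thread log level, and ships three formatters (Pretty, WithoutTimestamp, JSON). A minimal sketch of wiring this up in an initializer follows; it assumes Sidekiq.log_formatter= as the writer counterpart of the Sidekiq.log_formatter reader used in Sidekiq::Logger#initialize above.

# config/initializers/sidekiq.rb (illustrative)
# Emit structured JSON log lines instead of the default Pretty format.
Sidekiq.log_formatter = Sidekiq::Logger::Formatters::JSON.new

# Temporarily raise verbosity for one block; log_at restores the previous
# thread-local level in its ensure clause, as shown in LoggingUtils above.
Sidekiq.logger.log_at(:debug) do
  Sidekiq.logger.debug("only visible while inside this block")
end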