sidekiq 6.0.0 → 6.4.0

Files changed (102)
  1. checksums.yaml +4 -4
  2. data/Changes.md +290 -2
  3. data/LICENSE +3 -3
  4. data/README.md +7 -9
  5. data/bin/sidekiq +26 -2
  6. data/bin/sidekiqload +8 -4
  7. data/bin/sidekiqmon +4 -5
  8. data/lib/generators/sidekiq/job_generator.rb +57 -0
  9. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  10. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  11. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  12. data/lib/sidekiq/api.rb +222 -145
  13. data/lib/sidekiq/cli.rb +67 -28
  14. data/lib/sidekiq/client.rb +17 -34
  15. data/lib/sidekiq/delay.rb +2 -0
  16. data/lib/sidekiq/extensions/action_mailer.rb +5 -4
  17. data/lib/sidekiq/extensions/active_record.rb +6 -5
  18. data/lib/sidekiq/extensions/class_methods.rb +7 -6
  19. data/lib/sidekiq/extensions/generic_proxy.rb +5 -3
  20. data/lib/sidekiq/fetch.rb +36 -27
  21. data/lib/sidekiq/job.rb +13 -0
  22. data/lib/sidekiq/job_logger.rb +13 -5
  23. data/lib/sidekiq/job_retry.rb +33 -21
  24. data/lib/sidekiq/job_util.rb +65 -0
  25. data/lib/sidekiq/launcher.rb +110 -28
  26. data/lib/sidekiq/logger.rb +109 -12
  27. data/lib/sidekiq/manager.rb +10 -12
  28. data/lib/sidekiq/middleware/chain.rb +17 -6
  29. data/lib/sidekiq/middleware/current_attributes.rb +57 -0
  30. data/lib/sidekiq/monitor.rb +3 -18
  31. data/lib/sidekiq/paginator.rb +7 -2
  32. data/lib/sidekiq/processor.rb +22 -24
  33. data/lib/sidekiq/rails.rb +27 -18
  34. data/lib/sidekiq/redis_connection.rb +19 -13
  35. data/lib/sidekiq/scheduled.rb +48 -12
  36. data/lib/sidekiq/sd_notify.rb +149 -0
  37. data/lib/sidekiq/systemd.rb +24 -0
  38. data/lib/sidekiq/testing.rb +14 -4
  39. data/lib/sidekiq/util.rb +40 -1
  40. data/lib/sidekiq/version.rb +1 -1
  41. data/lib/sidekiq/web/action.rb +2 -2
  42. data/lib/sidekiq/web/application.rb +41 -31
  43. data/lib/sidekiq/web/csrf_protection.rb +180 -0
  44. data/lib/sidekiq/web/helpers.rb +51 -33
  45. data/lib/sidekiq/web/router.rb +6 -5
  46. data/lib/sidekiq/web.rb +37 -73
  47. data/lib/sidekiq/worker.rb +133 -16
  48. data/lib/sidekiq.rb +29 -8
  49. data/sidekiq.gemspec +13 -6
  50. data/web/assets/images/apple-touch-icon.png +0 -0
  51. data/web/assets/javascripts/application.js +83 -64
  52. data/web/assets/javascripts/dashboard.js +53 -53
  53. data/web/assets/stylesheets/application-dark.css +143 -0
  54. data/web/assets/stylesheets/application-rtl.css +0 -4
  55. data/web/assets/stylesheets/application.css +43 -232
  56. data/web/locales/ar.yml +8 -2
  57. data/web/locales/de.yml +14 -2
  58. data/web/locales/en.yml +6 -1
  59. data/web/locales/es.yml +18 -2
  60. data/web/locales/fr.yml +10 -3
  61. data/web/locales/ja.yml +5 -0
  62. data/web/locales/lt.yml +83 -0
  63. data/web/locales/pl.yml +4 -4
  64. data/web/locales/ru.yml +4 -0
  65. data/web/locales/vi.yml +83 -0
  66. data/web/views/_footer.erb +1 -1
  67. data/web/views/_job_info.erb +3 -2
  68. data/web/views/_poll_link.erb +2 -5
  69. data/web/views/_summary.erb +7 -7
  70. data/web/views/busy.erb +54 -20
  71. data/web/views/dashboard.erb +22 -14
  72. data/web/views/dead.erb +3 -3
  73. data/web/views/layout.erb +3 -1
  74. data/web/views/morgue.erb +9 -6
  75. data/web/views/queue.erb +19 -10
  76. data/web/views/queues.erb +10 -2
  77. data/web/views/retries.erb +11 -8
  78. data/web/views/retry.erb +3 -3
  79. data/web/views/scheduled.erb +5 -2
  80. metadata +34 -54
  81. data/.circleci/config.yml +0 -61
  82. data/.github/contributing.md +0 -32
  83. data/.github/issue_template.md +0 -11
  84. data/.gitignore +0 -13
  85. data/.standard.yml +0 -20
  86. data/3.0-Upgrade.md +0 -70
  87. data/4.0-Upgrade.md +0 -53
  88. data/5.0-Upgrade.md +0 -56
  89. data/6.0-Upgrade.md +0 -70
  90. data/COMM-LICENSE +0 -97
  91. data/Ent-2.0-Upgrade.md +0 -37
  92. data/Ent-Changes.md +0 -250
  93. data/Gemfile +0 -24
  94. data/Gemfile.lock +0 -196
  95. data/Pro-2.0-Upgrade.md +0 -138
  96. data/Pro-3.0-Upgrade.md +0 -44
  97. data/Pro-4.0-Upgrade.md +0 -35
  98. data/Pro-5.0-Upgrade.md +0 -25
  99. data/Pro-Changes.md +0 -768
  100. data/Rakefile +0 -10
  101. data/code_of_conduct.md +0 -50
  102. data/lib/generators/sidekiq/worker_generator.rb +0 -47

data/lib/sidekiq/job_retry.rb

@@ -3,6 +3,9 @@
 require "sidekiq/scheduled"
 require "sidekiq/api"
 
+require "zlib"
+require "base64"
+
 module Sidekiq
   ##
   # Automatically retry jobs that fail in Sidekiq.
@@ -31,9 +34,10 @@ module Sidekiq
   # The job will be retried this number of times before giving up. (If simply
   # 'true', Sidekiq retries 25 times)
   #
-  # We'll add a bit more data to the job to support retries:
+  # Relevant options for job retries:
   #
-  # * 'queue' - the queue to use
+  # * 'queue' - the queue for the initial job
+  # * 'retry_queue' - if job retries should be pushed to a different (e.g. lower priority) queue
   # * 'retry_count' - number of times we've retried so far.
   # * 'error_message' - the message from the exception
   # * 'error_class' - the exception class
@@ -49,15 +53,17 @@ module Sidekiq
   #
   #   Sidekiq.options[:max_retries] = 7
   #
-  # or limit the number of retries for a particular worker with:
+  # or limit the number of retries for a particular worker and send retries to
+  # a low priority queue with:
   #
   #   class MyWorker
   #     include Sidekiq::Worker
-  #     sidekiq_options :retry => 10
+  #     sidekiq_options retry: 10, retry_queue: 'low'
   #   end
   #
   class JobRetry
     class Handled < ::RuntimeError; end
+
     class Skip < Handled; end
 
     include Sidekiq::Util
@@ -71,7 +77,7 @@ module Sidekiq
     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
     # require the worker to be instantiated.
-    def global(msg, queue)
+    def global(jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -82,6 +88,7 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
+      msg = Sidekiq.load_json(jobstr)
       if msg["retry"]
         attempt_retry(nil, msg, queue, e)
       else
@@ -103,7 +110,7 @@ module Sidekiq
     # exception so the global block does not reprocess the error. The
     # Skip exception is unwrapped within Sidekiq::Processor#process before
     # calling the handle_exception handlers.
-    def local(worker, msg, queue)
+    def local(worker, jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -114,6 +121,7 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
+      msg = Sidekiq.load_json(jobstr)
      if msg["retry"].nil?
        msg["retry"] = worker.class.get_sidekiq_options["retry"]
      end
@@ -151,12 +159,14 @@ module Sidekiq
         msg["retry_count"] = 0
       end
 
-      if msg["backtrace"] == true
-        msg["error_backtrace"] = exception.backtrace
-      elsif !msg["backtrace"]
-        # do nothing
-      elsif msg["backtrace"].to_i != 0
-        msg["error_backtrace"] = exception.backtrace[0...msg["backtrace"].to_i]
+      if msg["backtrace"]
+        lines = if msg["backtrace"] == true
+          exception.backtrace
+        else
+          exception.backtrace[0...msg["backtrace"].to_i]
+        end
+
+        msg["error_backtrace"] = compress_backtrace(lines)
       end
 
       if count < max_retry_attempts
@@ -182,13 +192,13 @@ module Sidekiq
         handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
       end
 
+      send_to_morgue(msg) unless msg["dead"] == false
+
       Sidekiq.death_handlers.each do |handler|
         handler.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling death handler", job: msg})
       end
-
-      send_to_morgue(msg) unless msg["dead"] == false
     end
 
     def send_to_morgue(msg)
@@ -206,16 +216,12 @@ module Sidekiq
     end
 
     def delay_for(worker, count, exception)
+      jitter = rand(10) * (count + 1)
       if worker&.sidekiq_retry_in_block
         custom_retry_in = retry_in(worker, count, exception).to_i
-        return custom_retry_in if custom_retry_in > 0
+        return custom_retry_in + jitter if custom_retry_in > 0
       end
-      seconds_to_delay(count)
-    end
-
-    # delayed_job uses the same basic formula
-    def seconds_to_delay(count)
-      (count**4) + 15 + (rand(30) * (count + 1))
+      (count**4) + 15 + jitter
     end
 
     def retry_in(worker, count, exception)
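
A worked example of the new default backoff, which keeps delayed_job's polynomial curve but shares the (smaller) jitter term with custom `sidekiq_retry_in` values; the lambda below is illustrative:

    # delay (seconds) for the Nth retry: (count**4) + 15 + rand(10) * (count + 1)
    delay = ->(count) { (count**4) + 15 + rand(10) * (count + 1) }
    delay.call(0)  # => 15..24      (first retry: ~15s plus up to 9s of jitter)
    delay.call(2)  # => 31..58      (16 + 15 plus up to 27s of jitter)
    delay.call(9)  # => 6576..6666  (~1.8 hours by the tenth attempt)
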
@@ -245,5 +251,11 @@ module Sidekiq
     rescue
       +"!!! ERROR MESSAGE THREW AN ERROR !!!"
     end
+
+    def compress_backtrace(backtrace)
+      serialized = Sidekiq.dump_json(backtrace)
+      compressed = Zlib::Deflate.deflate(serialized)
+      Base64.encode64(compressed)
+    end
   end
 end
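
Backtraces stored with retries are now deflated and Base64-encoded to save Redis memory; Sidekiq's own API and Web UI decode them for display. For anyone reading a raw `error_backtrace` by hand, a minimal sketch of the inverse transform (the helper name is illustrative):

    require "zlib"
    require "base64"
    require "json"

    # reverse of compress_backtrace above: Base64 -> inflate -> JSON array of frames
    def decompress_backtrace(encoded)
      JSON.parse(Zlib::Inflate.inflate(Base64.decode64(encoded)))
    end
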
data/lib/sidekiq/job_util.rb (new file)

@@ -0,0 +1,65 @@
+require "securerandom"
+require "time"
+
+module Sidekiq
+  module JobUtil
+    # These functions encapsulate various job utilities.
+    # They must be simple and free from side effects.
+
+    def validate(item)
+      raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
+      raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
+      raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
+      raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
+      raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+
+      if Sidekiq.options[:on_complex_arguments] == :raise
+        msg = <<~EOM
+          Job arguments to #{item["class"]} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
+          To disable this error, remove `Sidekiq.strict_args!` from your initializer.
+        EOM
+        raise(ArgumentError, msg) unless json_safe?(item)
+      elsif Sidekiq.options[:on_complex_arguments] == :warn
+        Sidekiq.logger.warn <<~EOM unless json_safe?(item)
+          Job arguments to #{item["class"]} do not serialize to JSON safely. This will raise an error in
+          Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
+          by calling `Sidekiq.strict_args!` during Sidekiq initialization.
+        EOM
+      end
+    end
+
+    def normalize_item(item)
+      validate(item)
+
+      # merge in the default sidekiq_options for the item's class and/or wrapped element
+      # this allows ActiveJobs to control sidekiq_options too.
+      defaults = normalized_hash(item["class"])
+      defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?(:get_sidekiq_options)
+      item = defaults.merge(item)
+
+      raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
+
+      item["class"] = item["class"].to_s
+      item["queue"] = item["queue"].to_s
+      item["jid"] ||= SecureRandom.hex(12)
+      item["created_at"] ||= Time.now.to_f
+
+      item
+    end
+
+    def normalized_hash(item_class)
+      if item_class.is_a?(Class)
+        raise(ArgumentError, "Message must include a Sidekiq::Worker class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
+        item_class.get_sidekiq_options
+      else
+        Sidekiq.default_worker_options
+      end
+    end
+
+    private
+
+    def json_safe?(item)
+      JSON.parse(JSON.dump(item["args"])) == item["args"]
+    end
+  end
+end
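
This module backs the `Sidekiq.strict_args!` feature referenced in the messages above: job arguments must round-trip through JSON unchanged. A usage sketch, where `HardWorker` is a placeholder worker class:

    # config/initializers/sidekiq.rb
    Sidekiq.strict_args!  # sets Sidekiq.options[:on_complex_arguments] = :raise

    HardWorker.perform_async("ok", 123, {"mode" => "fast"})  # native JSON types: passes
    HardWorker.perform_async(:fast)       # Symbol becomes "fast" after the round-trip: raises ArgumentError
    HardWorker.perform_async(Date.today)  # ditto; serialize to a String yourself instead
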
data/lib/sidekiq/launcher.rb

@@ -16,12 +16,13 @@ module Sidekiq
      proc { Sidekiq::VERSION },
      proc { |me, data| data["tag"] },
      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
-     proc { |me, data| "stopping" if me.stopping? },
+     proc { |me, data| "stopping" if me.stopping? }
    ]
 
    attr_accessor :manager, :poller, :fetcher
 
    def initialize(options)
+     options[:fetch] ||= BasicFetch.new(options)
      @manager = Sidekiq::Manager.new(options)
      @poller = Sidekiq::Scheduled::Poller.new
      @done = false
@@ -56,7 +57,7 @@ module Sidekiq
 
       # Requeue everything in case there was a worker who grabbed work while stopped
       # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-      strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+      strategy = @options[:fetch]
       strategy.bulk_requeue([], @options)
 
       clear_heartbeat
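
Both hunks reflect the same change: `options[:fetch]` now holds an instantiated strategy (seeded with `BasicFetch.new(options)`) rather than a class, so a fetcher can carry per-process state. A hedged sketch of the shape a custom strategy needs; the class and its internals are illustrative, and a production unit of work also needs `#acknowledge`, `#requeue` and `#queue_name` (see `BasicFetch::UnitOfWork`):

    class SampleFetch
      UnitOfWork = Struct.new(:queue, :job)  # simplified stand-in for BasicFetch::UnitOfWork

      def initialize(options)
        @queues = options[:queues].map { |q| "queue:#{q}" }
      end

      # block for up to two seconds waiting for a job, like BasicFetch's BRPOP
      def retrieve_work
        queue, job = Sidekiq.redis { |conn| conn.brpop(*@queues, timeout: 2) }
        UnitOfWork.new(queue, job) if queue
      end

      # in-progress jobs are pushed back at shutdown for at-least-once delivery
      def bulk_requeue(inprogress, options)
        Sidekiq.redis do |conn|
          inprogress.each { |unit| conn.rpush(unit.queue, unit.job) }
        end
      end
    end

    # Sidekiq.options[:fetch] = SampleFetch.new(Sidekiq.options)
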
@@ -68,10 +69,12 @@ module Sidekiq
 
     private unless $TESTING
 
+    BEAT_PAUSE = 5
+
     def start_heartbeat
       loop do
         heartbeat
-        sleep 5
+        sleep BEAT_PAUSE
       end
       Sidekiq.logger.info("Heartbeat stopping...")
     end
@@ -83,7 +86,7 @@ module Sidekiq
       Sidekiq.redis do |conn|
         conn.pipelined do
           conn.srem("processes", identity)
-          conn.del("#{identity}:workers")
+          conn.unlink("#{identity}:workers")
         end
       end
     rescue
@@ -96,6 +99,32 @@ module Sidekiq
 
     end
 
+    def self.flush_stats
+      fails = Processor::FAILURE.reset
+      procd = Processor::PROCESSED.reset
+      return if fails + procd == 0
+
+      nowdate = Time.now.utc.strftime("%Y-%m-%d")
+      begin
+        Sidekiq.redis do |conn|
+          conn.pipelined do
+            conn.incrby("stat:processed", procd)
+            conn.incrby("stat:processed:#{nowdate}", procd)
+            conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+            conn.incrby("stat:failed", fails)
+            conn.incrby("stat:failed:#{nowdate}", fails)
+            conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+          end
+        end
+      rescue => ex
+        # we're exiting the process, things might be shut down so don't
+        # try to handle the exception
+        Sidekiq.logger.warn("Unable to flush stats: #{ex}")
+      end
+    end
+    at_exit(&method(:flush_stats))
+
     def ❤
       key = identity
       fails = procd = 0
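
`flush_stats` writes to the same counters the heartbeat and dashboard use, so counts from the final seconds of a process are no longer lost at exit. A quick sketch of reading them back:

    today = Time.now.utc.strftime("%Y-%m-%d")
    Sidekiq.redis do |conn|
      conn.get("stat:processed")            # lifetime processed count
      conn.get("stat:processed:#{today}")   # today's count (expires after STATS_TTL)
    end

    # or via the higher-level API
    Sidekiq::Stats.new.processed  # => Integer
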
@@ -118,7 +147,7 @@ module Sidekiq
           conn.incrby("stat:failed:#{nowdate}", fails)
           conn.expire("stat:failed:#{nowdate}", STATS_TTL)
 
-          conn.del(workers_key)
+          conn.unlink(workers_key)
           curstate.each_pair do |tid, hash|
             conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
           end
@@ -126,18 +155,24 @@ module Sidekiq
         end
       end
 
+      rtt = check_rtt
+
       fails = procd = 0
+      kb = memory_usage(::Process.pid)
 
       _, exists, _, _, msg = Sidekiq.redis { |conn|
-        res = conn.multi {
+        conn.multi {
           conn.sadd("processes", key)
-          conn.exists(key)
-          conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
+          conn.exists?(key)
+          conn.hmset(key, "info", to_json,
+            "busy", curstate.size,
+            "beat", Time.now.to_f,
+            "rtt_us", rtt,
+            "quiet", @done,
+            "rss", kb)
           conn.expire(key, 60)
           conn.rpop("#{key}-signals")
         }
-
-        res
       }
 
       # first heartbeat or recovering from an outage and need to reestablish our heartbeat
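
The heartbeat hash now includes `rtt_us` (Redis round-trip time in microseconds) and `rss` (resident memory in kilobytes), which the API exposes per process. A small sketch of reading them back:

    require "sidekiq/api"

    Sidekiq::ProcessSet.new.each do |process|
      # each entry mirrors the hash written by the heartbeat above
      puts "#{process["identity"]}: busy=#{process["busy"]} rtt_us=#{process["rtt_us"]} rss=#{process["rss"]}kb"
    end
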
@@ -148,34 +183,81 @@ module Sidekiq
       ::Process.kill(msg, ::Process.pid)
     rescue => e
       # ignore all redis/network issues
-      logger.error("heartbeat: #{e.message}")
+      logger.error("heartbeat: #{e}")
       # don't lose the counts if there was a network issue
       Processor::PROCESSED.incr(procd)
       Processor::FAILURE.incr(fails)
     end
 
-    def to_data
-      @data ||= begin
-        {
-          "hostname" => hostname,
-          "started_at" => Time.now.to_f,
-          "pid" => ::Process.pid,
-          "tag" => @options[:tag] || "",
-          "concurrency" => @options[:concurrency],
-          "queues" => @options[:queues].uniq,
-          "labels" => @options[:labels],
-          "identity" => identity,
-        }
+    # We run the heartbeat every five seconds.
+    # Capture five samples of RTT, log a warning if each sample
+    # is above our warning threshold.
+    RTT_READINGS = RingBuffer.new(5)
+    RTT_WARNING_LEVEL = 50_000
+
+    def check_rtt
+      a = b = 0
+      Sidekiq.redis do |x|
+        a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
+        x.ping
+        b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
+      end
+      rtt = b - a
+      RTT_READINGS << rtt
+      # Ideal RTT for Redis is < 1000µs
+      # Workable is < 10,000µs
+      # Log a warning if it's a disaster.
+      if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
+        Sidekiq.logger.warn <<~EOM
+          Your Redis network connection is performing extremely poorly.
+          Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
+          Ensure Redis is running in the same AZ or datacenter as Sidekiq.
+          If these values are close to 100,000, that means your Sidekiq process may be
+          CPU overloaded; see https://github.com/mperham/sidekiq/discussions/5039
+        EOM
+        RTT_READINGS.reset
       end
+      rtt
+    end
+
+    MEMORY_GRABBER = case RUBY_PLATFORM
+    when /linux/
+      ->(pid) {
+        IO.readlines("/proc/#{$$}/status").each do |line|
+          next unless line.start_with?("VmRSS:")
+          break line.split[1].to_i
+        end
+      }
+    when /darwin|bsd/
+      ->(pid) {
+        `ps -o pid,rss -p #{pid}`.lines.last.split.last.to_i
+      }
+    else
+      ->(pid) { 0 }
+    end
+
+    def memory_usage(pid)
+      MEMORY_GRABBER.call(pid)
+    end
+
+    def to_data
+      @data ||= {
+        "hostname" => hostname,
+        "started_at" => Time.now.to_f,
+        "pid" => ::Process.pid,
+        "tag" => @options[:tag] || "",
+        "concurrency" => @options[:concurrency],
+        "queues" => @options[:queues].uniq,
+        "labels" => @options[:labels],
+        "identity" => identity
+      }
     end
 
     def to_json
-      @json ||= begin
-        # this data changes infrequently so dump it to a string
-        # now so we don't need to dump it every heartbeat.
-        Sidekiq.dump_json(to_data)
-      end
+      # this data changes infrequently so dump it to a string
+      # now so we don't need to dump it every heartbeat.
+      @json ||= Sidekiq.dump_json(to_data)
     end
   end
 end
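
`RingBuffer` comes from the expanded `data/lib/sidekiq/util.rb` (in the file list above, not shown in this excerpt). A plausible minimal implementation, consistent with how `RTT_READINGS` uses `<<`, `all?`, `buffer` and `reset`:

    class RingBuffer
      include Enumerable
      attr_reader :buffer

      def initialize(size, default = 0)
        @size = size
        @buffer = Array.new(size, default)
        @index = 0
      end

      # overwrite the oldest slot, wrapping around the fixed-size buffer
      def <<(element)
        @buffer[@index % @size] = element
        @index += 1
        element
      end

      def each(&block)
        @buffer.each(&block)
      end

      def reset(default = 0)
        @buffer.fill(default)
      end
    end

Seeding the buffer with zeros means the `all?` warning above cannot fire until five real samples have been taken.
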
data/lib/sidekiq/logger.rb

@@ -4,22 +4,110 @@ require "logger"
 require "time"
 
 module Sidekiq
-  class Logger < ::Logger
-    def initialize(*args)
-      super
+  module Context
+    def self.with(hash)
+      orig_context = current.dup
+      current.merge!(hash)
+      yield
+    ensure
+      Thread.current[:sidekiq_context] = orig_context
+    end
 
-      self.formatter = Sidekiq.log_formatter
+    def self.current
+      Thread.current[:sidekiq_context] ||= {}
+    end
+  end
+
+  module LoggingUtils
+    LEVELS = {
+      "debug" => 0,
+      "info" => 1,
+      "warn" => 2,
+      "error" => 3,
+      "fatal" => 4
+    }
+    LEVELS.default_proc = proc do |_, level|
+      Sidekiq.logger.warn("Invalid log level: #{level.inspect}")
+      nil
+    end
+
+    def debug?
+      level <= 0
+    end
+
+    def info?
+      level <= 1
+    end
+
+    def warn?
+      level <= 2
+    end
+
+    def error?
+      level <= 3
+    end
+
+    def fatal?
+      level <= 4
     end
 
-    def with_context(hash)
-      ctx.merge!(hash)
+    def local_level
+      Thread.current[:sidekiq_log_level]
+    end
+
+    def local_level=(level)
+      case level
+      when Integer
+        Thread.current[:sidekiq_log_level] = level
+      when Symbol, String
+        Thread.current[:sidekiq_log_level] = LEVELS[level.to_s]
+      when nil
+        Thread.current[:sidekiq_log_level] = nil
+      else
+        raise ArgumentError, "Invalid log level: #{level.inspect}"
+      end
+    end
+
+    def level
+      local_level || super
+    end
+
+    # Change the thread-local level for the duration of the given block.
+    def log_at(level)
+      old_local_level = local_level
+      self.local_level = level
       yield
     ensure
-      hash.keys.each { |key| ctx.delete(key) }
+      self.local_level = old_local_level
     end
 
-    def ctx
-      Thread.current[:sidekiq_context] ||= {}
+    # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+.
+    # FIXME: Remove when the minimum Ruby version supports overriding Logger#level.
+    def add(severity, message = nil, progname = nil, &block)
+      severity ||= ::Logger::UNKNOWN
+      progname ||= @progname
+
+      return true if @logdev.nil? || severity < level
+
+      if message.nil?
+        if block
+          message = yield
+        else
+          message = progname
+          progname = @progname
+        end
+      end
+
+      @logdev.write format_message(format_severity(severity), Time.now, progname, message)
+    end
+  end
+
+  class Logger < ::Logger
+    include LoggingUtils
+
+    def initialize(*args, **kwargs)
+      super
+      self.formatter = Sidekiq.log_formatter
     end
 
     module Formatters
@@ -29,11 +117,20 @@ module Sidekiq
       end
 
       def ctx
-        Thread.current[:sidekiq_context] ||= {}
+        Sidekiq::Context.current
       end
 
       def format_context
-        " " + ctx.compact.map { |k, v| "#{k}=#{v}" }.join(" ") if ctx.any?
+        if ctx.any?
+          " " + ctx.compact.map { |k, v|
+            case v
+            when Array
+              "#{k}=#{v.join(",")}"
+            else
+              "#{k}=#{v}"
+            end
+          }.join(" ")
+        end
       end
     end
 
@@ -56,7 +153,7 @@ module Sidekiq
           pid: ::Process.pid,
           tid: tid,
           lvl: severity,
-          msg: message,
+          msg: message
         }
         c = ctx
         hash["ctx"] = c unless c.empty?
data/lib/sidekiq/manager.rb

@@ -35,7 +35,7 @@ module Sidekiq
      @done = false
      @workers = Set.new
      @count.times do
-       @workers << Processor.new(self)
+       @workers << Processor.new(self, options)
      end
      @plock = Mutex.new
    end
@@ -55,9 +55,6 @@ module Sidekiq
       fire_event(:quiet, reverse: true)
     end
 
-    # hack for quicker development / testing environment #2774
-    PAUSE_TIME = STDOUT.tty? ? 0.1 : 0.5
-
     def stop(deadline)
       quiet
       fire_event(:shutdown, reverse: true)
@@ -69,12 +66,7 @@ module Sidekiq
       return if @workers.empty?
 
       logger.info { "Pausing to allow workers to finish..." }
-      remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-      while remaining > PAUSE_TIME
-        return if @workers.empty?
-        sleep PAUSE_TIME
-        remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-      end
+      wait_for(deadline) { @workers.empty? }
       return if @workers.empty?
 
       hard_shutdown
@@ -90,7 +82,7 @@ module Sidekiq
       @plock.synchronize do
         @workers.delete(processor)
         unless @done
-          p = Processor.new(self)
+          p = Processor.new(self, options)
           @workers << p
           p.start
         end
@@ -123,13 +115,19 @@ module Sidekiq
         # contract says that jobs are run AT LEAST once. Process termination
         # is delayed until we're certain the jobs are back in Redis because
         # it is worse to lose a job than to run it twice.
-        strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+        strategy = @options[:fetch]
         strategy.bulk_requeue(jobs, @options)
       end
 
       cleanup.each do |processor|
         processor.kill
       end
+
+      # when this method returns, we immediately call `exit` which may not give
+      # the remaining threads time to run `ensure` blocks, etc. We pause here up
+      # to 3 seconds to give threads a minimal amount of time to run `ensure` blocks.
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
+      wait_for(deadline) { @workers.empty? }
     end
   end
 end
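
`wait_for` replaces both hand-rolled polling loops above; it lives in `data/lib/sidekiq/util.rb` (part of the +40 lines in the file list, not shown here). A hedged sketch of its contract, mirroring the loop it replaced, with the poll interval as an assumption:

    # sketch: poll the block until it returns truthy or the monotonic deadline passes
    # (the real helper is in Sidekiq::Util; the exact sleep interval is an assumption)
    def wait_for(deadline, &condition)
      remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      while remaining > 0.1
        return if condition.call
        sleep 0.1
        remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      end
    end
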