sidekiq 7.0.3 → 7.1.2

Potentially problematic release: this version of sidekiq might be problematic.

Files changed (45)
  1. checksums.yaml +4 -4
  2. data/Changes.md +82 -9
  3. data/README.md +29 -22
  4. data/bin/sidekiqload +204 -109
  5. data/bin/sidekiqmon +3 -0
  6. data/lib/sidekiq/api.rb +33 -10
  7. data/lib/sidekiq/capsule.rb +1 -0
  8. data/lib/sidekiq/cli.rb +3 -2
  9. data/lib/sidekiq/client.rb +33 -20
  10. data/lib/sidekiq/component.rb +3 -1
  11. data/lib/sidekiq/config.rb +12 -4
  12. data/lib/sidekiq/embedded.rb +1 -1
  13. data/lib/sidekiq/fetch.rb +1 -1
  14. data/lib/sidekiq/job.rb +1 -5
  15. data/lib/sidekiq/job_retry.rb +8 -5
  16. data/lib/sidekiq/job_util.rb +49 -15
  17. data/lib/sidekiq/launcher.rb +3 -2
  18. data/lib/sidekiq/metrics/query.rb +1 -1
  19. data/lib/sidekiq/metrics/shared.rb +3 -3
  20. data/lib/sidekiq/metrics/tracking.rb +2 -0
  21. data/lib/sidekiq/middleware/chain.rb +12 -9
  22. data/lib/sidekiq/middleware/current_attributes.rb +55 -16
  23. data/lib/sidekiq/monitor.rb +1 -3
  24. data/lib/sidekiq/paginator.rb +1 -1
  25. data/lib/sidekiq/processor.rb +4 -1
  26. data/lib/sidekiq/rails.rb +10 -0
  27. data/lib/sidekiq/redis_client_adapter.rb +5 -24
  28. data/lib/sidekiq/scheduled.rb +1 -1
  29. data/lib/sidekiq/version.rb +1 -1
  30. data/lib/sidekiq/web/application.rb +14 -2
  31. data/lib/sidekiq/web/helpers.rb +2 -2
  32. data/lib/sidekiq/web.rb +1 -1
  33. data/sidekiq.gemspec +7 -15
  34. data/web/assets/javascripts/metrics.js +30 -2
  35. data/web/assets/stylesheets/application.css +1 -1
  36. data/web/locales/da.yml +11 -4
  37. data/web/locales/fr.yml +14 -0
  38. data/web/locales/gd.yml +99 -0
  39. data/web/locales/ja.yml +3 -1
  40. data/web/views/_footer.erb +2 -2
  41. data/web/views/_metrics_period_select.erb +12 -0
  42. data/web/views/busy.erb +3 -3
  43. data/web/views/metrics.erb +6 -4
  44. data/web/views/metrics_for_job.erb +11 -12
  45. metadata +14 -19
data/lib/sidekiq/api.rb CHANGED
@@ -92,11 +92,11 @@ module Sidekiq
           pipeline.zcard("retry")
           pipeline.zcard("dead")
           pipeline.scard("processes")
-          pipeline.lrange("queue:default", -1, -1)
+          pipeline.lindex("queue:default", -1)
         end
       }

-      default_queue_latency = if (entry = pipe1_res[6].first)
+      default_queue_latency = if (entry = pipe1_res[6])
        job = begin
          Sidekiq.load_json(entry)
        rescue
@@ -264,8 +264,8 @@ module Sidekiq
     # @return [Float] in seconds
     def latency
       entry = Sidekiq.redis { |conn|
-        conn.lrange(@rname, -1, -1)
-      }.first
+        conn.lindex(@rname, -1)
+      }
       return 0 unless entry
       job = Sidekiq.load_json(entry)
       now = Time.now.to_f
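For reference, a minimal sketch of the API this change speeds up; the queue name is just an example:

    require "sidekiq/api"

    # Age of the oldest job in the queue, in seconds; 0.0 when the queue is empty.
    # 7.1 reads that single entry with LINDEX instead of an LRANGE slice.
    Sidekiq::Queue.new("default").latency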
@@ -391,13 +391,13 @@ module Sidekiq
     def display_args
       # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
       @display_args ||= if klass == "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
-        job_args = self["wrapped"] ? args[0]["arguments"] : []
+        job_args = self["wrapped"] ? deserialize_argument(args[0]["arguments"]) : []
         if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob"
           # remove MailerClass, mailer_method and 'deliver_now'
           job_args.drop(3)
         elsif (self["wrapped"] || args[0]) == "ActionMailer::MailDeliveryJob"
           # remove MailerClass, mailer_method and 'deliver_now'
-          job_args.drop(3).first["args"]
+          job_args.drop(3).first.values_at("params", "args")
         else
           job_args
         end
@@ -467,6 +467,29 @@ module Sidekiq

     private

+    ACTIVE_JOB_PREFIX = "_aj_"
+    GLOBALID_KEY = "_aj_globalid"
+
+    def deserialize_argument(argument)
+      case argument
+      when Array
+        argument.map { |arg| deserialize_argument(arg) }
+      when Hash
+        if serialized_global_id?(argument)
+          argument[GLOBALID_KEY]
+        else
+          argument.transform_values { |v| deserialize_argument(v) }
+            .reject { |k, _| k.start_with?(ACTIVE_JOB_PREFIX) }
+        end
+      else
+        argument
+      end
+    end
+
+    def serialized_global_id?(hash)
+      hash.size == 1 && hash.include?(GLOBALID_KEY)
+    end
+
     def uncompress_backtrace(backtrace)
       decoded = Base64.decode64(backtrace)
       uncompressed = Zlib::Inflate.inflate(decoded)
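Roughly what the new unwrapping does for ActiveJob payloads in the Web UI; the hash below is a hand-built illustration, not part of the diff:

    # ActiveJob stores GlobalID references and bookkeeping under "_aj_" keys:
    serialized = [{"user" => {"_aj_globalid" => "gid://app/User/1"}, "note" => "hi", "_aj_ruby2_keywords" => ["user", "note"]}]

    # deserialize_argument collapses a GlobalID hash to its URI and drops "_aj_" keys,
    # so display_args would show:
    #   [{"user" => "gid://app/User/1", "note" => "hi"}]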
@@ -548,7 +571,7 @@ module Sidekiq
       def remove_job
         Sidekiq.redis do |conn|
           results = conn.multi { |transaction|
-            transaction.zrangebyscore(parent.name, score, score)
+            transaction.zrange(parent.name, score, score, "BYSCORE")
             transaction.zremrangebyscore(parent.name, score, score)
           }.first

@@ -683,7 +706,7 @@ module Sidekiq
       end

       elements = Sidekiq.redis { |conn|
-        conn.zrangebyscore(name, begin_score, end_score, withscores: true)
+        conn.zrange(name, begin_score, end_score, "BYSCORE", withscores: true)
       }

       elements.each_with_object([]) do |element, result|
@@ -702,7 +725,7 @@ module Sidekiq
     def find_job(jid)
       Sidekiq.redis do |conn|
         conn.zscan(name, match: "*#{jid}*", count: 100) do |entry, score|
-          job = JSON.parse(entry)
+          job = Sidekiq.load_json(entry)
           matched = job["jid"] == jid
           return SortedEntry.new(self, score, entry) if matched
         end
@@ -724,7 +747,7 @@ module Sidekiq
     # @api private
     def delete_by_jid(score, jid)
       Sidekiq.redis do |conn|
-        elements = conn.zrangebyscore(name, score, score)
+        elements = conn.zrange(name, score, score, "BYSCORE")
         elements.each do |element|
           if element.index(jid)
             message = Sidekiq.load_json(element)
data/lib/sidekiq/capsule.rb CHANGED
@@ -28,6 +28,7 @@ module Sidekiq
     @name = name
     @config = config
     @queues = ["default"]
+    @weights = {"default" => 0}
     @concurrency = config[:concurrency]
     @mode = :strict
   end
data/lib/sidekiq/cli.rb CHANGED
@@ -84,7 +84,7 @@ module Sidekiq # :nodoc:

         WARNING: Your Redis instance will evict Sidekiq data under heavy load.
         The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
-        See: https://github.com/mperham/sidekiq/wiki/Using-Redis#memory
+        See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory

       EOM
     end
@@ -230,6 +230,7 @@ module Sidekiq # :nodoc:
       # Both Sinatra 2.0+ and Sidekiq support this term.
       # RAILS_ENV and RACK_ENV are there for legacy support.
       @environment = cli_env || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
+      config[:environment] = @environment
     end

     def symbolize_keys_deep!(hash)
@@ -396,7 +397,7 @@ module Sidekiq # :nodoc:
     end

     def parse_config(path)
-      erb = ERB.new(File.read(path))
+      erb = ERB.new(File.read(path), trim_mode: "-")
       erb.filename = File.expand_path(path)
       opts = YAML.safe_load(erb.result, permitted_classes: [Symbol], aliases: true) || {}

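For context, trim_mode: "-" lets <%- ... -%> tags swallow their own newlines, so loops in an ERB-templated sidekiq.yml leave no blank lines behind. A standalone sketch (the queue names are made up):

    require "erb"

    template = <<~YML
      :queues:
      <%- %w[critical default low].each do |q| -%>
        - <%= q %>
      <%- end -%>
    YML

    # With trim_mode: "-" the <%- ... -%> lines emit nothing, not even a newline.
    puts ERB.new(template, trim_mode: "-").result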
data/lib/sidekiq/client.rb CHANGED
@@ -96,8 +96,9 @@ module Sidekiq

     ##
     # Push a large number of jobs to Redis. This method cuts out the redis
-    # network round trip latency. I wouldn't recommend pushing more than
-    # 1000 per call but YMMV based on network quality, size of job args, etc.
+    # network round trip latency. It pushes jobs in batches if more than
+    # `:batch_size` (1000 by default) of jobs are passed. I wouldn't recommend making `:batch_size`
+    # larger than 1000 but YMMV based on network quality, size of job args, etc.
     # A large number of jobs can cause a bit of Redis command processing latency.
     #
     # Takes the same arguments as #push except that args is expected to be
@@ -105,13 +106,15 @@ module Sidekiq
     # is run through the client middleware pipeline and each job gets its own Job ID
     # as normal.
     #
-    # Returns an array of the of pushed jobs' jids. The number of jobs pushed can be less
-    # than the number given if the middleware stopped processing for one or more jobs.
+    # Returns an array of the of pushed jobs' jids, may contain nils if any client middleware
+    # prevented a job push.
+    #
+    # Example (pushing jobs in batches):
+    #   push_bulk('class' => 'MyJob', 'args' => (1..100_000).to_a, batch_size: 1_000)
+    #
     def push_bulk(items)
+      batch_size = items.delete(:batch_size) || items.delete("batch_size") || 1_000
       args = items["args"]
-      raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless args.is_a?(Array) && args.all?(Array)
-      return [] if args.empty? # no jobs to push
-
       at = items.delete("at")
       raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
       raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
@@ -120,18 +123,26 @@ module Sidekiq
       raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1

       normed = normalize_item(items)
-      payloads = args.map.with_index { |job_args, index|
-        copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
-        copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
-        result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
-          verify_json(copy)
-          copy
-        end
-        result || nil
-      }.compact
+      result = args.each_slice(batch_size).flat_map do |slice|
+        raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless slice.is_a?(Array) && slice.all?(Array)
+        break [] if slice.empty? # no jobs to push
+
+        payloads = slice.map.with_index { |job_args, index|
+          copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
+          copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
+          result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
+            verify_json(copy)
+            copy
+          end
+          result || nil
+        }
+
+        to_push = payloads.compact
+        raw_push(to_push) unless to_push.empty?
+        payloads.map { |payload| payload&.[]("jid") }
+      end

-      raw_push(payloads) unless payloads.empty?
-      payloads.collect { |payload| payload["jid"] }
+      result.is_a?(Enumerator::Lazy) ? result.force : result
     end

     # Allows sharding of jobs across any number of Redis instances. All jobs
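A hedged usage sketch of the new batching option; MyJob and the numbers are hypothetical:

    # Pushes 10,000 jobs in pipelines of 500. The return value contains one jid
    # per argument array, with nil wherever client middleware stopped a push.
    jids = Sidekiq::Client.push_bulk(
      "class" => "MyJob",
      "args" => (1..10_000).map { |i| [i] },
      "batch_size" => 500
    )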
@@ -160,8 +171,8 @@ module Sidekiq
       new.push(item)
     end

-    def push_bulk(items)
-      new.push_bulk(items)
+    def push_bulk(...)
+      new.push_bulk(...)
     end

     # Resque compatibility helpers. Note all helpers
@@ -235,6 +246,8 @@ module Sidekiq
       if payloads.first.key?("at")
         conn.zadd("schedule", payloads.flat_map { |hash|
           at = hash.delete("at").to_s
+          # ActiveJob sets this but the job has not been enqueued yet
+          hash.delete("enqueued_at")
           [at, Sidekiq.dump_json(hash)]
         })
       else
data/lib/sidekiq/component.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Sidekiq
   ##
   # Sidekiq::Component assumes a config instance is available at @config
@@ -13,7 +15,7 @@ module Sidekiq

   def safe_thread(name, &block)
     Thread.new do
-      Thread.current.name = name
+      Thread.current.name = "sidekiq.#{name}"
       watchdog(name, &block)
     end
   end
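The prefix makes Sidekiq's internal threads easy to spot in a live process; a small sketch, assuming a Ruby with Thread#name (2.3+) and names such as "sidekiq.heartbeat" (others depend on what is running):

    Thread.list.filter_map(&:name).grep(/\Asidekiq\./)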
data/lib/sidekiq/config.rb CHANGED
@@ -30,7 +30,8 @@ module Sidekiq
     },
     dead_max_jobs: 10_000,
     dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
-    reloader: proc { |&block| block.call }
+    reloader: proc { |&block| block.call },
+    backtrace_cleaner: ->(backtrace) { backtrace }
   }

   ERROR_HANDLER = ->(ex, ctx) {
@@ -38,7 +39,10 @@ module Sidekiq
     l = cfg.logger
     l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
     l.warn("#{ex.class.name}: #{ex.message}")
-    l.warn(ex.backtrace.join("\n")) unless ex.backtrace.nil?
+    unless ex.backtrace.nil?
+      backtrace = cfg[:backtrace_cleaner].call(ex.backtrace)
+      l.warn(backtrace.join("\n"))
+    end
   }

   def initialize(options = {})
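A minimal sketch of wiring the new :backtrace_cleaner hook to Rails' cleaner from an initializer; note that the Rails integration in this release (data/lib/sidekiq/rails.rb) may already set this for you, so treat it as illustrative:

    Sidekiq.configure_server do |config|
      # Strip framework frames from backtraces before they are logged or stored on retries.
      config[:backtrace_cleaner] = ->(backtrace) { Rails.backtrace_cleaner.clean(backtrace) }
    end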
@@ -52,6 +56,10 @@ module Sidekiq
   def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
   attr_reader :capsules

+  def to_json(*)
+    Sidekiq.dump_json(@options)
+  end
+
   # LEGACY: edits the default capsule
   # config.concurrency = 5
   def concurrency=(val)
@@ -123,7 +131,7 @@ module Sidekiq
   private def local_redis_pool
     # this is our internal client/housekeeping pool. each capsule has its
     # own pool for executing threads.
-    @redis ||= new_redis_pool(5, "internal")
+    @redis ||= new_redis_pool(10, "internal")
   end

   def new_redis_pool(size, name = "unset")
@@ -259,7 +267,7 @@ module Sidekiq
     ctx[:_config] = self
     @options[:error_handlers].each do |handler|
       handler.call(ex, ctx)
-    rescue => e
+    rescue Exception => e
       l = logger
       l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
       l.error e
data/lib/sidekiq/embedded.rb CHANGED
@@ -49,7 +49,7 @@ module Sidekiq

         WARNING: Your Redis instance will evict Sidekiq data under heavy load.
         The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
-        See: https://github.com/mperham/sidekiq/wiki/Using-Redis#memory
+        See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory

       EOM
     end
data/lib/sidekiq/fetch.rb CHANGED
@@ -44,7 +44,7 @@ module Sidekiq # :nodoc:
         return nil
       end

-      queue, job = redis { |conn| conn.blocking_call(false, "brpop", *qs, TIMEOUT) }
+      queue, job = redis { |conn| conn.blocking_call(conn.read_timeout + TIMEOUT, "brpop", *qs, TIMEOUT) }
       UnitOfWork.new(queue, job, config) if queue
     end

@@ -239,11 +239,7 @@ module Sidekiq
239
239
 
240
240
  def perform_bulk(args, batch_size: 1_000)
241
241
  client = @klass.build_client
242
- result = args.each_slice(batch_size).flat_map do |slice|
243
- client.push_bulk(@opts.merge("class" => @klass, "args" => slice))
244
- end
245
-
246
- result.is_a?(Enumerator::Lazy) ? result.force : result
242
+ client.push_bulk(@opts.merge("class" => @klass, "args" => args, :batch_size => batch_size))
247
243
  end
248
244
 
249
245
  # +interval+ must be a timestamp, numeric or something that acts
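A short sketch of the job-level entry point that now delegates batching to Client#push_bulk; MyJob and the queue name are hypothetical:

    class MyJob
      include Sidekiq::Job
      def perform(user_id); end
    end

    # One job per argument array; slicing into Redis pipelines of 500 now happens
    # inside push_bulk via the :batch_size option.
    MyJob.set(queue: "bulk").perform_bulk((1..5_000).map { |id| [id] }, batch_size: 500)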
data/lib/sidekiq/job_retry.rb CHANGED
@@ -71,6 +71,7 @@ module Sidekiq
   def initialize(capsule)
     @config = @capsule = capsule
     @max_retries = Sidekiq.default_configuration[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
+    @backtrace_cleaner = Sidekiq.default_configuration[:backtrace_cleaner]
   end

   # The global retry handler requires only the barest of data.
@@ -159,10 +160,11 @@ module Sidekiq
     end

     if msg["backtrace"]
+      backtrace = @backtrace_cleaner.call(exception.backtrace)
       lines = if msg["backtrace"] == true
-        exception.backtrace
+        backtrace
       else
-        exception.backtrace[0...msg["backtrace"].to_i]
+        backtrace[0...msg["backtrace"].to_i]
       end

       msg["error_backtrace"] = compress_backtrace(lines)
@@ -171,7 +173,7 @@ module Sidekiq
     # Goodbye dear message, you (re)tried your best I'm sure.
     return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts

-    strategy, delay = delay_for(jobinst, count, exception)
+    strategy, delay = delay_for(jobinst, count, exception, msg)
     case strategy
     when :discard
       return # poof!
@@ -190,17 +192,18 @@ module Sidekiq
   end

   # returns (strategy, seconds)
-  def delay_for(jobinst, count, exception)
+  def delay_for(jobinst, count, exception, msg)
     rv = begin
       # sidekiq_retry_in can return two different things:
       # 1. When to retry next, as an integer of seconds
       # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
-      jobinst&.sidekiq_retry_in_block&.call(count, exception)
+      jobinst&.sidekiq_retry_in_block&.call(count, exception, msg)
     rescue Exception => e
       handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
       nil
     end

+    rv = rv.to_i if rv.respond_to?(:to_i)
     delay = (count**4) + 15
     if Integer === rv && rv > 0
       delay = rv
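The extra msg argument gives sidekiq_retry_in access to the full job payload; a hedged sketch (the job class and queue routing are made up):

    class FlakyApiJob
      include Sidekiq::Job

      # count: retry number, exception: the raised error, msg: the raw job hash
      sidekiq_retry_in do |count, exception, msg|
        if msg["queue"] == "low"
          :kill                # give up early on low-priority work
        else
          (count + 1) * 60     # back off an extra minute per attempt
        end
      end

      def perform(*); end
    end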
data/lib/sidekiq/job_util.rb CHANGED
@@ -9,7 +9,7 @@ module Sidekiq

   def validate(item)
     raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
-    raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
+    raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array) || item["args"].is_a?(Enumerator::Lazy)
     raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
     raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
     raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
@@ -17,18 +17,23 @@ module Sidekiq

   def verify_json(item)
     job_class = item["wrapped"] || item["class"]
-    if Sidekiq::Config::DEFAULTS[:on_complex_arguments] == :raise
-      msg = <<~EOM
-        Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
-        To disable this error, add `Sidekiq.strict_args!(false)` to your initializer.
-      EOM
-      raise(ArgumentError, msg) unless json_safe?(item)
-    elsif Sidekiq::Config::DEFAULTS[:on_complex_arguments] == :warn
-      warn <<~EOM unless json_safe?(item)
-        Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
-        Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
-        by calling `Sidekiq.strict_args!` during Sidekiq initialization.
-      EOM
+    args = item["args"]
+    mode = Sidekiq::Config::DEFAULTS[:on_complex_arguments]
+
+    if mode == :raise || mode == :warn
+      if (unsafe_item = json_unsafe?(args))
+        msg = <<~EOM
+          Job arguments to #{job_class} must be native JSON types, but #{unsafe_item.inspect} is a #{unsafe_item.class}.
+          See https://github.com/sidekiq/sidekiq/wiki/Best-Practices
+          To disable this error, add `Sidekiq.strict_args!(false)` to your initializer.
+        EOM
+
+        if mode == :raise
+          raise(ArgumentError, msg)
+        else
+          warn(msg)
+        end
+      end
     end
   end

@@ -64,8 +69,37 @@ module Sidekiq

   private

-  def json_safe?(item)
-    JSON.parse(JSON.dump(item["args"])) == item["args"]
+  RECURSIVE_JSON_UNSAFE = {
+    Integer => ->(val) {},
+    Float => ->(val) {},
+    TrueClass => ->(val) {},
+    FalseClass => ->(val) {},
+    NilClass => ->(val) {},
+    String => ->(val) {},
+    Array => ->(val) {
+      val.each do |e|
+        unsafe_item = RECURSIVE_JSON_UNSAFE[e.class].call(e)
+        return unsafe_item unless unsafe_item.nil?
+      end
+      nil
+    },
+    Hash => ->(val) {
+      val.each do |k, v|
+        return k unless String === k
+
+        unsafe_item = RECURSIVE_JSON_UNSAFE[v.class].call(v)
+        return unsafe_item unless unsafe_item.nil?
+      end
+      nil
+    }
+  }
+
+  RECURSIVE_JSON_UNSAFE.default = ->(val) { val }
+  RECURSIVE_JSON_UNSAFE.compare_by_identity
+  private_constant :RECURSIVE_JSON_UNSAFE
+
+  def json_unsafe?(item)
+    RECURSIVE_JSON_UNSAFE[item.class].call(item)
   end
 end
 end
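A quick illustration of what the stricter check now reports; SomeJob is hypothetical and the error text is paraphrased:

    # With strict args enabled (the default in 7.x), a non-JSON argument raises and
    # the message names the offending value and its class (here, a Time):
    SomeJob.perform_async(Time.now)

    # Symbol keys are flagged too, since the Hash scan requires String keys:
    SomeJob.perform_async(user_id: 123)

    # Opting out of the check entirely:
    Sidekiq.strict_args!(false)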
data/lib/sidekiq/launcher.rb CHANGED
@@ -37,6 +37,7 @@ module Sidekiq
   # and instead have thread call Launcher#heartbeat every N seconds.
   def run(async_beat: true)
     Sidekiq.freeze!
+    logger.debug { @config.merge!({}) }
     @thread = safe_thread("heartbeat", &method(:start_heartbeat)) if async_beat
     @poller.start
     @managers.each(&:start)
@@ -165,7 +166,7 @@ module Sidekiq
     conn.multi { |transaction|
       transaction.sadd("processes", [key])
       transaction.exists(key)
-      transaction.hmset(key, "info", to_json,
+      transaction.hset(key, "info", to_json,
         "busy", curstate.size,
         "beat", Time.now.to_f,
         "rtt_us", rtt,
@@ -214,7 +215,7 @@ module Sidekiq
     Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
     Ensure Redis is running in the same AZ or datacenter as Sidekiq.
     If these values are close to 100,000, that means your Sidekiq process may be
-    CPU-saturated; reduce your concurrency and/or see https://github.com/mperham/sidekiq/discussions/5039
+    CPU-saturated; reduce your concurrency and/or see https://github.com/sidekiq/sidekiq/discussions/5039
   EOM
   RTT_READINGS.reset
 end
data/lib/sidekiq/metrics/query.rb CHANGED
@@ -70,7 +70,7 @@ module Sidekiq
           result.job_results[klass].add_metric "ms", time, ms.to_i if ms
           result.job_results[klass].add_metric "p", time, p.to_i if p
           result.job_results[klass].add_metric "f", time, f.to_i if f
-          result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time)
+          result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time).reverse
           time -= 60
         end
       end
data/lib/sidekiq/metrics/shared.rb CHANGED
@@ -29,8 +29,8 @@ module Sidekiq
       1100, 1700, 2500, 3800, 5750,
       8500, 13000, 20000, 30000, 45000,
       65000, 100000, 150000, 225000, 335000,
-      Float::INFINITY # the "maybe your job is too long" bucket
-    ]
+      1e20 # the "maybe your job is too long" bucket
+    ].freeze
     LABELS = [
       "20ms", "30ms", "45ms", "65ms", "100ms",
       "150ms", "225ms", "335ms", "500ms", "750ms",
@@ -38,7 +38,7 @@ module Sidekiq
       "8.5s", "13s", "20s", "30s", "45s",
       "65s", "100s", "150s", "225s", "335s",
       "Slow"
-    ]
+    ].freeze
     FETCH = "GET u16 #0 GET u16 #1 GET u16 #2 GET u16 #3 \
       GET u16 #4 GET u16 #5 GET u16 #6 GET u16 #7 \
       GET u16 #8 GET u16 #9 GET u16 #10 GET u16 #11 \
data/lib/sidekiq/metrics/tracking.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "time"
 require "sidekiq"
 require "sidekiq/metrics/shared"
data/lib/sidekiq/middleware/chain.rb CHANGED
@@ -166,23 +166,26 @@ module Sidekiq

     # Used by Sidekiq to execute the middleware at runtime
     # @api private
-    def invoke(*args)
+    def invoke(*args, &block)
       return yield if empty?

       chain = retrieve
-      traverse_chain = proc do
-        if chain.empty?
-          yield
-        else
-          chain.shift.call(*args, &traverse_chain)
+      traverse(chain, 0, args, &block)
+    end
+
+    private
+
+    def traverse(chain, index, args, &block)
+      if index >= chain.size
+        yield
+      else
+        chain[index].call(*args) do
+          traverse(chain, index + 1, args, &block)
         end
       end
-      traverse_chain.call
     end
   end

-  private
-
   # Represents each link in the middleware chain
   # @api private
   class Entry
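For orientation, a minimal sketch of a middleware link that this chain traverses; the class name and the tag it adds are hypothetical:

    class TagClientMiddleware
      include Sidekiq::ClientMiddleware

      # Client middleware receives (worker_class, job, queue, redis_pool) and must
      # yield to hand control to the next link; not yielding stops the push.
      def call(worker_class, job, queue, redis_pool)
        job["tags"] = (job["tags"] || []) << "traced"
        yield
      end
    end

    Sidekiq.configure_client do |config|
      config.client_middleware do |chain|
        chain.add TagClientMiddleware
      end
    end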