sidekiq 6.0.1 → 6.0.6

data/Changes.md CHANGED
@@ -2,7 +2,13 @@
 
  [Sidekiq Changes](https://github.com/mperham/sidekiq/blob/master/Changes.md) | [Sidekiq Pro Changes](https://github.com/mperham/sidekiq/blob/master/Pro-Changes.md) | [Sidekiq Enterprise Changes](https://github.com/mperham/sidekiq/blob/master/Ent-Changes.md)
 
- Please see [http://sidekiq.org/](http://sidekiq.org/) for more details and how to buy.
+ Please see [http://sidekiq.org](http://sidekiq.org) for more details and how to buy.
+
+ HEAD
+ ---------
+
+ - Update `constantize` for batch callbacks. [#4469]
+
 
 5.0.1
 ---------
data/README.md CHANGED
@@ -19,7 +19,8 @@ Performance
 
 Version | Latency | Garbage created for 10k jobs | Time to process 100k jobs | Throughput | Ruby
 -----------------|------|---------|---------|------------------------|-----
- Sidekiq 6.0.0 | 3 ms | 156 MB | 19 sec | **5200 jobs/sec** | MRI 2.6.3
+ Sidekiq 6.0.2 | 3 ms | 156 MB | 14.0 sec | **7100 jobs/sec** | MRI 2.6.3
+ Sidekiq 6.0.0 | 3 ms | 156 MB | 19 sec | 5200 jobs/sec | MRI 2.6.3
 Sidekiq 4.0.0 | 10 ms | 151 MB | 22 sec | 4500 jobs/sec |
 Sidekiq 3.5.1 | 22 ms | 1257 MB | 125 sec | 800 jobs/sec |
 Resque 1.25.2 | - | - | 420 sec | 240 jobs/sec |
@@ -18,7 +18,7 @@ module Sidekiq
     def create_test_file
       return unless test_framework
 
-      if defined?(RSpec)
+      if test_framework == :rspec
         create_worker_spec
       else
         create_worker_test
@@ -30,16 +30,16 @@ module Sidekiq
       startup: [],
       quiet: [],
       shutdown: [],
-      heartbeat: [],
+      heartbeat: []
     },
     dead_max_jobs: 10_000,
     dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
-    reloader: proc { |&block| block.call },
+    reloader: proc { |&block| block.call }
   }
 
   DEFAULT_WORKER_OPTIONS = {
     "retry" => true,
-    "queue" => "default",
+    "queue" => "default"
   }
 
   FAKE_INFO = {
@@ -47,7 +47,7 @@ module Sidekiq
     "uptime_in_days" => "9999",
     "connected_clients" => "9999",
     "used_memory_human" => "9P",
-    "used_memory_peak_human" => "9P",
+    "used_memory_peak_human" => "9P"
   }
 
   def self.❨╯°□°❩╯︵┻━┻
@@ -154,7 +154,7 @@ module Sidekiq
 
   def self.default_worker_options=(hash)
     # stringify
-    @default_worker_options = default_worker_options.merge(Hash[hash.map { |k, v| [k.to_s, v] }])
+    @default_worker_options = default_worker_options.merge(hash.transform_keys(&:to_s))
   end
 
   def self.default_worker_options
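For context on the idiom swap above: `Hash#transform_keys` (Ruby 2.5+) does in one call what the `Hash[map]` pattern did via an intermediate array of pairs. A minimal sketch with invented options:

```ruby
opts = {retry: 5, queue: "critical"}

# old idiom: build an array of [key, value] pairs, then a new Hash from it
Hash[opts.map { |k, v| [k.to_s, v] }]  # => {"retry"=>5, "queue"=>"critical"}

# new idiom: one pass, no intermediate array, clearer intent
opts.transform_keys(&:to_s)            # => {"retry"=>5, "queue"=>"critical"}
```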
@@ -210,6 +210,10 @@ module Sidekiq
     @logger = logger
   end
 
+  def self.pro?
+    defined?(Sidekiq::Pro)
+  end
+
   # How frequently Redis should be checked by a random Sidekiq process for
   # scheduled and retriable jobs. Each individual process will take turns by
   # waiting some multiple of this value.
@@ -80,8 +80,8 @@ module Sidekiq
       }
 
       s = processes.size
-      workers_size = pipe2_res[0...s].map(&:to_i).inject(0, &:+)
-      enqueued = pipe2_res[s..-1].map(&:to_i).inject(0, &:+)
+      workers_size = pipe2_res[0...s].sum(&:to_i)
+      enqueued = pipe2_res[s..-1].sum(&:to_i)
 
       default_queue_latency = if (entry = pipe1_res[6].first)
         job = begin
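`Enumerable#sum` with a block (Ruby 2.4+) folds the map-then-inject pair into a single pass; the same swap appears again in the `busy` count further down. A quick illustration:

```ruby
counts = ["3", "1", "4"]

counts.map(&:to_i).inject(0, &:+)  # => 8, allocates an intermediate array
counts.sum(&:to_i)                 # => 8, single pass, no intermediate array
```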
@@ -105,7 +105,7 @@ module Sidekiq
 
         default_queue_latency: default_queue_latency,
         workers_size: workers_size,
-        enqueued: enqueued,
+        enqueued: enqueued
       }
     end
 
@@ -140,13 +140,8 @@ module Sidekiq
           end
         }
 
-        i = 0
-        array_of_arrays = queues.each_with_object({}) { |queue, memo|
-          memo[queue] = lengths[i]
-          i += 1
-        }.sort_by { |_, size| size }
-
-        Hash[array_of_arrays.reverse]
+        array_of_arrays = queues.zip(lengths).sort_by { |_, size| -size }
+        Hash[array_of_arrays]
       end
     end
   end
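`Array#zip` pairs queue names with their lengths directly, and negating the size in `sort_by` replaces the sort-then-reverse dance. A sketch with invented queue data:

```ruby
queues  = ["default", "critical", "low"]
lengths = [12, 40, 3]

pairs = queues.zip(lengths)
# => [["default", 12], ["critical", 40], ["low", 3]]
Hash[pairs.sort_by { |_, size| -size }]
# => {"critical"=>40, "default"=>12, "low"=>3}
```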
@@ -168,18 +163,12 @@ module Sidekiq
     private
 
     def date_stat_hash(stat)
-      i = 0
       stat_hash = {}
-      keys = []
-      dates = []
-
-      while i < @days_previous
-        date = @start_date - i
-        datestr = date.strftime("%Y-%m-%d")
-        keys << "stat:#{stat}:#{datestr}"
-        dates << datestr
-        i += 1
-      end
+      dates = @start_date.downto(@start_date - @days_previous + 1).map { |date|
+        date.strftime("%Y-%m-%d")
+      }
+
+      keys = dates.map { |datestr| "stat:#{stat}:#{datestr}" }
 
       begin
         Sidekiq.redis do |conn|
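`Date#downto` returns an enumerator that walks backwards one day at a time, replacing the manual counter loop. A self-contained sketch using a fixed start date:

```ruby
require "date"

start_date    = Date.new(2020, 3, 10)
days_previous = 3

dates = start_date.downto(start_date - days_previous + 1).map { |date|
  date.strftime("%Y-%m-%d")
}
# => ["2020-03-10", "2020-03-09", "2020-03-08"]

keys = dates.map { |datestr| "stat:processed:#{datestr}" }
# => ["stat:processed:2020-03-10", "stat:processed:2020-03-09", "stat:processed:2020-03-08"]
```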
@@ -284,7 +273,7 @@ module Sidekiq
     def clear
       Sidekiq.redis do |conn|
         conn.multi do
-          conn.del(@rname)
+          conn.unlink(@rname)
           conn.srem("queues", name)
         end
       end
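`UNLINK` (Redis 4.0+, which the boot check further down already requires) removes the key from the keyspace immediately but reclaims its memory on a background thread, so clearing a huge queue no longer blocks Redis the way `DEL` can. A sketch with redis-rb (a local connection is assumed):

```ruby
require "redis"

redis = Redis.new  # assumes a local Redis; adjust host/url as needed
redis.rpush("queue:default", ["job1", "job2"])

# DEL frees memory synchronously; UNLINK defers the free to a background thread
redis.unlink("queue:default")  # => 1 (number of keys unlinked)
```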
@@ -438,12 +427,18 @@ module Sidekiq
 
     def uncompress_backtrace(backtrace)
       if backtrace.is_a?(Array)
-        # Handle old jobs with previous backtrace format
+        # Handle old jobs with raw Array backtrace format
         backtrace
       else
         decoded = Base64.decode64(backtrace)
         uncompressed = Zlib::Inflate.inflate(decoded)
-        Marshal.load(uncompressed)
+        begin
+          Sidekiq.load_json(uncompressed)
+        rescue
+          # Handle old jobs with marshalled backtrace format
+          # TODO Remove in 7.x
+          Marshal.load(uncompressed)
+        end
       end
     end
   end
@@ -471,8 +466,9 @@ module Sidekiq
     end
 
     def reschedule(at)
-      delete
-      @parent.schedule(at, item)
+      Sidekiq.redis do |conn|
+        conn.zincrby(@parent.name, at.to_f - @score, Sidekiq.dump_json(@item))
+      end
     end
 
     def add_to_queue
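Rather than deleting the job and re-adding it, `reschedule` now nudges the member's score in place: `ZINCRBY` adds a delta to a sorted-set member's score, and `at.to_f - @score` is exactly the gap between the new and old run times. A sketch with redis-rb and invented values:

```ruby
require "redis"

redis     = Redis.new  # assumed local connection
payload   = '{"class":"HardWorker","jid":"abc123","args":[]}'
old_score = 1_577_836_800.0   # the zset score, i.e. the old run-at time
new_at    = Time.now + 3600   # run an hour from now instead

redis.zadd("retry", old_score, payload)
# one atomic command instead of a ZREM + ZADD pair
redis.zincrby("retry", new_at.to_f - old_score, payload)
```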
@@ -516,7 +512,7 @@ module Sidekiq
         else
           # multiple jobs with the same score
           # find the one with the right JID and push it
-          hash = results.group_by { |message|
+          matched, nonmatched = results.partition { |message|
             if message.index(jid)
               msg = Sidekiq.load_json(message)
               msg["jid"] == jid
@@ -525,12 +521,12 @@ module Sidekiq
             end
           }
 
-          msg = hash.fetch(true, []).first
+          msg = matched.first
           yield msg if msg
 
           # push the rest back onto the sorted set
           conn.multi do
-            hash.fetch(false, []).each do |message|
+            nonmatched.each do |message|
               conn.zadd(parent.name, score.to_f.to_s, message)
             end
           end
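`Enumerable#partition` always yields two arrays, so the `fetch(true, [])` / `fetch(false, [])` defensiveness that `group_by` required simply disappears. A quick comparison:

```ruby
results = ["a1", "b2", "a3"]

by_match = results.group_by { |s| s.start_with?("a") }
by_match.fetch(true, []).first   # => "a1", but the fetch default is mandatory

matched, nonmatched = results.partition { |s| s.start_with?("a") }
matched.first                    # => "a1"
nonmatched                       # => ["b2"]
```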
@@ -554,7 +550,7 @@ module Sidekiq
     end
 
     def scan(match, count = 100)
-      return to_enum(:scan, match) unless block_given?
+      return to_enum(:scan, match, count) unless block_given?
 
       match = "*#{match}*" unless match.include?("*")
       Sidekiq.redis do |conn|
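This fixes a subtle enumerator bug: `to_enum` re-invokes the method with exactly the arguments you hand it, so omitting `count` silently reset it to the default of 100 when callers enumerated lazily. A minimal reproduction:

```ruby
def scan(match, count = 100)
  return to_enum(:scan, match, count) unless block_given?
  yield "match=#{match} count=#{count}"
end

scan("foo", 5).first
# => "match=foo count=5"
# with the old to_enum(:scan, match), this returned "match=foo count=100"
```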
@@ -566,7 +562,7 @@ module Sidekiq
 
     def clear
       Sidekiq.redis do |conn|
-        conn.del(name)
+        conn.unlink(name)
       end
     end
     alias_method :💣, :clear
@@ -648,11 +644,13 @@ module Sidekiq
       Sidekiq.redis do |conn|
         elements = conn.zrangebyscore(name, score, score)
         elements.each do |element|
-          message = Sidekiq.load_json(element)
-          if message["jid"] == jid
-            ret = conn.zrem(name, element)
-            @_size -= 1 if ret
-            break ret
+          if element.index(jid)
+            message = Sidekiq.load_json(element)
+            if message["jid"] == jid
+              ret = conn.zrem(name, element)
+              @_size -= 1 if ret
+              break ret
+            end
           end
         end
       end
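The `element.index(jid)` guard is a cheap substring prefilter: JSON parsing happens only for elements that could possibly match, instead of for every job sharing the score. A sketch of the idea:

```ruby
require "json"

jid     = "2647c4fe13acc692326bd4c2"
element = %({"class":"HardWorker","jid":"#{jid}","args":[]})

# fast substring scan first; the exact check still parses and compares the jid
if element.index(jid)
  JSON.parse(element)["jid"] == jid  # => true
end
```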
@@ -776,40 +774,37 @@ module Sidekiq
         # the hash named key has an expiry of 60 seconds.
         # if it's not found, that means the process has not reported
         # in to Redis and probably died.
-        to_prune = []
-        heartbeats.each_with_index do |beat, i|
-          to_prune << procs[i] if beat.nil?
-        end
+        to_prune = procs.select.with_index { |proc, i|
+          heartbeats[i].nil?
+        }
         count = conn.srem("processes", to_prune) unless to_prune.empty?
       end
       count
     end
 
     def each
-      procs = Sidekiq.redis { |conn| conn.sscan_each("processes").to_a }.sort
+      result = Sidekiq.redis { |conn|
+        procs = conn.sscan_each("processes").to_a.sort
 
-      Sidekiq.redis do |conn|
         # We're making a tradeoff here between consuming more memory instead of
         # making more roundtrips to Redis, but if you have hundreds or thousands of workers,
         # you'll be happier this way
-        result = conn.pipelined {
+        conn.pipelined do
           procs.each do |key|
             conn.hmget(key, "info", "busy", "beat", "quiet")
           end
-        }
+        end
+      }
 
-      result.each do |info, busy, at_s, quiet|
-        # If a process is stopped between when we query Redis for `procs` and
-        # when we query for `result`, we will have an item in `result` that is
-        # composed of `nil` values.
-        next if info.nil?
+      result.each do |info, busy, at_s, quiet|
+        # If a process is stopped between when we query Redis for `procs` and
+        # when we query for `result`, we will have an item in `result` that is
+        # composed of `nil` values.
+        next if info.nil?
 
-        hash = Sidekiq.load_json(info)
-        yield Process.new(hash.merge("busy" => busy.to_i, "beat" => at_s.to_f, "quiet" => quiet))
-      end
+        hash = Sidekiq.load_json(info)
+        yield Process.new(hash.merge("busy" => busy.to_i, "beat" => at_s.to_f, "quiet" => quiet))
       end
-
-      nil
     end
 
     # This method is not guaranteed accurate since it does not prune the set
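The pruning rewrite leans on `select.with_index`, which filters one array by a condition on a parallel array. A tiny illustration with fabricated data:

```ruby
procs      = ["sidekiq:a", "sidekiq:b", "sidekiq:c"]
heartbeats = ["1589480400", nil, "1589480401"]

# keep the process keys whose parallel heartbeat entry is missing
to_prune = procs.select.with_index { |_proc, i| heartbeats[i].nil? }
# => ["sidekiq:b"]
```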
@@ -931,7 +926,11 @@ module Sidekiq
         }
         next unless valid
         workers.each_pair do |tid, json|
-          yield key, tid, Sidekiq.load_json(json)
+          hsh = Sidekiq.load_json(json)
+          p = hsh["payload"]
+          # avoid breaking API, this is a side effect of the JSON optimization in #4316
+          hsh["payload"] = Sidekiq.load_json(p) if p.is_a?(String)
+          yield key, tid, hsh
         end
       end
     end
@@ -953,7 +952,7 @@ module Sidekiq
         procs.each do |key|
           conn.hget(key, "busy")
         end
-      }.map(&:to_i).inject(:+)
+      }.sum(&:to_i)
     end
   end
 end
@@ -38,9 +38,12 @@ module Sidekiq
       if environment == "development" && $stdout.tty? && Sidekiq.log_formatter.is_a?(Sidekiq::Logger::Formatters::Pretty)
         print_banner
       end
+      logger.info "Booted Rails #{::Rails.version} application in #{environment} environment" if rails_app?
 
       self_read, self_write = IO.pipe
       sigs = %w[INT TERM TTIN TSTP]
+      # USR1 and USR2 don't work on the JVM
+      sigs << "USR2" unless jruby?
       sigs.each do |sig|
         trap sig do
           self_write.puts(sig)
@@ -56,7 +59,7 @@ module Sidekiq
       # touch the connection pool so it is created before we
       # fire startup and start multithreading.
       ver = Sidekiq.redis_info["redis_version"]
-      raise "You are using Redis v#{ver}, Sidekiq requires Redis v4.0.0 or greater" if ver < "4"
+      raise "You are connecting to Redis v#{ver}, Sidekiq requires Redis v4.0.0 or greater" if ver < "4"
 
       # Since the user can pass us a connection pool explicitly in the initializer, we
       # need to verify the size is large enough or else Sidekiq's performance is dramatically slowed.
@@ -160,7 +163,7 @@ module Sidekiq
           Sidekiq.logger.warn "<no backtrace available>"
         end
       end
-    },
+    }
   }
   UNHANDLED_SIGNAL_HANDLER = ->(cli) { Sidekiq.logger.info "No signal handler registered, ignoring" }
   SIGNAL_HANDLERS.default = UNHANDLED_SIGNAL_HANDLER
@@ -179,7 +182,11 @@ module Sidekiq
     end
 
     def set_environment(cli_env)
-      @environment = cli_env || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
+      # See #984 for discussion.
+      # APP_ENV is now the preferred ENV term since it is not tech-specific.
+      # Both Sinatra 2.0+ and Sidekiq support this term.
+      # RAILS_ENV and RACK_ENV are there for legacy support.
+      @environment = cli_env || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
     end
 
     def symbolize_keys_deep!(hash)
@@ -376,5 +383,11 @@ module Sidekiq
       [weight.to_i, 1].max.times { opts[:queues] << queue }
       opts[:strict] = false if weight.to_i > 0
     end
+
+    def rails_app?
+      defined?(::Rails) && ::Rails.respond_to?(:application)
+    end
   end
 end
+
+require "sidekiq/systemd"
@@ -99,8 +99,8 @@ module Sidekiq
 
       normed = normalize_item(items)
       payloads = items["args"].map.with_index { |args, index|
-        single_at = at.is_a?(Array) ? at[index] : at
-        copy = normed.merge("args" => args, "jid" => SecureRandom.hex(12), "at" => single_at, "enqueued_at" => Time.now.to_f)
+        copy = normed.merge("args" => args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
+        copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
 
         result = process_single(items["class"], copy)
         result || nil
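The old code always wrote an `"at"` key, so `push_bulk` without a schedule time produced payloads carrying `"at" => nil`; the new code only sets the key when a time was actually given. A sketch of the difference (hash values invented):

```ruby
normed = {"class" => "HardWorker", "queue" => "default"}
at = nil  # caller did not schedule the jobs

copy = normed.merge("args" => [1], "jid" => "abc123", "enqueued_at" => Time.now.to_f)
copy["at"] = (at.is_a?(Array) ? at[0] : at) if at

copy.key?("at")  # => false; the job goes straight to its queue, not the "schedule" zset
```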
@@ -193,7 +193,7 @@ module Sidekiq
     end
 
     def atomic_push(conn, payloads)
-      if payloads.first["at"]
+      if payloads.first.key?("at")
        conn.zadd("schedule", payloads.map { |hash|
          at = hash.delete("at").to_s
          [at, Sidekiq.dump_json(hash)]
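`Hash#key?` tests presence rather than truthiness, which is the crux of the 6.0.0 `push_bulk` bug (#4321): a payload carrying `"at" => nil` should be routed by whether the key exists, not by its value. In brief:

```ruby
payload = {"at" => nil}

payload["at"]       # => nil (falsy): the old check treated this as "not scheduled"
payload.key?("at")  # => true: presence decides, and normalize_item now strips nil "at"
```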
@@ -219,6 +219,10 @@ module Sidekiq
     end
 
     def normalize_item(item)
+      # 6.0.0 push_bulk bug, #4321
+      # TODO Remove after a while...
+      item.delete("at") if item.key?("at") && item["at"].nil?
+
       raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: { 'class' => SomeWorker, 'args' => ['bob', 1, :foo => 'bar'] }") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
       raise(ArgumentError, "Job args must be an Array") unless item["args"].is_a?(Array)
       raise(ArgumentError, "Job class must be either a Class or String representation of the class name") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
@@ -226,13 +230,19 @@ module Sidekiq
       raise(ArgumentError, "Job tags must be an Array") if item["tags"] && !item["tags"].is_a?(Array)
       # raise(ArgumentError, "Arguments must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices") unless JSON.load(JSON.dump(item['args'])) == item['args']
 
-      normalized_hash(item["class"])
-        .each { |key, value| item[key] = value if item[key].nil? }
+      # merge in the default sidekiq_options for the item's class and/or wrapped element
+      # this allows ActiveJobs to control sidekiq_options too.
+      defaults = normalized_hash(item["class"])
+      defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?("get_sidekiq_options")
+      item = defaults.merge(item)
+
+      raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
 
       item["class"] = item["class"].to_s
       item["queue"] = item["queue"].to_s
       item["jid"] ||= SecureRandom.hex(12)
       item["created_at"] ||= Time.now.to_f
+
       item
     end
 
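The defaults rewrite layers three hashes: class-level `sidekiq_options`, then options from the wrapped class (so an ActiveJob can set `queue`, `retry`, etc.), then the explicit item, which always wins. A sketch with made-up option hashes:

```ruby
defaults = {"retry" => true, "queue" => "default"}  # worker class sidekiq_options
wrapped  = {"queue" => "mailers"}                   # e.g. the ActiveJob's get_sidekiq_options
item     = {"class" => "Wrapper", "args" => [], "retry" => 3}

defaults.merge(wrapped).merge(item)
# => {"retry"=>3, "queue"=>"mailers", "class"=>"Wrapper", "args"=>[]}
```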
@@ -39,7 +39,7 @@ module Sidekiq
       # attribute to expose the underlying thing.
       h = {
         class: job_hash["wrapped"] || job_hash["class"],
-        jid: job_hash["jid"],
+        jid: job_hash["jid"]
       }
       h[:bid] = job_hash["bid"] if job_hash["bid"]
       h[:tags] = job_hash["tags"] if job_hash["tags"]
@@ -74,7 +74,7 @@ module Sidekiq
     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
     # require the worker to be instantiated.
-    def global(msg, queue)
+    def global(jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -85,6 +85,7 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
+      msg = Sidekiq.load_json(jobstr)
       if msg["retry"]
         attempt_retry(nil, msg, queue, e)
       else
@@ -106,7 +107,7 @@ module Sidekiq
     # exception so the global block does not reprocess the error. The
     # Skip exception is unwrapped within Sidekiq::Processor#process before
     # calling the handle_exception handlers.
-    def local(worker, msg, queue)
+    def local(worker, jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -117,6 +118,7 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
+      msg = Sidekiq.load_json(jobstr)
      if msg["retry"].nil?
        msg["retry"] = worker.class.get_sidekiq_options["retry"]
      end
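Both retry wrappers now receive the raw JSON string and parse it only inside the rescue path, so the happy path (the vast majority of jobs) no longer pays for a second JSON parse. The shape of the change, sketched as a hypothetical standalone wrapper:

```ruby
require "json"

# hypothetical stand-in for Sidekiq's global/local retry wrappers
def with_retry(jobstr)
  yield                      # happy path: the raw string is never parsed
rescue => e
  msg = JSON.parse(jobstr)   # parse only when retry bookkeeping is needed
  puts "will retry #{msg["jid"]}: #{e.message}"
  raise
end

with_retry('{"jid":"abc123"}') { raise "boom" }
# prints "will retry abc123: boom", then re-raises the RuntimeError
```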
@@ -187,13 +189,13 @@ module Sidekiq
         handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
       end
 
+      send_to_morgue(msg) unless msg["dead"] == false
+
       Sidekiq.death_handlers.each do |handler|
         handler.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling death handler", job: msg})
       end
-
-      send_to_morgue(msg) unless msg["dead"] == false
     end
 
     def send_to_morgue(msg)
@@ -252,7 +254,7 @@ module Sidekiq
     end
 
     def compress_backtrace(backtrace)
-      serialized = Marshal.dump(backtrace)
+      serialized = Sidekiq.dump_json(backtrace)
       compressed = Zlib::Deflate.deflate(serialized)
       Base64.encode64(compressed)
     end
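This pairs with the `uncompress_backtrace` change earlier: backtraces are now serialized as JSON instead of Marshal (portable across Ruby versions and safe to load), with a Marshal fallback kept for jobs written by older releases. A round-trip sketch using plain `JSON` in place of `Sidekiq.dump_json`/`load_json`, which wrap it:

```ruby
require "base64"
require "json"
require "zlib"

backtrace = ["app/workers/hard_worker.rb:10:in `perform'"]

# compress: JSON -> deflate -> base64, matching the new compress_backtrace
packed = Base64.encode64(Zlib::Deflate.deflate(JSON.dump(backtrace)))

# uncompress: try JSON first, fall back to Marshal for pre-6.0.2 payloads
raw = Zlib::Inflate.inflate(Base64.decode64(packed))
begin
  JSON.parse(raw)    # => ["app/workers/hard_worker.rb:10:in `perform'"]
rescue JSON::ParserError
  Marshal.load(raw)  # legacy Marshal format
end
```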