sidekiq 6.0.2 → 6.0.7

Sign up to get free protection for your applications and access to all of the features.

Potentially problematic release.


This version of sidekiq might be problematic. Click here for more details.

@@ -2,7 +2,13 @@
2
2
 
3
3
  [Sidekiq Changes](https://github.com/mperham/sidekiq/blob/master/Changes.md) | [Sidekiq Pro Changes](https://github.com/mperham/sidekiq/blob/master/Pro-Changes.md) | [Sidekiq Enterprise Changes](https://github.com/mperham/sidekiq/blob/master/Ent-Changes.md)
4
4
 
5
- Please see [http://sidekiq.org/](http://sidekiq.org/) for more details and how to buy.
5
+ Please see [http://sidekiq.org](http://sidekiq.org) for more details and how to buy.
6
+
7
+ HEAD
8
+ ---------
9
+
10
+ - Update `constantize` for batch callbacks. [#4469]
11
+
6
12
 
7
13
  5.0.1
8
14
  ---------
@@ -6,9 +6,28 @@ $TESTING = false
6
6
 
7
7
  require_relative '../lib/sidekiq/cli'
8
8
 
9
+ def integrate_with_systemd
10
+ return unless ENV["NOTIFY_SOCKET"]
11
+
12
+ Sidekiq.configure_server do |config|
13
+ Sidekiq.logger.info "Enabling systemd notification integration"
14
+ require "sidekiq/sd_notify"
15
+ config.on(:startup) do
16
+ Sidekiq::SdNotify.ready
17
+ end
18
+ config.on(:shutdown) do
19
+ Sidekiq::SdNotify.stopping
20
+ end
21
+ Sidekiq.start_watchdog if Sidekiq::SdNotify.watchdog?
22
+ end
23
+ end
24
+
9
25
  begin
10
26
  cli = Sidekiq::CLI.instance
11
27
  cli.parse
28
+
29
+ integrate_with_systemd
30
+
12
31
  cli.run
13
32
  rescue => e
14
33
  raise e if $DEBUG
@@ -18,7 +18,7 @@ module Sidekiq
18
18
  def create_test_file
19
19
  return unless test_framework
20
20
 
21
- if defined?(RSpec)
21
+ if test_framework == :rspec
22
22
  create_worker_spec
23
23
  else
24
24
  create_worker_test
@@ -30,16 +30,16 @@ module Sidekiq
30
30
  startup: [],
31
31
  quiet: [],
32
32
  shutdown: [],
33
- heartbeat: [],
33
+ heartbeat: []
34
34
  },
35
35
  dead_max_jobs: 10_000,
36
36
  dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
37
- reloader: proc { |&block| block.call },
37
+ reloader: proc { |&block| block.call }
38
38
  }
39
39
 
40
40
  DEFAULT_WORKER_OPTIONS = {
41
41
  "retry" => true,
42
- "queue" => "default",
42
+ "queue" => "default"
43
43
  }
44
44
 
45
45
  FAKE_INFO = {
@@ -47,7 +47,7 @@ module Sidekiq
47
47
  "uptime_in_days" => "9999",
48
48
  "connected_clients" => "9999",
49
49
  "used_memory_human" => "9P",
50
- "used_memory_peak_human" => "9P",
50
+ "used_memory_peak_human" => "9P"
51
51
  }
52
52
 
53
53
  def self.❨╯°□°❩╯︵┻━┻
@@ -154,7 +154,7 @@ module Sidekiq
154
154
 
155
155
  def self.default_worker_options=(hash)
156
156
  # stringify
157
- @default_worker_options = default_worker_options.merge(Hash[hash.map { |k, v| [k.to_s, v] }])
157
+ @default_worker_options = default_worker_options.merge(hash.transform_keys(&:to_s))
158
158
  end
159
159
 
160
160
  def self.default_worker_options
@@ -210,6 +210,10 @@ module Sidekiq
210
210
  @logger = logger
211
211
  end
212
212
 
213
+ def self.pro?
214
+ defined?(Sidekiq::Pro)
215
+ end
216
+
213
217
  # How frequently Redis should be checked by a random Sidekiq process for
214
218
  # scheduled and retriable jobs. Each individual process will take turns by
215
219
  # waiting some multiple of this value.
@@ -105,7 +105,7 @@ module Sidekiq
105
105
 
106
106
  default_queue_latency: default_queue_latency,
107
107
  workers_size: workers_size,
108
- enqueued: enqueued,
108
+ enqueued: enqueued
109
109
  }
110
110
  end
111
111
 
@@ -140,13 +140,8 @@ module Sidekiq
140
140
  end
141
141
  }
142
142
 
143
- i = 0
144
- array_of_arrays = queues.each_with_object({}) { |queue, memo|
145
- memo[queue] = lengths[i]
146
- i += 1
147
- }.sort_by { |_, size| size }
148
-
149
- Hash[array_of_arrays.reverse]
143
+ array_of_arrays = queues.zip(lengths).sort_by { |_, size| -size }
144
+ Hash[array_of_arrays]
150
145
  end
151
146
  end
152
147
  end
@@ -168,18 +163,12 @@ module Sidekiq
168
163
  private
169
164
 
170
165
  def date_stat_hash(stat)
171
- i = 0
172
166
  stat_hash = {}
173
- keys = []
174
- dates = []
175
-
176
- while i < @days_previous
177
- date = @start_date - i
178
- datestr = date.strftime("%Y-%m-%d")
179
- keys << "stat:#{stat}:#{datestr}"
180
- dates << datestr
181
- i += 1
182
- end
167
+ dates = @start_date.downto(@start_date - @days_previous + 1).map { |date|
168
+ date.strftime("%Y-%m-%d")
169
+ }
170
+
171
+ keys = dates.map { |datestr| "stat:#{stat}:#{datestr}" }
183
172
 
184
173
  begin
185
174
  Sidekiq.redis do |conn|
@@ -284,7 +273,7 @@ module Sidekiq
284
273
  def clear
285
274
  Sidekiq.redis do |conn|
286
275
  conn.multi do
287
- conn.del(@rname)
276
+ conn.unlink(@rname)
288
277
  conn.srem("queues", name)
289
278
  end
290
279
  end
@@ -478,7 +467,7 @@ module Sidekiq
478
467
 
479
468
  def reschedule(at)
480
469
  Sidekiq.redis do |conn|
481
- conn.zincrby(@parent.name, at - @score, Sidekiq.dump_json(@item))
470
+ conn.zincrby(@parent.name, at.to_f - @score, Sidekiq.dump_json(@item))
482
471
  end
483
472
  end
484
473
 
@@ -523,7 +512,7 @@ module Sidekiq
523
512
  else
524
513
  # multiple jobs with the same score
525
514
  # find the one with the right JID and push it
526
- hash = results.group_by { |message|
515
+ matched, nonmatched = results.partition { |message|
527
516
  if message.index(jid)
528
517
  msg = Sidekiq.load_json(message)
529
518
  msg["jid"] == jid
@@ -532,12 +521,12 @@ module Sidekiq
532
521
  end
533
522
  }
534
523
 
535
- msg = hash.fetch(true, []).first
524
+ msg = matched.first
536
525
  yield msg if msg
537
526
 
538
527
  # push the rest back onto the sorted set
539
528
  conn.multi do
540
- hash.fetch(false, []).each do |message|
529
+ nonmatched.each do |message|
541
530
  conn.zadd(parent.name, score.to_f.to_s, message)
542
531
  end
543
532
  end
@@ -573,7 +562,7 @@ module Sidekiq
573
562
 
574
563
  def clear
575
564
  Sidekiq.redis do |conn|
576
- conn.del(name)
565
+ conn.unlink(name)
577
566
  end
578
567
  end
579
568
  alias_method :💣, :clear
@@ -785,10 +774,9 @@ module Sidekiq
785
774
  # the hash named key has an expiry of 60 seconds.
786
775
  # if it's not found, that means the process has not reported
787
776
  # in to Redis and probably died.
788
- to_prune = []
789
- heartbeats.each_with_index do |beat, i|
790
- to_prune << procs[i] if beat.nil?
791
- end
777
+ to_prune = procs.select.with_index { |proc, i|
778
+ heartbeats[i].nil?
779
+ }
792
780
  count = conn.srem("processes", to_prune) unless to_prune.empty?
793
781
  end
794
782
  count
@@ -938,7 +926,11 @@ module Sidekiq
938
926
  }
939
927
  next unless valid
940
928
  workers.each_pair do |tid, json|
941
- yield key, tid, Sidekiq.load_json(json)
929
+ hsh = Sidekiq.load_json(json)
930
+ p = hsh["payload"]
931
+ # avoid breaking API, this is a side effect of the JSON optimization in #4316
932
+ hsh["payload"] = Sidekiq.load_json(p) if p.is_a?(String)
933
+ yield key, tid, hsh
942
934
  end
943
935
  end
944
936
  end
@@ -38,6 +38,7 @@ module Sidekiq
38
38
  if environment == "development" && $stdout.tty? && Sidekiq.log_formatter.is_a?(Sidekiq::Logger::Formatters::Pretty)
39
39
  print_banner
40
40
  end
41
+ logger.info "Booted Rails #{::Rails.version} application in #{environment} environment" if rails_app?
41
42
 
42
43
  self_read, self_write = IO.pipe
43
44
  sigs = %w[INT TERM TTIN TSTP]
@@ -53,12 +54,12 @@ module Sidekiq
53
54
 
54
55
  logger.info "Running in #{RUBY_DESCRIPTION}"
55
56
  logger.info Sidekiq::LICENSE
56
- logger.info "Upgrade to Sidekiq Pro for more features and support: http://sidekiq.org" unless defined?(::Sidekiq::Pro)
57
+ logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
57
58
 
58
59
  # touch the connection pool so it is created before we
59
60
  # fire startup and start multithreading.
60
61
  ver = Sidekiq.redis_info["redis_version"]
61
- raise "You are using Redis v#{ver}, Sidekiq requires Redis v4.0.0 or greater" if ver < "4"
62
+ raise "You are connecting to Redis v#{ver}, Sidekiq requires Redis v4.0.0 or greater" if ver < "4"
62
63
 
63
64
  # Since the user can pass us a connection pool explicitly in the initializer, we
64
65
  # need to verify the size is large enough or else Sidekiq's performance is dramatically slowed.
@@ -162,7 +163,7 @@ module Sidekiq
162
163
  Sidekiq.logger.warn "<no backtrace available>"
163
164
  end
164
165
  end
165
- },
166
+ }
166
167
  }
167
168
  UNHANDLED_SIGNAL_HANDLER = ->(cli) { Sidekiq.logger.info "No signal handler registered, ignoring" }
168
169
  SIGNAL_HANDLERS.default = UNHANDLED_SIGNAL_HANDLER
@@ -181,7 +182,11 @@ module Sidekiq
181
182
  end
182
183
 
183
184
  def set_environment(cli_env)
184
- @environment = cli_env || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
185
+ # See #984 for discussion.
186
+ # APP_ENV is now the preferred ENV term since it is not tech-specific.
187
+ # Both Sinatra 2.0+ and Sidekiq support this term.
188
+ # RAILS_ENV and RACK_ENV are there for legacy support.
189
+ @environment = cli_env || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
185
190
  end
186
191
 
187
192
  def symbolize_keys_deep!(hash)
@@ -378,5 +383,11 @@ module Sidekiq
378
383
  [weight.to_i, 1].max.times { opts[:queues] << queue }
379
384
  opts[:strict] = false if weight.to_i > 0
380
385
  end
386
+
387
+ def rails_app?
388
+ defined?(::Rails) && ::Rails.respond_to?(:application)
389
+ end
381
390
  end
382
391
  end
392
+
393
+ require "sidekiq/systemd"
@@ -99,8 +99,8 @@ module Sidekiq
99
99
 
100
100
  normed = normalize_item(items)
101
101
  payloads = items["args"].map.with_index { |args, index|
102
- single_at = at.is_a?(Array) ? at[index] : at
103
- copy = normed.merge("args" => args, "jid" => SecureRandom.hex(12), "at" => single_at, "enqueued_at" => Time.now.to_f)
102
+ copy = normed.merge("args" => args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
103
+ copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
104
104
 
105
105
  result = process_single(items["class"], copy)
106
106
  result || nil
@@ -193,7 +193,7 @@ module Sidekiq
193
193
  end
194
194
 
195
195
  def atomic_push(conn, payloads)
196
- if payloads.first["at"]
196
+ if payloads.first.key?("at")
197
197
  conn.zadd("schedule", payloads.map { |hash|
198
198
  at = hash.delete("at").to_s
199
199
  [at, Sidekiq.dump_json(hash)]
@@ -219,6 +219,10 @@ module Sidekiq
219
219
  end
220
220
 
221
221
  def normalize_item(item)
222
+ # 6.0.0 push_bulk bug, #4321
223
+ # TODO Remove after a while...
224
+ item.delete("at") if item.key?("at") && item["at"].nil?
225
+
222
226
  raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: { 'class' => SomeWorker, 'args' => ['bob', 1, :foo => 'bar'] }") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
223
227
  raise(ArgumentError, "Job args must be an Array") unless item["args"].is_a?(Array)
224
228
  raise(ArgumentError, "Job class must be either a Class or String representation of the class name") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
@@ -226,13 +230,19 @@ module Sidekiq
226
230
  raise(ArgumentError, "Job tags must be an Array") if item["tags"] && !item["tags"].is_a?(Array)
227
231
  # raise(ArgumentError, "Arguments must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices") unless JSON.load(JSON.dump(item['args'])) == item['args']
228
232
 
229
- normalized_hash(item["class"])
230
- .each { |key, value| item[key] = value if item[key].nil? }
233
+ # merge in the default sidekiq_options for the item's class and/or wrapped element
234
+ # this allows ActiveJobs to control sidekiq_options too.
235
+ defaults = normalized_hash(item["class"])
236
+ defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?("get_sidekiq_options")
237
+ item = defaults.merge(item)
238
+
239
+ raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
231
240
 
232
241
  item["class"] = item["class"].to_s
233
242
  item["queue"] = item["queue"].to_s
234
243
  item["jid"] ||= SecureRandom.hex(12)
235
244
  item["created_at"] ||= Time.now.to_f
245
+
236
246
  item
237
247
  end
238
248
 
@@ -39,7 +39,7 @@ module Sidekiq
39
39
  # attribute to expose the underlying thing.
40
40
  h = {
41
41
  class: job_hash["wrapped"] || job_hash["class"],
42
- jid: job_hash["jid"],
42
+ jid: job_hash["jid"]
43
43
  }
44
44
  h[:bid] = job_hash["bid"] if job_hash["bid"]
45
45
  h[:tags] = job_hash["tags"] if job_hash["tags"]
@@ -189,13 +189,13 @@ module Sidekiq
189
189
  handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
190
190
  end
191
191
 
192
+ send_to_morgue(msg) unless msg["dead"] == false
193
+
192
194
  Sidekiq.death_handlers.each do |handler|
193
195
  handler.call(msg, exception)
194
196
  rescue => e
195
197
  handle_exception(e, {context: "Error calling death handler", job: msg})
196
198
  end
197
-
198
- send_to_morgue(msg) unless msg["dead"] == false
199
199
  end
200
200
 
201
201
  def send_to_morgue(msg)
@@ -16,7 +16,7 @@ module Sidekiq
16
16
  proc { Sidekiq::VERSION },
17
17
  proc { |me, data| data["tag"] },
18
18
  proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
19
- proc { |me, data| "stopping" if me.stopping? },
19
+ proc { |me, data| "stopping" if me.stopping? }
20
20
  ]
21
21
 
22
22
  attr_accessor :manager, :poller, :fetcher
@@ -83,7 +83,7 @@ module Sidekiq
83
83
  Sidekiq.redis do |conn|
84
84
  conn.pipelined do
85
85
  conn.srem("processes", identity)
86
- conn.del("#{identity}:workers")
86
+ conn.unlink("#{identity}:workers")
87
87
  end
88
88
  end
89
89
  rescue
@@ -96,6 +96,32 @@ module Sidekiq
96
96
 
97
97
  end
98
98
 
99
+ def self.flush_stats
100
+ fails = Processor::FAILURE.reset
101
+ procd = Processor::PROCESSED.reset
102
+ return if fails + procd == 0
103
+
104
+ nowdate = Time.now.utc.strftime("%Y-%m-%d")
105
+ begin
106
+ Sidekiq.redis do |conn|
107
+ conn.pipelined do
108
+ conn.incrby("stat:processed", procd)
109
+ conn.incrby("stat:processed:#{nowdate}", procd)
110
+ conn.expire("stat:processed:#{nowdate}", STATS_TTL)
111
+
112
+ conn.incrby("stat:failed", fails)
113
+ conn.incrby("stat:failed:#{nowdate}", fails)
114
+ conn.expire("stat:failed:#{nowdate}", STATS_TTL)
115
+ end
116
+ end
117
+ rescue => ex
118
+ # we're exiting the process, things might be shut down so don't
119
+ # try to handle the exception
120
+ Sidekiq.logger.warn("Unable to flush stats: #{ex}")
121
+ end
122
+ end
123
+ at_exit(&method(:flush_stats))
124
+
99
125
  def ❤
100
126
  key = identity
101
127
  fails = procd = 0
@@ -118,7 +144,7 @@ module Sidekiq
118
144
  conn.incrby("stat:failed:#{nowdate}", fails)
119
145
  conn.expire("stat:failed:#{nowdate}", STATS_TTL)
120
146
 
121
- conn.del(workers_key)
147
+ conn.unlink(workers_key)
122
148
  curstate.each_pair do |tid, hash|
123
149
  conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
124
150
  end
@@ -146,7 +172,7 @@ module Sidekiq
146
172
  ::Process.kill(msg, ::Process.pid)
147
173
  rescue => e
148
174
  # ignore all redis/network issues
149
- logger.error("heartbeat: #{e.message}")
175
+ logger.error("heartbeat: #{e}")
150
176
  # don't lose the counts if there was a network issue
151
177
  Processor::PROCESSED.incr(procd)
152
178
  Processor::FAILURE.incr(fails)
@@ -163,7 +189,7 @@ module Sidekiq
163
189
  "concurrency" => @options[:concurrency],
164
190
  "queues" => @options[:queues].uniq,
165
191
  "labels" => @options[:labels],
166
- "identity" => identity,
192
+ "identity" => identity
167
193
  }
168
194
  end
169
195
  end
@@ -23,7 +23,7 @@ module Sidekiq
23
23
  "info" => 1,
24
24
  "warn" => 2,
25
25
  "error" => 3,
26
- "fatal" => 4,
26
+ "fatal" => 4
27
27
  }
28
28
  LEVELS.default_proc = proc do |_, level|
29
29
  Sidekiq.logger.warn("Invalid log level: #{level.inspect}")
@@ -31,23 +31,23 @@ module Sidekiq
31
31
  end
32
32
 
33
33
  def debug?
34
- level >= 0
34
+ level <= 0
35
35
  end
36
36
 
37
37
  def info?
38
- level >= 1
38
+ level <= 1
39
39
  end
40
40
 
41
41
  def warn?
42
- level >= 2
42
+ level <= 2
43
43
  end
44
44
 
45
45
  def error?
46
- level >= 3
46
+ level <= 3
47
47
  end
48
48
 
49
49
  def fatal?
50
- level >= 4
50
+ level <= 4
51
51
  end
52
52
 
53
53
  def local_level
@@ -83,7 +83,7 @@ module Sidekiq
83
83
  # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+.
84
84
  # FIXME: Remove when the minimum Ruby version supports overriding Logger#level.
85
85
  def add(severity, message = nil, progname = nil, &block)
86
- severity ||= UNKNOWN
86
+ severity ||= ::Logger::UNKNOWN
87
87
  progname ||= @progname
88
88
 
89
89
  return true if @logdev.nil? || severity < level
@@ -104,7 +104,7 @@ module Sidekiq
104
104
  class Logger < ::Logger
105
105
  include LoggingUtils
106
106
 
107
- def initialize(*args)
107
+ def initialize(*args, **kwargs)
108
108
  super
109
109
  self.formatter = Sidekiq.log_formatter
110
110
  end
@@ -152,7 +152,7 @@ module Sidekiq
152
152
  pid: ::Process.pid,
153
153
  tid: tid,
154
154
  lvl: severity,
155
- msg: message,
155
+ msg: message
156
156
  }
157
157
  c = ctx
158
158
  hash["ctx"] = c unless c.empty?