sidekiq 6.0.7 → 6.1.0

@@ -20,6 +20,7 @@ module Sidekiq
   labels: [],
   concurrency: 10,
   require: ".",
+ strict: true,
   environment: nil,
   timeout: 25,
   poll_interval_average: nil,

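The server defaults now include strict: true. A minimal sketch of what that means at runtime, assuming the public Sidekiq.options hash (which is seeded from these defaults); queue weights given on the command line or in sidekiq.yml still flip it off, as the CLI hunks further down show:

    require "sidekiq"

    # Strict (in-order) queue processing is now the default until a weight is configured.
    Sidekiq.options[:strict]   # => true
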
@@ -95,10 +96,11 @@ module Sidekiq
   retryable = true
   begin
   yield conn
- rescue Redis::CommandError => ex
+ rescue Redis::BaseError => ex
   # 2550 Failover can cause the server to become a replica, need
   # to disconnect and reopen the socket to get back to the primary.
- if retryable && ex.message =~ /READONLY/
+ # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+ if retryable && ex.message =~ /READONLY|NOREPLICAS/
   conn.disconnect!
   retryable = false
   retry

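Rescuing Redis::BaseError, the root of redis-rb's exception hierarchy, instead of Redis::CommandError widens the one-shot reconnect to any error whose message carries READONLY or NOREPLICAS. A standalone sketch of the same retry-once pattern, assuming redis-rb and a reachable server:

    require "redis"  # redis-rb

    # READONLY (command sent to a demoted primary) and NOREPLICAS ("Not enough
    # replicas") both surface as command errors; one disconnect and retry is
    # usually enough to reach the new primary after a failover.
    def with_failover_retry(conn)
      retryable = true
      begin
        yield conn
      rescue Redis::BaseError => ex
        if retryable && ex.message =~ /READONLY|NOREPLICAS/
          conn.disconnect!
          retryable = false
          retry
        end
        raise
      end
    end

    with_failover_retry(Redis.new) { |c| c.set("greeting", "hello") }
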
@@ -921,7 +921,7 @@ module Sidekiq
   procs = conn.sscan_each("processes").to_a
   procs.sort.each do |key|
   valid, workers = conn.pipelined {
- conn.exists(key)
+ conn.exists?(key)
   conn.hgetall("#{key}:workers")
   }
   next unless valid

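redis-rb is moving the plain exists command to an Integer reply, so call sites that want a Boolean switch to exists?. A quick illustration, assuming redis-rb 4.2+ and a local server:

    require "redis"

    redis = Redis.new
    redis.set("some-key", "1")

    # exists? always returns a Boolean, which is what the pipelined check above relies on.
    redis.exists?("some-key")  # => true
    redis.exists?("missing")   # => false
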
@@ -228,8 +228,7 @@ module Sidekiq
   opts = parse_config(opts[:config_file]).merge(opts) if opts[:config_file]

   # set defaults
- opts[:queues] = ["default"] if opts[:queues].nil? || opts[:queues].empty?
- opts[:strict] = true if opts[:strict].nil?
+ opts[:queues] = ["default"] if opts[:queues].nil?
   opts[:concurrency] = Integer(ENV["RAILS_MAX_THREADS"]) if opts[:concurrency].nil? && ENV["RAILS_MAX_THREADS"]

   # merge with defaults

@@ -358,6 +357,12 @@ module Sidekiq
   Sidekiq.logger.level = ::Logger::DEBUG if options[:verbose]
   end

+ INTERNAL_OPTIONS = [
+ # These are options that are set internally and cannot be
+ # set via the config file or command line arguments.
+ :strict
+ ]
+
   def parse_config(path)
   opts = YAML.load(ERB.new(File.read(path)).result) || {}

@@ -368,6 +373,8 @@ module Sidekiq
   end

   opts = opts.merge(opts.delete(environment.to_sym) || {})
+ opts.delete(*INTERNAL_OPTIONS)
+
   parse_queues(opts, opts.delete(:queues) || [])

   opts

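Because :strict is listed in INTERNAL_OPTIONS and stripped from the parsed config file, a strict: key in sidekiq.yml no longer has any effect; ordering is now derived purely from queue weights. A small sketch of what the parser does with such a key:

    # As if loaded from sidekiq.yml via ERB + YAML:
    opts = {strict: false, concurrency: 5, queues: ["default"]}

    INTERNAL_OPTIONS = [:strict]
    opts.delete(*INTERNAL_OPTIONS)   # the splat hands the single key to Hash#delete

    opts  # => {concurrency: 5, queues: ["default"]}
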
@@ -379,6 +386,7 @@ module Sidekiq

   def parse_queue(opts, queue, weight = nil)
   opts[:queues] ||= []
+ opts[:strict] = true if opts[:strict].nil?
   raise ArgumentError, "queues: #{queue} cannot be defined twice" if opts[:queues].include?(queue)
   [weight.to_i, 1].max.times { opts[:queues] << queue }
   opts[:strict] = false if weight.to_i > 0

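The strict default now lives in parse_queue itself: the first parsed queue seeds strict: true, and any explicit weight duplicates the queue entry and flips strict off. A standalone re-implementation (the CLI helper itself is private) to show the effect:

    # Mirrors the helper above, minus the duplicate-queue check, for illustration.
    def parse_queue(opts, queue, weight = nil)
      opts[:queues] ||= []
      opts[:strict] = true if opts[:strict].nil?
      [weight.to_i, 1].max.times { opts[:queues] << queue }
      opts[:strict] = false if weight.to_i > 0
    end

    opts = {}
    parse_queue(opts, "critical", 2)   # as from `-q critical,2`
    parse_queue(opts, "default")       # as from `-q default`
    opts  # => {:queues=>["critical", "critical", "default"], :strict=>false}
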
@@ -90,16 +90,17 @@ module Sidekiq
   # Returns an array of the of pushed jobs' jids. The number of jobs pushed can be less
   # than the number given if the middleware stopped processing for one or more jobs.
   def push_bulk(items)
- arg = items["args"].first
- return [] unless arg # no jobs to push
- raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless arg.is_a?(Array)
+ args = items["args"]
+ raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless args.is_a?(Array) && args.all?(Array)
+ return [] if args.empty? # no jobs to push

   at = items.delete("at")
   raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all?(Numeric))
+ raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size

   normed = normalize_item(items)
- payloads = items["args"].map.with_index { |args, index|
- copy = normed.merge("args" => args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
+ payloads = args.map.with_index { |job_args, index|
+ copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
   copy["at"] = (at.is_a?(Array) ? at[index] : at) if at

   result = process_single(items["class"], copy)

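push_bulk now validates the whole args collection up front and requires an at Array to match it in size. A sketch of the call shape under the tightened checks; HardWorker is a placeholder class and a reachable Redis is assumed:

    require "sidekiq"

    class HardWorker
      include Sidekiq::Worker
      def perform(id); end
    end

    now = Time.now.to_f
    Sidekiq::Client.push_bulk(
      "class" => HardWorker,
      "args"  => [[1], [2], [3]],                  # must be an Array of Arrays
      "at"    => [now + 60, now + 120, now + 180]  # must match "args" in size
    )

    # Both of these now raise ArgumentError:
    #   "args" => [1, 2, 3]    (not an Array of Arrays)
    #   "at"   => [now + 60]   (size differs from "args")
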
@@ -218,16 +219,20 @@ module Sidekiq
   end
   end

+ def validate(item)
+ raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
+ raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
+ raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
+ raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
+ raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+ end
+
   def normalize_item(item)
   # 6.0.0 push_bulk bug, #4321
   # TODO Remove after a while...
   item.delete("at") if item.key?("at") && item["at"].nil?

- raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: { 'class' => SomeWorker, 'args' => ['bob', 1, :foo => 'bar'] }") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
- raise(ArgumentError, "Job args must be an Array") unless item["args"].is_a?(Array)
- raise(ArgumentError, "Job class must be either a Class or String representation of the class name") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
- raise(ArgumentError, "Job 'at' must be a Numeric timestamp") if item.key?("at") && !item["at"].is_a?(Numeric)
- raise(ArgumentError, "Job tags must be an Array") if item["tags"] && !item["tags"].is_a?(Array)
+ validate(item)
   # raise(ArgumentError, "Arguments must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices") unless JSON.load(JSON.dump(item['args'])) == item['args']

   # merge in the default sidekiq_options for the item's class and/or wrapped element

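Extracting validate keeps normalize_item small and puts the offending payload into each error message. Illustrative calls; GreetingWorker is a placeholder and only the valid push needs a reachable Redis:

    require "sidekiq"

    class GreetingWorker
      include Sidekiq::Worker
      def perform(name); end
    end

    Sidekiq::Client.push("class" => GreetingWorker, "args" => ["bob"])   # ok

    # These fail validation before anything is written to Redis, and the
    # ArgumentError message now includes the inspected item:
    Sidekiq::Client.push("class" => GreetingWorker)                      # missing 'args'
    Sidekiq::Client.push("class" => GreetingWorker, "args" => "bob")     # args not an Array
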
@@ -6,9 +6,10 @@ module Sidekiq
   module Extensions
   ##
   # Adds 'delay', 'delay_for' and `delay_until` methods to ActiveRecord to offload instance method
- # execution to Sidekiq. Examples:
+ # execution to Sidekiq.
   #
- # User.recent_signups.each { |user| user.delay.mark_as_awesome }
+ # @example
+ # User.recent_signups.each { |user| user.delay.mark_as_awesome }
   #
   # Please note, this is not recommended as this will serialize the entire
   # object to Redis. Your Sidekiq jobs should pass IDs, not entire instances.

@@ -5,11 +5,12 @@ require "sidekiq/extensions/generic_proxy"
   module Sidekiq
   module Extensions
   ##
- # Adds 'delay', 'delay_for' and `delay_until` methods to all Classes to offload class method
- # execution to Sidekiq. Examples:
+ # Adds `delay`, `delay_for` and `delay_until` methods to all Classes to offload class method
+ # execution to Sidekiq.
   #
- # User.delay.delete_inactive
- # Wikipedia.delay.download_changes_for(Date.today)
+ # @example
+ # User.delay.delete_inactive
+ # Wikipedia.delay.download_changes_for(Date.today)
   #
   class DelayedClass
   include Sidekiq::Worker

@@ -25,8 +25,10 @@ module Sidekiq
   }

   def initialize(options)
- @strictly_ordered_queues = !!options[:strict]
- @queues = options[:queues].map { |q| "queue:#{q}" }
+ raise ArgumentError, "missing queue list" unless options[:queues]
+ @options = options
+ @strictly_ordered_queues = !!@options[:strict]
+ @queues = @options[:queues].map { |q| "queue:#{q}" }
   if @strictly_ordered_queues
   @queues.uniq!
   @queues << TIMEOUT

@@ -38,24 +40,9 @@ module Sidekiq
   UnitOfWork.new(*work) if work
   end

- # Creating the Redis#brpop command takes into account any
- # configured queue weights. By default Redis#brpop returns
- # data from the first queue that has pending elements. We
- # recreate the queue command each time we invoke Redis#brpop
- # to honor weights and avoid queue starvation.
- def queues_cmd
- if @strictly_ordered_queues
- @queues
- else
- queues = @queues.shuffle!.uniq
- queues << TIMEOUT
- queues
- end
- end
-
   # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
   # an instance method will make it async to the Fetcher actor
- def self.bulk_requeue(inprogress, options)
+ def bulk_requeue(inprogress, options)
   return if inprogress.empty?

   Sidekiq.logger.debug { "Re-queueing terminated jobs" }

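Making bulk_requeue an instance method completes the move from "fetch strategy as a class" to "fetch strategy as an object"; the Launcher hunk below injects BasicFetch.new(options) as the default. A hypothetical wrapper strategy showing the instance contract (LoggingFetch is not part of Sidekiq):

    require "sidekiq"
    require "sidekiq/fetch"

    # Delegates to BasicFetch and logs every unit of work it hands out.
    class LoggingFetch
      def initialize(options)
        @inner = Sidekiq::BasicFetch.new(options)   # raises unless options[:queues] is present
      end

      def retrieve_work
        work = @inner.retrieve_work
        Sidekiq.logger.debug { "fetched a job from #{work.queue_name}" } if work
        work
      end

      # Instance method in 6.1.0 (it was `def self.bulk_requeue` in 6.0.x).
      def bulk_requeue(inprogress, options)
        @inner.bulk_requeue(inprogress, options)
      end
    end

    # Registered as an instance, typically from an initializer:
    Sidekiq.configure_server do |config|
      config.options[:fetch] = LoggingFetch.new(config.options)
    end
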
@@ -76,5 +63,20 @@ module Sidekiq
   rescue => ex
   Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
   end
+
+ # Creating the Redis#brpop command takes into account any
+ # configured queue weights. By default Redis#brpop returns
+ # data from the first queue that has pending elements. We
+ # recreate the queue command each time we invoke Redis#brpop
+ # to honor weights and avoid queue starvation.
+ def queues_cmd
+ if @strictly_ordered_queues
+ @queues
+ else
+ queues = @queues.shuffle!.uniq
+ queues << TIMEOUT
+ queues
+ end
+ end
   end
   end

@@ -22,6 +22,7 @@ module Sidekiq
   attr_accessor :manager, :poller, :fetcher

   def initialize(options)
+ options[:fetch] ||= BasicFetch.new(options)
   @manager = Sidekiq::Manager.new(options)
   @poller = Sidekiq::Scheduled::Poller.new
   @done = false

@@ -56,7 +57,7 @@ module Sidekiq

   # Requeue everything in case there was a worker who grabbed work while stopped
   # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
- strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+ strategy = @options[:fetch]
   strategy.bulk_requeue([], @options)

   clear_heartbeat

@@ -157,7 +158,7 @@ module Sidekiq
   _, exists, _, _, msg = Sidekiq.redis { |conn|
   conn.multi {
   conn.sadd("processes", key)
- conn.exists(key)
+ conn.exists?(key)
   conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
   conn.expire(key, 60)
   conn.rpop("#{key}-signals")

@@ -35,7 +35,7 @@ module Sidekiq
   @done = false
   @workers = Set.new
   @count.times do
- @workers << Processor.new(self)
+ @workers << Processor.new(self, options)
   end
   @plock = Mutex.new
   end

@@ -90,7 +90,7 @@ module Sidekiq
   @plock.synchronize do
   @workers.delete(processor)
   unless @done
- p = Processor.new(self)
+ p = Processor.new(self, options)
   @workers << p
   p.start
   end

@@ -123,7 +123,7 @@ module Sidekiq
   # contract says that jobs are run AT LEAST once. Process termination
   # is delayed until we're certain the jobs are back in Redis because
   # it is worse to lose a job than to run it twice.
- strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+ strategy = @options[:fetch]
   strategy.bulk_requeue(jobs, @options)
   end

@@ -28,15 +28,15 @@ module Sidekiq
   attr_reader :thread
   attr_reader :job

- def initialize(mgr)
+ def initialize(mgr, options)
   @mgr = mgr
   @down = false
   @done = false
   @job = nil
   @thread = nil
- @strategy = (mgr.options[:fetch] || Sidekiq::BasicFetch).new(mgr.options)
- @reloader = Sidekiq.options[:reloader]
- @job_logger = (mgr.options[:job_logger] || Sidekiq::JobLogger).new
+ @strategy = options[:fetch]
+ @reloader = options[:reloader] || proc { |&block| block.call }
+ @job_logger = (options[:job_logger] || Sidekiq::JobLogger).new
   @retrier = Sidekiq::JobRetry.new
   end

@@ -4,6 +4,22 @@ require "sidekiq/worker"

   module Sidekiq
   class Rails < ::Rails::Engine
+ class Reloader
+ def initialize(app = ::Rails.application)
+ @app = app
+ end
+
+ def call
+ @app.reloader.wrap do
+ yield
+ end
+ end
+
+ def inspect
+ "#<Sidekiq::Rails::Reloader @app=#{@app.class.name}>"
+ end
+ end
+
   # By including the Options module, we allow AJs to directly control sidekiq features
   # via the *sidekiq_options* class method and, for instance, not use AJ's retry system.
   # AJ retries don't show up in the Sidekiq UI Retries tab, save any error data, can't be

@@ -23,8 +39,6 @@ module Sidekiq

   # This hook happens after all initializers are run, just before returning
   # from config/environment.rb back to sidekiq/cli.rb.
- # We have to add the reloader after initialize to see if cache_classes has
- # been turned on.
   #
   # None of this matters on the client-side, only within the Sidekiq process itself.
   config.after_initialize do

@@ -32,21 +46,5 @@ module Sidekiq
   Sidekiq.options[:reloader] = Sidekiq::Rails::Reloader.new
   end
   end
-
- class Reloader
- def initialize(app = ::Rails.application)
- @app = app
- end
-
- def call
- @app.reloader.wrap do
- yield
- end
- end
-
- def inspect
- "#<Sidekiq::Rails::Reloader @app=#{@app.class.name}>"
- end
- end
   end
   end

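The reloader contract is just "an object whose call method wraps a block": the Processor hunk above falls back to a pass-through proc, and this engine registers a wrapper around the Rails application reloader after initialization. A tiny sketch of that contract:

    # Default (non-Rails) behaviour: a no-op wrapper around the job body.
    reloader = proc { |&block| block.call }
    reloader.call { puts "job body runs here" }

    # The Rails engine registers an object with the same shape, roughly:
    #   Sidekiq.options[:reloader] = Sidekiq::Rails::Reloader.new
    # whose #call wraps the block in @app.reloader.wrap, so each job sees
    # freshly reloaded application code in development.
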
@@ -8,16 +8,14 @@ module Sidekiq
   class RedisConnection
   class << self
   def create(options = {})
- options.keys.each do |key|
- options[key.to_sym] = options.delete(key)
- end
+ symbolized_options = options.transform_keys(&:to_sym)

- if !options[:url] && (u = determine_redis_provider)
- options[:url] = u
+ if !symbolized_options[:url] && (u = determine_redis_provider)
+ symbolized_options[:url] = u
   end

- size = if options[:size]
- options[:size]
+ size = if symbolized_options[:size]
+ symbolized_options[:size]
   elsif Sidekiq.server?
   # Give ourselves plenty of connections. pool is lazy
   # so we won't create them until we need them.

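Hash#transform_keys (Ruby 2.5+) returns a new hash, so the caller's options hash is no longer mutated while its keys are symbolized; the rest of create therefore reads from symbolized_options:

    options = {"url" => "redis://localhost:6379/0", "size" => 5}

    symbolized = options.transform_keys(&:to_sym)
    symbolized  # => {:url=>"redis://localhost:6379/0", :size=>5}
    options     # => {"url"=>"redis://localhost:6379/0", "size"=>5}  (untouched)
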
@@ -30,11 +28,11 @@ module Sidekiq

   verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server?

- pool_timeout = options[:pool_timeout] || 1
- log_info(options)
+ pool_timeout = symbolized_options[:pool_timeout] || 1
+ log_info(symbolized_options)

   ConnectionPool.new(timeout: pool_timeout, size: size) do
- build_client(options)
+ build_client(symbolized_options)
   end
   end

@@ -97,7 +95,12 @@ module Sidekiq
   redacted = "REDACTED"

   # deep clone so we can muck with these options all we want
- scrubbed_options = Marshal.load(Marshal.dump(options))
+ #
+ # exclude SSL params from dump-and-load because some information isn't
+ # safely dumpable in current Rubies
+ keys = options.keys
+ keys.delete(:ssl_params)
+ scrubbed_options = Marshal.load(Marshal.dump(options.slice(*keys)))
   if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
   uri.password = redacted
   scrubbed_options[:url] = uri.to_s

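The Marshal round-trip exists only to deep-copy the options before the URL password is redacted for logging; :ssl_params is skipped because OpenSSL objects such as certificates and verify callbacks are not safely dumpable. A sketch of the same scrub, with made-up option values:

    require "uri"

    options = {
      url: "redis://user:s3cret@localhost:6379/0",
      ssl_params: {verify_mode: 1}   # stands in for values Marshal may refuse to dump
    }

    keys = options.keys
    keys.delete(:ssl_params)
    scrubbed = Marshal.load(Marshal.dump(options.slice(*keys)))

    uri = URI.parse(scrubbed[:url])
    uri.password = "REDACTED"
    scrubbed[:url] = uri.to_s   # => "redis://user:REDACTED@localhost:6379/0"
    options[:url]               # original URL left intact
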
@@ -124,7 +127,7 @@ module Sidekiq
   # initialization code at all.
   #
   p = ENV["REDIS_PROVIDER"]
- if p && p =~ /\:/
+ if p && p =~ /:/
   raise <<~EOM
   REDIS_PROVIDER should be set to the name of the variable which contains the Redis URL, not a URL itself.
   Platforms like Heroku will sell addons that publish a *_URL variable. You need to tell Sidekiq with REDIS_PROVIDER, e.g.:

@@ -85,7 +85,7 @@ module Sidekiq
   notify(FDSTORE, unset_env)
   end

- # @param [Boolean] true if the service manager expects watchdog keep-alive
+ # @return [Boolean] true if the service manager expects watchdog keep-alive
   # notification messages to be sent from this process.
   #
   # If the $WATCHDOG_USEC environment variable is set,

@@ -337,7 +337,7 @@ module Sidekiq
   Sidekiq::Extensions::DelayedModel.extend(TestingExtensions) if defined?(Sidekiq::Extensions::DelayedModel)
   end

- if defined?(::Rails) && Rails.respond_to?(:env) && !Rails.env.test?
+ if defined?(::Rails) && Rails.respond_to?(:env) && !Rails.env.test? && !$TESTING
   puts("**************************************************")
   puts("⛔️ WARNING: Sidekiq testing API enabled, but this is not the test environment. Your jobs will not go to Redis.")
   puts("**************************************************")

@@ -1,5 +1,5 @@
   # frozen_string_literal: true

   module Sidekiq
- VERSION = "6.0.7"
+ VERSION = "6.1.0"
   end

@@ -10,8 +10,9 @@ require "sidekiq/web/helpers"
   require "sidekiq/web/router"
   require "sidekiq/web/action"
   require "sidekiq/web/application"
+ require "sidekiq/web/csrf_protection"

- require "rack/protection"
+ require "rack/content_length"

   require "rack/builder"
   require "rack/file"

@@ -154,14 +155,14 @@ module Sidekiq
   def build_sessions
   middlewares = self.middlewares

- unless using?(::Rack::Protection) || ENV["RACK_ENV"] == "test"
- middlewares.unshift [[::Rack::Protection, {use: :authenticity_token}], nil]
- end
-
   s = sessions
- return unless s

- unless using? ::Rack::Session::Cookie
+ # turn on CSRF protection if sessions are enabled and this is not the test env
+ if s && !using?(CsrfProtection) && ENV["RACK_ENV"] != "test"
+ middlewares.unshift [[CsrfProtection], nil]
+ end
+
+ if s && !using?(::Rack::Session::Cookie)
   unless (secret = Web.session_secret)
   require "securerandom"
   secret = SecureRandom.hex(64)

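Rack::Protection is replaced by Sidekiq's own CsrfProtection middleware, and it is only added when sessions are enabled. For the standalone Web UI that means a session middleware still has to be provided, for example in config.ru; the secret file name below is just an illustration:

    # config.ru -- run with `rackup`; assumes REDIS_URL points at your Redis.
    require "sidekiq/web"

    # A session is required for CSRF protection; bring your own secret
    # (for example generated once with SecureRandom.hex(32)).
    use Rack::Session::Cookie,
      secret: File.read(".session.key"),
      same_site: true,
      max_age: 86400

    run Sidekiq::Web
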
@@ -172,6 +173,13 @@ module Sidekiq

   middlewares.unshift [[::Rack::Session::Cookie, options], nil]
   end
+
+ # Since Sidekiq::WebApplication no longer calculates its own
+ # Content-Length response header, we must ensure that the Rack middleware
+ # that does this is loaded
+ unless using? ::Rack::ContentLength
+ middlewares.unshift [[::Rack::ContentLength], nil]
+ end
   end

   def build