sidekiq 6.3.1 → 6.4.2

Sign up to get free protection for your applications and access to all of the features.

Potentially problematic release.


This version of sidekiq might be problematic. Click here for more details.

Files changed (49)
  1. checksums.yaml +4 -4
  2. data/Changes.md +56 -0
  3. data/LICENSE +3 -3
  4. data/README.md +7 -2
  5. data/bin/sidekiq +3 -3
  6. data/bin/sidekiqload +57 -65
  7. data/bin/sidekiqmon +1 -1
  8. data/lib/generators/sidekiq/job_generator.rb +57 -0
  9. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  10. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  11. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  12. data/lib/sidekiq/api.rb +68 -62
  13. data/lib/sidekiq/cli.rb +16 -6
  14. data/lib/sidekiq/client.rb +44 -64
  15. data/lib/sidekiq/delay.rb +2 -0
  16. data/lib/sidekiq/extensions/generic_proxy.rb +1 -1
  17. data/lib/sidekiq/fetch.rb +2 -2
  18. data/lib/sidekiq/job_logger.rb +15 -27
  19. data/lib/sidekiq/job_retry.rb +28 -26
  20. data/lib/sidekiq/job_util.rb +67 -0
  21. data/lib/sidekiq/launcher.rb +37 -36
  22. data/lib/sidekiq/logger.rb +8 -18
  23. data/lib/sidekiq/manager.rb +13 -15
  24. data/lib/sidekiq/middleware/chain.rb +4 -4
  25. data/lib/sidekiq/middleware/current_attributes.rb +6 -1
  26. data/lib/sidekiq/middleware/i18n.rb +4 -4
  27. data/lib/sidekiq/monitor.rb +1 -1
  28. data/lib/sidekiq/paginator.rb +8 -8
  29. data/lib/sidekiq/processor.rb +27 -27
  30. data/lib/sidekiq/rails.rb +11 -4
  31. data/lib/sidekiq/redis_connection.rb +2 -2
  32. data/lib/sidekiq/scheduled.rb +13 -3
  33. data/lib/sidekiq/testing/inline.rb +4 -4
  34. data/lib/sidekiq/testing.rb +36 -35
  35. data/lib/sidekiq/util.rb +13 -0
  36. data/lib/sidekiq/version.rb +1 -1
  37. data/lib/sidekiq/web/application.rb +5 -2
  38. data/lib/sidekiq/web/csrf_protection.rb +2 -2
  39. data/lib/sidekiq/web/helpers.rb +4 -4
  40. data/lib/sidekiq/web.rb +3 -3
  41. data/lib/sidekiq/worker.rb +71 -16
  42. data/lib/sidekiq.rb +27 -15
  43. data/web/assets/javascripts/application.js +58 -26
  44. data/web/assets/stylesheets/application-dark.css +13 -17
  45. data/web/assets/stylesheets/application.css +4 -5
  46. data/web/views/_summary.erb +1 -1
  47. data/web/views/busy.erb +3 -3
  48. metadata +8 -7
  49. data/lib/generators/sidekiq/worker_generator.rb +0 -57
@@ -15,7 +15,7 @@ module Sidekiq
15
15
  proc { "sidekiq" },
16
16
  proc { Sidekiq::VERSION },
17
17
  proc { |me, data| data["tag"] },
18
- proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
18
+ proc { |me, data| "[#{Processor::WORK_STATE.size} of #{data["concurrency"]} busy]" },
19
19
  proc { |me, data| "stopping" if me.stopping? }
20
20
  ]
21
21
 
@@ -43,9 +43,7 @@ module Sidekiq
43
43
  @poller.terminate
44
44
  end
45
45
 
46
- # Shuts down the process. This method does not
47
- # return until all work is complete and cleaned up.
48
- # It can take up to the timeout to complete.
46
+ # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
49
47
  def stop
50
48
  deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
51
49
 
@@ -55,7 +53,7 @@ module Sidekiq
55
53
 
56
54
  @manager.stop(deadline)
57
55
 
58
- # Requeue everything in case there was a worker who grabbed work while stopped
56
+ # Requeue everything in case there was a thread which fetched a job while the process was stopped.
59
57
  # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
60
58
  strategy = @options[:fetch]
61
59
  strategy.bulk_requeue([], @options)
@@ -84,9 +82,9 @@ module Sidekiq
84
82
  # Note we don't stop the heartbeat thread; if the process
85
83
  # doesn't actually exit, it'll reappear in the Web UI.
86
84
  Sidekiq.redis do |conn|
87
- conn.pipelined do
88
- conn.srem("processes", identity)
89
- conn.unlink("#{identity}:workers")
85
+ conn.pipelined do |pipeline|
86
+ pipeline.srem("processes", identity)
87
+ pipeline.unlink("#{identity}:work")
90
88
  end
91
89
  end
92
90
  rescue
@@ -107,14 +105,14 @@ module Sidekiq
107
105
  nowdate = Time.now.utc.strftime("%Y-%m-%d")
108
106
  begin
109
107
  Sidekiq.redis do |conn|
110
- conn.pipelined do
111
- conn.incrby("stat:processed", procd)
112
- conn.incrby("stat:processed:#{nowdate}", procd)
113
- conn.expire("stat:processed:#{nowdate}", STATS_TTL)
114
-
115
- conn.incrby("stat:failed", fails)
116
- conn.incrby("stat:failed:#{nowdate}", fails)
117
- conn.expire("stat:failed:#{nowdate}", STATS_TTL)
108
+ conn.pipelined do |pipeline|
109
+ pipeline.incrby("stat:processed", procd)
110
+ pipeline.incrby("stat:processed:#{nowdate}", procd)
111
+ pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
112
+
113
+ pipeline.incrby("stat:failed", fails)
114
+ pipeline.incrby("stat:failed:#{nowdate}", fails)
115
+ pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
118
116
  end
119
117
  end
120
118
  rescue => ex
@@ -132,26 +130,29 @@ module Sidekiq
132
130
  begin
133
131
  fails = Processor::FAILURE.reset
134
132
  procd = Processor::PROCESSED.reset
135
- curstate = Processor::WORKER_STATE.dup
133
+ curstate = Processor::WORK_STATE.dup
136
134
 
137
- workers_key = "#{key}:workers"
138
135
  nowdate = Time.now.utc.strftime("%Y-%m-%d")
139
136
 
140
137
  Sidekiq.redis do |conn|
141
- conn.multi do
142
- conn.incrby("stat:processed", procd)
143
- conn.incrby("stat:processed:#{nowdate}", procd)
144
- conn.expire("stat:processed:#{nowdate}", STATS_TTL)
145
-
146
- conn.incrby("stat:failed", fails)
147
- conn.incrby("stat:failed:#{nowdate}", fails)
148
- conn.expire("stat:failed:#{nowdate}", STATS_TTL)
138
+ conn.multi do |transaction|
139
+ transaction.incrby("stat:processed", procd)
140
+ transaction.incrby("stat:processed:#{nowdate}", procd)
141
+ transaction.expire("stat:processed:#{nowdate}", STATS_TTL)
142
+
143
+ transaction.incrby("stat:failed", fails)
144
+ transaction.incrby("stat:failed:#{nowdate}", fails)
145
+ transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
146
+ end
149
147
 
150
- conn.unlink(workers_key)
148
+ # work is the current set of executing jobs
149
+ work_key = "#{key}:work"
150
+ conn.pipelined do |transaction|
151
+ transaction.unlink(work_key)
151
152
  curstate.each_pair do |tid, hash|
152
- conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
153
+ transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
153
154
  end
154
- conn.expire(workers_key, 60)
155
+ transaction.expire(work_key, 60)
155
156
  end
156
157
  end
157
158
 
@@ -161,17 +162,17 @@ module Sidekiq
161
162
  kb = memory_usage(::Process.pid)
162
163
 
163
164
  _, exists, _, _, msg = Sidekiq.redis { |conn|
164
- conn.multi {
165
- conn.sadd("processes", key)
166
- conn.exists?(key)
167
- conn.hmset(key, "info", to_json,
165
+ conn.multi { |transaction|
166
+ transaction.sadd("processes", key)
167
+ transaction.exists?(key)
168
+ transaction.hmset(key, "info", to_json,
168
169
  "busy", curstate.size,
169
170
  "beat", Time.now.to_f,
170
171
  "rtt_us", rtt,
171
172
  "quiet", @done,
172
173
  "rss", kb)
173
- conn.expire(key, 60)
174
- conn.rpop("#{key}-signals")
174
+ transaction.expire(key, 60)
175
+ transaction.rpop("#{key}-signals")
175
176
  }
176
177
  }
177
178
 
@@ -214,7 +215,7 @@ module Sidekiq
214
215
  Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
215
216
  Ensure Redis is running in the same AZ or datacenter as Sidekiq.
216
217
  If these values are close to 100,000, that means your Sidekiq process may be
217
- CPU overloaded; see https://github.com/mperham/sidekiq/discussions/5039
218
+ CPU-saturated; reduce your concurrency and/or see https://github.com/mperham/sidekiq/discussions/5039
218
219
  EOM
219
220
  RTT_READINGS.reset
220
221
  end
@@ -16,6 +16,10 @@ module Sidekiq
16
16
  def self.current
17
17
  Thread.current[:sidekiq_context] ||= {}
18
18
  end
19
+
20
+ def self.add(k, v)
21
+ Thread.current[:sidekiq_context][k] = v
22
+ end
19
23
  end
20
24
 
21
25
  module LoggingUtils
@@ -31,24 +35,10 @@ module Sidekiq
31
35
  nil
32
36
  end
33
37
 
34
- def debug?
35
- level <= 0
36
- end
37
-
38
- def info?
39
- level <= 1
40
- end
41
-
42
- def warn?
43
- level <= 2
44
- end
45
-
46
- def error?
47
- level <= 3
48
- end
49
-
50
- def fatal?
51
- level <= 4
38
+ LEVELS.each do |level, numeric_level|
39
+ define_method("#{level}?") do
40
+ local_level.nil? ? super() : local_level <= numeric_level
41
+ end
52
42
  end
53
43
 
54
44
  def local_level
@@ -50,14 +50,11 @@ module Sidekiq
50
50
  return if @done
51
51
  @done = true
52
52
 
53
- logger.info { "Terminating quiet workers" }
53
+ logger.info { "Terminating quiet threads" }
54
54
  @workers.each { |x| x.terminate }
55
55
  fire_event(:quiet, reverse: true)
56
56
  end
57
57
 
58
- # hack for quicker development / testing environment #2774
59
- PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
60
-
61
58
  def stop(deadline)
62
59
  quiet
63
60
  fire_event(:shutdown, reverse: true)
@@ -68,13 +65,8 @@ module Sidekiq
68
65
  sleep PAUSE_TIME
69
66
  return if @workers.empty?
70
67
 
71
- logger.info { "Pausing to allow workers to finish..." }
72
- remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
73
- while remaining > PAUSE_TIME
74
- return if @workers.empty?
75
- sleep PAUSE_TIME
76
- remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
77
- end
68
+ logger.info { "Pausing to allow jobs to finish..." }
69
+ wait_for(deadline) { @workers.empty? }
78
70
  return if @workers.empty?
79
71
 
80
72
  hard_shutdown
@@ -104,7 +96,7 @@ module Sidekiq
104
96
  private
105
97
 
106
98
  def hard_shutdown
107
- # We've reached the timeout and we still have busy workers.
99
+ # We've reached the timeout and we still have busy threads.
108
100
  # They must die but their jobs shall live on.
109
101
  cleanup = nil
110
102
  @plock.synchronize do
@@ -114,12 +106,12 @@ module Sidekiq
114
106
  if cleanup.size > 0
115
107
  jobs = cleanup.map { |p| p.job }.compact
116
108
 
117
- logger.warn { "Terminating #{cleanup.size} busy worker threads" }
118
- logger.warn { "Work still in progress #{jobs.inspect}" }
109
+ logger.warn { "Terminating #{cleanup.size} busy threads" }
110
+ logger.warn { "Jobs still in progress #{jobs.inspect}" }
119
111
 
120
112
  # Re-enqueue unfinished jobs
121
113
  # NOTE: You may notice that we may push a job back to redis before
122
- # the worker thread is terminated. This is ok because Sidekiq's
114
+ # the thread is terminated. This is ok because Sidekiq's
123
115
  # contract says that jobs are run AT LEAST once. Process termination
124
116
  # is delayed until we're certain the jobs are back in Redis because
125
117
  # it is worse to lose a job than to run it twice.
@@ -130,6 +122,12 @@ module Sidekiq
130
122
  cleanup.each do |processor|
131
123
  processor.kill
132
124
  end
125
+
126
+ # when this method returns, we immediately call `exit` which may not give
127
+ # the remaining threads time to run `ensure` blocks, etc. We pause here up
128
+ # to 3 seconds to give threads a minimal amount of time to run `ensure` blocks.
129
+ deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
130
+ wait_for(deadline) { @workers.empty? }
133
131
  end
134
132
  end
135
133
  end
@@ -44,10 +44,10 @@ module Sidekiq
44
44
  # This is an example of a minimal server middleware:
45
45
  #
46
46
  # class MyServerHook
47
- # def call(worker_instance, msg, queue)
48
- # puts "Before work"
47
+ # def call(job_instance, msg, queue)
48
+ # puts "Before job"
49
49
  # yield
50
- # puts "After work"
50
+ # puts "After job"
51
51
  # end
52
52
  # end
53
53
  #
@@ -56,7 +56,7 @@ module Sidekiq
56
56
  # to Redis:
57
57
  #
58
58
  # class MyClientHook
59
- # def call(worker_class, msg, queue, redis_pool)
59
+ # def call(job_class, msg, queue, redis_pool)
60
60
  # puts "Before push"
61
61
  # result = yield
62
62
  # puts "After push"
@@ -20,7 +20,12 @@ module Sidekiq
20
20
  end
21
21
 
22
22
  def call(_, job, _, _)
23
- job["cattr"] = @klass.attributes
23
+ attrs = @klass.attributes
24
+ if job.has_key?("cattr")
25
+ job["cattr"].merge!(attrs)
26
+ else
27
+ job["cattr"] = attrs
28
+ end
24
29
  yield
25
30
  end
26
31
  end
@@ -10,16 +10,16 @@ module Sidekiq::Middleware::I18n
10
10
  # Get the current locale and store it in the message
11
11
  # to be sent to Sidekiq.
12
12
  class Client
13
- def call(_worker, msg, _queue, _redis)
14
- msg["locale"] ||= I18n.locale
13
+ def call(_jobclass, job, _queue, _redis)
14
+ job["locale"] ||= I18n.locale
15
15
  yield
16
16
  end
17
17
  end
18
18
 
19
19
  # Pull the msg locale out and set the current thread to use it.
20
20
  class Server
21
- def call(_worker, msg, _queue, &block)
22
- I18n.with_locale(msg.fetch("locale", I18n.default_locale), &block)
21
+ def call(_jobclass, job, _queue, &block)
22
+ I18n.with_locale(job.fetch("locale", I18n.default_locale), &block)
23
23
  end
24
24
  end
25
25
  end
@@ -17,7 +17,7 @@ class Sidekiq::Monitor
17
17
  end
18
18
  send(section)
19
19
  rescue => e
20
- puts "Couldn't get status: #{e}"
20
+ abort "Couldn't get status: #{e}"
21
21
  end
22
22
 
23
23
  def all
@@ -16,22 +16,22 @@ module Sidekiq
16
16
 
17
17
  case type
18
18
  when "zset"
19
- total_size, items = conn.multi {
20
- conn.zcard(key)
19
+ total_size, items = conn.multi { |transaction|
20
+ transaction.zcard(key)
21
21
  if rev
22
- conn.zrevrange(key, starting, ending, with_scores: true)
22
+ transaction.zrevrange(key, starting, ending, with_scores: true)
23
23
  else
24
- conn.zrange(key, starting, ending, with_scores: true)
24
+ transaction.zrange(key, starting, ending, with_scores: true)
25
25
  end
26
26
  }
27
27
  [current_page, total_size, items]
28
28
  when "list"
29
- total_size, items = conn.multi {
30
- conn.llen(key)
29
+ total_size, items = conn.multi { |transaction|
30
+ transaction.llen(key)
31
31
  if rev
32
- conn.lrange(key, -ending - 1, -starting - 1)
32
+ transaction.lrange(key, -ending - 1, -starting - 1)
33
33
  else
34
- conn.lrange(key, starting, ending)
34
+ transaction.lrange(key, starting, ending)
35
35
  end
36
36
  }
37
37
  items.reverse! if rev
@@ -11,7 +11,7 @@ module Sidekiq
11
11
  #
12
12
  # 1. fetches a job from Redis
13
13
  # 2. executes the job
14
- # a. instantiate the Worker
14
+ # a. instantiate the job class
15
15
  # b. run the middleware chain
16
16
  # c. call #perform
17
17
  #
@@ -80,12 +80,12 @@ module Sidekiq
80
80
  end
81
81
 
82
82
  def get_one
83
- work = @strategy.retrieve_work
83
+ uow = @strategy.retrieve_work
84
84
  if @down
85
85
  logger.info { "Redis is online, #{::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @down} sec downtime" }
86
86
  @down = nil
87
87
  end
88
- work
88
+ uow
89
89
  rescue Sidekiq::Shutdown
90
90
  rescue => ex
91
91
  handle_fetch_exception(ex)
@@ -130,10 +130,10 @@ module Sidekiq
130
130
  # Effectively this block denotes a "unit of work" to Rails.
131
131
  @reloader.call do
132
132
  klass = constantize(job_hash["class"])
133
- worker = klass.new
134
- worker.jid = job_hash["jid"]
135
- @retrier.local(worker, jobstr, queue) do
136
- yield worker
133
+ inst = klass.new
134
+ inst.jid = job_hash["jid"]
135
+ @retrier.local(inst, jobstr, queue) do
136
+ yield inst
137
137
  end
138
138
  end
139
139
  end
@@ -142,9 +142,9 @@ module Sidekiq
142
142
  end
143
143
  end
144
144
 
145
- def process(work)
146
- jobstr = work.job
147
- queue = work.queue_name
145
+ def process(uow)
146
+ jobstr = uow.job
147
+ queue = uow.queue_name
148
148
 
149
149
  # Treat malformed JSON as a special case: job goes straight to the morgue.
150
150
  job_hash = nil
@@ -154,14 +154,14 @@ module Sidekiq
154
154
  handle_exception(ex, {context: "Invalid JSON for job", jobstr: jobstr})
155
155
  # we can't notify because the job isn't a valid hash payload.
156
156
  DeadSet.new.kill(jobstr, notify_failure: false)
157
- return work.acknowledge
157
+ return uow.acknowledge
158
158
  end
159
159
 
160
160
  ack = false
161
161
  begin
162
- dispatch(job_hash, queue, jobstr) do |worker|
163
- Sidekiq.server_middleware.invoke(worker, job_hash, queue) do
164
- execute_job(worker, job_hash["args"])
162
+ dispatch(job_hash, queue, jobstr) do |inst|
163
+ Sidekiq.server_middleware.invoke(inst, job_hash, queue) do
164
+ execute_job(inst, job_hash["args"])
165
165
  end
166
166
  end
167
167
  ack = true
@@ -186,14 +186,14 @@ module Sidekiq
186
186
  if ack
187
187
  # We don't want a shutdown signal to interrupt job acknowledgment.
188
188
  Thread.handle_interrupt(Sidekiq::Shutdown => :never) do
189
- work.acknowledge
189
+ uow.acknowledge
190
190
  end
191
191
  end
192
192
  end
193
193
  end
194
194
 
195
- def execute_job(worker, cloned_args)
196
- worker.perform(*cloned_args)
195
+ def execute_job(inst, cloned_args)
196
+ inst.perform(*cloned_args)
197
197
  end
198
198
 
199
199
  # Ruby doesn't provide atomic counters out of the box so we'll
@@ -219,39 +219,39 @@ module Sidekiq
219
219
  end
220
220
 
221
221
  # jruby's Hash implementation is not threadsafe, so we wrap it in a mutex here
222
- class SharedWorkerState
222
+ class SharedWorkState
223
223
  def initialize
224
- @worker_state = {}
224
+ @work_state = {}
225
225
  @lock = Mutex.new
226
226
  end
227
227
 
228
228
  def set(tid, hash)
229
- @lock.synchronize { @worker_state[tid] = hash }
229
+ @lock.synchronize { @work_state[tid] = hash }
230
230
  end
231
231
 
232
232
  def delete(tid)
233
- @lock.synchronize { @worker_state.delete(tid) }
233
+ @lock.synchronize { @work_state.delete(tid) }
234
234
  end
235
235
 
236
236
  def dup
237
- @lock.synchronize { @worker_state.dup }
237
+ @lock.synchronize { @work_state.dup }
238
238
  end
239
239
 
240
240
  def size
241
- @lock.synchronize { @worker_state.size }
241
+ @lock.synchronize { @work_state.size }
242
242
  end
243
243
 
244
244
  def clear
245
- @lock.synchronize { @worker_state.clear }
245
+ @lock.synchronize { @work_state.clear }
246
246
  end
247
247
  end
248
248
 
249
249
  PROCESSED = Counter.new
250
250
  FAILURE = Counter.new
251
- WORKER_STATE = SharedWorkerState.new
251
+ WORK_STATE = SharedWorkState.new
252
252
 
253
253
  def stats(jobstr, queue)
254
- WORKER_STATE.set(tid, {queue: queue, payload: jobstr, run_at: Time.now.to_i})
254
+ WORK_STATE.set(tid, {queue: queue, payload: jobstr, run_at: Time.now.to_i})
255
255
 
256
256
  begin
257
257
  yield
@@ -259,7 +259,7 @@ module Sidekiq
259
259
  FAILURE.incr
260
260
  raise
261
261
  ensure
262
- WORKER_STATE.delete(tid)
262
+ WORK_STATE.delete(tid)
263
263
  PROCESSED.incr
264
264
  end
265
265
  end
data/lib/sidekiq/rails.rb CHANGED
@@ -1,6 +1,6 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "sidekiq/worker"
3
+ require "sidekiq/job"
4
4
 
5
5
  module Sidekiq
6
6
  class Rails < ::Rails::Engine
@@ -33,21 +33,28 @@ module Sidekiq
33
33
  # end
34
34
  initializer "sidekiq.active_job_integration" do
35
35
  ActiveSupport.on_load(:active_job) do
36
- include ::Sidekiq::Worker::Options unless respond_to?(:sidekiq_options)
36
+ include ::Sidekiq::Job::Options unless respond_to?(:sidekiq_options)
37
37
  end
38
38
  end
39
39
 
40
40
  initializer "sidekiq.rails_logger" do
41
41
  Sidekiq.configure_server do |_|
42
- # This is the integration code necessary so that if code uses `Rails.logger.info "Hello"`,
42
+ # This is the integration code necessary so that if a job uses `Rails.logger.info "Hello"`,
43
43
  # it will appear in the Sidekiq console with all of the job context. See #5021 and
44
44
  # https://github.com/rails/rails/blob/b5f2b550f69a99336482739000c58e4e04e033aa/railties/lib/rails/commands/server/server_command.rb#L82-L84
45
- unless ::ActiveSupport::Logger.logger_outputs_to?(::Rails.logger, $stdout)
45
+ unless ::Rails.logger == ::Sidekiq.logger || ::ActiveSupport::Logger.logger_outputs_to?(::Rails.logger, $stdout)
46
46
  ::Rails.logger.extend(::ActiveSupport::Logger.broadcast(::Sidekiq.logger))
47
47
  end
48
48
  end
49
49
  end
50
50
 
51
+ config.before_configuration do
52
+ dep = ActiveSupport::Deprecation.new("7.0", "Sidekiq")
53
+ dep.deprecate_methods(Sidekiq.singleton_class,
54
+ default_worker_options: :default_job_options,
55
+ "default_worker_options=": :default_job_options=)
56
+ end
57
+
51
58
  # This hook happens after all initializers are run, just before returning
52
59
  # from config/environment.rb back to sidekiq/cli.rb.
53
60
  #
@@ -38,7 +38,7 @@ module Sidekiq
38
38
 
39
39
  private
40
40
 
41
- # Sidekiq needs a lot of concurrent Redis connections.
41
+ # Sidekiq needs many concurrent Redis connections.
42
42
  #
43
43
  # We need a connection for each Processor.
44
44
  # We need a connection for Pro's real-time change listener
@@ -47,7 +47,7 @@ module Sidekiq
47
47
  # - enterprise's leader election
48
48
  # - enterprise's cron support
49
49
  def verify_sizing(size, concurrency)
50
- raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
50
+ raise ArgumentError, "Your Redis connection pool is too small for Sidekiq. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
51
51
  end
52
52
 
53
53
  def build_client(options)
@@ -19,10 +19,11 @@ module Sidekiq
19
19
  LUA
20
20
 
21
21
  def initialize
22
+ @done = false
22
23
  @lua_zpopbyscore_sha = nil
23
24
  end
24
25
 
25
- def enqueue_jobs(now = Time.now.to_f.to_s, sorted_sets = SETS)
26
+ def enqueue_jobs(sorted_sets = SETS)
26
27
  # A job's "score" in Redis is the time at which it should be processed.
27
28
  # Just check Redis for the set of jobs with a timestamp before now.
28
29
  Sidekiq.redis do |conn|
@@ -31,7 +32,7 @@ module Sidekiq
31
32
  # We need to go through the list one at a time to reduce the risk of something
32
33
  # going wrong between the time jobs are popped from the scheduled queue and when
33
34
  # they are pushed onto a work queue and losing the jobs.
34
- while (job = zpopbyscore(conn, keys: [sorted_set], argv: [now]))
35
+ while !@done && (job = zpopbyscore(conn, keys: [sorted_set], argv: [Time.now.to_f.to_s]))
35
36
  Sidekiq::Client.push(Sidekiq.load_json(job))
36
37
  Sidekiq.logger.debug { "enqueued #{sorted_set}: #{job}" }
37
38
  end
@@ -39,10 +40,17 @@ module Sidekiq
39
40
  end
40
41
  end
41
42
 
43
+ def terminate
44
+ @done = true
45
+ end
46
+
42
47
  private
43
48
 
44
49
  def zpopbyscore(conn, keys: nil, argv: nil)
45
- @lua_zpopbyscore_sha = conn.script(:load, LUA_ZPOPBYSCORE) if @lua_zpopbyscore_sha.nil?
50
+ if @lua_zpopbyscore_sha.nil?
51
+ raw_conn = conn.respond_to?(:redis) ? conn.redis : conn
52
+ @lua_zpopbyscore_sha = raw_conn.script(:load, LUA_ZPOPBYSCORE)
53
+ end
46
54
 
47
55
  conn.evalsha(@lua_zpopbyscore_sha, keys: keys, argv: argv)
48
56
  rescue Redis::CommandError => e
@@ -74,6 +82,8 @@ module Sidekiq
74
82
  # Shut down this instance, will pause until the thread is dead.
75
83
  def terminate
76
84
  @done = true
85
+ @enq.terminate if @enq.respond_to?(:terminate)
86
+
77
87
  if @thread
78
88
  t = @thread
79
89
  @thread = nil
@@ -4,7 +4,7 @@ require "sidekiq/testing"
4
4
 
5
5
  ##
6
6
  # The Sidekiq inline infrastructure overrides perform_async so that it
7
- # actually calls perform instead. This allows workers to be run inline in a
7
+ # actually calls perform instead. This allows jobs to be run inline in a
8
8
  # testing environment.
9
9
  #
10
10
  # This is similar to `Resque.inline = true` functionality.
@@ -15,8 +15,8 @@ require "sidekiq/testing"
15
15
  #
16
16
  # $external_variable = 0
17
17
  #
18
- # class ExternalWorker
19
- # include Sidekiq::Worker
18
+ # class ExternalJob
19
+ # include Sidekiq::Job
20
20
  #
21
21
  # def perform
22
22
  # $external_variable = 1
@@ -24,7 +24,7 @@ require "sidekiq/testing"
24
24
  # end
25
25
  #
26
26
  # assert_equal 0, $external_variable
27
- # ExternalWorker.perform_async
27
+ # ExternalJob.perform_async
28
28
  # assert_equal 1, $external_variable
29
29
  #
30
30
  Sidekiq::Testing.inline!