sidekiq 8.0.7 → 8.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. checksums.yaml +4 -4
  2. data/Changes.md +32 -0
  3. data/lib/generators/sidekiq/templates/job.rb.erb +1 -1
  4. data/lib/sidekiq/api.rb +20 -6
  5. data/lib/sidekiq/capsule.rb +4 -0
  6. data/lib/sidekiq/client.rb +15 -1
  7. data/lib/sidekiq/component.rb +2 -1
  8. data/lib/sidekiq/config.rb +11 -6
  9. data/lib/sidekiq/fetch.rb +1 -0
  10. data/lib/sidekiq/job/iterable.rb +23 -14
  11. data/lib/sidekiq/job.rb +2 -2
  12. data/lib/sidekiq/job_logger.rb +4 -2
  13. data/lib/sidekiq/job_retry.rb +10 -2
  14. data/lib/sidekiq/launcher.rb +14 -5
  15. data/lib/sidekiq/loader.rb +57 -0
  16. data/lib/sidekiq/middleware/i18n.rb +2 -0
  17. data/lib/sidekiq/monitor.rb +4 -8
  18. data/lib/sidekiq/profiler.rb +1 -0
  19. data/lib/sidekiq/rails.rb +3 -1
  20. data/lib/sidekiq/ring_buffer.rb +1 -0
  21. data/lib/sidekiq/version.rb +1 -1
  22. data/lib/sidekiq/web/action.rb +3 -3
  23. data/lib/sidekiq/web/application.rb +10 -0
  24. data/lib/sidekiq/web/helpers.rb +0 -8
  25. data/lib/sidekiq.rb +5 -0
  26. data/web/assets/javascripts/application.js +21 -5
  27. data/web/assets/stylesheets/style.css +17 -7
  28. data/web/locales/ar.yml +1 -0
  29. data/web/locales/cs.yml +1 -0
  30. data/web/locales/da.yml +1 -0
  31. data/web/locales/de.yml +1 -0
  32. data/web/locales/el.yml +1 -0
  33. data/web/locales/en.yml +1 -0
  34. data/web/locales/es.yml +1 -0
  35. data/web/locales/fa.yml +1 -0
  36. data/web/locales/fr.yml +1 -0
  37. data/web/locales/gd.yml +1 -0
  38. data/web/locales/he.yml +1 -0
  39. data/web/locales/hi.yml +1 -0
  40. data/web/locales/it.yml +1 -0
  41. data/web/locales/ja.yml +1 -0
  42. data/web/locales/ko.yml +1 -0
  43. data/web/locales/lt.yml +1 -0
  44. data/web/locales/nb.yml +1 -0
  45. data/web/locales/nl.yml +1 -0
  46. data/web/locales/pl.yml +1 -0
  47. data/web/locales/pt-BR.yml +1 -0
  48. data/web/locales/pt.yml +1 -0
  49. data/web/locales/ru.yml +1 -0
  50. data/web/locales/sv.yml +1 -0
  51. data/web/locales/ta.yml +1 -0
  52. data/web/locales/tr.yml +1 -0
  53. data/web/locales/uk.yml +1 -0
  54. data/web/locales/ur.yml +1 -0
  55. data/web/locales/vi.yml +1 -0
  56. data/web/locales/zh-CN.yml +1 -0
  57. data/web/locales/zh-TW.yml +1 -0
  58. data/web/views/_footer.erb +1 -1
  59. data/web/views/_metrics_period_select.erb +1 -1
  60. data/web/views/_paging.erb +0 -1
  61. data/web/views/_poll_link.erb +2 -2
  62. data/web/views/busy.erb +4 -8
  63. data/web/views/dashboard.erb +3 -3
  64. data/web/views/dead.erb +3 -3
  65. data/web/views/filtering.erb +2 -2
  66. data/web/views/layout.erb +7 -7
  67. data/web/views/metrics.erb +7 -7
  68. data/web/views/morgue.erb +8 -4
  69. data/web/views/queue.erb +2 -2
  70. data/web/views/queues.erb +4 -4
  71. data/web/views/retries.erb +9 -5
  72. data/web/views/retry.erb +2 -2
  73. data/web/views/scheduled.erb +8 -4
  74. data/web/views/scheduled_job_info.erb +2 -2
  75. metadata +2 -1
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: b6411cfbee23ece1d53e775bf2726c85d236469d655eedfc8bc808277693fa1b
-  data.tar.gz: eef531b98d9f9e6fd5dc2fe2d2d59ac4a033134417be773b453dbf159f125156
+  metadata.gz: ef51643693879cd574d6edd54ef41443defc4825c855d5eed0e07d42e35aa1cd
+  data.tar.gz: 2d9acb8d89326185356ee9818ca543e8a6992bac0a00262e06aa9394052978c1
 SHA512:
-  metadata.gz: 8cd9e7e77116f9a9d07ac4cf9f805ffd58d6d81fe53fa52cf5c293fb33ae6bdaff6fce351527bf87df056225794016157949c6dcb9f1d3ddc37169c105cc15ce
-  data.tar.gz: f1b0fceebc6a94e2d4ca441fe834f3e82a1d0702da0cd70d14cf8fb0eb2f9490d6b1842e0e5c34e4a5f4e6a13a193806920826bd8b6f816297db471fc7464313
+  metadata.gz: 761f705abeb6b5deb31591d07d9d9ea0d79d5d6f74367f622842994aa0a57866bd3af9add1089ce2b29d8dd4a9cd110d839036504a1ae0830ac59dbca186ff26
+  data.tar.gz: 5df2e3b47cb7ed56a51ff90b1a8d23ea96f74e9fd64a9116d249a266c7a074ef2a4bac09d66202c66a4f631c6f15bcae167df8ae733f43020f0b53886ae3e564
data/Changes.md CHANGED
@@ -2,6 +2,38 @@
 
 [Sidekiq Changes](https://github.com/sidekiq/sidekiq/blob/main/Changes.md) | [Sidekiq Pro Changes](https://github.com/sidekiq/sidekiq/blob/main/Pro-Changes.md) | [Sidekiq Enterprise Changes](https://github.com/sidekiq/sidekiq/blob/main/Ent-Changes.md)
 
+8.0.9
+----------
+
+- Implement idle Redis connection reaping, will be activated in 8.1 [#6663]
+- Updated `Sidekiq::Process` API to provide capsule data. The `queues` and `weights`
+  data will be removed from Redis in Sidekiq 8.1, as this data can now be found in the
+  `capsules` element. [#6295]
+- Restore bulk action buttons on Scheduled, Retry and Dead tabs [#6833, deve1212]
+- Support logging additional job attributes [#6846, bschrag620]
+- Fix display of long job args [#6836]
+- Create development lifecycle (`docs/sdlc.md`) and security (`docs/SECURITY.md`) policy
+  documentation for Sidekiq's current workflows
+
+8.0.8
+----------
+
+- Allow an optional global iteration max runtime. After executing for this length of time,
+  Sidekiq will re-queue the job to continue execution at a later time [#6819, fatkodima]
+  ```ruby
+  Sidekiq.configure_server do |cfg|
+    cfg[:max_iteration_runtime] = 600 # ten minutes
+  end
+  ```
+- Add `discarded_at` attribute when discarding a job so death handlers can distinguish between
+  a job which was killed and one that was discarded. [#6820, gstokkink]
+- `perform_bulk` now accepts an `:at` array of times to schedule each job at the corresponding time.
+  `perform_bulk(args: [[1], [2]], at: [Time.now, Time.now + 1])` [#6790, fatkodima]
+- `perform_bulk` now accepts a `:spread_interval` value to schedule jobs over
+  the next N seconds. `perform_bulk(..., spread_interval: 60)` [#6792, fatkodima]
+- Fix unintended display of flash messages in the Web UI due to session key collision
+- Add support for lazy load hooks [#6825]
+
 8.0.7
 ----------
 
data/lib/generators/sidekiq/templates/job.rb.erb CHANGED
@@ -6,4 +6,4 @@ class <%= class_name %>Job
     # Do something
   end
 end
-<% end -%>
+<% end -%>
data/lib/sidekiq/api.rb CHANGED
@@ -1059,9 +1059,9 @@ module Sidekiq
 #   'started_at' => <process start time>,
 #   'pid' => 12345,
 #   'tag' => 'myapp'
-#   'concurrency' => 25,
-#   'queues' => ['default', 'low'],
-#   'busy' => 10,
+#   'concurrency' => 5,
+#   'capsules' => {"default" => {"mode" => "weighted", "concurrency" => 5, "weights" => {"default" => 2, "low" => 1}}},
+#   'busy' => 3,
 #   'beat' => <last heartbeat>,
 #   'identity' => <unique string identifying the process>,
 #   'embedded' => true,
@@ -1089,12 +1089,25 @@ module Sidekiq
       self["identity"]
     end
 
+    # deprecated, use capsules below
     def queues
-      self["queues"]
+      capsules.values.flat_map { |x| x["weights"].keys }.uniq
     end
 
+    # deprecated, use capsules below
     def weights
-      self["weights"]
+      hash = {}
+      capsules.values.each do |cap|
+        # Note: will lose data if two capsules are processing the same named queue
+        cap["weights"].each_pair do |queue, weight|
+          hash[queue] = weight
+        end
+      end
+      hash
+    end
+
+    def capsules
+      self["capsules"]
     end
     end
     def version
@@ -1168,7 +1181,6 @@ module Sidekiq
 #   #   thread_id is a unique identifier per thread
 #   #   work is a `Sidekiq::Work` instance that has the following accessor methods.
 #   #   [work.queue, work.run_at, work.payload]
-#   #   run_at is an epoch Integer.
 # end
 #
 class WorkSet
@@ -1322,3 +1334,5 @@ module Sidekiq
     end
   end
 end
+
+Sidekiq.loader.run_load_hooks(:api)
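For orientation, a brief sketch (not part of the diff) of how the new `capsules` data surfaces through the existing `Sidekiq::ProcessSet`/`Sidekiq::Process` API; the deprecated `queues` and `weights` readers shown above are now derived from it:

```ruby
require "sidekiq/api"

Sidekiq::ProcessSet.new.each do |process|
  # `capsules` is the new hash of capsule name => {"mode", "concurrency", "weights"}.
  process.capsules.each do |name, cap|
    puts "#{process.identity} capsule=#{name} mode=#{cap["mode"]} " \
         "concurrency=#{cap["concurrency"]} weights=#{cap["weights"].inspect}"
  end

  # Deprecated readers still work; they are computed from the capsule data.
  puts process.queues.inspect
  puts process.weights.inspect
end
```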
data/lib/sidekiq/capsule.rb CHANGED
@@ -38,6 +38,10 @@ module Sidekiq
     @mode = :strict
   end
 
+  def to_h
+    {concurrency: concurrency, mode: mode, weights: weights}
+  end
+
   def fetcher
     @fetcher ||= begin
       instance = (config[:fetch_class] || Sidekiq::BasicFetch).new(self)
data/lib/sidekiq/client.rb CHANGED
@@ -117,6 +117,9 @@ module Sidekiq
     # larger than 1000 but YMMV based on network quality, size of job args, etc.
     # A large number of jobs can cause a bit of Redis command processing latency.
     #
+    # Accepts an additional `:spread_interval` option (in seconds) to randomly spread
+    # the jobs schedule times over the specified interval.
+    #
     # Takes the same arguments as #push except that args is expected to be
     # an Array of Arrays. All other keys are duplicated for each job. Each job
     # is run through the client middleware pipeline and each job gets its own Job ID
@@ -131,13 +134,24 @@ module Sidekiq
     def push_bulk(items)
       batch_size = items.delete(:batch_size) || items.delete("batch_size") || 1_000
       args = items["args"]
-      at = items.delete("at")
+      at = items.delete("at") || items.delete(:at)
       raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
       raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
 
       jid = items.delete("jid")
       raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1
 
+      spread_interval = items.delete(:spread_interval) || items.delete("spread_interval")
+      raise ArgumentError, "Jobs 'spread_interval' must be a positive Numeric" if spread_interval && (!spread_interval.is_a?(Numeric) || spread_interval <= 0)
+      raise ArgumentError, "Only one of 'at' or 'spread_interval' can be provided" if at && spread_interval
+
+      if !at && spread_interval
+        # Do not use spread interval smaller than pooling interval.
+        spread_interval = [spread_interval, 5].max
+        now = Time.now.to_f
+        at = args.map { now + rand * spread_interval }
+      end
+
       normed = normalize_item(items)
       slice_index = 0
       result = args.each_slice(batch_size).flat_map do |slice|
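To make the new options above concrete, a hedged usage sketch (the `HardJob` class is hypothetical); `perform_bulk` forwards these keywords down to `push_bulk`:

```ruby
class HardJob
  include Sidekiq::Job

  def perform(user_id)
    # ...
  end
end

args = [[1], [2], [3]]
now  = Time.now.to_f

# Schedule each job at the corresponding timestamp; the :at array must match args in size.
HardJob.perform_bulk(args, at: [now, now + 60, now + 120])

# Or spread the jobs randomly over the next 10 minutes (mutually exclusive with :at).
HardJob.perform_bulk(args, spread_interval: 600)
```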
data/lib/sidekiq/component.rb CHANGED
@@ -19,7 +19,8 @@ module Sidekiq
   DEFAULT_THREAD_PRIORITY = -1
 
   ##
-  # Sidekiq::Component assumes a config instance is available at @config
+  # Sidekiq::Component provides a set of utility methods depending only
+  # on Sidekiq::Config. It assumes a config instance is available at @config.
   module Component # :nodoc:
     attr_reader :config
 
data/lib/sidekiq/config.rb CHANGED
@@ -17,10 +17,9 @@ module Sidekiq
     poll_interval_average: nil,
     average_scheduled_poll_interval: 5,
     on_complex_arguments: :raise,
-    iteration: {
-      max_job_runtime: nil,
-      retry_backoff: 0
-    },
+    # if the Iterable job runs longer than this value (in seconds), then the job
+    # will be interrupted after the current iteration and re-enqueued at the back of the queue
+    max_iteration_runtime: nil,
     error_handlers: [],
     death_handlers: [],
     lifecycle_events: {
@@ -36,7 +35,8 @@ module Sidekiq
     dead_max_jobs: 10_000,
     dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
     reloader: proc { |&block| block.call },
-    backtrace_cleaner: ->(backtrace) { backtrace }
+    backtrace_cleaner: ->(backtrace) { backtrace },
+    logged_job_attributes: ["bid", "tags"]
   }
 
   ERROR_HANDLER = ->(ex, ctx, cfg = Sidekiq.default_configuration) {
@@ -145,11 +145,16 @@ module Sidekiq
     @redis_config = @redis_config.merge(hash)
   end
 
+  def reap_idle_redis_connections(timeout = nil)
+    self[:reap_connections] = timeout
+  end
+  alias_method :reap, :reap_idle_redis_connections
+
   def redis_pool
     Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
   end
 
-  private def local_redis_pool
+  def local_redis_pool
     # this is our internal client/housekeeping pool. each capsule has its
     # own pool for executing threads.
     @redis ||= new_redis_pool(10, "internal")
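Pulling the new settings together, a hedged server configuration sketch (`account_id` is an illustrative custom attribute, not a Sidekiq default); note the changelog states connection reaping is only activated in 8.1:

```ruby
Sidekiq.configure_server do |config|
  # Interrupt an Iterable job after ~10 minutes of accumulated runtime and re-enqueue it.
  config[:max_iteration_runtime] = 600

  # Extra job hash keys to include in the logging context (default: ["bid", "tags"]).
  config[:logged_job_attributes] = ["bid", "tags", "account_id"]

  # Close Redis connections that have been idle for more than 30 seconds.
  config.reap_idle_redis_connections(30)
end
```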
data/lib/sidekiq/fetch.rb CHANGED
@@ -7,6 +7,7 @@ require "sidekiq/capsule"
 module Sidekiq # :nodoc:
   class BasicFetch
     include Sidekiq::Component
+
     # We want the fetch operation to timeout every few seconds so the thread
     # can check if the process is shutting down.
     TIMEOUT = 2
data/lib/sidekiq/job/iterable.rb CHANGED
@@ -55,7 +55,7 @@ module Sidekiq
     def cancel!
       return @_cancelled if cancelled?
 
-      key = "it-#{jid}"
+      key = iteration_key
       _, result, _ = Sidekiq.redis do |c|
         c.pipelined do |p|
           p.hsetnx(key, "cancelled", Time.now.to_i)
@@ -143,7 +143,7 @@ module Sidekiq
       fetch_previous_iteration_state
 
       @_executions += 1
-      @_start_time = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      @_start_time = mono_now
 
       enumerator = build_enumerator(*args, cursor: @_cursor)
       unless enumerator
@@ -177,7 +177,7 @@ module Sidekiq
     private
 
     def is_cancelled?
-      @_cancelled = Sidekiq.redis { |c| c.hget("it-#{jid}", "cancelled") }
+      @_cancelled = Sidekiq.redis { |c| c.hget(iteration_key, "cancelled") }
     end
 
     def fetch_previous_iteration_state
@@ -204,17 +204,17 @@ module Sidekiq
 
       time_limit = Sidekiq.default_configuration[:timeout]
       found_record = false
-      state_flushed_at = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      state_flushed_at = mono_now
 
       enumerator.each do |object, cursor|
         found_record = true
         @_cursor = cursor
         @current_object = object
 
-        is_interrupted = interrupted?
-        if ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - state_flushed_at >= STATE_FLUSH_INTERVAL || is_interrupted
+        interrupt_job = interrupted? || should_interrupt?
+        if mono_now - state_flushed_at >= STATE_FLUSH_INTERVAL || interrupt_job
           _, _, cancelled = flush_state
-          state_flushed_at = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+          state_flushed_at = mono_now
           if cancelled
             @_cancelled = true
             on_cancel
@@ -223,9 +223,9 @@ module Sidekiq
           end
         end
 
-        return false if is_interrupted
+        return false if interrupt_job
 
-        verify_iteration_time(time_limit, object) do
+        verify_iteration_time(time_limit) do
           around_iteration do
             each_iteration(object, *arguments)
           rescue Exception
@@ -238,16 +238,16 @@ module Sidekiq
       logger.debug("Enumerator found nothing to iterate!") unless found_record
       true
     ensure
-      @_runtime += (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @_start_time)
+      @_runtime += (mono_now - @_start_time)
     end
 
-    def verify_iteration_time(time_limit, object)
-      start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+    def verify_iteration_time(time_limit)
+      start = mono_now
       yield
-      finish = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      finish = mono_now
       total = finish - start
       if total > time_limit
-        logger.warn { "Iteration took longer (%.2f) than Sidekiq's shutdown timeout (%d) when processing `%s`. This can lead to job processing problems during deploys" % [total, time_limit, object] }
+        logger.warn { "Iteration took longer (%.2f) than Sidekiq's shutdown timeout (%d). This can lead to job processing problems during deploys" % [total, time_limit] }
       end
     end
 
@@ -273,6 +273,11 @@ module Sidekiq
       end
     end
 
+    def should_interrupt?
+      max_iteration_runtime = Sidekiq.default_configuration[:max_iteration_runtime]
+      max_iteration_runtime && (mono_now - @_start_time > max_iteration_runtime)
+    end
+
     def flush_state
       key = iteration_key
       state = {
@@ -308,6 +313,10 @@ module Sidekiq
         raise "Unexpected thrown value: #{completed.inspect}"
       end
     end
+
+    def mono_now
+      ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+    end
     end
   end
 end
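For context, a hedged sketch of an Iterable job that the new `should_interrupt?` check applies to; the class and enumerator here are illustrative, not taken from the gem:

```ruby
class BackfillJob
  include Sidekiq::IterableJob

  def build_enumerator(cursor:)
    # Yield [item, cursor] pairs; Sidekiq persists the cursor, so a job interrupted
    # after exceeding :max_iteration_runtime resumes where it left off.
    start = cursor || 0
    (start..99_999).lazy.map { |i| [i, i + 1] }
  end

  def each_iteration(id)
    # process one record per iteration
  end
end
```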
data/lib/sidekiq/job.rb CHANGED
@@ -248,9 +248,9 @@ module Sidekiq
   end
   alias_method :perform_sync, :perform_inline
 
-  def perform_bulk(args, batch_size: 1_000)
+  def perform_bulk(args, **options)
     client = @klass.build_client
-    client.push_bulk(@opts.merge("class" => @klass, "args" => args, :batch_size => batch_size))
+    client.push_bulk(@opts.merge({"class" => @klass, "args" => args}, options))
   end
 
   # +interval+ must be a timestamp, numeric or something that acts
data/lib/sidekiq/job_logger.rb CHANGED
@@ -29,8 +29,10 @@ module Sidekiq
     jid: job_hash["jid"],
     class: job_hash["wrapped"] || job_hash["class"]
   }
-  h[:bid] = job_hash["bid"] if job_hash.has_key?("bid")
-  h[:tags] = job_hash["tags"] if job_hash.has_key?("tags")
+
+  @config[:logged_job_attributes].each do |attr|
+    h[attr.to_sym] = job_hash[attr] if job_hash.has_key?(attr)
+  end
 
   Thread.current[:sidekiq_context] = h
   level = job_hash["log_level"]
data/lib/sidekiq/job_retry.rb CHANGED
@@ -186,6 +186,8 @@ module Sidekiq
   strategy, delay = delay_for(jobinst, count, exception, msg)
   case strategy
   when :discard
+    msg["discarded_at"] = now_ms
+
     return run_death_handlers(msg, exception)
   when :kill
     return retries_exhausted(jobinst, msg, exception)
@@ -255,8 +257,14 @@ module Sidekiq
     handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
   end
 
-  to_morgue = !(msg["dead"] == false || rv == :discard)
-  send_to_morgue(msg) if to_morgue
+  discarded = msg["dead"] == false || rv == :discard
+
+  if discarded
+    msg["discarded_at"] = now_ms
+  else
+    send_to_morgue(msg)
+  end
+
   run_death_handlers(msg, exception)
 end
 
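A hedged example of a death handler using the new attribute; per the change above, `discarded_at` is set only when a job is discarded rather than moved to the Dead set:

```ruby
Sidekiq.configure_server do |config|
  config.death_handlers << ->(job, ex) do
    if job["discarded_at"]
      # The job was discarded (`dead: false` or a :discard strategy); it is not in the morgue.
      Sidekiq.logger.warn("discarded #{job["class"]} #{job["jid"]}: #{ex.message}")
    else
      # Retries were exhausted and the job was moved to the Dead set.
      Sidekiq.logger.warn("killed #{job["class"]} #{job["jid"]}: #{ex.message}")
    end
  end
end
```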
data/lib/sidekiq/launcher.rb CHANGED
@@ -142,6 +142,12 @@ module Sidekiq
   key = identity
   fails = procd = 0
 
+  idle = config[:reap_connections]
+  if idle
+    config.capsules.each_value { |cap| cap.local_redis_pool.reap(idle, &:close) }
+    config.local_redis_pool.reap(idle, &:close)
+  end
+
   begin
     flush_stats
 
@@ -252,8 +258,15 @@ module Sidekiq
   "pid" => ::Process.pid,
   "tag" => @config[:tag] || "",
   "concurrency" => @config.total_concurrency,
+  "capsules" => @config.capsules.each_with_object({}) { |(name, cap), memo|
+    memo[name] = cap.to_h
+  },
+  #####
+  # TODO deprecated, remove in 9.0
+  # This data is now found in the `capsules` element above
   "queues" => @config.capsules.values.flat_map { |cap| cap.queues }.uniq,
-  "weights" => to_weights,
+  "weights" => @config.capsules.values.map(&:weights),
+  #####
   "labels" => @config[:labels].to_a,
   "identity" => identity,
   "version" => Sidekiq::VERSION,
@@ -261,10 +274,6 @@ module Sidekiq
   }
 end
 
-def to_weights
-  @config.capsules.values.map(&:weights)
-end
-
 def to_json
   # this data changes infrequently so dump it to a string
   # now so we don't need to dump it every heartbeat.
data/lib/sidekiq/loader.rb CHANGED
@@ -0,0 +1,57 @@
+module Sidekiq
+  require "sidekiq/component"
+
+  class Loader
+    include Sidekiq::Component
+
+    def initialize(cfg = Sidekiq.default_configuration)
+      @config = cfg
+      @load_hooks = Hash.new { |h, k| h[k] = [] }
+      @loaded = Set.new
+      @lock = Mutex.new
+    end
+
+    # Declares a block that will be executed when a Sidekiq component is fully
+    # loaded. If the component has already loaded, the block is executed
+    # immediately.
+    #
+    #   Sidekiq.loader.on_load(:api) do
+    #     # extend the sidekiq API
+    #   end
+    #
+    def on_load(name, &block)
+      # we don't want to hold the lock while calling the block
+      to_run = nil
+
+      @lock.synchronize do
+        if @loaded.include?(name)
+          to_run = block
+        else
+          @load_hooks[name] << block
+        end
+      end
+
+      to_run&.call
+      nil
+    end
+
+    # Executes all blocks registered to +name+ via on_load.
+    #
+    #   Sidekiq.loader.run_load_hooks(:api)
+    #
+    # In the case of the above example, it will execute all hooks registered for +:api+.
+    #
+    def run_load_hooks(name)
+      hks = @lock.synchronize do
+        @loaded << name
+        @load_hooks.delete(name)
+      end
+
+      hks&.each do |blk|
+        blk.call
+      rescue => ex
+        handle_exception(ex, hook: name)
+      end
+    end
+  end
+end
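A short usage sketch for the new load hooks; registering against `:api` lines up with the `run_load_hooks(:api)` call added at the bottom of `sidekiq/api.rb` above (the extension method here is hypothetical):

```ruby
Sidekiq.loader.on_load(:api) do
  # Runs once sidekiq/api has loaded, or immediately if it already has.
  Sidekiq::Process.class_eval do
    def hostname_and_pid
      "#{self["hostname"]}:#{self["pid"]}"
    end
  end
end
```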
data/lib/sidekiq/middleware/i18n.rb CHANGED
@@ -11,6 +11,7 @@ module Sidekiq::Middleware::I18n
   # to be sent to Sidekiq.
   class Client
     include Sidekiq::ClientMiddleware
+
     def call(_jobclass, job, _queue, _redis)
       job["locale"] ||= I18n.locale
       yield
@@ -20,6 +21,7 @@ module Sidekiq::Middleware::I18n
   # Pull the msg locale out and set the current thread to use it.
   class Server
     include Sidekiq::ServerMiddleware
+
     def call(_jobclass, job, _queue, &block)
       I18n.with_locale(job.fetch("locale", I18n.default_locale), &block)
     end
data/lib/sidekiq/monitor.rb CHANGED
@@ -49,14 +49,10 @@ class Sidekiq::Monitor
   puts "---- Processes (#{process_set.size}) ----"
   process_set.each_with_index do |process, index|
     # Keep compatibility with legacy versions since we don't want to break sidekiqmon during rolling upgrades or downgrades.
-    #
-    # Before:
-    #   ["default", "critical"]
-    # After:
-    #   {"default" => 1, "critical" => 10}
     queues =
-      if process["weights"]
+      if process["capsules"] # 8.0.6+
+        process["capsules"].values.map { |x| x["weights"].keys.join(", ") }
+      elsif process["weights"]
         process["weights"].sort_by { |queue| queue[0] }.map { |capsule| capsule.map { |name, weight| (weight > 0) ? "#{name}: #{weight}" : name }.join(", ") }
       else
         process["queues"].sort
@@ -105,7 +101,7 @@ class Sidekiq::Monitor
     out << line
     line = " " * pad
   end
-  line << value + ", "
+  line << value + "; "
 end
 out << line[0..-3]
 out.join("\n")
data/lib/sidekiq/profiler.rb CHANGED
@@ -11,6 +11,7 @@ module Sidekiq
   }
 
   include Sidekiq::Component
+
   def initialize(config)
     @config = config
     @vernier_output_dir = ENV.fetch("VERNIER_OUTPUT_DIR") { Dir.tmpdir }
data/lib/sidekiq/rails.rb CHANGED
@@ -48,8 +48,10 @@ module Sidekiq
 unless ::Rails.logger == config.logger || ::ActiveSupport::Logger.logger_outputs_to?(::Rails.logger, $stdout)
   if ::Rails.logger.respond_to?(:broadcast_to)
     ::Rails.logger.broadcast_to(config.logger)
-  else
+  elsif ::ActiveSupport::Logger.respond_to?(:broadcast)
     ::Rails.logger.extend(::ActiveSupport::Logger.broadcast(config.logger))
+  else
+    ::Rails.logger = ::ActiveSupport::BroadcastLogger.new(::Rails.logger, config.logger)
   end
 end
 end
data/lib/sidekiq/ring_buffer.rb CHANGED
@@ -6,6 +6,7 @@ module Sidekiq
   class RingBuffer
     include Enumerable
     extend Forwardable
+
     def_delegators :@buf, :[], :each, :size
 
     def initialize(size, default = 0)
data/lib/sidekiq/version.rb CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 module Sidekiq
-  VERSION = "8.0.7"
+  VERSION = "8.0.9"
   MAJOR = 8
 
   def self.gem_version
data/lib/sidekiq/web/action.rb CHANGED
@@ -102,15 +102,15 @@ module Sidekiq
 def flash
   msg = yield
   logger.info msg
-  session[:flash] = msg
+  session[:skq_flash] = msg
 end
 
 def flash?
-  session&.[](:flash)
+  session&.[](:skq_flash)
 end
 
 def get_flash
-  @flash ||= session.delete(:flash)
+  @flash ||= session.delete(:skq_flash)
 end
 
 def erb(content, options = {})
data/lib/sidekiq/web/application.rb CHANGED
@@ -318,6 +318,16 @@ module Sidekiq
   redirect_with_query("#{root_path}scheduled")
 end
 
+post "/scheduled/all/delete" do
+  Sidekiq::ScheduledSet.new.clear
+  redirect "#{root_path}scheduled"
+end
+
+post "/scheduled/all/add_to_queue" do
+  Sidekiq::ScheduledSet.new.each(&:add_to_queue)
+  redirect "#{root_path}scheduled"
+end
+
 get "/dashboard/stats" do
   redirect "#{root_path}stats"
 end
data/lib/sidekiq/web/helpers.rb CHANGED
@@ -256,14 +256,6 @@ module Sidekiq
   end
 end
 
-def busy_weights(capsule_weights)
-  # backwards compat with 7.0.0, remove in 7.1
-  cw = [capsule_weights].flatten
-  cw.map { |hash|
-    hash.map { |name, weight| (weight > 0) ? +name << ": " << weight.to_s : name }.join(", ")
-  }.join("; ")
-end
-
 def stats
   @stats ||= Sidekiq::Stats.new
 end
data/lib/sidekiq.rb CHANGED
@@ -29,6 +29,7 @@ end
 
 require "sidekiq/config"
 require "sidekiq/logger"
+require "sidekiq/loader"
 require "sidekiq/client"
 require "sidekiq/transaction_aware_client"
 require "sidekiq/job"
@@ -94,6 +95,10 @@ module Sidekiq
   default_configuration.logger
 end
 
+def self.loader
+  @loader ||= Loader.new
+end
+
 def self.configure_server(&block)
   (@config_blocks ||= []) << block
   yield default_configuration if server?