sidekiq 7.1.4 → 8.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128)
  1. checksums.yaml +4 -4
  2. data/Changes.md +333 -0
  3. data/README.md +16 -13
  4. data/bin/multi_queue_bench +271 -0
  5. data/bin/sidekiqload +31 -22
  6. data/bin/webload +69 -0
  7. data/lib/active_job/queue_adapters/sidekiq_adapter.rb +121 -0
  8. data/lib/generators/sidekiq/job_generator.rb +2 -0
  9. data/lib/generators/sidekiq/templates/job.rb.erb +1 -1
  10. data/lib/sidekiq/api.rb +260 -67
  11. data/lib/sidekiq/capsule.rb +17 -8
  12. data/lib/sidekiq/cli.rb +19 -20
  13. data/lib/sidekiq/client.rb +48 -15
  14. data/lib/sidekiq/component.rb +64 -3
  15. data/lib/sidekiq/config.rb +60 -18
  16. data/lib/sidekiq/deploy.rb +4 -2
  17. data/lib/sidekiq/embedded.rb +4 -1
  18. data/lib/sidekiq/fetch.rb +2 -1
  19. data/lib/sidekiq/iterable_job.rb +56 -0
  20. data/lib/sidekiq/job/interrupt_handler.rb +24 -0
  21. data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
  22. data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
  23. data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
  24. data/lib/sidekiq/job/iterable.rb +322 -0
  25. data/lib/sidekiq/job.rb +16 -5
  26. data/lib/sidekiq/job_logger.rb +15 -12
  27. data/lib/sidekiq/job_retry.rb +41 -13
  28. data/lib/sidekiq/job_util.rb +7 -1
  29. data/lib/sidekiq/launcher.rb +23 -11
  30. data/lib/sidekiq/loader.rb +57 -0
  31. data/lib/sidekiq/logger.rb +25 -69
  32. data/lib/sidekiq/manager.rb +0 -1
  33. data/lib/sidekiq/metrics/query.rb +76 -45
  34. data/lib/sidekiq/metrics/shared.rb +23 -9
  35. data/lib/sidekiq/metrics/tracking.rb +32 -15
  36. data/lib/sidekiq/middleware/current_attributes.rb +39 -14
  37. data/lib/sidekiq/middleware/i18n.rb +2 -0
  38. data/lib/sidekiq/middleware/modules.rb +2 -0
  39. data/lib/sidekiq/monitor.rb +6 -9
  40. data/lib/sidekiq/paginator.rb +16 -3
  41. data/lib/sidekiq/processor.rb +37 -20
  42. data/lib/sidekiq/profiler.rb +73 -0
  43. data/lib/sidekiq/rails.rb +47 -57
  44. data/lib/sidekiq/redis_client_adapter.rb +25 -8
  45. data/lib/sidekiq/redis_connection.rb +49 -9
  46. data/lib/sidekiq/ring_buffer.rb +3 -0
  47. data/lib/sidekiq/scheduled.rb +2 -2
  48. data/lib/sidekiq/systemd.rb +2 -0
  49. data/lib/sidekiq/testing.rb +34 -15
  50. data/lib/sidekiq/transaction_aware_client.rb +20 -5
  51. data/lib/sidekiq/version.rb +6 -2
  52. data/lib/sidekiq/web/action.rb +149 -64
  53. data/lib/sidekiq/web/application.rb +367 -297
  54. data/lib/sidekiq/web/config.rb +120 -0
  55. data/lib/sidekiq/web/csrf_protection.rb +8 -5
  56. data/lib/sidekiq/web/helpers.rb +146 -64
  57. data/lib/sidekiq/web/router.rb +61 -74
  58. data/lib/sidekiq/web.rb +53 -106
  59. data/lib/sidekiq.rb +11 -4
  60. data/sidekiq.gemspec +6 -5
  61. data/web/assets/images/logo.png +0 -0
  62. data/web/assets/images/status.png +0 -0
  63. data/web/assets/javascripts/application.js +66 -24
  64. data/web/assets/javascripts/base-charts.js +30 -16
  65. data/web/assets/javascripts/chartjs-adapter-date-fns.min.js +7 -0
  66. data/web/assets/javascripts/dashboard-charts.js +37 -11
  67. data/web/assets/javascripts/dashboard.js +15 -11
  68. data/web/assets/javascripts/metrics.js +50 -34
  69. data/web/assets/stylesheets/style.css +776 -0
  70. data/web/locales/ar.yml +2 -0
  71. data/web/locales/cs.yml +2 -0
  72. data/web/locales/da.yml +2 -0
  73. data/web/locales/de.yml +2 -0
  74. data/web/locales/el.yml +2 -0
  75. data/web/locales/en.yml +12 -1
  76. data/web/locales/es.yml +25 -2
  77. data/web/locales/fa.yml +2 -0
  78. data/web/locales/fr.yml +2 -1
  79. data/web/locales/gd.yml +2 -1
  80. data/web/locales/he.yml +2 -0
  81. data/web/locales/hi.yml +2 -0
  82. data/web/locales/it.yml +41 -1
  83. data/web/locales/ja.yml +2 -1
  84. data/web/locales/ko.yml +2 -0
  85. data/web/locales/lt.yml +2 -0
  86. data/web/locales/nb.yml +2 -0
  87. data/web/locales/nl.yml +2 -0
  88. data/web/locales/pl.yml +2 -0
  89. data/web/locales/{pt-br.yml → pt-BR.yml} +4 -3
  90. data/web/locales/pt.yml +2 -0
  91. data/web/locales/ru.yml +2 -0
  92. data/web/locales/sv.yml +2 -0
  93. data/web/locales/ta.yml +2 -0
  94. data/web/locales/tr.yml +102 -0
  95. data/web/locales/uk.yml +29 -4
  96. data/web/locales/ur.yml +2 -0
  97. data/web/locales/vi.yml +2 -0
  98. data/web/locales/{zh-cn.yml → zh-CN.yml} +86 -74
  99. data/web/locales/{zh-tw.yml → zh-TW.yml} +3 -2
  100. data/web/views/_footer.erb +31 -22
  101. data/web/views/_job_info.erb +91 -89
  102. data/web/views/_metrics_period_select.erb +13 -10
  103. data/web/views/_nav.erb +14 -21
  104. data/web/views/_paging.erb +22 -21
  105. data/web/views/_poll_link.erb +2 -2
  106. data/web/views/_summary.erb +23 -23
  107. data/web/views/busy.erb +123 -125
  108. data/web/views/dashboard.erb +71 -82
  109. data/web/views/dead.erb +31 -27
  110. data/web/views/filtering.erb +6 -0
  111. data/web/views/layout.erb +13 -29
  112. data/web/views/metrics.erb +70 -68
  113. data/web/views/metrics_for_job.erb +30 -40
  114. data/web/views/morgue.erb +65 -70
  115. data/web/views/profiles.erb +43 -0
  116. data/web/views/queue.erb +54 -52
  117. data/web/views/queues.erb +43 -37
  118. data/web/views/retries.erb +70 -75
  119. data/web/views/retry.erb +32 -27
  120. data/web/views/scheduled.erb +63 -55
  121. data/web/views/scheduled_job_info.erb +3 -3
  122. metadata +49 -27
  123. data/web/assets/stylesheets/application-dark.css +0 -147
  124. data/web/assets/stylesheets/application-rtl.css +0 -153
  125. data/web/assets/stylesheets/application.css +0 -724
  126. data/web/assets/stylesheets/bootstrap-rtl.min.css +0 -9
  127. data/web/assets/stylesheets/bootstrap.css +0 -5
  128. data/web/views/_status.erb +0 -4
data/lib/sidekiq/client.rb CHANGED
@@ -58,6 +58,21 @@ module Sidekiq
       end
     end
 
+    # Cancel the IterableJob with the given JID.
+    # **NB: Cancellation is asynchronous.** Iteration checks every
+    # five seconds so this will not immediately stop the given job.
+    def cancel!(jid)
+      key = "it-#{jid}"
+      _, result, _ = Sidekiq.redis do |c|
+        c.pipelined do |p|
+          p.hsetnx(key, "cancelled", Time.now.to_i)
+          p.hget(key, "cancelled")
+          p.expire(key, Sidekiq::Job::Iterable::STATE_TTL, "nx")
+        end
+      end
+      result.to_i
+    end
+
     ##
     # The main method used to push a job to Redis. Accepts a number of options:
     #
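A usage sketch for the new `cancel!` API above. `ProcessUserSet` is a placeholder iterable job class; cancellation is asynchronous, so the running job only notices it on its periodic check:

    jid = ProcessUserSet.perform_async(123)   # start an iterable job
    Sidekiq::Client.new.cancel!(jid)          # flag it as cancelled in Redis; iteration stops within ~5 seconds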
@@ -74,7 +89,7 @@ module Sidekiq
     #
     # Any options valid for a job class's sidekiq_options are also available here.
     #
-    # All options must be strings, not symbols. NB: because we are serializing to JSON, all
+    # All keys must be strings, not symbols. NB: because we are serializing to JSON, all
     # symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of
     # space in Redis; a large volume of failing jobs can start Redis swapping if you aren't careful.
     #
@@ -102,6 +117,9 @@ module Sidekiq
     # larger than 1000 but YMMV based on network quality, size of job args, etc.
     # A large number of jobs can cause a bit of Redis command processing latency.
     #
+    # Accepts an additional `:spread_interval` option (in seconds) to randomly spread
+    # the jobs schedule times over the specified interval.
+    #
     # Takes the same arguments as #push except that args is expected to be
     # an Array of Arrays. All other keys are duplicated for each job. Each job
     # is run through the client middleware pipeline and each job gets its own Job ID
@@ -111,32 +129,45 @@ module Sidekiq
     # prevented a job push.
     #
     # Example (pushing jobs in batches):
-    #   push_bulk('class' => 'MyJob', 'args' => (1..100_000).to_a, batch_size: 1_000)
+    #   push_bulk('class' => MyJob, 'args' => (1..100_000).to_a, batch_size: 1_000)
     #
     def push_bulk(items)
       batch_size = items.delete(:batch_size) || items.delete("batch_size") || 1_000
       args = items["args"]
-      at = items.delete("at")
+      at = items.delete("at") || items.delete(:at)
       raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
       raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
 
       jid = items.delete("jid")
       raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1
 
+      spread_interval = items.delete(:spread_interval) || items.delete("spread_interval")
+      raise ArgumentError, "Jobs 'spread_interval' must be a positive Numeric" if spread_interval && (!spread_interval.is_a?(Numeric) || spread_interval <= 0)
+      raise ArgumentError, "Only one of 'at' or 'spread_interval' can be provided" if at && spread_interval
+
+      if !at && spread_interval
+        # Do not use spread interval smaller than pooling interval.
+        spread_interval = [spread_interval, 5].max
+        now = Time.now.to_f
+        at = args.map { now + rand * spread_interval }
+      end
+
       normed = normalize_item(items)
+      slice_index = 0
       result = args.each_slice(batch_size).flat_map do |slice|
         raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless slice.is_a?(Array) && slice.all?(Array)
         break [] if slice.empty? # no jobs to push
 
         payloads = slice.map.with_index { |job_args, index|
           copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
-          copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
+          copy["at"] = (at.is_a?(Array) ? at[slice_index + index] : at) if at
           result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
             verify_json(copy)
             copy
           end
           result || nil
         }
+        slice_index += batch_size
 
         to_push = payloads.compact
         raw_push(to_push) unless to_push.empty?
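A sketch of the new `spread_interval` option in use. `MyJob` stands in for any job class; per the code above, schedule times are randomized over the interval (clamped to a minimum of 5 seconds) and the option cannot be combined with `at`:

    # Enqueue 10,000 jobs whose run times are spread randomly over the next 10 minutes.
    Sidekiq::Client.push_bulk(
      "class" => MyJob,
      "args" => (1..10_000).map { |i| [i] },   # must be an Array of Arrays
      "spread_interval" => 600
    )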
@@ -246,20 +277,22 @@ module Sidekiq
     def atomic_push(conn, payloads)
       if payloads.first.key?("at")
         conn.zadd("schedule", payloads.flat_map { |hash|
-          at = hash.delete("at").to_s
-          # ActiveJob sets this but the job has not been enqueued yet
-          hash.delete("enqueued_at")
+          at = hash["at"].to_s
+          # ActiveJob sets enqueued_at but the job has not been enqueued yet
+          hash = hash.except("enqueued_at", "at")
           [at, Sidekiq.dump_json(hash)]
         })
       else
-        queue = payloads.first["queue"]
-        now = Time.now.to_f
-        to_push = payloads.map { |entry|
-          entry["enqueued_at"] = now
-          Sidekiq.dump_json(entry)
-        }
-        conn.sadd("queues", [queue])
-        conn.lpush("queue:#{queue}", to_push)
+        now = ::Process.clock_gettime(::Process::CLOCK_REALTIME, :millisecond) # milliseconds since the epoch
+        grouped_queues = payloads.group_by { |job| job["queue"] }
+        conn.sadd("queues", grouped_queues.keys)
+        grouped_queues.each do |queue, grouped_payloads|
+          to_push = grouped_payloads.map { |entry|
+            entry["enqueued_at"] = now
+            Sidekiq.dump_json(entry)
+          }
+          conn.lpush("queue:#{queue}", to_push)
+        end
       end
     end
   end
data/lib/sidekiq/component.rb CHANGED
@@ -1,11 +1,39 @@
 # frozen_string_literal: true
 
 module Sidekiq
+  # Ruby's default thread priority is 0, which uses 100ms time slices.
+  # This can lead to some surprising thread starvation; if using a lot of
+  # CPU-heavy concurrency, it may take several seconds before a Thread gets
+  # on the CPU.
+  #
+  # Negative priorities lower the timeslice by half, so -1 = 50ms, -2 = 25ms, etc.
+  # With more frequent timeslices, we reduce the risk of unintentional timeouts
+  # and starvation.
+  #
+  # Customize like so:
+  #
+  #   Sidekiq.configure_server do |cfg|
+  #     cfg.thread_priority = 0
+  #   end
+  #
+  DEFAULT_THREAD_PRIORITY = -1
+
   ##
-  # Sidekiq::Component assumes a config instance is available at @config
+  # Sidekiq::Component provides a set of utility methods depending only
+  # on Sidekiq::Config. It assumes a config instance is available at @config.
   module Component # :nodoc:
     attr_reader :config
 
+    # This is epoch milliseconds, appropriate for persistence
+    def real_ms
+      ::Process.clock_gettime(::Process::CLOCK_REALTIME, :millisecond)
+    end
+
+    # used for time difference and relative comparisons, not persistence.
+    def mono_ms
+      ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
+    end
+
     def watchdog(last_words)
       yield
     rescue Exception => ex
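The two clock helpers separate persisted timestamps (wall clock) from duration measurement (monotonic clock). A minimal sketch, assuming the caller mixes in Sidekiq::Component; `do_work` and `payload` are placeholders:

    start = mono_ms                    # monotonic: immune to wall-clock adjustments
    do_work
    elapsed = mono_ms - start          # elapsed milliseconds, safe for relative comparisons

    payload["enqueued_at"] = real_ms   # epoch milliseconds, appropriate for persistence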
@@ -13,11 +41,11 @@ module Sidekiq
       raise ex
     end
 
-    def safe_thread(name, &block)
+    def safe_thread(name, priority: nil, &block)
       Thread.new do
         Thread.current.name = "sidekiq.#{name}"
         watchdog(name, &block)
-      end
+      end.tap { |t| t.priority = (priority || config.thread_priority || DEFAULT_THREAD_PRIORITY) }
     end
 
     def logger
@@ -64,5 +92,38 @@ module Sidekiq
       end
       arr.clear if oneshot # once we've fired an event, we never fire it again
     end
+
+    # When you have a large tree of components, the `inspect` output
+    # can get out of hand, especially with lots of Sidekiq::Config
+    # references everywhere. We avoid calling `inspect` on more complex
+    # state and use `to_s` instead to keep output manageable, #6553
+    def inspect
+      "#<#{self.class.name} #{
+        instance_variables.map do |name|
+          value = instance_variable_get(name)
+          case value
+          when Proc
+            "#{name}=#{value}"
+          when Sidekiq::Config
+            "#{name}=#{value}"
+          when Sidekiq::Component
+            "#{name}=#{value}"
+          else
+            "#{name}=#{value.inspect}"
+          end
+        end.join(", ")
+      }>"
+    end
+
+    def default_tag(dir = Dir.pwd)
+      name = File.basename(dir)
+      prevdir = File.dirname(dir) # Capistrano release directory?
+      if name.to_i != 0 && prevdir
+        if File.basename(prevdir) == "releases"
+          return File.basename(File.dirname(prevdir))
+        end
+      end
+      name
+    end
   end
 end
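For reference, the Capistrano special case in `default_tag` above resolves like this (paths are illustrative only):

    default_tag("/srv/myapp/releases/20250101120000")  # => "myapp"  (numeric dir under "releases": use the app dir)
    default_tag("/srv/myapp")                          # => "myapp"  (otherwise: just the directory name)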
data/lib/sidekiq/config.rb CHANGED
@@ -1,6 +1,6 @@
-require "forwardable"
+# frozen_string_literal: true
 
-require "set"
+require "forwardable"
 require "sidekiq/redis_connection"
 
 module Sidekiq
@@ -17,12 +17,16 @@ module Sidekiq
       poll_interval_average: nil,
       average_scheduled_poll_interval: 5,
       on_complex_arguments: :raise,
+      # if the Iterable job runs longer than this value (in seconds), then the job
+      # will be interrupted after the current iteration and re-enqueued at the back of the queue
+      max_iteration_runtime: nil,
       error_handlers: [],
       death_handlers: [],
       lifecycle_events: {
         startup: [],
         quiet: [],
         shutdown: [],
+        exit: [],
         # triggers when we fire the first heartbeat on startup OR repairing a network partition
         heartbeat: [],
         # triggers on EVERY heartbeat call, every 10 seconds
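A sketch of setting the new `max_iteration_runtime` default via the hash-style access that Config delegates to @options; the value is in seconds, per the comment above:

    Sidekiq.configure_server do |config|
      # Interrupt an iterable job after ~2 minutes of iterating and
      # re-enqueue it at the back of its queue.
      config[:max_iteration_runtime] = 120
    end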
@@ -31,17 +35,27 @@ module Sidekiq
       dead_max_jobs: 10_000,
       dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
       reloader: proc { |&block| block.call },
-      backtrace_cleaner: ->(backtrace) { backtrace }
+      backtrace_cleaner: ->(backtrace) { backtrace },
+      logged_job_attributes: ["bid", "tags"]
     }
 
-    ERROR_HANDLER = ->(ex, ctx) {
-      cfg = ctx[:_config] || Sidekiq.default_configuration
-      l = cfg.logger
-      l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
-      l.warn("#{ex.class.name}: #{ex.message}")
-      unless ex.backtrace.nil?
-        backtrace = cfg[:backtrace_cleaner].call(ex.backtrace)
-        l.warn(backtrace.join("\n"))
+    ERROR_HANDLER = ->(ex, ctx, cfg = Sidekiq.default_configuration) {
+      Sidekiq::Context.with(ctx) do
+        dev = cfg[:environment] == "development"
+        fancy = dev && $stdout.tty? # 🎩
+        # Weird logic here but we want to show the backtrace in local
+        # development or if verbose logging is enabled.
+        #
+        # `full_message` contains the error class, message and backtrace
+        # `detailed_message` contains the error class and message
+        #
+        # Absolutely terrible API names. Not useful at all to have two
+        # methods with similar but obscure names.
+        if dev || cfg.logger.debug?
+          cfg.logger.info { ex.full_message(highlight: fancy) }
+        else
+          cfg.logger.info { ex.detailed_message(highlight: fancy) }
+        end
       end
     }
 
@@ -53,8 +67,15 @@ module Sidekiq
       @capsules = {}
     end
 
-    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
+    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!, :dig
     attr_reader :capsules
+    attr_accessor :thread_priority
+
+    def inspect
+      "#<#{self.class.name} @options=#{
+        @options.except(:lifecycle_events, :reloader, :death_handlers, :error_handlers).inspect
+      }>"
+    end
 
     def to_json(*)
       Sidekiq.dump_json(@options)
@@ -124,11 +145,16 @@ module Sidekiq
       @redis_config = @redis_config.merge(hash)
     end
 
+    def reap_idle_redis_connections(timeout = nil)
+      self[:reap_connections] = timeout
+    end
+    alias_method :reap, :reap_idle_redis_connections
+
     def redis_pool
       Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
     end
 
-    private def local_redis_pool
+    def local_redis_pool
       # this is our internal client/housekeeping pool. each capsule has its
       # own pool for executing threads.
       @redis ||= new_redis_pool(10, "internal")
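A sketch of the new idle-connection reaping knob; it simply stores the timeout under `:reap_connections`, and the unit is assumed to be seconds:

    Sidekiq.configure_server do |config|
      config.reap_idle_redis_connections(30)  # alias: config.reap(30)
    end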
@@ -180,7 +206,13 @@
 
     # register global singletons which can be accessed elsewhere
     def register(name, instance)
-      @directory[name] = instance
+      # logger.debug("register[#{name}] = #{instance}")
+      # Sidekiq Enterprise lazy registers a few services so we
+      # can't lock down this hash completely.
+      hash = @directory.dup
+      hash[name] = instance
+      @directory = hash.freeze
+      instance
     end
 
     # find a singleton
@@ -188,10 +220,16 @@
       # JNDI is just a fancy name for a hash lookup
       @directory.fetch(name) do |key|
         return nil unless default_class
-        @directory[key] = default_class.new(self)
+        register(key, default_class.new(self))
       end
     end
 
+    def freeze!
+      @directory.freeze
+      @options.freeze
+      true
+    end
+
     ##
     # Death handlers are called when all retries for a job have been exhausted and
     # the job dies. It's the notification to your application
@@ -226,7 +264,7 @@
     end
 
     # Register a block to run at a point in the Sidekiq lifecycle.
-    # :startup, :quiet or :shutdown are valid events.
+    # :startup, :quiet, :shutdown, or :exit are valid events.
     #
     #   Sidekiq.configure_server do |config|
     #     config.on(:shutdown) do
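A sketch registering a hook for the newly documented :exit event, mirroring the :shutdown example in the comment above; exactly when it fires relative to shutdown is an assumption based on the event name:

    Sidekiq.configure_server do |config|
      config.on(:exit) do
        # last-chance cleanup at the very end of the process lifecycle (assumed)
        flush_buffered_metrics   # placeholder for your own teardown
      end
    end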
@@ -259,14 +297,18 @@
       @logger = logger
     end
 
+    private def parameter_size(handler)
+      target = handler.is_a?(Proc) ? handler : handler.method(:call)
+      target.parameters.size
+    end
+
     # INTERNAL USE ONLY
     def handle_exception(ex, ctx = {})
       if @options[:error_handlers].size == 0
         p ["!!!!!", ex]
       end
-      ctx[:_config] = self
       @options[:error_handlers].each do |handler|
-        handler.call(ex, ctx)
+        handler.call(ex, ctx, self)
       rescue Exception => e
         l = logger
         l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
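Since `handle_exception` now passes the config as a third argument, custom error handlers can accept it directly. A sketch using the `:error_handlers` array from DEFAULTS:

    Sidekiq.configure_server do |config|
      config[:error_handlers] << ->(ex, ctx, cfg) {
        cfg.logger.warn("job raised #{ex.class}: #{ex.message} ctx=#{ctx.inspect}")
      }
    end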
data/lib/sidekiq/deploy.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "sidekiq/redis_connection"
 require "time"
 
@@ -34,7 +36,7 @@ module Sidekiq
       # handle an very common error in marking deploys:
       # having every process mark its deploy, leading
       # to N marks for each deploy. Instead we round the time
-      # to the minute so that multple marks within that minute
+      # to the minute so that multiple marks within that minute
       # will all naturally rollup into one mark per minute.
       whence = at.utc
       floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
@@ -44,7 +46,7 @@ module Sidekiq
 
       @pool.with do |c|
         # only allow one deploy mark for a given label for the next minute
-        lock = c.set("deploylock-#{label}", stamp, nx: true, ex: 60)
+        lock = c.set("deploylock-#{label}", stamp, "nx", "ex", "60")
         if lock
           c.multi do |pipe|
             pipe.hsetnx(key, stamp, label)
data/lib/sidekiq/embedded.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "sidekiq/component"
 require "sidekiq/launcher"
 require "sidekiq/metrics/tracking"
@@ -32,6 +34,7 @@ module Sidekiq
     private
 
     def housekeeping
+      @config[:tag] ||= default_tag
       logger.info "Running in #{RUBY_DESCRIPTION}"
       logger.info Sidekiq::LICENSE
       logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
@@ -40,7 +43,7 @@ module Sidekiq
       # fire startup and start multithreading.
       info = config.redis_info
       ver = Gem::Version.new(info["redis_version"])
-      raise "You are connecting to Redis #{ver}, Sidekiq requires Redis 6.2.0 or greater" if ver < Gem::Version.new("6.2.0")
+      raise "You are connected to Redis #{ver}, Sidekiq requires Redis 7.0.0 or greater" if ver < Gem::Version.new("7.0.0")
 
       maxmemory_policy = info["maxmemory_policy"]
       if maxmemory_policy != "noeviction"
data/lib/sidekiq/fetch.rb CHANGED
@@ -7,6 +7,7 @@ require "sidekiq/capsule"
 module Sidekiq # :nodoc:
   class BasicFetch
     include Sidekiq::Component
+
     # We want the fetch operation to timeout every few seconds so the thread
     # can check if the process is shutting down.
     TIMEOUT = 2
@@ -44,7 +45,7 @@ module Sidekiq # :nodoc:
         return nil
       end
 
-      queue, job = redis { |conn| conn.blocking_call(conn.read_timeout + TIMEOUT, "brpop", *qs, TIMEOUT) }
+      queue, job = redis { |conn| conn.blocking_call(TIMEOUT, "brpop", *qs, TIMEOUT) }
       UnitOfWork.new(queue, job, config) if queue
     end
 
data/lib/sidekiq/iterable_job.rb ADDED
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+require "sidekiq/job/iterable"
+
+# Iterable jobs are ones which provide a sequence to process using
+# `build_enumerator(*args, cursor: cursor)` and then process each
+# element of that sequence in `each_iteration(item, *args)`.
+#
+# The job is kicked off as normal:
+#
+#   ProcessUserSet.perform_async(123)
+#
+# but instead of calling `perform`, Sidekiq will call:
+#
+#   enum = ProcessUserSet#build_enumerator(123, cursor:nil)
+#
+# Your Enumerator must yield `(object, updated_cursor)` and
+# Sidekiq will call your `each_iteration` method:
+#
+#   ProcessUserSet#each_iteration(object, 123)
+#
+# After every iteration, Sidekiq will check for shutdown. If we are
+# stopping, the cursor will be saved to Redis and the job re-queued
+# to pick up the rest of the work upon restart. Your job will get
+# the updated_cursor so it can pick up right where it stopped.
+#
+#   enum = ProcessUserSet#build_enumerator(123, cursor: updated_cursor)
+#
+# The cursor object must be serializable to JSON.
+#
+# Note there are several APIs to help you build enumerators for
+# ActiveRecord Relations, CSV files, etc. See sidekiq/job/iterable/*.rb.
+module Sidekiq
+  module IterableJob
+    def self.included(base)
+      base.include Sidekiq::Job
+      base.include Sidekiq::Job::Iterable
+    end
+
+    # def build_enumerator(*args, cursor:)
+    # def each_iteration(item, *args)
+
+    # Your job can also define several callbacks during points
+    # in each job's lifecycle.
+    #
+    #   def on_start
+    #   def on_resume
+    #   def on_stop
+    #   def on_cancel
+    #   def on_complete
+    #   def around_iteration
+    #
+    # To keep things simple and compatible, this is the same
+    # API as the `sidekiq-iteration` gem.
+  end
+end
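A sketch of an iterable job built on the API documented above. `User` is a placeholder model, and `active_record_records_enumerator` is assumed to be the ActiveRecord helper exposed by the new enumerators module (the comment says this API mirrors the sidekiq-iteration gem):

    class ProcessUserSet
      include Sidekiq::IterableJob

      # Yields (record, cursor) pairs; Sidekiq persists the cursor so the
      # job can resume after a shutdown, interruption or restart.
      def build_enumerator(org_id, cursor:)
        active_record_records_enumerator(User.where(org_id: org_id), cursor: cursor)
      end

      def each_iteration(user, org_id)
        user.recalculate_stats!   # placeholder per-record work
      end

      def on_complete
        Sidekiq.logger.info "finished iterating"
      end
    end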
data/lib/sidekiq/job/interrupt_handler.rb ADDED
@@ -0,0 +1,24 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  module Job
+    class InterruptHandler
+      include Sidekiq::ServerMiddleware
+
+      def call(instance, hash, queue)
+        yield
+      rescue Interrupted
+        logger.debug "Interrupted, re-queueing..."
+        c = Sidekiq::Client.new
+        c.push(hash)
+        raise Sidekiq::JobRetry::Skip
+      end
+    end
+  end
+end
+
+Sidekiq.configure_server do |config|
+  config.server_middleware do |chain|
+    chain.add Sidekiq::Job::InterruptHandler
+  end
+end
data/lib/sidekiq/job/iterable/active_record_enumerator.rb ADDED
@@ -0,0 +1,53 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  module Job
+    module Iterable
+      # @api private
+      class ActiveRecordEnumerator
+        def initialize(relation, cursor: nil, **options)
+          @relation = relation
+          @cursor = cursor
+          @options = options
+        end
+
+        def records
+          Enumerator.new(-> { @relation.count }) do |yielder|
+            @relation.find_each(**@options, start: @cursor) do |record|
+              yielder.yield(record, record.id)
+            end
+          end
+        end
+
+        def batches
+          Enumerator.new(-> { @relation.count }) do |yielder|
+            @relation.find_in_batches(**@options, start: @cursor) do |batch|
+              yielder.yield(batch, batch.first.id)
+            end
+          end
+        end
+
+        def relations
+          Enumerator.new(-> { relations_size }) do |yielder|
+            # Convenience to use :batch_size for all the
+            # ActiveRecord batching methods.
+            options = @options.dup
+            options[:of] ||= options.delete(:batch_size)
+
+            @relation.in_batches(**options, start: @cursor) do |relation|
+              first_record = relation.first
+              yielder.yield(relation, first_record.id)
+            end
+          end
+        end
+
+        private
+
+        def relations_size
+          batch_size = @options[:batch_size] || 1000
+          (@relation.count + batch_size - 1) / batch_size # ceiling division
+        end
+      end
+    end
+  end
+end
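The class above is marked @api private; jobs normally reach it through the Iterable::Enumerators helpers, but a direct sketch shows the shape of what it yields (`User` is a placeholder model):

    enum = Sidekiq::Job::Iterable::ActiveRecordEnumerator
      .new(User.all, cursor: nil, batch_size: 500)
      .batches
    batch, cursor = enum.first   # => [first batch of up to 500 User records, id of that batch's first record]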
data/lib/sidekiq/job/iterable/csv_enumerator.rb ADDED
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  module Job
+    module Iterable
+      # @api private
+      class CsvEnumerator
+        def initialize(csv)
+          unless defined?(CSV) && csv.instance_of?(CSV)
+            raise ArgumentError, "CsvEnumerator.new takes CSV object"
+          end
+
+          @csv = csv
+        end
+
+        def rows(cursor:)
+          @csv.lazy
+            .each_with_index
+            .drop(cursor || 0)
+            .to_enum { count_of_rows_in_file }
+        end
+
+        def batches(cursor:, batch_size: 100)
+          @csv.lazy
+            .each_slice(batch_size)
+            .with_index
+            .drop(cursor || 0)
+            .to_enum { (count_of_rows_in_file.to_f / batch_size).ceil }
+        end
+
+        private
+
+        def count_of_rows_in_file
+          filepath = @csv.path
+          return unless filepath
+
+          count = IO.popen(["wc", "-l", filepath]) do |out|
+            out.read.strip.to_i
+          end
+
+          count -= 1 if @csv.headers
+          count
+        end
+      end
+    end
+  end
+end
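A sketch wiring CsvEnumerator into an iterable job's build_enumerator. The file path, headers option and per-row work are illustrative, and the exact shape handed to each_iteration (the row, with its index serving as the cursor) is an assumption based on the each_with_index chain above:

    require "csv"

    class ImportRowsJob
      include Sidekiq::IterableJob

      def build_enumerator(filename, cursor:)
        csv = CSV.open(filename, headers: true)
        Sidekiq::Job::Iterable::CsvEnumerator.new(csv).rows(cursor: cursor)
      end

      def each_iteration(row, filename)
        # row should be a CSV::Row (headers: true); the row index is used as
        # the cursor so a restarted job resumes mid-file
        Import.upsert_from_row(row)   # placeholder
      end
    end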