sidekiq 6.4.0 → 7.1.2

Sign up to get free protection for your applications and access to all the features.

Potentially problematic release.


This version of sidekiq might be problematic. Click here for more details.

Files changed (114) hide show
  1. checksums.yaml +4 -4
  2. data/Changes.md +232 -12
  3. data/README.md +44 -31
  4. data/bin/sidekiq +4 -9
  5. data/bin/sidekiqload +207 -117
  6. data/bin/sidekiqmon +4 -1
  7. data/lib/sidekiq/api.rb +329 -188
  8. data/lib/sidekiq/capsule.rb +127 -0
  9. data/lib/sidekiq/cli.rb +85 -81
  10. data/lib/sidekiq/client.rb +98 -58
  11. data/lib/sidekiq/component.rb +68 -0
  12. data/lib/sidekiq/config.rb +278 -0
  13. data/lib/sidekiq/deploy.rb +62 -0
  14. data/lib/sidekiq/embedded.rb +61 -0
  15. data/lib/sidekiq/fetch.rb +23 -24
  16. data/lib/sidekiq/job.rb +371 -10
  17. data/lib/sidekiq/job_logger.rb +16 -28
  18. data/lib/sidekiq/job_retry.rb +80 -56
  19. data/lib/sidekiq/job_util.rb +60 -20
  20. data/lib/sidekiq/launcher.rb +103 -95
  21. data/lib/sidekiq/logger.rb +9 -44
  22. data/lib/sidekiq/manager.rb +33 -32
  23. data/lib/sidekiq/metrics/query.rb +153 -0
  24. data/lib/sidekiq/metrics/shared.rb +95 -0
  25. data/lib/sidekiq/metrics/tracking.rb +136 -0
  26. data/lib/sidekiq/middleware/chain.rb +96 -51
  27. data/lib/sidekiq/middleware/current_attributes.rb +58 -20
  28. data/lib/sidekiq/middleware/i18n.rb +6 -4
  29. data/lib/sidekiq/middleware/modules.rb +21 -0
  30. data/lib/sidekiq/monitor.rb +17 -4
  31. data/lib/sidekiq/paginator.rb +17 -9
  32. data/lib/sidekiq/processor.rb +60 -60
  33. data/lib/sidekiq/rails.rb +22 -10
  34. data/lib/sidekiq/redis_client_adapter.rb +96 -0
  35. data/lib/sidekiq/redis_connection.rb +13 -82
  36. data/lib/sidekiq/ring_buffer.rb +29 -0
  37. data/lib/sidekiq/scheduled.rb +66 -38
  38. data/lib/sidekiq/testing/inline.rb +4 -4
  39. data/lib/sidekiq/testing.rb +41 -68
  40. data/lib/sidekiq/transaction_aware_client.rb +44 -0
  41. data/lib/sidekiq/version.rb +2 -1
  42. data/lib/sidekiq/web/action.rb +3 -3
  43. data/lib/sidekiq/web/application.rb +40 -9
  44. data/lib/sidekiq/web/csrf_protection.rb +3 -3
  45. data/lib/sidekiq/web/helpers.rb +35 -21
  46. data/lib/sidekiq/web.rb +10 -17
  47. data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
  48. data/lib/sidekiq.rb +84 -206
  49. data/sidekiq.gemspec +12 -10
  50. data/web/assets/javascripts/application.js +76 -26
  51. data/web/assets/javascripts/base-charts.js +106 -0
  52. data/web/assets/javascripts/chart.min.js +13 -0
  53. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  54. data/web/assets/javascripts/dashboard-charts.js +166 -0
  55. data/web/assets/javascripts/dashboard.js +3 -240
  56. data/web/assets/javascripts/metrics.js +264 -0
  57. data/web/assets/stylesheets/application-dark.css +4 -0
  58. data/web/assets/stylesheets/application-rtl.css +2 -91
  59. data/web/assets/stylesheets/application.css +66 -297
  60. data/web/locales/ar.yml +70 -70
  61. data/web/locales/cs.yml +62 -62
  62. data/web/locales/da.yml +60 -53
  63. data/web/locales/de.yml +65 -65
  64. data/web/locales/el.yml +43 -24
  65. data/web/locales/en.yml +82 -69
  66. data/web/locales/es.yml +68 -68
  67. data/web/locales/fa.yml +65 -65
  68. data/web/locales/fr.yml +81 -67
  69. data/web/locales/gd.yml +99 -0
  70. data/web/locales/he.yml +65 -64
  71. data/web/locales/hi.yml +59 -59
  72. data/web/locales/it.yml +53 -53
  73. data/web/locales/ja.yml +73 -68
  74. data/web/locales/ko.yml +52 -52
  75. data/web/locales/lt.yml +66 -66
  76. data/web/locales/nb.yml +61 -61
  77. data/web/locales/nl.yml +52 -52
  78. data/web/locales/pl.yml +45 -45
  79. data/web/locales/pt-br.yml +63 -55
  80. data/web/locales/pt.yml +51 -51
  81. data/web/locales/ru.yml +67 -66
  82. data/web/locales/sv.yml +53 -53
  83. data/web/locales/ta.yml +60 -60
  84. data/web/locales/uk.yml +62 -61
  85. data/web/locales/ur.yml +64 -64
  86. data/web/locales/vi.yml +67 -67
  87. data/web/locales/zh-cn.yml +43 -16
  88. data/web/locales/zh-tw.yml +42 -8
  89. data/web/views/_footer.erb +5 -2
  90. data/web/views/_job_info.erb +18 -2
  91. data/web/views/_metrics_period_select.erb +12 -0
  92. data/web/views/_nav.erb +1 -1
  93. data/web/views/_paging.erb +2 -0
  94. data/web/views/_poll_link.erb +1 -1
  95. data/web/views/_summary.erb +1 -1
  96. data/web/views/busy.erb +44 -28
  97. data/web/views/dashboard.erb +36 -4
  98. data/web/views/metrics.erb +82 -0
  99. data/web/views/metrics_for_job.erb +68 -0
  100. data/web/views/morgue.erb +5 -9
  101. data/web/views/queue.erb +15 -15
  102. data/web/views/queues.erb +3 -1
  103. data/web/views/retries.erb +5 -9
  104. data/web/views/scheduled.erb +12 -13
  105. metadata +56 -27
  106. data/lib/sidekiq/delay.rb +0 -43
  107. data/lib/sidekiq/exception_handler.rb +0 -27
  108. data/lib/sidekiq/extensions/action_mailer.rb +0 -48
  109. data/lib/sidekiq/extensions/active_record.rb +0 -43
  110. data/lib/sidekiq/extensions/class_methods.rb +0 -43
  111. data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
  112. data/lib/sidekiq/util.rb +0 -108
  113. data/lib/sidekiq/worker.rb +0 -364
  114. /data/{LICENSE → LICENSE.txt} +0 -0
@@ -15,13 +15,12 @@ module Sidekiq
15
15
  # client.middleware do |chain|
16
16
  # chain.use MyClientMiddleware
17
17
  # end
18
- # client.push('class' => 'SomeWorker', 'args' => [1,2,3])
18
+ # client.push('class' => 'SomeJob', 'args' => [1,2,3])
19
19
  #
20
20
  # All client instances default to the globally-defined
21
21
  # Sidekiq.client_middleware but you can change as necessary.
22
22
  #
23
23
  def middleware(&block)
24
- @chain ||= Sidekiq.client_middleware
25
24
  if block
26
25
  @chain = @chain.dup
27
26
  yield @chain
@@ -31,34 +30,48 @@ module Sidekiq
31
30
 
32
31
  attr_accessor :redis_pool
33
32
 
34
- # Sidekiq::Client normally uses the default Redis pool but you may
35
- # pass a custom ConnectionPool if you want to shard your
36
- # Sidekiq jobs across several Redis instances (for scalability
37
- # reasons, e.g.)
33
+ # Sidekiq::Client is responsible for pushing job payloads to Redis.
34
+ # Requires the :pool or :config keyword argument.
38
35
  #
39
- # Sidekiq::Client.new(ConnectionPool.new { Redis.new })
36
+ # Sidekiq::Client.new(pool: Sidekiq::RedisConnection.create)
40
37
  #
41
- # Generally this is only needed for very large Sidekiq installs processing
42
- # thousands of jobs per second. I don't recommend sharding unless you
43
- # cannot scale any other way (e.g. splitting your app into smaller apps).
44
- def initialize(redis_pool = nil)
45
- @redis_pool = redis_pool || Thread.current[:sidekiq_via_pool] || Sidekiq.redis_pool
38
+ # Inside the Sidekiq process, you can reuse the configured resources:
39
+ #
40
+ # Sidekiq::Client.new(config: config)
41
+ #
42
+ # @param pool [ConnectionPool] explicit Redis pool to use
43
+ # @param config [Sidekiq::Config] use the pool and middleware from the given Sidekiq container
44
+ # @param chain [Sidekiq::Middleware::Chain] use the given middleware chain
45
+ def initialize(*args, **kwargs)
46
+ if args.size == 1 && kwargs.size == 0
47
+ warn "Sidekiq::Client.new(pool) is deprecated, please use Sidekiq::Client.new(pool: pool), #{caller(0..3)}"
48
+ # old calling method, accept 1 pool argument
49
+ @redis_pool = args[0]
50
+ @chain = Sidekiq.default_configuration.client_middleware
51
+ @config = Sidekiq.default_configuration
52
+ else
53
+ # new calling method: keyword arguments
54
+ @config = kwargs[:config] || Sidekiq.default_configuration
55
+ @redis_pool = kwargs[:pool] || Thread.current[:sidekiq_redis_pool] || @config&.redis_pool
56
+ @chain = kwargs[:chain] || @config&.client_middleware
57
+ raise ArgumentError, "No Redis pool available for Sidekiq::Client" unless @redis_pool
58
+ end
46
59
  end
47
60
 
48
61
  ##
49
62
  # The main method used to push a job to Redis. Accepts a number of options:
50
63
  #
51
64
  # queue - the named queue to use, default 'default'
52
- # class - the worker class to call, required
65
+ # class - the job class to call, required
53
66
  # args - an array of simple arguments to the perform method, must be JSON-serializable
54
67
  # at - timestamp to schedule the job (optional), must be Numeric (e.g. Time.now.to_f)
55
68
  # retry - whether to retry this job if it fails, default true or an integer number of retries
56
69
  # backtrace - whether to save any error backtrace, default false
57
70
  #
58
71
  # If class is set to the class name, the jobs' options will be based on Sidekiq's default
59
- # worker options. Otherwise, they will be based on the job class's options.
72
+ # job options. Otherwise, they will be based on the job class's options.
60
73
  #
61
- # Any options valid for a worker class's sidekiq_options are also available here.
74
+ # Any options valid for a job class's sidekiq_options are also available here.
62
75
  #
63
76
  # All options must be strings, not symbols. NB: because we are serializing to JSON, all
64
77
  # symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of
@@ -67,13 +80,15 @@ module Sidekiq
67
80
  # Returns a unique Job ID. If middleware stops the job, nil will be returned instead.
68
81
  #
69
82
  # Example:
70
- # push('queue' => 'my_queue', 'class' => MyWorker, 'args' => ['foo', 1, :bat => 'bar'])
83
+ # push('queue' => 'my_queue', 'class' => MyJob, 'args' => ['foo', 1, :bat => 'bar'])
71
84
  #
72
85
  def push(item)
73
86
  normed = normalize_item(item)
74
- payload = process_single(item["class"], normed)
75
-
87
+ payload = middleware.invoke(item["class"], normed, normed["queue"], @redis_pool) do
88
+ normed
89
+ end
76
90
  if payload
91
+ verify_json(payload)
77
92
  raw_push([payload])
78
93
  payload["jid"]
79
94
  end
@@ -81,8 +96,9 @@ module Sidekiq
81
96
 
82
97
  ##
83
98
  # Push a large number of jobs to Redis. This method cuts out the redis
84
- # network round trip latency. I wouldn't recommend pushing more than
85
- # 1000 per call but YMMV based on network quality, size of job args, etc.
99
+ # network round trip latency. It pushes jobs in batches if more than
100
+ # `:batch_size` (1000 by default) of jobs are passed. I wouldn't recommend making `:batch_size`
101
+ # larger than 1000 but YMMV based on network quality, size of job args, etc.
86
102
  # A large number of jobs can cause a bit of Redis command processing latency.
87
103
  #
88
104
  # Takes the same arguments as #push except that args is expected to be
@@ -90,28 +106,43 @@ module Sidekiq
90
106
  # is run through the client middleware pipeline and each job gets its own Job ID
91
107
  # as normal.
92
108
  #
93
- # Returns an array of the of pushed jobs' jids. The number of jobs pushed can be less
94
- # than the number given if the middleware stopped processing for one or more jobs.
109
 + # Returns an array of the pushed jobs' jids, may contain nils if any client middleware
110
+ # prevented a job push.
111
+ #
112
+ # Example (pushing jobs in batches):
113
+ # push_bulk('class' => 'MyJob', 'args' => (1..100_000).to_a, batch_size: 1_000)
114
+ #
95
115
  def push_bulk(items)
116
+ batch_size = items.delete(:batch_size) || items.delete("batch_size") || 1_000
96
117
  args = items["args"]
97
- raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless args.is_a?(Array) && args.all?(Array)
98
- return [] if args.empty? # no jobs to push
99
-
100
118
  at = items.delete("at")
101
119
  raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
102
120
  raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
103
121
 
122
+ jid = items.delete("jid")
123
+ raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1
124
+
104
125
  normed = normalize_item(items)
105
- payloads = args.map.with_index { |job_args, index|
106
- copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
107
- copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
126
+ result = args.each_slice(batch_size).flat_map do |slice|
127
+ raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless slice.is_a?(Array) && slice.all?(Array)
128
+ break [] if slice.empty? # no jobs to push
108
129
 
109
- result = process_single(items["class"], copy)
110
- result || nil
111
- }.compact
130
+ payloads = slice.map.with_index { |job_args, index|
131
+ copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
132
+ copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
133
+ result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
134
+ verify_json(copy)
135
+ copy
136
+ end
137
+ result || nil
138
+ }
139
+
140
+ to_push = payloads.compact
141
+ raw_push(to_push) unless to_push.empty?
142
+ payloads.map { |payload| payload&.[]("jid") }
143
+ end
112
144
 
113
- raw_push(payloads) unless payloads.empty?
114
- payloads.collect { |payload| payload["jid"] }
145
+ result.is_a?(Enumerator::Lazy) ? result.force : result
115
146
  end
116
147
 
117
148
  # Allows sharding of jobs across any number of Redis instances. All jobs
@@ -119,8 +150,8 @@ module Sidekiq
119
150
  #
120
151
  # pool = ConnectionPool.new { Redis.new }
121
152
  # Sidekiq::Client.via(pool) do
122
- # SomeWorker.perform_async(1,2,3)
123
- # SomeOtherWorker.perform_async(1,2,3)
153
+ # SomeJob.perform_async(1,2,3)
154
+ # SomeOtherJob.perform_async(1,2,3)
124
155
  # end
125
156
  #
126
157
  # Generally this is only needed for very large Sidekiq installs processing
@@ -128,11 +159,11 @@ module Sidekiq
128
159
  # you cannot scale any other way (e.g. splitting your app into smaller apps).
129
160
  def self.via(pool)
130
161
  raise ArgumentError, "No pool given" if pool.nil?
131
- current_sidekiq_pool = Thread.current[:sidekiq_via_pool]
132
- Thread.current[:sidekiq_via_pool] = pool
162
+ current_sidekiq_pool = Thread.current[:sidekiq_redis_pool]
163
+ Thread.current[:sidekiq_redis_pool] = pool
133
164
  yield
134
165
  ensure
135
- Thread.current[:sidekiq_via_pool] = current_sidekiq_pool
166
+ Thread.current[:sidekiq_redis_pool] = current_sidekiq_pool
136
167
  end
137
168
 
138
169
  class << self
@@ -140,15 +171,15 @@ module Sidekiq
140
171
  new.push(item)
141
172
  end
142
173
 
143
- def push_bulk(items)
144
- new.push_bulk(items)
174
+ def push_bulk(...)
175
+ new.push_bulk(...)
145
176
  end
146
177
 
147
178
  # Resque compatibility helpers. Note all helpers
148
- # should go through Worker#client_push.
179
+ # should go through Sidekiq::Job#client_push.
149
180
  #
150
181
  # Example usage:
151
- # Sidekiq::Client.enqueue(MyWorker, 'foo', 1, :bat => 'bar')
182
+ # Sidekiq::Client.enqueue(MyJob, 'foo', 1, :bat => 'bar')
152
183
  #
153
184
  # Messages are enqueued to the 'default' queue.
154
185
  #
@@ -157,19 +188,19 @@ module Sidekiq
157
188
  end
158
189
 
159
190
  # Example usage:
160
- # Sidekiq::Client.enqueue_to(:queue_name, MyWorker, 'foo', 1, :bat => 'bar')
191
+ # Sidekiq::Client.enqueue_to(:queue_name, MyJob, 'foo', 1, :bat => 'bar')
161
192
  #
162
193
  def enqueue_to(queue, klass, *args)
163
194
  klass.client_push("queue" => queue, "class" => klass, "args" => args)
164
195
  end
165
196
 
166
197
  # Example usage:
167
- # Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
198
+ # Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyJob, 'foo', 1, :bat => 'bar')
168
199
  #
169
200
  def enqueue_to_in(queue, interval, klass, *args)
170
201
  int = interval.to_f
171
202
  now = Time.now.to_f
172
- ts = (int < 1_000_000_000 ? now + int : int)
203
+ ts = ((int < 1_000_000_000) ? now + int : int)
173
204
 
174
205
  item = {"class" => klass, "args" => args, "at" => ts, "queue" => queue}
175
206
  item.delete("at") if ts <= now
@@ -178,7 +209,7 @@ module Sidekiq
178
209
  end
179
210
 
180
211
  # Example usage:
181
- # Sidekiq::Client.enqueue_in(3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
212
+ # Sidekiq::Client.enqueue_in(3.minutes, MyJob, 'foo', 1, :bat => 'bar')
182
213
  #
183
214
  def enqueue_in(interval, klass, *args)
184
215
  klass.perform_in(interval, *args)
@@ -189,8 +220,23 @@ module Sidekiq
189
220
 
190
221
  def raw_push(payloads)
191
222
  @redis_pool.with do |conn|
192
- conn.pipelined do
193
- atomic_push(conn, payloads)
223
+ retryable = true
224
+ begin
225
+ conn.pipelined do |pipeline|
226
+ atomic_push(pipeline, payloads)
227
+ end
228
+ rescue RedisClient::Error => ex
229
+ # 2550 Failover can cause the server to become a replica, need
230
+ # to disconnect and reopen the socket to get back to the primary.
231
+ # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
232
+ # 4985 Use the same logic when a blocking command is force-unblocked
233
+ # The retry logic is copied from sidekiq.rb
234
+ if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
235
+ conn.close
236
+ retryable = false
237
+ retry
238
+ end
239
+ raise
194
240
  end
195
241
  end
196
242
  true
@@ -198,8 +244,10 @@ module Sidekiq
198
244
 
199
245
  def atomic_push(conn, payloads)
200
246
  if payloads.first.key?("at")
201
- conn.zadd("schedule", payloads.map { |hash|
247
+ conn.zadd("schedule", payloads.flat_map { |hash|
202
248
  at = hash.delete("at").to_s
249
+ # ActiveJob sets this but the job has not been enqueued yet
250
+ hash.delete("enqueued_at")
203
251
  [at, Sidekiq.dump_json(hash)]
204
252
  })
205
253
  else
@@ -209,17 +257,9 @@ module Sidekiq
209
257
  entry["enqueued_at"] = now
210
258
  Sidekiq.dump_json(entry)
211
259
  }
212
- conn.sadd("queues", queue)
260
+ conn.sadd("queues", [queue])
213
261
  conn.lpush("queue:#{queue}", to_push)
214
262
  end
215
263
  end
216
-
217
- def process_single(worker_class, item)
218
- queue = item["queue"]
219
-
220
- middleware.invoke(worker_class, item, queue, @redis_pool) do
221
- item
222
- end
223
- end
224
264
  end
225
265
  end
@@ -0,0 +1,68 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Sidekiq
4
+ ##
5
+ # Sidekiq::Component assumes a config instance is available at @config
6
+ module Component # :nodoc:
7
+ attr_reader :config
8
+
9
+ def watchdog(last_words)
10
+ yield
11
+ rescue Exception => ex
12
+ handle_exception(ex, {context: last_words})
13
+ raise ex
14
+ end
15
+
16
+ def safe_thread(name, &block)
17
+ Thread.new do
18
+ Thread.current.name = "sidekiq.#{name}"
19
+ watchdog(name, &block)
20
+ end
21
+ end
22
+
23
+ def logger
24
+ config.logger
25
+ end
26
+
27
+ def redis(&block)
28
+ config.redis(&block)
29
+ end
30
+
31
+ def tid
32
+ Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
33
+ end
34
+
35
+ def hostname
36
+ ENV["DYNO"] || Socket.gethostname
37
+ end
38
+
39
+ def process_nonce
40
+ @@process_nonce ||= SecureRandom.hex(6)
41
+ end
42
+
43
+ def identity
44
+ @@identity ||= "#{hostname}:#{::Process.pid}:#{process_nonce}"
45
+ end
46
+
47
+ def handle_exception(ex, ctx = {})
48
+ config.handle_exception(ex, ctx)
49
+ end
50
+
51
+ def fire_event(event, options = {})
52
+ oneshot = options.fetch(:oneshot, true)
53
+ reverse = options[:reverse]
54
+ reraise = options[:reraise]
55
+ logger.debug("Firing #{event} event") if oneshot
56
+
57
+ arr = config[:lifecycle_events][event]
58
+ arr.reverse! if reverse
59
+ arr.each do |block|
60
+ block.call
61
+ rescue => ex
62
+ handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event})
63
+ raise ex if reraise
64
+ end
65
+ arr.clear if oneshot # once we've fired an event, we never fire it again
66
+ end
67
+ end
68
+ end
@@ -0,0 +1,278 @@
1
+ require "forwardable"
2
+
3
+ require "set"
4
+ require "sidekiq/redis_connection"
5
+
6
+ module Sidekiq
7
+ # Sidekiq::Config represents the global configuration for an instance of Sidekiq.
8
+ class Config
9
+ extend Forwardable
10
+
11
+ DEFAULTS = {
12
+ labels: Set.new,
13
+ require: ".",
14
+ environment: nil,
15
+ concurrency: 5,
16
+ timeout: 25,
17
+ poll_interval_average: nil,
18
+ average_scheduled_poll_interval: 5,
19
+ on_complex_arguments: :raise,
20
+ error_handlers: [],
21
+ death_handlers: [],
22
+ lifecycle_events: {
23
+ startup: [],
24
+ quiet: [],
25
+ shutdown: [],
26
+ # triggers when we fire the first heartbeat on startup OR repairing a network partition
27
+ heartbeat: [],
28
+ # triggers on EVERY heartbeat call, every 10 seconds
29
+ beat: []
30
+ },
31
+ dead_max_jobs: 10_000,
32
+ dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
33
+ reloader: proc { |&block| block.call },
34
+ backtrace_cleaner: ->(backtrace) { backtrace }
35
+ }
36
+
37
+ ERROR_HANDLER = ->(ex, ctx) {
38
+ cfg = ctx[:_config] || Sidekiq.default_configuration
39
+ l = cfg.logger
40
+ l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
41
+ l.warn("#{ex.class.name}: #{ex.message}")
42
+ unless ex.backtrace.nil?
43
+ backtrace = cfg[:backtrace_cleaner].call(ex.backtrace)
44
+ l.warn(backtrace.join("\n"))
45
+ end
46
+ }
47
+
48
+ def initialize(options = {})
49
+ @options = DEFAULTS.merge(options)
50
+ @options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
51
+ @directory = {}
52
+ @redis_config = {}
53
+ @capsules = {}
54
+ end
55
+
56
+ def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
57
+ attr_reader :capsules
58
+
59
+ def to_json(*)
60
+ Sidekiq.dump_json(@options)
61
+ end
62
+
63
+ # LEGACY: edits the default capsule
64
+ # config.concurrency = 5
65
+ def concurrency=(val)
66
+ default_capsule.concurrency = Integer(val)
67
+ end
68
+
69
+ def concurrency
70
+ default_capsule.concurrency
71
+ end
72
+
73
+ def total_concurrency
74
+ capsules.each_value.sum(&:concurrency)
75
+ end
76
+
77
+ # Edit the default capsule.
78
+ # config.queues = %w( high default low ) # strict
79
+ # config.queues = %w( high,3 default,2 low,1 ) # weighted
80
+ # config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
81
+ #
82
+ # With weighted priority, queue will be checked first (weight / total) of the time.
83
+ # high will be checked first (3/6) or 50% of the time.
84
+ # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
85
+ # are ridiculous and unnecessarily expensive. You can get random queue ordering
86
+ # by explicitly setting all weights to 1.
87
+ def queues=(val)
88
+ default_capsule.queues = val
89
+ end
90
+
91
+ def queues
92
+ default_capsule.queues
93
+ end
94
+
95
+ def client_middleware
96
+ @client_chain ||= Sidekiq::Middleware::Chain.new(self)
97
+ yield @client_chain if block_given?
98
+ @client_chain
99
+ end
100
+
101
+ def server_middleware
102
+ @server_chain ||= Sidekiq::Middleware::Chain.new(self)
103
+ yield @server_chain if block_given?
104
+ @server_chain
105
+ end
106
+
107
+ def default_capsule(&block)
108
+ capsule("default", &block)
109
+ end
110
+
111
+ # register a new queue processing subsystem
112
+ def capsule(name)
113
+ nm = name.to_s
114
+ cap = @capsules.fetch(nm) do
115
+ cap = Sidekiq::Capsule.new(nm, self)
116
+ @capsules[nm] = cap
117
+ end
118
+ yield cap if block_given?
119
+ cap
120
+ end
121
+
122
+ # All capsules must use the same Redis configuration
123
+ def redis=(hash)
124
+ @redis_config = @redis_config.merge(hash)
125
+ end
126
+
127
+ def redis_pool
128
+ Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
129
+ end
130
+
131
+ private def local_redis_pool
132
+ # this is our internal client/housekeeping pool. each capsule has its
133
+ # own pool for executing threads.
134
+ @redis ||= new_redis_pool(10, "internal")
135
+ end
136
+
137
+ def new_redis_pool(size, name = "unset")
138
+ # connection pool is lazy, it will not create connections unless you actually need them
139
+ # so don't be skimpy!
140
+ RedisConnection.create({size: size, logger: logger, pool_name: name}.merge(@redis_config))
141
+ end
142
+
143
+ def redis_info
144
+ redis do |conn|
145
+ conn.call("INFO") { |i| i.lines(chomp: true).map { |l| l.split(":", 2) }.select { |l| l.size == 2 }.to_h }
146
+ rescue RedisClientAdapter::CommandError => ex
147
+ # 2850 return fake version when INFO command has (probably) been renamed
148
+ raise unless /unknown command/.match?(ex.message)
149
+ {
150
+ "redis_version" => "9.9.9",
151
+ "uptime_in_days" => "9999",
152
+ "connected_clients" => "9999",
153
+ "used_memory_human" => "9P",
154
+ "used_memory_peak_human" => "9P"
155
+ }.freeze
156
+ end
157
+ end
158
+
159
+ def redis
160
+ raise ArgumentError, "requires a block" unless block_given?
161
+ redis_pool.with do |conn|
162
+ retryable = true
163
+ begin
164
+ yield conn
165
+ rescue RedisClientAdapter::BaseError => ex
166
+ # 2550 Failover can cause the server to become a replica, need
167
+ # to disconnect and reopen the socket to get back to the primary.
168
+ # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
169
+ # 4985 Use the same logic when a blocking command is force-unblocked
170
+ # The same retry logic is also used in client.rb
171
+ if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
172
+ conn.close
173
+ retryable = false
174
+ retry
175
+ end
176
+ raise
177
+ end
178
+ end
179
+ end
180
+
181
+ # register global singletons which can be accessed elsewhere
182
+ def register(name, instance)
183
+ @directory[name] = instance
184
+ end
185
+
186
+ # find a singleton
187
+ def lookup(name, default_class = nil)
188
+ # JNDI is just a fancy name for a hash lookup
189
+ @directory.fetch(name) do |key|
190
+ return nil unless default_class
191
+ @directory[key] = default_class.new(self)
192
+ end
193
+ end
194
+
195
+ ##
196
+ # Death handlers are called when all retries for a job have been exhausted and
197
+ # the job dies. It's the notification to your application
198
+ # that this job will not succeed without manual intervention.
199
+ #
200
+ # Sidekiq.configure_server do |config|
201
+ # config.death_handlers << ->(job, ex) do
202
+ # end
203
+ # end
204
+ def death_handlers
205
+ @options[:death_handlers]
206
+ end
207
+
208
+ # How frequently Redis should be checked by a random Sidekiq process for
209
+ # scheduled and retriable jobs. Each individual process will take turns by
210
+ # waiting some multiple of this value.
211
+ #
212
+ # See sidekiq/scheduled.rb for an in-depth explanation of this value
213
+ def average_scheduled_poll_interval=(interval)
214
+ @options[:average_scheduled_poll_interval] = interval
215
+ end
216
+
217
+ # Register a proc to handle any error which occurs within the Sidekiq process.
218
+ #
219
+ # Sidekiq.configure_server do |config|
220
+ # config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
221
+ # end
222
+ #
223
+ # The default error handler logs errors to @logger.
224
+ def error_handlers
225
+ @options[:error_handlers]
226
+ end
227
+
228
+ # Register a block to run at a point in the Sidekiq lifecycle.
229
+ # :startup, :quiet or :shutdown are valid events.
230
+ #
231
+ # Sidekiq.configure_server do |config|
232
+ # config.on(:shutdown) do
233
+ # puts "Goodbye cruel world!"
234
+ # end
235
+ # end
236
+ def on(event, &block)
237
+ raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
238
+ raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
239
+ @options[:lifecycle_events][event] << block
240
+ end
241
+
242
+ def logger
243
+ @logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
244
+ log.level = Logger::INFO
245
+ log.formatter = if ENV["DYNO"]
246
+ Sidekiq::Logger::Formatters::WithoutTimestamp.new
247
+ else
248
+ Sidekiq::Logger::Formatters::Pretty.new
249
+ end
250
+ end
251
+ end
252
+
253
+ def logger=(logger)
254
+ if logger.nil?
255
+ self.logger.level = Logger::FATAL
256
+ return
257
+ end
258
+
259
+ @logger = logger
260
+ end
261
+
262
+ # INTERNAL USE ONLY
263
+ def handle_exception(ex, ctx = {})
264
+ if @options[:error_handlers].size == 0
265
+ p ["!!!!!", ex]
266
+ end
267
+ ctx[:_config] = self
268
+ @options[:error_handlers].each do |handler|
269
+ handler.call(ex, ctx)
270
+ rescue Exception => e
271
+ l = logger
272
+ l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
273
+ l.error e
274
+ l.error e.backtrace.join("\n") unless e.backtrace.nil?
275
+ end
276
+ end
277
+ end
278
+ end