sidekiq 5.2.7 → 8.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. checksums.yaml +4 -4
  2. data/Changes.md +845 -8
  3. data/LICENSE.txt +9 -0
  4. data/README.md +54 -54
  5. data/bin/multi_queue_bench +271 -0
  6. data/bin/sidekiq +22 -3
  7. data/bin/sidekiqload +219 -112
  8. data/bin/sidekiqmon +11 -0
  9. data/bin/webload +69 -0
  10. data/lib/active_job/queue_adapters/sidekiq_adapter.rb +120 -0
  11. data/lib/generators/sidekiq/job_generator.rb +59 -0
  12. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  13. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  14. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  15. data/lib/sidekiq/api.rb +757 -373
  16. data/lib/sidekiq/capsule.rb +132 -0
  17. data/lib/sidekiq/cli.rb +210 -233
  18. data/lib/sidekiq/client.rb +145 -103
  19. data/lib/sidekiq/component.rb +128 -0
  20. data/lib/sidekiq/config.rb +315 -0
  21. data/lib/sidekiq/deploy.rb +64 -0
  22. data/lib/sidekiq/embedded.rb +64 -0
  23. data/lib/sidekiq/fetch.rb +49 -42
  24. data/lib/sidekiq/iterable_job.rb +56 -0
  25. data/lib/sidekiq/job/interrupt_handler.rb +24 -0
  26. data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
  27. data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
  28. data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
  29. data/lib/sidekiq/job/iterable.rb +306 -0
  30. data/lib/sidekiq/job.rb +385 -0
  31. data/lib/sidekiq/job_logger.rb +34 -7
  32. data/lib/sidekiq/job_retry.rb +164 -109
  33. data/lib/sidekiq/job_util.rb +113 -0
  34. data/lib/sidekiq/launcher.rb +208 -107
  35. data/lib/sidekiq/logger.rb +80 -0
  36. data/lib/sidekiq/manager.rb +42 -46
  37. data/lib/sidekiq/metrics/query.rb +184 -0
  38. data/lib/sidekiq/metrics/shared.rb +109 -0
  39. data/lib/sidekiq/metrics/tracking.rb +150 -0
  40. data/lib/sidekiq/middleware/chain.rb +113 -56
  41. data/lib/sidekiq/middleware/current_attributes.rb +119 -0
  42. data/lib/sidekiq/middleware/i18n.rb +7 -7
  43. data/lib/sidekiq/middleware/modules.rb +23 -0
  44. data/lib/sidekiq/monitor.rb +147 -0
  45. data/lib/sidekiq/paginator.rb +41 -16
  46. data/lib/sidekiq/processor.rb +146 -127
  47. data/lib/sidekiq/profiler.rb +72 -0
  48. data/lib/sidekiq/rails.rb +46 -43
  49. data/lib/sidekiq/redis_client_adapter.rb +113 -0
  50. data/lib/sidekiq/redis_connection.rb +79 -108
  51. data/lib/sidekiq/ring_buffer.rb +31 -0
  52. data/lib/sidekiq/scheduled.rb +112 -50
  53. data/lib/sidekiq/sd_notify.rb +149 -0
  54. data/lib/sidekiq/systemd.rb +26 -0
  55. data/lib/sidekiq/testing/inline.rb +6 -5
  56. data/lib/sidekiq/testing.rb +91 -90
  57. data/lib/sidekiq/transaction_aware_client.rb +51 -0
  58. data/lib/sidekiq/version.rb +7 -1
  59. data/lib/sidekiq/web/action.rb +125 -60
  60. data/lib/sidekiq/web/application.rb +363 -259
  61. data/lib/sidekiq/web/config.rb +120 -0
  62. data/lib/sidekiq/web/csrf_protection.rb +183 -0
  63. data/lib/sidekiq/web/helpers.rb +241 -120
  64. data/lib/sidekiq/web/router.rb +62 -71
  65. data/lib/sidekiq/web.rb +69 -161
  66. data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
  67. data/lib/sidekiq.rb +94 -182
  68. data/sidekiq.gemspec +26 -16
  69. data/web/assets/images/apple-touch-icon.png +0 -0
  70. data/web/assets/javascripts/application.js +150 -61
  71. data/web/assets/javascripts/base-charts.js +120 -0
  72. data/web/assets/javascripts/chart.min.js +13 -0
  73. data/web/assets/javascripts/chartjs-adapter-date-fns.min.js +7 -0
  74. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  75. data/web/assets/javascripts/dashboard-charts.js +194 -0
  76. data/web/assets/javascripts/dashboard.js +41 -293
  77. data/web/assets/javascripts/metrics.js +280 -0
  78. data/web/assets/stylesheets/style.css +766 -0
  79. data/web/locales/ar.yml +72 -65
  80. data/web/locales/cs.yml +63 -62
  81. data/web/locales/da.yml +61 -53
  82. data/web/locales/de.yml +66 -53
  83. data/web/locales/el.yml +44 -24
  84. data/web/locales/en.yml +94 -66
  85. data/web/locales/es.yml +92 -54
  86. data/web/locales/fa.yml +66 -65
  87. data/web/locales/fr.yml +83 -62
  88. data/web/locales/gd.yml +99 -0
  89. data/web/locales/he.yml +66 -64
  90. data/web/locales/hi.yml +60 -59
  91. data/web/locales/it.yml +93 -54
  92. data/web/locales/ja.yml +75 -64
  93. data/web/locales/ko.yml +53 -52
  94. data/web/locales/lt.yml +84 -0
  95. data/web/locales/nb.yml +62 -61
  96. data/web/locales/nl.yml +53 -52
  97. data/web/locales/pl.yml +46 -45
  98. data/web/locales/{pt-br.yml → pt-BR.yml} +84 -56
  99. data/web/locales/pt.yml +52 -51
  100. data/web/locales/ru.yml +69 -63
  101. data/web/locales/sv.yml +54 -53
  102. data/web/locales/ta.yml +61 -60
  103. data/web/locales/tr.yml +101 -0
  104. data/web/locales/uk.yml +86 -61
  105. data/web/locales/ur.yml +65 -64
  106. data/web/locales/vi.yml +84 -0
  107. data/web/locales/zh-CN.yml +106 -0
  108. data/web/locales/{zh-tw.yml → zh-TW.yml} +43 -9
  109. data/web/views/_footer.erb +31 -19
  110. data/web/views/_job_info.erb +94 -75
  111. data/web/views/_metrics_period_select.erb +15 -0
  112. data/web/views/_nav.erb +14 -21
  113. data/web/views/_paging.erb +23 -19
  114. data/web/views/_poll_link.erb +3 -6
  115. data/web/views/_summary.erb +23 -23
  116. data/web/views/busy.erb +139 -87
  117. data/web/views/dashboard.erb +82 -53
  118. data/web/views/dead.erb +31 -27
  119. data/web/views/filtering.erb +6 -0
  120. data/web/views/layout.erb +15 -29
  121. data/web/views/metrics.erb +84 -0
  122. data/web/views/metrics_for_job.erb +58 -0
  123. data/web/views/morgue.erb +60 -70
  124. data/web/views/profiles.erb +43 -0
  125. data/web/views/queue.erb +50 -39
  126. data/web/views/queues.erb +45 -29
  127. data/web/views/retries.erb +65 -75
  128. data/web/views/retry.erb +32 -27
  129. data/web/views/scheduled.erb +58 -52
  130. data/web/views/scheduled_job_info.erb +1 -1
  131. metadata +96 -76
  132. data/.circleci/config.yml +0 -61
  133. data/.github/contributing.md +0 -32
  134. data/.github/issue_template.md +0 -11
  135. data/.gitignore +0 -15
  136. data/.travis.yml +0 -11
  137. data/3.0-Upgrade.md +0 -70
  138. data/4.0-Upgrade.md +0 -53
  139. data/5.0-Upgrade.md +0 -56
  140. data/COMM-LICENSE +0 -97
  141. data/Ent-Changes.md +0 -238
  142. data/Gemfile +0 -23
  143. data/LICENSE +0 -9
  144. data/Pro-2.0-Upgrade.md +0 -138
  145. data/Pro-3.0-Upgrade.md +0 -44
  146. data/Pro-4.0-Upgrade.md +0 -35
  147. data/Pro-Changes.md +0 -759
  148. data/Rakefile +0 -9
  149. data/bin/sidekiqctl +0 -20
  150. data/code_of_conduct.md +0 -50
  151. data/lib/generators/sidekiq/worker_generator.rb +0 -49
  152. data/lib/sidekiq/core_ext.rb +0 -1
  153. data/lib/sidekiq/ctl.rb +0 -221
  154. data/lib/sidekiq/delay.rb +0 -42
  155. data/lib/sidekiq/exception_handler.rb +0 -29
  156. data/lib/sidekiq/extensions/action_mailer.rb +0 -57
  157. data/lib/sidekiq/extensions/active_record.rb +0 -40
  158. data/lib/sidekiq/extensions/class_methods.rb +0 -40
  159. data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
  160. data/lib/sidekiq/logging.rb +0 -122
  161. data/lib/sidekiq/middleware/server/active_record.rb +0 -23
  162. data/lib/sidekiq/util.rb +0 -66
  163. data/lib/sidekiq/worker.rb +0 -220
  164. data/web/assets/stylesheets/application-rtl.css +0 -246
  165. data/web/assets/stylesheets/application.css +0 -1144
  166. data/web/assets/stylesheets/bootstrap-rtl.min.css +0 -9
  167. data/web/assets/stylesheets/bootstrap.css +0 -5
  168. data/web/locales/zh-cn.yml +0 -68
  169. data/web/views/_status.erb +0 -4
data/lib/sidekiq/client.rb

@@ -1,9 +1,12 @@
  # frozen_string_literal: true
- require 'securerandom'
- require 'sidekiq/middleware/chain'
+
+ require "securerandom"
+ require "sidekiq/middleware/chain"
+ require "sidekiq/job_util"

  module Sidekiq
  class Client
+ include Sidekiq::JobUtil

  ##
  # Define client-side middleware:
@@ -12,14 +15,13 @@ module Sidekiq
  # client.middleware do |chain|
  # chain.use MyClientMiddleware
  # end
- # client.push('class' => 'SomeWorker', 'args' => [1,2,3])
+ # client.push('class' => 'SomeJob', 'args' => [1,2,3])
  #
  # All client instances default to the globally-defined
  # Sidekiq.client_middleware but you can change as necessary.
  #
  def middleware(&block)
- @chain ||= Sidekiq.client_middleware
- if block_given?
+ if block
  @chain = @chain.dup
  yield @chain
  end
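
For orientation, the middleware block documented above is normally wired up through Sidekiq.configure_client. The sketch below is illustrative only and not part of this diff: MyClientMiddleware and the "enqueued_by" annotation are invented names, but the call signature (job class, job hash, queue, Redis pool) and the need to yield for the push to proceed follow Sidekiq's documented client-middleware contract.

    require "sidekiq"

    class MyClientMiddleware
      # Client middleware receives the job class (or class name string),
      # the job payload hash, the queue name and the Redis pool in use.
      def call(job_class, job, queue, redis_pool)
        job["enqueued_by"] = "web"  # annotate the payload before it is pushed
        yield                       # continue the chain; not yielding halts the push
      end
    end

    Sidekiq.configure_client do |config|
      config.client_middleware do |chain|
        chain.use MyClientMiddleware
      end
    end
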
@@ -28,58 +30,91 @@ module Sidekiq

  attr_accessor :redis_pool

- # Sidekiq::Client normally uses the default Redis pool but you may
- # pass a custom ConnectionPool if you want to shard your
- # Sidekiq jobs across several Redis instances (for scalability
- # reasons, e.g.)
+ # Sidekiq::Client is responsible for pushing job payloads to Redis.
+ # Requires the :pool or :config keyword argument.
  #
- # Sidekiq::Client.new(ConnectionPool.new { Redis.new })
+ # Sidekiq::Client.new(pool: Sidekiq::RedisConnection.create)
  #
- # Generally this is only needed for very large Sidekiq installs processing
- # thousands of jobs per second. I don't recommend sharding unless you
- # cannot scale any other way (e.g. splitting your app into smaller apps).
- def initialize(redis_pool=nil)
- @redis_pool = redis_pool || Thread.current[:sidekiq_via_pool] || Sidekiq.redis_pool
+ # Inside the Sidekiq process, you can reuse the configured resources:
+ #
+ # Sidekiq::Client.new(config: config)
+ #
+ # @param pool [ConnectionPool] explicit Redis pool to use
+ # @param config [Sidekiq::Config] use the pool and middleware from the given Sidekiq container
+ # @param chain [Sidekiq::Middleware::Chain] use the given middleware chain
+ def initialize(*args, **kwargs)
+ if args.size == 1 && kwargs.size == 0
+ warn "Sidekiq::Client.new(pool) is deprecated, please use Sidekiq::Client.new(pool: pool), #{caller(0..3)}"
+ # old calling method, accept 1 pool argument
+ @redis_pool = args[0]
+ @chain = Sidekiq.default_configuration.client_middleware
+ @config = Sidekiq.default_configuration
+ else
+ # new calling method: keyword arguments
+ @config = kwargs[:config] || Sidekiq.default_configuration
+ @redis_pool = kwargs[:pool] || Thread.current[:sidekiq_redis_pool] || @config&.redis_pool
+ @chain = kwargs[:chain] || @config&.client_middleware
+ raise ArgumentError, "No Redis pool available for Sidekiq::Client" unless @redis_pool
+ end
+ end
+
+ # Cancel the IterableJob with the given JID.
+ # **NB: Cancellation is asynchronous.** Iteration checks every
+ # five seconds so this will not immediately stop the given job.
+ def cancel!(jid)
+ key = "it-#{jid}"
+ _, result, _ = Sidekiq.redis do |c|
+ c.pipelined do |p|
+ p.hsetnx(key, "cancelled", Time.now.to_i)
+ p.hget(key, "cancelled")
+ p.expire(key, Sidekiq::Job::Iterable::STATE_TTL, "nx")
+ end
+ end
+ result.to_i
  end

  ##
  # The main method used to push a job to Redis. Accepts a number of options:
  #
  # queue - the named queue to use, default 'default'
- # class - the worker class to call, required
+ # class - the job class to call, required
  # args - an array of simple arguments to the perform method, must be JSON-serializable
  # at - timestamp to schedule the job (optional), must be Numeric (e.g. Time.now.to_f)
  # retry - whether to retry this job if it fails, default true or an integer number of retries
+ # retry_for - relative amount of time to retry this job if it fails, default nil
  # backtrace - whether to save any error backtrace, default false
  #
  # If class is set to the class name, the jobs' options will be based on Sidekiq's default
- # worker options. Otherwise, they will be based on the job class's options.
+ # job options. Otherwise, they will be based on the job class's options.
  #
- # Any options valid for a worker class's sidekiq_options are also available here.
+ # Any options valid for a job class's sidekiq_options are also available here.
  #
- # All options must be strings, not symbols. NB: because we are serializing to JSON, all
+ # All keys must be strings, not symbols. NB: because we are serializing to JSON, all
  # symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of
  # space in Redis; a large volume of failing jobs can start Redis swapping if you aren't careful.
  #
  # Returns a unique Job ID. If middleware stops the job, nil will be returned instead.
  #
  # Example:
- # push('queue' => 'my_queue', 'class' => MyWorker, 'args' => ['foo', 1, :bat => 'bar'])
+ # push('queue' => 'my_queue', 'class' => MyJob, 'args' => ['foo', 1, :bat => 'bar'])
  #
  def push(item)
  normed = normalize_item(item)
- payload = process_single(item['class'], normed)
-
+ payload = middleware.invoke(item["class"], normed, normed["queue"], @redis_pool) do
+ normed
+ end
  if payload
+ verify_json(payload)
  raw_push([payload])
- payload['jid']
+ payload["jid"]
  end
  end

  ##
  # Push a large number of jobs to Redis. This method cuts out the redis
- # network round trip latency. I wouldn't recommend pushing more than
- # 1000 per call but YMMV based on network quality, size of job args, etc.
+ # network round trip latency. It pushes jobs in batches if more than
+ # `:batch_size` (1000 by default) of jobs are passed. I wouldn't recommend making `:batch_size`
+ # larger than 1000 but YMMV based on network quality, size of job args, etc.
  # A large number of jobs can cause a bit of Redis command processing latency.
  #
  # Takes the same arguments as #push except that args is expected to be
@@ -87,22 +122,45 @@ module Sidekiq
  # is run through the client middleware pipeline and each job gets its own Job ID
  # as normal.
  #
- # Returns an array of the of pushed jobs' jids. The number of jobs pushed can be less
- # than the number given if the middleware stopped processing for one or more jobs.
+ # Returns an array of the of pushed jobs' jids, may contain nils if any client middleware
+ # prevented a job push.
+ #
+ # Example (pushing jobs in batches):
+ # push_bulk('class' => MyJob, 'args' => (1..100_000).to_a, batch_size: 1_000)
+ #
  def push_bulk(items)
- arg = items['args'].first
- return [] unless arg # no jobs to push
- raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" if !arg.is_a?(Array)
+ batch_size = items.delete(:batch_size) || items.delete("batch_size") || 1_000
+ args = items["args"]
+ at = items.delete("at")
+ raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
+ raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
+
+ jid = items.delete("jid")
+ raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1

  normed = normalize_item(items)
- payloads = items['args'].map do |args|
- copy = normed.merge('args' => args, 'jid' => SecureRandom.hex(12), 'enqueued_at' => Time.now.to_f)
- result = process_single(items['class'], copy)
- result ? result : nil
- end.compact
-
- raw_push(payloads) if !payloads.empty?
- payloads.collect { |payload| payload['jid'] }
+ slice_index = 0
+ result = args.each_slice(batch_size).flat_map do |slice|
+ raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless slice.is_a?(Array) && slice.all?(Array)
+ break [] if slice.empty? # no jobs to push
+
+ payloads = slice.map.with_index { |job_args, index|
+ copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
+ copy["at"] = (at.is_a?(Array) ? at[slice_index + index] : at) if at
+ result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
+ verify_json(copy)
+ copy
+ end
+ result || nil
+ }
+ slice_index += batch_size
+
+ to_push = payloads.compact
+ raw_push(to_push) unless to_push.empty?
+ payloads.map { |payload| payload&.[]("jid") }
+ end
+
+ result.is_a?(Enumerator::Lazy) ? result.force : result
  end

  # Allows sharding of jobs across any number of Redis instances. All jobs
@@ -110,8 +168,8 @@ module Sidekiq
  #
  # pool = ConnectionPool.new { Redis.new }
  # Sidekiq::Client.via(pool) do
- # SomeWorker.perform_async(1,2,3)
- # SomeOtherWorker.perform_async(1,2,3)
+ # SomeJob.perform_async(1,2,3)
+ # SomeOtherJob.perform_async(1,2,3)
  # end
  #
  # Generally this is only needed for very large Sidekiq installs processing
@@ -119,58 +177,57 @@ module Sidekiq
  # you cannot scale any other way (e.g. splitting your app into smaller apps).
  def self.via(pool)
  raise ArgumentError, "No pool given" if pool.nil?
- current_sidekiq_pool = Thread.current[:sidekiq_via_pool]
- Thread.current[:sidekiq_via_pool] = pool
+ current_sidekiq_pool = Thread.current[:sidekiq_redis_pool]
+ Thread.current[:sidekiq_redis_pool] = pool
  yield
  ensure
- Thread.current[:sidekiq_via_pool] = current_sidekiq_pool
+ Thread.current[:sidekiq_redis_pool] = current_sidekiq_pool
  end

  class << self
-
  def push(item)
  new.push(item)
  end

- def push_bulk(items)
- new.push_bulk(items)
+ def push_bulk(...)
+ new.push_bulk(...)
  end

  # Resque compatibility helpers. Note all helpers
- # should go through Worker#client_push.
+ # should go through Sidekiq::Job#client_push.
  #
  # Example usage:
- # Sidekiq::Client.enqueue(MyWorker, 'foo', 1, :bat => 'bar')
+ # Sidekiq::Client.enqueue(MyJob, 'foo', 1, :bat => 'bar')
  #
  # Messages are enqueued to the 'default' queue.
  #
  def enqueue(klass, *args)
- klass.client_push('class' => klass, 'args' => args)
+ klass.client_push("class" => klass, "args" => args)
  end

  # Example usage:
- # Sidekiq::Client.enqueue_to(:queue_name, MyWorker, 'foo', 1, :bat => 'bar')
+ # Sidekiq::Client.enqueue_to(:queue_name, MyJob, 'foo', 1, :bat => 'bar')
  #
  def enqueue_to(queue, klass, *args)
- klass.client_push('queue' => queue, 'class' => klass, 'args' => args)
+ klass.client_push("queue" => queue, "class" => klass, "args" => args)
  end

  # Example usage:
- # Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
+ # Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyJob, 'foo', 1, :bat => 'bar')
  #
  def enqueue_to_in(queue, interval, klass, *args)
  int = interval.to_f
  now = Time.now.to_f
- ts = (int < 1_000_000_000 ? now + int : int)
+ ts = ((int < 1_000_000_000) ? now + int : int)

- item = { 'class' => klass, 'args' => args, 'at' => ts, 'queue' => queue }
- item.delete('at') if ts <= now
+ item = {"class" => klass, "args" => args, "at" => ts, "queue" => queue}
+ item.delete("at") if ts <= now

  klass.client_push(item)
  end

  # Example usage:
- # Sidekiq::Client.enqueue_in(3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
+ # Sidekiq::Client.enqueue_in(3.minutes, MyJob, 'foo', 1, :bat => 'bar')
  #
  def enqueue_in(interval, klass, *args)
  klass.perform_in(interval, *args)
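
To make the enqueue_to_in arithmetic above concrete, here is an illustrative sketch (SomeJob is an invented job class, not part of this diff): an interval below 1,000,000,000 is treated as seconds from now, a larger value as an absolute epoch timestamp, and the 'at' key is dropped when the resulting time is not in the future, so the job is enqueued immediately.

    require "sidekiq"

    class SomeJob
      include Sidekiq::Job
      def perform(note)
        puts note
      end
    end

    # 180 < 1_000_000_000, so this schedules roughly three minutes from now.
    Sidekiq::Client.enqueue_to_in(:critical, 180, SomeJob, "relative interval")

    # An epoch value is used as the absolute 'at' timestamp.
    Sidekiq::Client.enqueue_to_in(:critical, Time.now.to_f + 3600, SomeJob, "absolute time")

    # ts <= now here, so 'at' is removed and the job runs as soon as possible.
    Sidekiq::Client.enqueue_to_in(:critical, 0, SomeJob, "immediate")
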
@@ -181,62 +238,47 @@ module Sidekiq

  def raw_push(payloads)
  @redis_pool.with do |conn|
- conn.multi do
- atomic_push(conn, payloads)
+ retryable = true
+ begin
+ conn.pipelined do |pipeline|
+ atomic_push(pipeline, payloads)
+ end
+ rescue RedisClient::Error => ex
+ # 2550 Failover can cause the server to become a replica, need
+ # to disconnect and reopen the socket to get back to the primary.
+ # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+ # 4985 Use the same logic when a blocking command is force-unblocked
+ # The retry logic is copied from sidekiq.rb
+ if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
+ conn.close
+ retryable = false
+ retry
+ end
+ raise
  end
  end
  true
  end

  def atomic_push(conn, payloads)
- if payloads.first['at']
- conn.zadd('schedule', payloads.map do |hash|
- at = hash.delete('at').to_s
+ if payloads.first.key?("at")
+ conn.zadd("schedule", payloads.flat_map { |hash|
+ at = hash["at"].to_s
+ # ActiveJob sets enqueued_at but the job has not been enqueued yet
+ hash = hash.except("enqueued_at", "at")
  [at, Sidekiq.dump_json(hash)]
- end)
+ })
  else
- q = payloads.first['queue']
- now = Time.now.to_f
- to_push = payloads.map do |entry|
- entry['enqueued_at'] = now
- Sidekiq.dump_json(entry)
+ now = ::Process.clock_gettime(::Process::CLOCK_REALTIME, :millisecond) # milliseconds since the epoch
+ grouped_queues = payloads.group_by { |job| job["queue"] }
+ conn.sadd("queues", grouped_queues.keys)
+ grouped_queues.each do |queue, grouped_payloads|
+ to_push = grouped_payloads.map { |entry|
+ entry["enqueued_at"] = now
+ Sidekiq.dump_json(entry)
+ }
+ conn.lpush("queue:#{queue}", to_push)
  end
- conn.sadd('queues', q)
- conn.lpush("queue:#{q}", to_push)
- end
- end
-
- def process_single(worker_class, item)
- queue = item['queue']
-
- middleware.invoke(worker_class, item, queue, @redis_pool) do
- item
- end
- end
-
- def normalize_item(item)
- raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: { 'class' => SomeWorker, 'args' => ['bob', 1, :foo => 'bar'] }") unless item.is_a?(Hash) && item.has_key?('class') && item.has_key?('args')
- raise(ArgumentError, "Job args must be an Array") unless item['args'].is_a?(Array)
- raise(ArgumentError, "Job class must be either a Class or String representation of the class name") unless item['class'].is_a?(Class) || item['class'].is_a?(String)
- raise(ArgumentError, "Job 'at' must be a Numeric timestamp") if item.has_key?('at') && !item['at'].is_a?(Numeric)
- #raise(ArgumentError, "Arguments must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices") unless JSON.load(JSON.dump(item['args'])) == item['args']
-
- normalized_hash(item['class'])
- .each{ |key, value| item[key] = value if item[key].nil? }
-
- item['class'] = item['class'].to_s
- item['queue'] = item['queue'].to_s
- item['jid'] ||= SecureRandom.hex(12)
- item['created_at'] ||= Time.now.to_f
- item
- end
-
- def normalized_hash(item_class)
- if item_class.is_a?(Class)
- raise(ArgumentError, "Message must include a Sidekiq::Worker class, not class name: #{item_class.ancestors.inspect}") if !item_class.respond_to?('get_sidekiq_options')
- item_class.get_sidekiq_options
- else
- Sidekiq.default_worker_options
  end
  end
  end
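
Read together, the hunks above show the 8.x client contract: keyword construction, string payload keys, pipelined pushes grouped per queue, and batched push_bulk. A minimal usage sketch of that public surface follows; HardJob is an invented class used only for illustration, and the pool relies on Sidekiq::RedisConnection.create picking up its usual defaults (typically REDIS_URL or localhost).

    require "sidekiq"

    class HardJob
      include Sidekiq::Job
      def perform(*args)
        # work happens here
      end
    end

    # Preferred 8.x construction: keyword arguments. The old positional form
    # Sidekiq::Client.new(pool) still works but prints a deprecation warning.
    client = Sidekiq::Client.new(pool: Sidekiq::RedisConnection.create)

    # Single push: string keys only; returns the JID, or nil if middleware halted the push.
    jid = client.push("class" => "HardJob", "args" => [1, 2, 3])

    # Bulk push: 'args' is an Array of Arrays. Jobs are pushed in slices of
    # :batch_size (1000 by default); the result has one entry per job, with
    # nil where client middleware stopped the push.
    jids = client.push_bulk("class" => HardJob,
      "args" => (1..5_000).map { |i| [i] },
      "batch_size" => 2_500)
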
data/lib/sidekiq/component.rb (new file)

@@ -0,0 +1,128 @@
+ # frozen_string_literal: true
+
+ module Sidekiq
+ # Ruby's default thread priority is 0, which uses 100ms time slices.
+ # This can lead to some surprising thread starvation; if using a lot of
+ # CPU-heavy concurrency, it may take several seconds before a Thread gets
+ # on the CPU.
+ #
+ # Negative priorities lower the timeslice by half, so -1 = 50ms, -2 = 25ms, etc.
+ # With more frequent timeslices, we reduce the risk of unintentional timeouts
+ # and starvation.
+ #
+ # Customize like so:
+ #
+ # Sidekiq.configure_server do |cfg|
+ # cfg.thread_priority = 0
+ # end
+ #
+ DEFAULT_THREAD_PRIORITY = -1
+
+ ##
+ # Sidekiq::Component assumes a config instance is available at @config
+ module Component # :nodoc:
+ attr_reader :config
+
+ # This is epoch milliseconds, appropriate for persistence
+ def real_ms
+ ::Process.clock_gettime(::Process::CLOCK_REALTIME, :millisecond)
+ end
+
+ # used for time difference and relative comparisons, not persistence.
+ def mono_ms
+ ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
+ end
+
+ def watchdog(last_words)
+ yield
+ rescue Exception => ex
+ handle_exception(ex, {context: last_words})
+ raise ex
+ end
+
+ def safe_thread(name, priority: nil, &block)
+ Thread.new do
+ Thread.current.name = "sidekiq.#{name}"
+ watchdog(name, &block)
+ end.tap { |t| t.priority = (priority || config.thread_priority || DEFAULT_THREAD_PRIORITY) }
+ end
+
+ def logger
+ config.logger
+ end
+
+ def redis(&block)
+ config.redis(&block)
+ end
+
+ def tid
+ Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+ end
+
+ def hostname
+ ENV["DYNO"] || Socket.gethostname
+ end
+
+ def process_nonce
+ @@process_nonce ||= SecureRandom.hex(6)
+ end
+
+ def identity
+ @@identity ||= "#{hostname}:#{::Process.pid}:#{process_nonce}"
+ end
+
+ def handle_exception(ex, ctx = {})
+ config.handle_exception(ex, ctx)
+ end
+
+ def fire_event(event, options = {})
+ oneshot = options.fetch(:oneshot, true)
+ reverse = options[:reverse]
+ reraise = options[:reraise]
+ logger.debug("Firing #{event} event") if oneshot
+
+ arr = config[:lifecycle_events][event]
+ arr.reverse! if reverse
+ arr.each do |block|
+ block.call
+ rescue => ex
+ handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event})
+ raise ex if reraise
+ end
+ arr.clear if oneshot # once we've fired an event, we never fire it again
+ end
+
+ # When you have a large tree of components, the `inspect` output
+ # can get out of hand, especially with lots of Sidekiq::Config
+ # references everywhere. We avoid calling `inspect` on more complex
+ # state and use `to_s` instead to keep output manageable, #6553
+ def inspect
+ "#<#{self.class.name} #{
+ instance_variables.map do |name|
+ value = instance_variable_get(name)
+ case value
+ when Proc
+ "#{name}=#{value}"
+ when Sidekiq::Config
+ "#{name}=#{value}"
+ when Sidekiq::Component
+ "#{name}=#{value}"
+ else
+ "#{name}=#{value.inspect}"
+ end
+ end.join(", ")
+ }>"
+ end
+
+ def default_tag(dir = Dir.pwd)
+ name = File.basename(dir)
+ prevdir = File.dirname(dir) # Capistrano release directory?
+ if name.to_i != 0 && prevdir
+ if File.basename(prevdir) == "releases"
+ return File.basename(File.dirname(prevdir))
+ end
+ end
+ name
+ end
+ end
+ end
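
Sidekiq::Component above is the mixin shared by Sidekiq's internal parts (launcher, manager, processor and so on) and only assumes that @config holds a Sidekiq::Config. The sketch below shows how an embedding component might lean on it; the Heartbeat class, its Redis key and the 10-second loop are invented for illustration and are not part of this diff.

    require "sidekiq"
    require "sidekiq/component"

    class Heartbeat
      include Sidekiq::Component

      def initialize(config)
        @config = config  # Component reads everything through @config
      end

      def start
        # safe_thread names the thread "sidekiq.heartbeat", applies the
        # configured thread_priority and reports any crash via handle_exception.
        @thread = safe_thread("heartbeat") do
          loop do
            redis { |conn| conn.set("my-heartbeat", real_ms) }  # epoch ms, per real_ms
            sleep 10
          end
        end
      end
    end

    Sidekiq.configure_server do |config|
      config.on(:startup) { Heartbeat.new(config).start }
    end
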