sidekiq 5.1.3 → 7.3.1

Files changed (157)
  1. checksums.yaml +5 -5
  2. data/Changes.md +756 -8
  3. data/LICENSE.txt +9 -0
  4. data/README.md +48 -51
  5. data/bin/multi_queue_bench +271 -0
  6. data/bin/sidekiq +22 -3
  7. data/bin/sidekiqload +213 -115
  8. data/bin/sidekiqmon +11 -0
  9. data/lib/generators/sidekiq/job_generator.rb +59 -0
  10. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  11. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  12. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  13. data/lib/sidekiq/api.rb +640 -330
  14. data/lib/sidekiq/capsule.rb +132 -0
  15. data/lib/sidekiq/cli.rb +244 -257
  16. data/lib/sidekiq/client.rb +132 -103
  17. data/lib/sidekiq/component.rb +68 -0
  18. data/lib/sidekiq/config.rb +293 -0
  19. data/lib/sidekiq/deploy.rb +64 -0
  20. data/lib/sidekiq/embedded.rb +63 -0
  21. data/lib/sidekiq/fetch.rb +49 -42
  22. data/lib/sidekiq/iterable_job.rb +55 -0
  23. data/lib/sidekiq/job/interrupt_handler.rb +24 -0
  24. data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
  25. data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
  26. data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
  27. data/lib/sidekiq/job/iterable.rb +231 -0
  28. data/lib/sidekiq/job.rb +385 -0
  29. data/lib/sidekiq/job_logger.rb +49 -12
  30. data/lib/sidekiq/job_retry.rb +167 -103
  31. data/lib/sidekiq/job_util.rb +109 -0
  32. data/lib/sidekiq/launcher.rb +209 -102
  33. data/lib/sidekiq/logger.rb +131 -0
  34. data/lib/sidekiq/manager.rb +43 -46
  35. data/lib/sidekiq/metrics/query.rb +158 -0
  36. data/lib/sidekiq/metrics/shared.rb +97 -0
  37. data/lib/sidekiq/metrics/tracking.rb +148 -0
  38. data/lib/sidekiq/middleware/chain.rb +113 -56
  39. data/lib/sidekiq/middleware/current_attributes.rb +113 -0
  40. data/lib/sidekiq/middleware/i18n.rb +7 -7
  41. data/lib/sidekiq/middleware/modules.rb +23 -0
  42. data/lib/sidekiq/monitor.rb +147 -0
  43. data/lib/sidekiq/paginator.rb +28 -16
  44. data/lib/sidekiq/processor.rb +175 -112
  45. data/lib/sidekiq/rails.rb +54 -39
  46. data/lib/sidekiq/redis_client_adapter.rb +114 -0
  47. data/lib/sidekiq/redis_connection.rb +65 -86
  48. data/lib/sidekiq/ring_buffer.rb +31 -0
  49. data/lib/sidekiq/scheduled.rb +139 -48
  50. data/lib/sidekiq/sd_notify.rb +149 -0
  51. data/lib/sidekiq/systemd.rb +26 -0
  52. data/lib/sidekiq/testing/inline.rb +6 -5
  53. data/lib/sidekiq/testing.rb +95 -94
  54. data/lib/sidekiq/transaction_aware_client.rb +51 -0
  55. data/lib/sidekiq/version.rb +3 -1
  56. data/lib/sidekiq/web/action.rb +22 -12
  57. data/lib/sidekiq/web/application.rb +225 -76
  58. data/lib/sidekiq/web/csrf_protection.rb +183 -0
  59. data/lib/sidekiq/web/helpers.rb +215 -118
  60. data/lib/sidekiq/web/router.rb +23 -19
  61. data/lib/sidekiq/web.rb +114 -106
  62. data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
  63. data/lib/sidekiq.rb +95 -182
  64. data/sidekiq.gemspec +26 -23
  65. data/web/assets/images/apple-touch-icon.png +0 -0
  66. data/web/assets/javascripts/application.js +157 -61
  67. data/web/assets/javascripts/base-charts.js +106 -0
  68. data/web/assets/javascripts/chart.min.js +13 -0
  69. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  70. data/web/assets/javascripts/dashboard-charts.js +192 -0
  71. data/web/assets/javascripts/dashboard.js +35 -283
  72. data/web/assets/javascripts/metrics.js +298 -0
  73. data/web/assets/stylesheets/application-dark.css +147 -0
  74. data/web/assets/stylesheets/application-rtl.css +10 -93
  75. data/web/assets/stylesheets/application.css +169 -522
  76. data/web/assets/stylesheets/bootstrap.css +2 -2
  77. data/web/locales/ar.yml +71 -64
  78. data/web/locales/cs.yml +62 -62
  79. data/web/locales/da.yml +60 -53
  80. data/web/locales/de.yml +65 -53
  81. data/web/locales/el.yml +43 -24
  82. data/web/locales/en.yml +86 -65
  83. data/web/locales/es.yml +70 -54
  84. data/web/locales/fa.yml +65 -65
  85. data/web/locales/fr.yml +83 -62
  86. data/web/locales/gd.yml +99 -0
  87. data/web/locales/he.yml +65 -64
  88. data/web/locales/hi.yml +59 -59
  89. data/web/locales/it.yml +53 -53
  90. data/web/locales/ja.yml +75 -64
  91. data/web/locales/ko.yml +52 -52
  92. data/web/locales/lt.yml +83 -0
  93. data/web/locales/nb.yml +61 -61
  94. data/web/locales/nl.yml +52 -52
  95. data/web/locales/pl.yml +45 -45
  96. data/web/locales/pt-br.yml +83 -55
  97. data/web/locales/pt.yml +51 -51
  98. data/web/locales/ru.yml +68 -63
  99. data/web/locales/sv.yml +53 -53
  100. data/web/locales/ta.yml +60 -60
  101. data/web/locales/tr.yml +101 -0
  102. data/web/locales/uk.yml +62 -61
  103. data/web/locales/ur.yml +64 -64
  104. data/web/locales/vi.yml +83 -0
  105. data/web/locales/zh-cn.yml +43 -16
  106. data/web/locales/zh-tw.yml +42 -8
  107. data/web/views/_footer.erb +18 -3
  108. data/web/views/_job_info.erb +21 -4
  109. data/web/views/_metrics_period_select.erb +12 -0
  110. data/web/views/_nav.erb +4 -18
  111. data/web/views/_paging.erb +2 -0
  112. data/web/views/_poll_link.erb +3 -6
  113. data/web/views/_summary.erb +7 -7
  114. data/web/views/busy.erb +79 -29
  115. data/web/views/dashboard.erb +49 -19
  116. data/web/views/dead.erb +3 -3
  117. data/web/views/filtering.erb +7 -0
  118. data/web/views/layout.erb +9 -7
  119. data/web/views/metrics.erb +91 -0
  120. data/web/views/metrics_for_job.erb +59 -0
  121. data/web/views/morgue.erb +14 -15
  122. data/web/views/queue.erb +33 -23
  123. data/web/views/queues.erb +19 -5
  124. data/web/views/retries.erb +19 -16
  125. data/web/views/retry.erb +3 -3
  126. data/web/views/scheduled.erb +17 -15
  127. metadata +84 -129
  128. data/.github/contributing.md +0 -32
  129. data/.github/issue_template.md +0 -11
  130. data/.gitignore +0 -13
  131. data/.travis.yml +0 -14
  132. data/3.0-Upgrade.md +0 -70
  133. data/4.0-Upgrade.md +0 -53
  134. data/5.0-Upgrade.md +0 -56
  135. data/COMM-LICENSE +0 -95
  136. data/Ent-Changes.md +0 -216
  137. data/Gemfile +0 -8
  138. data/LICENSE +0 -9
  139. data/Pro-2.0-Upgrade.md +0 -138
  140. data/Pro-3.0-Upgrade.md +0 -44
  141. data/Pro-4.0-Upgrade.md +0 -35
  142. data/Pro-Changes.md +0 -729
  143. data/Rakefile +0 -8
  144. data/bin/sidekiqctl +0 -99
  145. data/code_of_conduct.md +0 -50
  146. data/lib/generators/sidekiq/worker_generator.rb +0 -49
  147. data/lib/sidekiq/core_ext.rb +0 -1
  148. data/lib/sidekiq/delay.rb +0 -42
  149. data/lib/sidekiq/exception_handler.rb +0 -29
  150. data/lib/sidekiq/extensions/action_mailer.rb +0 -57
  151. data/lib/sidekiq/extensions/active_record.rb +0 -40
  152. data/lib/sidekiq/extensions/class_methods.rb +0 -40
  153. data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
  154. data/lib/sidekiq/logging.rb +0 -122
  155. data/lib/sidekiq/middleware/server/active_record.rb +0 -23
  156. data/lib/sidekiq/util.rb +0 -66
  157. data/lib/sidekiq/worker.rb +0 -204
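The file list reflects the headline rename from Sidekiq::Worker to Sidekiq::Job: data/lib/sidekiq/worker.rb is removed, data/lib/sidekiq/job.rb and worker_compatibility_alias.rb are added, and the generator templates move from worker.rb.erb to job.rb.erb. A minimal sketch of a 7.x job class (HardJob is a placeholder name; existing classes that still include Sidekiq::Worker keep working through the compatibility alias):

# Hypothetical job class for illustration only.
class HardJob
  include Sidekiq::Job
  sidekiq_options queue: "critical", retry: 3

  def perform(name, count)
    # do the work
  end
end

HardJob.perform_async("bob", 5)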
data/lib/sidekiq/client.rb

@@ -1,9 +1,12 @@
 # frozen_string_literal: true
-require 'securerandom'
-require 'sidekiq/middleware/chain'
+
+require "securerandom"
+require "sidekiq/middleware/chain"
+require "sidekiq/job_util"
 
 module Sidekiq
   class Client
+    include Sidekiq::JobUtil
 
     ##
     # Define client-side middleware:
@@ -12,14 +15,13 @@ module Sidekiq
     #   client.middleware do |chain|
     #     chain.use MyClientMiddleware
     #   end
-    #   client.push('class' => 'SomeWorker', 'args' => [1,2,3])
+    #   client.push('class' => 'SomeJob', 'args' => [1,2,3])
     #
     # All client instances default to the globally-defined
     # Sidekiq.client_middleware but you can change as necessary.
     #
     def middleware(&block)
-      @chain ||= Sidekiq.client_middleware
-      if block_given?
+      if block
         @chain = @chain.dup
         yield @chain
       end
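Note that the middleware accessor no longer falls back lazily to Sidekiq.client_middleware; the chain is now resolved in the constructor. A small usage sketch based on the doc comment above (MyClientMiddleware is a placeholder for your own middleware class):

# Per-instance client middleware, per the documentation above.
client = Sidekiq::Client.new
client.middleware do |chain|
  chain.use MyClientMiddleware
end
client.push('class' => 'SomeJob', 'args' => [1, 2, 3])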
@@ -28,80 +30,122 @@ module Sidekiq
 
     attr_accessor :redis_pool
 
-    # Sidekiq::Client normally uses the default Redis pool but you may
-    # pass a custom ConnectionPool if you want to shard your
-    # Sidekiq jobs across several Redis instances (for scalability
-    # reasons, e.g.)
+    # Sidekiq::Client is responsible for pushing job payloads to Redis.
+    # Requires the :pool or :config keyword argument.
     #
-    #   Sidekiq::Client.new(ConnectionPool.new { Redis.new })
+    #   Sidekiq::Client.new(pool: Sidekiq::RedisConnection.create)
     #
-    # Generally this is only needed for very large Sidekiq installs processing
-    # thousands of jobs per second. I don't recommend sharding unless you
-    # cannot scale any other way (e.g. splitting your app into smaller apps).
-    def initialize(redis_pool=nil)
-      @redis_pool = redis_pool || Thread.current[:sidekiq_via_pool] || Sidekiq.redis_pool
+    # Inside the Sidekiq process, you can reuse the configured resources:
+    #
+    #   Sidekiq::Client.new(config: config)
+    #
+    # @param pool [ConnectionPool] explicit Redis pool to use
+    # @param config [Sidekiq::Config] use the pool and middleware from the given Sidekiq container
+    # @param chain [Sidekiq::Middleware::Chain] use the given middleware chain
+    def initialize(*args, **kwargs)
+      if args.size == 1 && kwargs.size == 0
+        warn "Sidekiq::Client.new(pool) is deprecated, please use Sidekiq::Client.new(pool: pool), #{caller(0..3)}"
+        # old calling method, accept 1 pool argument
+        @redis_pool = args[0]
+        @chain = Sidekiq.default_configuration.client_middleware
+        @config = Sidekiq.default_configuration
+      else
+        # new calling method: keyword arguments
+        @config = kwargs[:config] || Sidekiq.default_configuration
+        @redis_pool = kwargs[:pool] || Thread.current[:sidekiq_redis_pool] || @config&.redis_pool
+        @chain = kwargs[:chain] || @config&.client_middleware
+        raise ArgumentError, "No Redis pool available for Sidekiq::Client" unless @redis_pool
+      end
     end
 
     ##
     # The main method used to push a job to Redis. Accepts a number of options:
     #
     #   queue - the named queue to use, default 'default'
-    #   class - the worker class to call, required
+    #   class - the job class to call, required
     #   args - an array of simple arguments to the perform method, must be JSON-serializable
     #   at - timestamp to schedule the job (optional), must be Numeric (e.g. Time.now.to_f)
     #   retry - whether to retry this job if it fails, default true or an integer number of retries
+    #   retry_for - relative amount of time to retry this job if it fails, default nil
     #   backtrace - whether to save any error backtrace, default false
     #
     # If class is set to the class name, the jobs' options will be based on Sidekiq's default
-    # worker options. Otherwise, they will be based on the job class's options.
+    # job options. Otherwise, they will be based on the job class's options.
     #
-    # Any options valid for a worker class's sidekiq_options are also available here.
+    # Any options valid for a job class's sidekiq_options are also available here.
     #
-    # All options must be strings, not symbols. NB: because we are serializing to JSON, all
+    # All keys must be strings, not symbols. NB: because we are serializing to JSON, all
     # symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of
     # space in Redis; a large volume of failing jobs can start Redis swapping if you aren't careful.
     #
     # Returns a unique Job ID. If middleware stops the job, nil will be returned instead.
     #
     # Example:
-    #   push('queue' => 'my_queue', 'class' => MyWorker, 'args' => ['foo', 1, :bat => 'bar'])
+    #   push('queue' => 'my_queue', 'class' => MyJob, 'args' => ['foo', 1, :bat => 'bar'])
     #
     def push(item)
       normed = normalize_item(item)
-      payload = process_single(item['class'], normed)
-
+      payload = middleware.invoke(item["class"], normed, normed["queue"], @redis_pool) do
+        normed
+      end
       if payload
+        verify_json(payload)
         raw_push([payload])
-        payload['jid']
+        payload["jid"]
       end
     end
 
     ##
-    # Push a large number of jobs to Redis. In practice this method is only
-    # useful if you are pushing thousands of jobs or more. This method
-    # cuts out the redis network round trip latency.
+    # Push a large number of jobs to Redis. This method cuts out the redis
+    # network round trip latency. It pushes jobs in batches if more than
+    # `:batch_size` (1000 by default) of jobs are passed. I wouldn't recommend making `:batch_size`
+    # larger than 1000 but YMMV based on network quality, size of job args, etc.
+    # A large number of jobs can cause a bit of Redis command processing latency.
     #
     # Takes the same arguments as #push except that args is expected to be
     # an Array of Arrays. All other keys are duplicated for each job. Each job
     # is run through the client middleware pipeline and each job gets its own Job ID
     # as normal.
     #
-    # Returns an array of the of pushed jobs' jids. The number of jobs pushed can be less
-    # than the number given if the middleware stopped processing for one or more jobs.
+    # Returns an array of the of pushed jobs' jids, may contain nils if any client middleware
+    # prevented a job push.
+    #
+    # Example (pushing jobs in batches):
+    #   push_bulk('class' => MyJob, 'args' => (1..100_000).to_a, batch_size: 1_000)
+    #
     def push_bulk(items)
-      arg = items['args'].first
-      return [] unless arg # no jobs to push
-      raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" if !arg.is_a?(Array)
+      batch_size = items.delete(:batch_size) || items.delete("batch_size") || 1_000
+      args = items["args"]
+      at = items.delete("at")
+      raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
+      raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
+
+      jid = items.delete("jid")
+      raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1
 
       normed = normalize_item(items)
-      payloads = items['args'].map do |args|
-        copy = normed.merge('args' => args, 'jid' => SecureRandom.hex(12), 'enqueued_at' => Time.now.to_f)
-        result = process_single(items['class'], copy)
-        result ? result : nil
-      end.compact
-
-      raw_push(payloads) if !payloads.empty?
-      payloads.collect { |payload| payload['jid'] }
+      slice_index = 0
+      result = args.each_slice(batch_size).flat_map do |slice|
+        raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless slice.is_a?(Array) && slice.all?(Array)
+        break [] if slice.empty? # no jobs to push
+
+        payloads = slice.map.with_index { |job_args, index|
+          copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
+          copy["at"] = (at.is_a?(Array) ? at[slice_index + index] : at) if at
+          result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
+            verify_json(copy)
+            copy
+          end
+          result || nil
+        }
+        slice_index += batch_size
+
+        to_push = payloads.compact
+        raw_push(to_push) unless to_push.empty?
+        payloads.map { |payload| payload&.[]("jid") }
+      end
+
+      result.is_a?(Enumerator::Lazy) ? result.force : result
     end
 
     # Allows sharding of jobs across any number of Redis instances. All jobs
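Based on the new push_bulk documentation above, a sketch of bulk enqueueing with the batch_size option and per-job scheduled times (MyJob is a placeholder job class):

# Bulk enqueue, per the push_bulk docs above. 'args' must be an Array of Arrays.
args = (1..10_000).map { |i| [i] }
jids = Sidekiq::Client.push_bulk("class" => MyJob, "args" => args, "batch_size" => 500)
jids.compact.size  # middleware may replace some jids with nil

# 'at' may also be an Array of Numeric timestamps, one per job:
ats = args.map.with_index { |_, i| Time.now.to_f + (i * 60) }
Sidekiq::Client.push_bulk("class" => MyJob, "args" => args, "at" => ats)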
@@ -109,8 +153,8 @@ module Sidekiq
     #
     #   pool = ConnectionPool.new { Redis.new }
     #   Sidekiq::Client.via(pool) do
-    #     SomeWorker.perform_async(1,2,3)
-    #     SomeOtherWorker.perform_async(1,2,3)
+    #     SomeJob.perform_async(1,2,3)
+    #     SomeOtherJob.perform_async(1,2,3)
     #   end
     #
     # Generally this is only needed for very large Sidekiq installs processing
@@ -118,58 +162,57 @@ module Sidekiq
     # you cannot scale any other way (e.g. splitting your app into smaller apps).
     def self.via(pool)
       raise ArgumentError, "No pool given" if pool.nil?
-      current_sidekiq_pool = Thread.current[:sidekiq_via_pool]
-      Thread.current[:sidekiq_via_pool] = pool
+      current_sidekiq_pool = Thread.current[:sidekiq_redis_pool]
+      Thread.current[:sidekiq_redis_pool] = pool
       yield
     ensure
-      Thread.current[:sidekiq_via_pool] = current_sidekiq_pool
+      Thread.current[:sidekiq_redis_pool] = current_sidekiq_pool
     end
 
     class << self
-
       def push(item)
         new.push(item)
       end
 
-      def push_bulk(items)
-        new.push_bulk(items)
+      def push_bulk(...)
+        new.push_bulk(...)
       end
 
       # Resque compatibility helpers. Note all helpers
-      # should go through Worker#client_push.
+      # should go through Sidekiq::Job#client_push.
       #
       # Example usage:
-      #   Sidekiq::Client.enqueue(MyWorker, 'foo', 1, :bat => 'bar')
+      #   Sidekiq::Client.enqueue(MyJob, 'foo', 1, :bat => 'bar')
       #
       # Messages are enqueued to the 'default' queue.
       #
       def enqueue(klass, *args)
-        klass.client_push('class' => klass, 'args' => args)
+        klass.client_push("class" => klass, "args" => args)
       end
 
       # Example usage:
-      #   Sidekiq::Client.enqueue_to(:queue_name, MyWorker, 'foo', 1, :bat => 'bar')
+      #   Sidekiq::Client.enqueue_to(:queue_name, MyJob, 'foo', 1, :bat => 'bar')
       #
       def enqueue_to(queue, klass, *args)
-        klass.client_push('queue' => queue, 'class' => klass, 'args' => args)
+        klass.client_push("queue" => queue, "class" => klass, "args" => args)
       end
 
       # Example usage:
-      #   Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
+      #   Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyJob, 'foo', 1, :bat => 'bar')
      #
       def enqueue_to_in(queue, interval, klass, *args)
         int = interval.to_f
         now = Time.now.to_f
-        ts = (int < 1_000_000_000 ? now + int : int)
+        ts = ((int < 1_000_000_000) ? now + int : int)
 
-        item = { 'class' => klass, 'args' => args, 'at' => ts, 'queue' => queue }
-        item.delete('at') if ts <= now
+        item = {"class" => klass, "args" => args, "at" => ts, "queue" => queue}
+        item.delete("at") if ts <= now
 
         klass.client_push(item)
       end
 
       # Example usage:
-      #   Sidekiq::Client.enqueue_in(3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
+      #   Sidekiq::Client.enqueue_in(3.minutes, MyJob, 'foo', 1, :bat => 'bar')
       #
       def enqueue_in(interval, klass, *args)
         klass.perform_in(interval, *args)
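The via helper now stashes the pool in Thread.current[:sidekiq_redis_pool] (renamed from :sidekiq_via_pool). A sharding sketch based on the Sidekiq::Client.via docs above; REDIS_SHARD_URL is an assumed environment variable and SomeJob/SomeOtherJob are placeholder job classes:

# Push a block of jobs to a secondary Redis, per the via docs above.
SHARD = Sidekiq::RedisConnection.create(url: ENV["REDIS_SHARD_URL"])

Sidekiq::Client.via(SHARD) do
  SomeJob.perform_async(1, 2, 3)
  SomeOtherJob.perform_async(4, 5, 6)
end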
@@ -180,62 +223,48 @@ module Sidekiq
 
     def raw_push(payloads)
       @redis_pool.with do |conn|
-        conn.multi do
-          atomic_push(conn, payloads)
+        retryable = true
+        begin
+          conn.pipelined do |pipeline|
+            atomic_push(pipeline, payloads)
+          end
+        rescue RedisClient::Error => ex
+          # 2550 Failover can cause the server to become a replica, need
+          # to disconnect and reopen the socket to get back to the primary.
+          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+          # 4985 Use the same logic when a blocking command is force-unblocked
+          # The retry logic is copied from sidekiq.rb
+          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
+            conn.close
+            retryable = false
+            retry
+          end
+          raise
         end
       end
       true
     end
 
     def atomic_push(conn, payloads)
-      if payloads.first['at']
-        conn.zadd('schedule', payloads.map do |hash|
-          at = hash.delete('at').to_s
+      if payloads.first.key?("at")
+        conn.zadd("schedule", payloads.flat_map { |hash|
+          at = hash["at"].to_s
+          # ActiveJob sets this but the job has not been enqueued yet
+          hash.delete("enqueued_at")
+          # TODO: Use hash.except("at") when support for Ruby 2.7 is dropped
+          hash = hash.dup
+          hash.delete("at")
           [at, Sidekiq.dump_json(hash)]
-        end)
+        })
       else
-        q = payloads.first['queue']
+        queue = payloads.first["queue"]
         now = Time.now.to_f
-        to_push = payloads.map do |entry|
-          entry['enqueued_at'] = now
+        to_push = payloads.map { |entry|
+          entry["enqueued_at"] = now
           Sidekiq.dump_json(entry)
-        end
-        conn.sadd('queues', q)
-        conn.lpush("queue:#{q}", to_push)
-      end
-    end
-
-    def process_single(worker_class, item)
-      queue = item['queue']
-
-      middleware.invoke(worker_class, item, queue, @redis_pool) do
-        item
-      end
-    end
-
-    def normalize_item(item)
-      raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: { 'class' => SomeWorker, 'args' => ['bob', 1, :foo => 'bar'] }") unless item.is_a?(Hash) && item.has_key?('class') && item.has_key?('args')
-      raise(ArgumentError, "Job args must be an Array") unless item['args'].is_a?(Array)
-      raise(ArgumentError, "Job class must be either a Class or String representation of the class name") unless item['class'].is_a?(Class) || item['class'].is_a?(String)
-      raise(ArgumentError, "Job 'at' must be a Numeric timestamp") if item.has_key?('at') && !item['at'].is_a?(Numeric)
-      #raise(ArgumentError, "Arguments must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices") unless JSON.load(JSON.dump(item['args'])) == item['args']
-
-      normalized_hash(item['class'])
-        .each{ |key, value| item[key] = value if item[key].nil? }
-
-      item['class'] = item['class'].to_s
-      item['queue'] = item['queue'].to_s
-      item['jid'] ||= SecureRandom.hex(12)
-      item['created_at'] ||= Time.now.to_f
-      item
-    end
-
-    def normalized_hash(item_class)
-      if item_class.is_a?(Class)
-        raise(ArgumentError, "Message must include a Sidekiq::Worker class, not class name: #{item_class.ancestors.inspect}") if !item_class.respond_to?('get_sidekiq_options')
-        item_class.get_sidekiq_options
-      else
-        Sidekiq.default_worker_options
+        }
+        conn.sadd("queues", [queue])
+        conn.lpush("queue:#{queue}", to_push)
       end
     end
   end
 end
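Taken together, the constructor change above means positional-pool callers should move to keyword arguments; the old form still works but warns. A brief migration sketch:

# Constructor migration for the initialize change above.
pool = Sidekiq::RedisConnection.create        # defaults to REDIS_URL / localhost

Sidekiq::Client.new(pool)                     # deprecated positional form, emits a warning
client = Sidekiq::Client.new(pool: pool)      # preferred keyword form
client.push("class" => "SomeJob", "args" => [1, 2, 3])
# Inside the Sidekiq process, reuse the running container instead:
#   Sidekiq::Client.new(config: config)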
data/lib/sidekiq/component.rb

@@ -0,0 +1,68 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  ##
+  # Sidekiq::Component assumes a config instance is available at @config
+  module Component # :nodoc:
+    attr_reader :config
+
+    def watchdog(last_words)
+      yield
+    rescue Exception => ex
+      handle_exception(ex, {context: last_words})
+      raise ex
+    end
+
+    def safe_thread(name, &block)
+      Thread.new do
+        Thread.current.name = "sidekiq.#{name}"
+        watchdog(name, &block)
+      end
+    end
+
+    def logger
+      config.logger
+    end
+
+    def redis(&block)
+      config.redis(&block)
+    end
+
+    def tid
+      Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+    end
+
+    def hostname
+      ENV["DYNO"] || Socket.gethostname
+    end
+
+    def process_nonce
+      @@process_nonce ||= SecureRandom.hex(6)
+    end
+
+    def identity
+      @@identity ||= "#{hostname}:#{::Process.pid}:#{process_nonce}"
+    end
+
+    def handle_exception(ex, ctx = {})
+      config.handle_exception(ex, ctx)
+    end
+
+    def fire_event(event, options = {})
+      oneshot = options.fetch(:oneshot, true)
+      reverse = options[:reverse]
+      reraise = options[:reraise]
+      logger.debug("Firing #{event} event") if oneshot
+
+      arr = config[:lifecycle_events][event]
+      arr.reverse! if reverse
+      arr.each do |block|
+        block.call
+      rescue => ex
+        handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event})
+        raise ex if reraise
+      end
+      arr.clear if oneshot # once we've fired an event, we never fire it again
+    end
+  end
+end
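Sidekiq::Component replaces the old Sidekiq::Util and Sidekiq::ExceptionHandler mixins (both removed per the file list). A minimal consumer sketch, assuming only what the module comment states, that a config object is stored in @config; HeartbeatPinger is a hypothetical class for illustration:

# Hypothetical component for illustration only.
class HeartbeatPinger
  include Sidekiq::Component

  def initialize(config)
    @config = config
  end

  def start
    safe_thread("pinger") do
      redis { |conn| conn.ping }
      logger.info("pinged from #{identity}")
    end
  end
end

# pinger = HeartbeatPinger.new(Sidekiq.default_configuration)
# pinger.start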