sidekiq 6.0.7 → 6.4.1

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of sidekiq might be problematic. Click here for more details.

Files changed (98)
  1. checksums.yaml +4 -4
  2. data/Changes.md +167 -2
  3. data/LICENSE +3 -3
  4. data/README.md +10 -9
  5. data/bin/sidekiq +8 -3
  6. data/bin/sidekiqload +56 -58
  7. data/bin/sidekiqmon +1 -1
  8. data/lib/generators/sidekiq/job_generator.rb +57 -0
  9. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  10. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  11. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  12. data/lib/sidekiq/api.rb +145 -97
  13. data/lib/sidekiq/cli.rb +46 -12
  14. data/lib/sidekiq/client.rb +28 -46
  15. data/lib/sidekiq/delay.rb +2 -0
  16. data/lib/sidekiq/extensions/action_mailer.rb +3 -2
  17. data/lib/sidekiq/extensions/active_record.rb +4 -3
  18. data/lib/sidekiq/extensions/class_methods.rb +5 -4
  19. data/lib/sidekiq/extensions/generic_proxy.rb +3 -1
  20. data/lib/sidekiq/fetch.rb +32 -23
  21. data/lib/sidekiq/job.rb +13 -0
  22. data/lib/sidekiq/job_logger.rb +16 -28
  23. data/lib/sidekiq/job_retry.rb +10 -11
  24. data/lib/sidekiq/job_util.rb +65 -0
  25. data/lib/sidekiq/launcher.rb +104 -46
  26. data/lib/sidekiq/logger.rb +7 -2
  27. data/lib/sidekiq/manager.rb +10 -12
  28. data/lib/sidekiq/middleware/chain.rb +6 -4
  29. data/lib/sidekiq/middleware/current_attributes.rb +57 -0
  30. data/lib/sidekiq/paginator.rb +8 -8
  31. data/lib/sidekiq/processor.rb +4 -4
  32. data/lib/sidekiq/rails.rb +27 -18
  33. data/lib/sidekiq/redis_connection.rb +14 -13
  34. data/lib/sidekiq/scheduled.rb +51 -16
  35. data/lib/sidekiq/sd_notify.rb +1 -1
  36. data/lib/sidekiq/testing.rb +2 -4
  37. data/lib/sidekiq/util.rb +41 -0
  38. data/lib/sidekiq/version.rb +1 -1
  39. data/lib/sidekiq/web/action.rb +2 -2
  40. data/lib/sidekiq/web/application.rb +21 -12
  41. data/lib/sidekiq/web/csrf_protection.rb +180 -0
  42. data/lib/sidekiq/web/helpers.rb +36 -30
  43. data/lib/sidekiq/web/router.rb +5 -2
  44. data/lib/sidekiq/web.rb +36 -72
  45. data/lib/sidekiq/worker.rb +127 -12
  46. data/lib/sidekiq.rb +13 -3
  47. data/sidekiq.gemspec +11 -4
  48. data/web/assets/images/apple-touch-icon.png +0 -0
  49. data/web/assets/javascripts/application.js +82 -66
  50. data/web/assets/javascripts/dashboard.js +51 -51
  51. data/web/assets/stylesheets/application-dark.css +64 -43
  52. data/web/assets/stylesheets/application-rtl.css +0 -4
  53. data/web/assets/stylesheets/application.css +43 -239
  54. data/web/locales/ar.yml +8 -2
  55. data/web/locales/en.yml +4 -1
  56. data/web/locales/es.yml +18 -2
  57. data/web/locales/fr.yml +8 -1
  58. data/web/locales/ja.yml +3 -0
  59. data/web/locales/lt.yml +1 -1
  60. data/web/locales/pl.yml +4 -4
  61. data/web/locales/ru.yml +4 -0
  62. data/web/views/_footer.erb +1 -1
  63. data/web/views/_job_info.erb +1 -1
  64. data/web/views/_poll_link.erb +2 -5
  65. data/web/views/_summary.erb +7 -7
  66. data/web/views/busy.erb +50 -19
  67. data/web/views/dashboard.erb +22 -14
  68. data/web/views/dead.erb +1 -1
  69. data/web/views/layout.erb +2 -1
  70. data/web/views/morgue.erb +6 -6
  71. data/web/views/queue.erb +11 -11
  72. data/web/views/queues.erb +4 -4
  73. data/web/views/retries.erb +7 -7
  74. data/web/views/retry.erb +1 -1
  75. data/web/views/scheduled.erb +1 -1
  76. metadata +24 -49
  77. data/.circleci/config.yml +0 -60
  78. data/.github/contributing.md +0 -32
  79. data/.github/issue_template.md +0 -11
  80. data/.gitignore +0 -13
  81. data/.standard.yml +0 -20
  82. data/3.0-Upgrade.md +0 -70
  83. data/4.0-Upgrade.md +0 -53
  84. data/5.0-Upgrade.md +0 -56
  85. data/6.0-Upgrade.md +0 -72
  86. data/COMM-LICENSE +0 -97
  87. data/Ent-2.0-Upgrade.md +0 -37
  88. data/Ent-Changes.md +0 -256
  89. data/Gemfile +0 -24
  90. data/Gemfile.lock +0 -208
  91. data/Pro-2.0-Upgrade.md +0 -138
  92. data/Pro-3.0-Upgrade.md +0 -44
  93. data/Pro-4.0-Upgrade.md +0 -35
  94. data/Pro-5.0-Upgrade.md +0 -25
  95. data/Pro-Changes.md +0 -782
  96. data/Rakefile +0 -10
  97. data/code_of_conduct.md +0 -50
  98. data/lib/generators/sidekiq/worker_generator.rb +0 -57
@@ -22,6 +22,7 @@ module Sidekiq
22
22
  attr_accessor :manager, :poller, :fetcher
23
23
 
24
24
  def initialize(options)
25
+ options[:fetch] ||= BasicFetch.new(options)
25
26
  @manager = Sidekiq::Manager.new(options)
26
27
  @poller = Sidekiq::Scheduled::Poller.new
27
28
  @done = false
@@ -56,7 +57,7 @@ module Sidekiq
56
57
 
57
58
  # Requeue everything in case there was a worker who grabbed work while stopped
58
59
  # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
59
- strategy = (@options[:fetch] || Sidekiq::BasicFetch)
60
+ strategy = @options[:fetch]
60
61
  strategy.bulk_requeue([], @options)
61
62
 
62
63
  clear_heartbeat
@@ -68,10 +69,12 @@ module Sidekiq
68
69
 
69
70
  private unless $TESTING
70
71
 
72
+ BEAT_PAUSE = 5
73
+
71
74
  def start_heartbeat
72
75
  loop do
73
76
  heartbeat
74
- sleep 5
77
+ sleep BEAT_PAUSE
75
78
  end
76
79
  Sidekiq.logger.info("Heartbeat stopping...")
77
80
  end
@@ -81,9 +84,9 @@ module Sidekiq
81
84
  # Note we don't stop the heartbeat thread; if the process
82
85
  # doesn't actually exit, it'll reappear in the Web UI.
83
86
  Sidekiq.redis do |conn|
84
- conn.pipelined do
85
- conn.srem("processes", identity)
86
- conn.unlink("#{identity}:workers")
87
+ conn.pipelined do |pipeline|
88
+ pipeline.srem("processes", identity)
89
+ pipeline.unlink("#{identity}:workers")
87
90
  end
88
91
  end
89
92
  rescue
@@ -104,14 +107,14 @@ module Sidekiq
104
107
  nowdate = Time.now.utc.strftime("%Y-%m-%d")
105
108
  begin
106
109
  Sidekiq.redis do |conn|
107
- conn.pipelined do
108
- conn.incrby("stat:processed", procd)
109
- conn.incrby("stat:processed:#{nowdate}", procd)
110
- conn.expire("stat:processed:#{nowdate}", STATS_TTL)
111
-
112
- conn.incrby("stat:failed", fails)
113
- conn.incrby("stat:failed:#{nowdate}", fails)
114
- conn.expire("stat:failed:#{nowdate}", STATS_TTL)
110
+ conn.pipelined do |pipeline|
111
+ pipeline.incrby("stat:processed", procd)
112
+ pipeline.incrby("stat:processed:#{nowdate}", procd)
113
+ pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
114
+
115
+ pipeline.incrby("stat:failed", fails)
116
+ pipeline.incrby("stat:failed:#{nowdate}", fails)
117
+ pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
115
118
  end
116
119
  end
117
120
  rescue => ex
@@ -135,32 +138,40 @@ module Sidekiq
135
138
  nowdate = Time.now.utc.strftime("%Y-%m-%d")
136
139
 
137
140
  Sidekiq.redis do |conn|
138
- conn.multi do
139
- conn.incrby("stat:processed", procd)
140
- conn.incrby("stat:processed:#{nowdate}", procd)
141
- conn.expire("stat:processed:#{nowdate}", STATS_TTL)
141
+ conn.multi do |transaction|
142
+ transaction.incrby("stat:processed", procd)
143
+ transaction.incrby("stat:processed:#{nowdate}", procd)
144
+ transaction.expire("stat:processed:#{nowdate}", STATS_TTL)
142
145
 
143
- conn.incrby("stat:failed", fails)
144
- conn.incrby("stat:failed:#{nowdate}", fails)
145
- conn.expire("stat:failed:#{nowdate}", STATS_TTL)
146
+ transaction.incrby("stat:failed", fails)
147
+ transaction.incrby("stat:failed:#{nowdate}", fails)
148
+ transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
146
149
 
147
- conn.unlink(workers_key)
150
+ transaction.unlink(workers_key)
148
151
  curstate.each_pair do |tid, hash|
149
- conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
152
+ transaction.hset(workers_key, tid, Sidekiq.dump_json(hash))
150
153
  end
151
- conn.expire(workers_key, 60)
154
+ transaction.expire(workers_key, 60)
152
155
  end
153
156
  end
154
157
 
158
+ rtt = check_rtt
159
+
155
160
  fails = procd = 0
161
+ kb = memory_usage(::Process.pid)
156
162
 
157
163
  _, exists, _, _, msg = Sidekiq.redis { |conn|
158
- conn.multi {
159
- conn.sadd("processes", key)
160
- conn.exists(key)
161
- conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
162
- conn.expire(key, 60)
163
- conn.rpop("#{key}-signals")
164
+ conn.multi { |transaction|
165
+ transaction.sadd("processes", key)
166
+ transaction.exists?(key)
167
+ transaction.hmset(key, "info", to_json,
168
+ "busy", curstate.size,
169
+ "beat", Time.now.to_f,
170
+ "rtt_us", rtt,
171
+ "quiet", @done,
172
+ "rss", kb)
173
+ transaction.expire(key, 60)
174
+ transaction.rpop("#{key}-signals")
164
175
  }
165
176
  }
166
177
 
@@ -179,27 +190,74 @@ module Sidekiq
179
190
  end
180
191
  end
181
192
 
182
- def to_data
183
- @data ||= begin
184
- {
185
- "hostname" => hostname,
186
- "started_at" => Time.now.to_f,
187
- "pid" => ::Process.pid,
188
- "tag" => @options[:tag] || "",
189
- "concurrency" => @options[:concurrency],
190
- "queues" => @options[:queues].uniq,
191
- "labels" => @options[:labels],
192
- "identity" => identity
193
- }
193
+ # We run the heartbeat every five seconds.
194
+ # Capture five samples of RTT, log a warning if each sample
195
+ # is above our warning threshold.
196
+ RTT_READINGS = RingBuffer.new(5)
197
+ RTT_WARNING_LEVEL = 50_000
198
+
199
+ def check_rtt
200
+ a = b = 0
201
+ Sidekiq.redis do |x|
202
+ a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
203
+ x.ping
204
+ b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
194
205
  end
206
+ rtt = b - a
207
+ RTT_READINGS << rtt
208
+ # Ideal RTT for Redis is < 1000µs
209
+ # Workable is < 10,000µs
210
+ # Log a warning if it's a disaster.
211
+ if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
212
+ Sidekiq.logger.warn <<~EOM
213
+ Your Redis network connection is performing extremely poorly.
214
+ Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
215
+ Ensure Redis is running in the same AZ or datacenter as Sidekiq.
216
+ If these values are close to 100,000, that means your Sidekiq process may be
217
+ CPU overloaded; see https://github.com/mperham/sidekiq/discussions/5039
218
+ EOM
219
+ RTT_READINGS.reset
220
+ end
221
+ rtt
222
+ end
223
+
224
+ MEMORY_GRABBER = case RUBY_PLATFORM
225
+ when /linux/
226
+ ->(pid) {
227
+ IO.readlines("/proc/#{$$}/status").each do |line|
228
+ next unless line.start_with?("VmRSS:")
229
+ break line.split[1].to_i
230
+ end
231
+ }
232
+ when /darwin|bsd/
233
+ ->(pid) {
234
+ `ps -o pid,rss -p #{pid}`.lines.last.split.last.to_i
235
+ }
236
+ else
237
+ ->(pid) { 0 }
238
+ end
239
+
240
+ def memory_usage(pid)
241
+ MEMORY_GRABBER.call(pid)
242
+ end
243
+
244
+ def to_data
245
+ @data ||= {
246
+ "hostname" => hostname,
247
+ "started_at" => Time.now.to_f,
248
+ "pid" => ::Process.pid,
249
+ "tag" => @options[:tag] || "",
250
+ "concurrency" => @options[:concurrency],
251
+ "queues" => @options[:queues].uniq,
252
+ "labels" => @options[:labels],
253
+ "identity" => identity
254
+ }
195
255
  end
196
256
 
197
257
  def to_json
198
- @json ||= begin
199
- # this data changes infrequently so dump it to a string
200
- # now so we don't need to dump it every heartbeat.
201
- Sidekiq.dump_json(to_data)
202
- end
258
+ # this data changes infrequently so dump it to a string
259
+ # now so we don't need to dump it every heartbeat.
260
+ @json ||= Sidekiq.dump_json(to_data)
203
261
  end
204
262
  end
205
263
  end
@@ -6,15 +6,20 @@ require "time"
6
6
  module Sidekiq
7
7
  module Context
8
8
  def self.with(hash)
9
+ orig_context = current.dup
9
10
  current.merge!(hash)
10
11
  yield
11
12
  ensure
12
- hash.each_key { |key| current.delete(key) }
13
+ Thread.current[:sidekiq_context] = orig_context
13
14
  end
14
15
 
15
16
  def self.current
16
17
  Thread.current[:sidekiq_context] ||= {}
17
18
  end
19
+
20
+ def self.add(k, v)
21
+ Thread.current[:sidekiq_context][k] = v
22
+ end
18
23
  end
19
24
 
20
25
  module LoggingUtils
@@ -89,7 +94,7 @@ module Sidekiq
89
94
  return true if @logdev.nil? || severity < level
90
95
 
91
96
  if message.nil?
92
- if block_given?
97
+ if block
93
98
  message = yield
94
99
  else
95
100
  message = progname
@@ -35,7 +35,7 @@ module Sidekiq
35
35
  @done = false
36
36
  @workers = Set.new
37
37
  @count.times do
38
- @workers << Processor.new(self)
38
+ @workers << Processor.new(self, options)
39
39
  end
40
40
  @plock = Mutex.new
41
41
  end
@@ -55,9 +55,6 @@ module Sidekiq
55
55
  fire_event(:quiet, reverse: true)
56
56
  end
57
57
 
58
- # hack for quicker development / testing environment #2774
59
- PAUSE_TIME = STDOUT.tty? ? 0.1 : 0.5
60
-
61
58
  def stop(deadline)
62
59
  quiet
63
60
  fire_event(:shutdown, reverse: true)
@@ -69,12 +66,7 @@ module Sidekiq
69
66
  return if @workers.empty?
70
67
 
71
68
  logger.info { "Pausing to allow workers to finish..." }
72
- remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
73
- while remaining > PAUSE_TIME
74
- return if @workers.empty?
75
- sleep PAUSE_TIME
76
- remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
77
- end
69
+ wait_for(deadline) { @workers.empty? }
78
70
  return if @workers.empty?
79
71
 
80
72
  hard_shutdown
@@ -90,7 +82,7 @@ module Sidekiq
90
82
  @plock.synchronize do
91
83
  @workers.delete(processor)
92
84
  unless @done
93
- p = Processor.new(self)
85
+ p = Processor.new(self, options)
94
86
  @workers << p
95
87
  p.start
96
88
  end
@@ -123,13 +115,19 @@ module Sidekiq
123
115
  # contract says that jobs are run AT LEAST once. Process termination
124
116
  # is delayed until we're certain the jobs are back in Redis because
125
117
  # it is worse to lose a job than to run it twice.
126
- strategy = (@options[:fetch] || Sidekiq::BasicFetch)
118
+ strategy = @options[:fetch]
127
119
  strategy.bulk_requeue(jobs, @options)
128
120
  end
129
121
 
130
122
  cleanup.each do |processor|
131
123
  processor.kill
132
124
  end
125
+
126
+ # when this method returns, we immediately call `exit` which may not give
127
+ # the remaining threads time to run `ensure` blocks, etc. We pause here up
128
+ # to 3 seconds to give threads a minimal amount of time to run `ensure` blocks.
129
+ deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
130
+ wait_for(deadline) { @workers.empty? }
133
131
  end
134
132
  end
135
133
  end
@@ -90,12 +90,12 @@ module Sidekiq
90
90
  end
91
91
 
92
92
  def add(klass, *args)
93
- remove(klass) if exists?(klass)
93
+ remove(klass)
94
94
  entries << Entry.new(klass, *args)
95
95
  end
96
96
 
97
97
  def prepend(klass, *args)
98
- remove(klass) if exists?(klass)
98
+ remove(klass)
99
99
  entries.insert(0, Entry.new(klass, *args))
100
100
  end
101
101
 
@@ -132,8 +132,8 @@ module Sidekiq
132
132
  def invoke(*args)
133
133
  return yield if empty?
134
134
 
135
- chain = retrieve.dup
136
- traverse_chain = lambda do
135
+ chain = retrieve
136
+ traverse_chain = proc do
137
137
  if chain.empty?
138
138
  yield
139
139
  else
@@ -144,6 +144,8 @@ module Sidekiq
144
144
  end
145
145
  end
146
146
 
147
+ private
148
+
147
149
  class Entry
148
150
  attr_reader :klass
149
151
 
@@ -0,0 +1,57 @@
1
+ require "active_support/current_attributes"
2
+
3
+ module Sidekiq
4
+ ##
5
+ # Automatically save and load any current attributes in the execution context
6
+ # so context attributes "flow" from Rails actions into any associated jobs.
7
+ # This can be useful for multi-tenancy, i18n locale, timezone, any implicit
8
+ # per-request attribute. See +ActiveSupport::CurrentAttributes+.
9
+ #
10
+ # @example
11
+ #
12
+ # # in your initializer
13
+ # require "sidekiq/middleware/current_attributes"
14
+ # Sidekiq::CurrentAttributes.persist(Myapp::Current)
15
+ #
16
+ module CurrentAttributes
17
+ class Save
18
+ def initialize(cattr)
19
+ @klass = cattr
20
+ end
21
+
22
+ def call(_, job, _, _)
23
+ attrs = @klass.attributes
24
+ if job.has_key?("cattr")
25
+ job["cattr"].merge!(attrs)
26
+ else
27
+ job["cattr"] = attrs
28
+ end
29
+ yield
30
+ end
31
+ end
32
+
33
+ class Load
34
+ def initialize(cattr)
35
+ @klass = cattr
36
+ end
37
+
38
+ def call(_, job, _, &block)
39
+ if job.has_key?("cattr")
40
+ @klass.set(job["cattr"], &block)
41
+ else
42
+ yield
43
+ end
44
+ end
45
+ end
46
+
47
+ def self.persist(klass)
48
+ Sidekiq.configure_client do |config|
49
+ config.client_middleware.add Save, klass
50
+ end
51
+ Sidekiq.configure_server do |config|
52
+ config.client_middleware.add Save, klass
53
+ config.server_middleware.add Load, klass
54
+ end
55
+ end
56
+ end
57
+ end
@@ -16,22 +16,22 @@ module Sidekiq
16
16
 
17
17
  case type
18
18
  when "zset"
19
- total_size, items = conn.multi {
20
- conn.zcard(key)
19
+ total_size, items = conn.multi { |transaction|
20
+ transaction.zcard(key)
21
21
  if rev
22
- conn.zrevrange(key, starting, ending, with_scores: true)
22
+ transaction.zrevrange(key, starting, ending, with_scores: true)
23
23
  else
24
- conn.zrange(key, starting, ending, with_scores: true)
24
+ transaction.zrange(key, starting, ending, with_scores: true)
25
25
  end
26
26
  }
27
27
  [current_page, total_size, items]
28
28
  when "list"
29
- total_size, items = conn.multi {
30
- conn.llen(key)
29
+ total_size, items = conn.multi { |transaction|
30
+ transaction.llen(key)
31
31
  if rev
32
- conn.lrange(key, -ending - 1, -starting - 1)
32
+ transaction.lrange(key, -ending - 1, -starting - 1)
33
33
  else
34
- conn.lrange(key, starting, ending)
34
+ transaction.lrange(key, starting, ending)
35
35
  end
36
36
  }
37
37
  items.reverse! if rev
@@ -28,15 +28,15 @@ module Sidekiq
28
28
  attr_reader :thread
29
29
  attr_reader :job
30
30
 
31
- def initialize(mgr)
31
+ def initialize(mgr, options)
32
32
  @mgr = mgr
33
33
  @down = false
34
34
  @done = false
35
35
  @job = nil
36
36
  @thread = nil
37
- @strategy = (mgr.options[:fetch] || Sidekiq::BasicFetch).new(mgr.options)
38
- @reloader = Sidekiq.options[:reloader]
39
- @job_logger = (mgr.options[:job_logger] || Sidekiq::JobLogger).new
37
+ @strategy = options[:fetch]
38
+ @reloader = options[:reloader] || proc { |&block| block.call }
39
+ @job_logger = (options[:job_logger] || Sidekiq::JobLogger).new
40
40
  @retrier = Sidekiq::JobRetry.new
41
41
  end
42
42
 
data/lib/sidekiq/rails.rb CHANGED
@@ -4,6 +4,22 @@ require "sidekiq/worker"
4
4
 
5
5
  module Sidekiq
6
6
  class Rails < ::Rails::Engine
7
+ class Reloader
8
+ def initialize(app = ::Rails.application)
9
+ @app = app
10
+ end
11
+
12
+ def call
13
+ @app.reloader.wrap do
14
+ yield
15
+ end
16
+ end
17
+
18
+ def inspect
19
+ "#<Sidekiq::Rails::Reloader @app=#{@app.class.name}>"
20
+ end
21
+ end
22
+
7
23
  # By including the Options module, we allow AJs to directly control sidekiq features
8
24
  # via the *sidekiq_options* class method and, for instance, not use AJ's retry system.
9
25
  # AJ retries don't show up in the Sidekiq UI Retries tab, save any error data, can't be
@@ -21,10 +37,19 @@ module Sidekiq
21
37
  end
22
38
  end
23
39
 
40
+ initializer "sidekiq.rails_logger" do
41
+ Sidekiq.configure_server do |_|
42
+ # This is the integration code necessary so that if code uses `Rails.logger.info "Hello"`,
43
+ # it will appear in the Sidekiq console with all of the job context. See #5021 and
44
+ # https://github.com/rails/rails/blob/b5f2b550f69a99336482739000c58e4e04e033aa/railties/lib/rails/commands/server/server_command.rb#L82-L84
45
+ unless ::Rails.logger == ::Sidekiq.logger || ::ActiveSupport::Logger.logger_outputs_to?(::Rails.logger, $stdout)
46
+ ::Rails.logger.extend(::ActiveSupport::Logger.broadcast(::Sidekiq.logger))
47
+ end
48
+ end
49
+ end
50
+
24
51
  # This hook happens after all initializers are run, just before returning
25
52
  # from config/environment.rb back to sidekiq/cli.rb.
26
- # We have to add the reloader after initialize to see if cache_classes has
27
- # been turned on.
28
53
  #
29
54
  # None of this matters on the client-side, only within the Sidekiq process itself.
30
55
  config.after_initialize do
@@ -32,21 +57,5 @@ module Sidekiq
32
57
  Sidekiq.options[:reloader] = Sidekiq::Rails::Reloader.new
33
58
  end
34
59
  end
35
-
36
- class Reloader
37
- def initialize(app = ::Rails.application)
38
- @app = app
39
- end
40
-
41
- def call
42
- @app.reloader.wrap do
43
- yield
44
- end
45
- end
46
-
47
- def inspect
48
- "#<Sidekiq::Rails::Reloader @app=#{@app.class.name}>"
49
- end
50
- end
51
60
  end
52
61
  end
@@ -8,16 +8,14 @@ module Sidekiq
8
8
  class RedisConnection
9
9
  class << self
10
10
  def create(options = {})
11
- options.keys.each do |key|
12
- options[key.to_sym] = options.delete(key)
13
- end
11
+ symbolized_options = options.transform_keys(&:to_sym)
14
12
 
15
- if !options[:url] && (u = determine_redis_provider)
16
- options[:url] = u
13
+ if !symbolized_options[:url] && (u = determine_redis_provider)
14
+ symbolized_options[:url] = u
17
15
  end
18
16
 
19
- size = if options[:size]
20
- options[:size]
17
+ size = if symbolized_options[:size]
18
+ symbolized_options[:size]
21
19
  elsif Sidekiq.server?
22
20
  # Give ourselves plenty of connections. pool is lazy
23
21
  # so we won't create them until we need them.
@@ -30,11 +28,11 @@ module Sidekiq
30
28
 
31
29
  verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server?
32
30
 
33
- pool_timeout = options[:pool_timeout] || 1
34
- log_info(options)
31
+ pool_timeout = symbolized_options[:pool_timeout] || 1
32
+ log_info(symbolized_options)
35
33
 
36
34
  ConnectionPool.new(timeout: pool_timeout, size: size) do
37
- build_client(options)
35
+ build_client(symbolized_options)
38
36
  end
39
37
  end
40
38
 
@@ -96,8 +94,11 @@ module Sidekiq
96
94
  def log_info(options)
97
95
  redacted = "REDACTED"
98
96
 
99
- # deep clone so we can muck with these options all we want
100
- scrubbed_options = Marshal.load(Marshal.dump(options))
97
+ # Deep clone so we can muck with these options all we want and exclude
98
+ # params from dump-and-load that may contain objects that Marshal is
99
+ # unable to safely dump.
100
+ keys = options.keys - [:logger, :ssl_params]
101
+ scrubbed_options = Marshal.load(Marshal.dump(options.slice(*keys)))
101
102
  if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
102
103
  uri.password = redacted
103
104
  scrubbed_options[:url] = uri.to_s
@@ -124,7 +125,7 @@ module Sidekiq
124
125
  # initialization code at all.
125
126
  #
126
127
  p = ENV["REDIS_PROVIDER"]
127
- if p && p =~ /\:/
128
+ if p && p =~ /:/
128
129
  raise <<~EOM
129
130
  REDIS_PROVIDER should be set to the name of the variable which contains the Redis URL, not a URL itself.
130
131
  Platforms like Heroku will sell addons that publish a *_URL variable. You need to tell Sidekiq with REDIS_PROVIDER, e.g.:
@@ -9,29 +9,56 @@ module Sidekiq
9
9
  SETS = %w[retry schedule]
10
10
 
11
11
  class Enq
12
- def enqueue_jobs(now = Time.now.to_f.to_s, sorted_sets = SETS)
12
+ LUA_ZPOPBYSCORE = <<~LUA
13
+ local key, now = KEYS[1], ARGV[1]
14
+ local jobs = redis.call("zrangebyscore", key, "-inf", now, "limit", 0, 1)
15
+ if jobs[1] then
16
+ redis.call("zrem", key, jobs[1])
17
+ return jobs[1]
18
+ end
19
+ LUA
20
+
21
+ def initialize
22
+ @done = false
23
+ @lua_zpopbyscore_sha = nil
24
+ end
25
+
26
+ def enqueue_jobs(sorted_sets = SETS)
13
27
  # A job's "score" in Redis is the time at which it should be processed.
14
28
  # Just check Redis for the set of jobs with a timestamp before now.
15
29
  Sidekiq.redis do |conn|
16
30
  sorted_sets.each do |sorted_set|
17
- # Get next items in the queue with scores (time to execute) <= now.
18
- until (jobs = conn.zrangebyscore(sorted_set, "-inf", now, limit: [0, 100])).empty?
19
- # We need to go through the list one at a time to reduce the risk of something
20
- # going wrong between the time jobs are popped from the scheduled queue and when
21
- # they are pushed onto a work queue and losing the jobs.
22
- jobs.each do |job|
23
- # Pop item off the queue and add it to the work queue. If the job can't be popped from
24
- # the queue, it's because another process already popped it so we can move on to the
25
- # next one.
26
- if conn.zrem(sorted_set, job)
27
- Sidekiq::Client.push(Sidekiq.load_json(job))
28
- Sidekiq.logger.debug { "enqueued #{sorted_set}: #{job}" }
29
- end
30
- end
31
+ # Get next item in the queue with score (time to execute) <= now.
32
+ # We need to go through the list one at a time to reduce the risk of something
33
+ # going wrong between the time jobs are popped from the scheduled queue and when
34
+ # they are pushed onto a work queue and losing the jobs.
35
+ while !@done && (job = zpopbyscore(conn, keys: [sorted_set], argv: [Time.now.to_f.to_s]))
36
+ Sidekiq::Client.push(Sidekiq.load_json(job))
37
+ Sidekiq.logger.debug { "enqueued #{sorted_set}: #{job}" }
31
38
  end
32
39
  end
33
40
  end
34
41
  end
42
+
43
+ def terminate
44
+ @done = true
45
+ end
46
+
47
+ private
48
+
49
+ def zpopbyscore(conn, keys: nil, argv: nil)
50
+ if @lua_zpopbyscore_sha.nil?
51
+ raw_conn = conn.respond_to?(:redis) ? conn.redis : conn
52
+ @lua_zpopbyscore_sha = raw_conn.script(:load, LUA_ZPOPBYSCORE)
53
+ end
54
+
55
+ conn.evalsha(@lua_zpopbyscore_sha, keys: keys, argv: argv)
56
+ rescue Redis::CommandError => e
57
+ raise unless e.message.start_with?("NOSCRIPT")
58
+
59
+ @lua_zpopbyscore_sha = nil
60
+ retry
61
+ end
35
62
  end
36
63
 
37
64
  ##
@@ -49,11 +76,14 @@ module Sidekiq
49
76
  @sleeper = ConnectionPool::TimedStack.new
50
77
  @done = false
51
78
  @thread = nil
79
+ @count_calls = 0
52
80
  end
53
81
 
54
82
  # Shut down this instance, will pause until the thread is dead.
55
83
  def terminate
56
84
  @done = true
85
+ @enq.terminate if @enq.respond_to?(:terminate)
86
+
57
87
  if @thread
58
88
  t = @thread
59
89
  @thread = nil
@@ -152,8 +182,13 @@ module Sidekiq
152
182
  end
153
183
 
154
184
  def process_count
155
- pcount = Sidekiq::ProcessSet.new.size
185
+ # The work buried within Sidekiq::ProcessSet#cleanup can be
186
+ # expensive at scale. Cut it down by 90% with this counter.
187
+ # NB: This method is only called by the scheduler thread so we
188
+ # don't need to worry about the thread safety of +=.
189
+ pcount = Sidekiq::ProcessSet.new(@count_calls % 10 == 0).size
156
190
  pcount = 1 if pcount == 0
191
+ @count_calls += 1
157
192
  pcount
158
193
  end
159
194