sidekiq 5.1.3 → 7.3.1

Files changed (157)
  1. checksums.yaml +5 -5
  2. data/Changes.md +756 -8
  3. data/LICENSE.txt +9 -0
  4. data/README.md +48 -51
  5. data/bin/multi_queue_bench +271 -0
  6. data/bin/sidekiq +22 -3
  7. data/bin/sidekiqload +213 -115
  8. data/bin/sidekiqmon +11 -0
  9. data/lib/generators/sidekiq/job_generator.rb +59 -0
  10. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  11. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  12. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  13. data/lib/sidekiq/api.rb +640 -330
  14. data/lib/sidekiq/capsule.rb +132 -0
  15. data/lib/sidekiq/cli.rb +244 -257
  16. data/lib/sidekiq/client.rb +132 -103
  17. data/lib/sidekiq/component.rb +68 -0
  18. data/lib/sidekiq/config.rb +293 -0
  19. data/lib/sidekiq/deploy.rb +64 -0
  20. data/lib/sidekiq/embedded.rb +63 -0
  21. data/lib/sidekiq/fetch.rb +49 -42
  22. data/lib/sidekiq/iterable_job.rb +55 -0
  23. data/lib/sidekiq/job/interrupt_handler.rb +24 -0
  24. data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
  25. data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
  26. data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
  27. data/lib/sidekiq/job/iterable.rb +231 -0
  28. data/lib/sidekiq/job.rb +385 -0
  29. data/lib/sidekiq/job_logger.rb +49 -12
  30. data/lib/sidekiq/job_retry.rb +167 -103
  31. data/lib/sidekiq/job_util.rb +109 -0
  32. data/lib/sidekiq/launcher.rb +209 -102
  33. data/lib/sidekiq/logger.rb +131 -0
  34. data/lib/sidekiq/manager.rb +43 -46
  35. data/lib/sidekiq/metrics/query.rb +158 -0
  36. data/lib/sidekiq/metrics/shared.rb +97 -0
  37. data/lib/sidekiq/metrics/tracking.rb +148 -0
  38. data/lib/sidekiq/middleware/chain.rb +113 -56
  39. data/lib/sidekiq/middleware/current_attributes.rb +113 -0
  40. data/lib/sidekiq/middleware/i18n.rb +7 -7
  41. data/lib/sidekiq/middleware/modules.rb +23 -0
  42. data/lib/sidekiq/monitor.rb +147 -0
  43. data/lib/sidekiq/paginator.rb +28 -16
  44. data/lib/sidekiq/processor.rb +175 -112
  45. data/lib/sidekiq/rails.rb +54 -39
  46. data/lib/sidekiq/redis_client_adapter.rb +114 -0
  47. data/lib/sidekiq/redis_connection.rb +65 -86
  48. data/lib/sidekiq/ring_buffer.rb +31 -0
  49. data/lib/sidekiq/scheduled.rb +139 -48
  50. data/lib/sidekiq/sd_notify.rb +149 -0
  51. data/lib/sidekiq/systemd.rb +26 -0
  52. data/lib/sidekiq/testing/inline.rb +6 -5
  53. data/lib/sidekiq/testing.rb +95 -94
  54. data/lib/sidekiq/transaction_aware_client.rb +51 -0
  55. data/lib/sidekiq/version.rb +3 -1
  56. data/lib/sidekiq/web/action.rb +22 -12
  57. data/lib/sidekiq/web/application.rb +225 -76
  58. data/lib/sidekiq/web/csrf_protection.rb +183 -0
  59. data/lib/sidekiq/web/helpers.rb +215 -118
  60. data/lib/sidekiq/web/router.rb +23 -19
  61. data/lib/sidekiq/web.rb +114 -106
  62. data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
  63. data/lib/sidekiq.rb +95 -182
  64. data/sidekiq.gemspec +26 -23
  65. data/web/assets/images/apple-touch-icon.png +0 -0
  66. data/web/assets/javascripts/application.js +157 -61
  67. data/web/assets/javascripts/base-charts.js +106 -0
  68. data/web/assets/javascripts/chart.min.js +13 -0
  69. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  70. data/web/assets/javascripts/dashboard-charts.js +192 -0
  71. data/web/assets/javascripts/dashboard.js +35 -283
  72. data/web/assets/javascripts/metrics.js +298 -0
  73. data/web/assets/stylesheets/application-dark.css +147 -0
  74. data/web/assets/stylesheets/application-rtl.css +10 -93
  75. data/web/assets/stylesheets/application.css +169 -522
  76. data/web/assets/stylesheets/bootstrap.css +2 -2
  77. data/web/locales/ar.yml +71 -64
  78. data/web/locales/cs.yml +62 -62
  79. data/web/locales/da.yml +60 -53
  80. data/web/locales/de.yml +65 -53
  81. data/web/locales/el.yml +43 -24
  82. data/web/locales/en.yml +86 -65
  83. data/web/locales/es.yml +70 -54
  84. data/web/locales/fa.yml +65 -65
  85. data/web/locales/fr.yml +83 -62
  86. data/web/locales/gd.yml +99 -0
  87. data/web/locales/he.yml +65 -64
  88. data/web/locales/hi.yml +59 -59
  89. data/web/locales/it.yml +53 -53
  90. data/web/locales/ja.yml +75 -64
  91. data/web/locales/ko.yml +52 -52
  92. data/web/locales/lt.yml +83 -0
  93. data/web/locales/nb.yml +61 -61
  94. data/web/locales/nl.yml +52 -52
  95. data/web/locales/pl.yml +45 -45
  96. data/web/locales/pt-br.yml +83 -55
  97. data/web/locales/pt.yml +51 -51
  98. data/web/locales/ru.yml +68 -63
  99. data/web/locales/sv.yml +53 -53
  100. data/web/locales/ta.yml +60 -60
  101. data/web/locales/tr.yml +101 -0
  102. data/web/locales/uk.yml +62 -61
  103. data/web/locales/ur.yml +64 -64
  104. data/web/locales/vi.yml +83 -0
  105. data/web/locales/zh-cn.yml +43 -16
  106. data/web/locales/zh-tw.yml +42 -8
  107. data/web/views/_footer.erb +18 -3
  108. data/web/views/_job_info.erb +21 -4
  109. data/web/views/_metrics_period_select.erb +12 -0
  110. data/web/views/_nav.erb +4 -18
  111. data/web/views/_paging.erb +2 -0
  112. data/web/views/_poll_link.erb +3 -6
  113. data/web/views/_summary.erb +7 -7
  114. data/web/views/busy.erb +79 -29
  115. data/web/views/dashboard.erb +49 -19
  116. data/web/views/dead.erb +3 -3
  117. data/web/views/filtering.erb +7 -0
  118. data/web/views/layout.erb +9 -7
  119. data/web/views/metrics.erb +91 -0
  120. data/web/views/metrics_for_job.erb +59 -0
  121. data/web/views/morgue.erb +14 -15
  122. data/web/views/queue.erb +33 -23
  123. data/web/views/queues.erb +19 -5
  124. data/web/views/retries.erb +19 -16
  125. data/web/views/retry.erb +3 -3
  126. data/web/views/scheduled.erb +17 -15
  127. metadata +84 -129
  128. data/.github/contributing.md +0 -32
  129. data/.github/issue_template.md +0 -11
  130. data/.gitignore +0 -13
  131. data/.travis.yml +0 -14
  132. data/3.0-Upgrade.md +0 -70
  133. data/4.0-Upgrade.md +0 -53
  134. data/5.0-Upgrade.md +0 -56
  135. data/COMM-LICENSE +0 -95
  136. data/Ent-Changes.md +0 -216
  137. data/Gemfile +0 -8
  138. data/LICENSE +0 -9
  139. data/Pro-2.0-Upgrade.md +0 -138
  140. data/Pro-3.0-Upgrade.md +0 -44
  141. data/Pro-4.0-Upgrade.md +0 -35
  142. data/Pro-Changes.md +0 -729
  143. data/Rakefile +0 -8
  144. data/bin/sidekiqctl +0 -99
  145. data/code_of_conduct.md +0 -50
  146. data/lib/generators/sidekiq/worker_generator.rb +0 -49
  147. data/lib/sidekiq/core_ext.rb +0 -1
  148. data/lib/sidekiq/delay.rb +0 -42
  149. data/lib/sidekiq/exception_handler.rb +0 -29
  150. data/lib/sidekiq/extensions/action_mailer.rb +0 -57
  151. data/lib/sidekiq/extensions/active_record.rb +0 -40
  152. data/lib/sidekiq/extensions/class_methods.rb +0 -40
  153. data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
  154. data/lib/sidekiq/logging.rb +0 -122
  155. data/lib/sidekiq/middleware/server/active_record.rb +0 -23
  156. data/lib/sidekiq/util.rb +0 -66
  157. data/lib/sidekiq/worker.rb +0 -204
data/lib/sidekiq/launcher.rb
@@ -1,55 +1,71 @@
  # frozen_string_literal: true
- require 'sidekiq/manager'
- require 'sidekiq/fetch'
- require 'sidekiq/scheduled'
+
+ require "sidekiq/manager"
+ require "sidekiq/capsule"
+ require "sidekiq/scheduled"
+ require "sidekiq/ring_buffer"

  module Sidekiq
-   # The Launcher is a very simple Actor whose job is to
-   # start, monitor and stop the core Actors in Sidekiq.
-   # If any of these actors die, the Sidekiq process exits
-   # immediately.
+   # The Launcher starts the Capsule Managers, the Poller thread and provides the process heartbeat.
    class Launcher
-     include Util
+     include Sidekiq::Component
+
+     STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
+
+     PROCTITLES = [
+       proc { "sidekiq" },
+       proc { Sidekiq::VERSION },
+       proc { |me, data| data["tag"] },
+       proc { |me, data| "[#{Processor::WORK_STATE.size} of #{me.config.total_concurrency} busy]" },
+       proc { |me, data| "stopping" if me.stopping? }
+     ]

-     attr_accessor :manager, :poller, :fetcher
+     attr_accessor :managers, :poller

-     def initialize(options)
-       @manager = Sidekiq::Manager.new(options)
-       @poller = Sidekiq::Scheduled::Poller.new
+     def initialize(config, embedded: false)
+       @config = config
+       @embedded = embedded
+       @managers = config.capsules.values.map do |cap|
+         Sidekiq::Manager.new(cap)
+       end
+       @poller = Sidekiq::Scheduled::Poller.new(@config)
        @done = false
-       @options = options
      end

-     def run
-       @thread = safe_thread("heartbeat", &method(:start_heartbeat))
+     # Start this Sidekiq instance. If an embedding process already
+     # has a heartbeat thread, caller can use `async_beat: false`
+     # and instead have thread call Launcher#heartbeat every N seconds.
+     def run(async_beat: true)
+       Sidekiq.freeze!
+       logger.debug { @config.merge!({}) }
+       @thread = safe_thread("heartbeat", &method(:start_heartbeat)) if async_beat
        @poller.start
-       @manager.start
+       @managers.each(&:start)
      end

      # Stops this instance from processing any more jobs,
-     #
      def quiet
+       return if @done
+
        @done = true
-       @manager.quiet
+       @managers.each(&:quiet)
        @poller.terminate
+       fire_event(:quiet, reverse: true)
      end

-     # Shuts down the process. This method does not
-     # return until all work is complete and cleaned up.
-     # It can take up to the timeout to complete.
+     # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
      def stop
-       deadline = Time.now + @options[:timeout]
+       deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]

-       @done = true
-       @manager.quiet
-       @poller.terminate
-
-       @manager.stop(deadline)
+       quiet
+       stoppers = @managers.map do |mgr|
+         Thread.new do
+           mgr.stop(deadline)
+         end
+       end

-       # Requeue everything in case there was a worker who grabbed work while stopped
-       # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-       strategy = (@options[:fetch] || Sidekiq::BasicFetch)
-       strategy.bulk_requeue([], @options)
+       fire_event(:shutdown, reverse: true)
+       stoppers.each(&:join)

        clear_heartbeat
      end
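The new `initialize(config, embedded: false)` and `run(async_beat: true)` signatures above are what let Sidekiq 7 run inside another Ruby process (see also data/lib/sidekiq/embedded.rb in the file list). A minimal sketch of driving the Launcher by hand, just to illustrate the embedded heartbeat contract; `Sidekiq.default_configuration` and the `concurrency=`/`queues=` setters are assumptions about the 7.x Config API, and real applications should prefer the supported Sidekiq.configure_embed entry point:

    # Illustrative only: boots a Launcher inside the host process and
    # lets the host own the heartbeat cadence instead of a "heartbeat" thread.
    require "sidekiq"
    require "sidekiq/launcher"

    config = Sidekiq.default_configuration
    config.concurrency = 2                 # assumption: Config exposes concurrency=
    config.queues = %w[critical default]   # assumption: Config exposes queues=

    launcher = Sidekiq::Launcher.new(config, embedded: true)
    launcher.run(async_beat: false)        # do not spawn the internal heartbeat thread

    beat = Thread.new do
      until launcher.stopping?
        launcher.heartbeat                 # the public wrapper shown in the next hunk
        sleep 10
      end
    end

    # ...later, during host shutdown:
    launcher.stop
    beat.join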
@@ -58,109 +74,200 @@ module Sidekiq
        @done
      end

+     # If embedding Sidekiq, you can have the process heartbeat
+     # call this method to regularly heartbeat rather than creating
+     # a separate thread.
+     def heartbeat
+       ❤
+     end
+
      private unless $TESTING

-     def heartbeat
-       results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
-       results.compact!
-       $0 = results.join(' ')
+     BEAT_PAUSE = 10

+     def start_heartbeat
+       loop do
+         beat
+         sleep BEAT_PAUSE
+       end
+       logger.info("Heartbeat stopping...")
+     end
+
+     def beat
+       $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ") unless @embedded
        ❤
      end

+     def clear_heartbeat
+       flush_stats
+
+       # Remove record from Redis since we are shutting down.
+       # Note we don't stop the heartbeat thread; if the process
+       # doesn't actually exit, it'll reappear in the Web UI.
+       redis do |conn|
+         conn.pipelined do |pipeline|
+           pipeline.srem("processes", [identity])
+           pipeline.unlink("#{identity}:work")
+         end
+       end
+     rescue
+       # best effort, ignore network errors
+     end
+
+     def flush_stats
+       fails = Processor::FAILURE.reset
+       procd = Processor::PROCESSED.reset
+       return if fails + procd == 0
+
+       nowdate = Time.now.utc.strftime("%Y-%m-%d")
+       begin
+         redis do |conn|
+           conn.pipelined do |pipeline|
+             pipeline.incrby("stat:processed", procd)
+             pipeline.incrby("stat:processed:#{nowdate}", procd)
+             pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+             pipeline.incrby("stat:failed", fails)
+             pipeline.incrby("stat:failed:#{nowdate}", fails)
+             pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
+           end
+         end
+       rescue => ex
+         logger.warn("Unable to flush stats: #{ex}")
+       end
+     end
+
      def ❤
        key = identity
        fails = procd = 0
+
        begin
-         Processor::FAILURE.update {|curr| fails = curr; 0 }
-         Processor::PROCESSED.update {|curr| procd = curr; 0 }
-
-         workers_key = "#{key}:workers"
-         nowdate = Time.now.utc.strftime("%Y-%m-%d")
-         Sidekiq.redis do |conn|
-           conn.multi do
-             conn.incrby("stat:processed", procd)
-             conn.incrby("stat:processed:#{nowdate}", procd)
-             conn.incrby("stat:failed", fails)
-             conn.incrby("stat:failed:#{nowdate}", fails)
-             conn.del(workers_key)
-             Processor::WORKER_STATE.each_pair do |tid, hash|
-               conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
+         flush_stats
+
+         curstate = Processor::WORK_STATE.dup
+         curstate.transform_values! { |val| Sidekiq.dump_json(val) }
+
+         redis do |conn|
+           # work is the current set of executing jobs
+           work_key = "#{key}:work"
+           conn.multi do |transaction|
+             transaction.unlink(work_key)
+             if curstate.size > 0
+               transaction.hset(work_key, curstate)
+               transaction.expire(work_key, 60)
              end
-             conn.expire(workers_key, 60)
           end
         end
+
+         rtt = check_rtt
+
          fails = procd = 0
+         kb = memory_usage(::Process.pid)

-         _, exists, _, _, msg = Sidekiq.redis do |conn|
-           conn.multi do
-             conn.sadd('processes', key)
-             conn.exists(key)
-             conn.hmset(key, 'info', to_json, 'busy', Processor::WORKER_STATE.size, 'beat', Time.now.to_f, 'quiet', @done)
-             conn.expire(key, 60)
-             conn.rpop("#{key}-signals")
-           end
-         end
+         _, exists, _, _, signal = redis { |conn|
+           conn.multi { |transaction|
+             transaction.sadd("processes", [key])
+             transaction.exists(key)
+             transaction.hset(key, "info", to_json,
+               "busy", curstate.size,
+               "beat", Time.now.to_f,
+               "rtt_us", rtt,
+               "quiet", @done.to_s,
+               "rss", kb)
+             transaction.expire(key, 60)
+             transaction.rpop("#{key}-signals")
+           }
+         }

          # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-         fire_event(:heartbeat) if !exists
+         fire_event(:heartbeat) unless exists > 0
+         fire_event(:beat, oneshot: false)

-         return unless msg
-
-         ::Process.kill(msg, $$)
+         ::Process.kill(signal, ::Process.pid) if signal && !@embedded
        rescue => e
          # ignore all redis/network issues
-         logger.error("heartbeat: #{e.message}")
+         logger.error("heartbeat: #{e}")
          # don't lose the counts if there was a network issue
-         Processor::PROCESSED.increment(procd)
-         Processor::FAILURE.increment(fails)
+         Processor::PROCESSED.incr(procd)
+         Processor::FAILURE.incr(fails)
        end
      end

-     def start_heartbeat
-       while true
-         heartbeat
-         sleep 5
+     # We run the heartbeat every five seconds.
+     # Capture five samples of RTT, log a warning if each sample
+     # is above our warning threshold.
+     RTT_READINGS = RingBuffer.new(5)
+     RTT_WARNING_LEVEL = 50_000
+
+     def check_rtt
+       a = b = 0
+       redis do |x|
+         a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
+         x.ping
+         b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
+       end
+       rtt = b - a
+       RTT_READINGS << rtt
+       # Ideal RTT for Redis is < 1000µs
+       # Workable is < 10,000µs
+       # Log a warning if it's a disaster.
+       if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
+         logger.warn <<~EOM
+           Your Redis network connection is performing extremely poorly.
+           Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
+           Ensure Redis is running in the same AZ or datacenter as Sidekiq.
+           If these values are close to 100,000, that means your Sidekiq process may be
+           CPU-saturated; reduce your concurrency and/or see https://github.com/sidekiq/sidekiq/discussions/5039
+         EOM
+         RTT_READINGS.reset
        end
-       Sidekiq.logger.info("Heartbeat stopping...")
+       rtt
      end

-     def to_data
-       @data ||= begin
-         {
-           'hostname' => hostname,
-           'started_at' => Time.now.to_f,
-           'pid' => $$,
-           'tag' => @options[:tag] || '',
-           'concurrency' => @options[:concurrency],
-           'queues' => @options[:queues].uniq,
-           'labels' => @options[:labels],
-           'identity' => identity,
-         }
-       end
+     MEMORY_GRABBER = case RUBY_PLATFORM
+     when /linux/
+       ->(pid) {
+         IO.readlines("/proc/#{$$}/status").each do |line|
+           next unless line.start_with?("VmRSS:")
+           break line.split[1].to_i
+         end
+       }
+     when /darwin|bsd/
+       ->(pid) {
+         `ps -o pid,rss -p #{pid}`.lines.last.split.last.to_i
+       }
+     else
+       ->(pid) { 0 }
      end

-     def to_json
-       @json ||= begin
-         # this data changes infrequently so dump it to a string
-         # now so we don't need to dump it every heartbeat.
-         Sidekiq.dump_json(to_data)
-       end
+     def memory_usage(pid)
+       MEMORY_GRABBER.call(pid)
      end

-     def clear_heartbeat
-       # Remove record from Redis since we are shutting down.
-       # Note we don't stop the heartbeat thread; if the process
-       # doesn't actually exit, it'll reappear in the Web UI.
-       Sidekiq.redis do |conn|
-         conn.pipelined do
-           conn.srem('processes', identity)
-           conn.del("#{identity}:workers")
-         end
-       end
-     rescue
-       # best effort, ignore network errors
+     def to_data
+       @data ||= {
+         "hostname" => hostname,
+         "started_at" => Time.now.to_f,
+         "pid" => ::Process.pid,
+         "tag" => @config[:tag] || "",
+         "concurrency" => @config.total_concurrency,
+         "queues" => @config.capsules.values.flat_map { |cap| cap.queues }.uniq,
+         "weights" => to_weights,
+         "labels" => @config[:labels].to_a,
+         "identity" => identity,
+         "version" => Sidekiq::VERSION,
+         "embedded" => @embedded
+       }
      end

+     def to_weights
+       @config.capsules.values.map(&:weights)
+     end
+
+     def to_json
+       # this data changes infrequently so dump it to a string
+       # now so we don't need to dump it every heartbeat.
+       @json ||= Sidekiq.dump_json(to_data)
+     end
    end
  end
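Each beat writes the process hash (`info`, `busy`, `beat`, `rtt_us`, `quiet`, `rss`) under the process identity key and registers it in the `processes` set, which is what the Web UI's Busy page reads. A sketch of reading those fields back through the public API (Sidekiq::ProcessSet from data/lib/sidekiq/api.rb); exact field availability depends on the 7.x version:

    # Sketch: list every live Sidekiq process and the heartbeat fields written above.
    require "sidekiq/api"

    Sidekiq::ProcessSet.new.each do |process|
      puts [
        process["identity"],
        "busy=#{process["busy"]}",      # size of Processor::WORK_STATE
        "rss_kb=#{process["rss"]}",     # memory_usage(pid) via MEMORY_GRABBER
        "rtt_us=#{process["rtt_us"]}",  # Redis round-trip sampled by check_rtt
        "quiet=#{process["quiet"]}"
      ].join(" ")
    end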
data/lib/sidekiq/logger.rb
@@ -0,0 +1,131 @@
+ # frozen_string_literal: true
+
+ require "logger"
+ require "time"
+
+ module Sidekiq
+   module Context
+     def self.with(hash)
+       orig_context = current.dup
+       current.merge!(hash)
+       yield
+     ensure
+       Thread.current[:sidekiq_context] = orig_context
+     end
+
+     def self.current
+       Thread.current[:sidekiq_context] ||= {}
+     end
+
+     def self.add(k, v)
+       current[k] = v
+     end
+   end
+
+   module LoggingUtils
+     LEVELS = {
+       "debug" => 0,
+       "info" => 1,
+       "warn" => 2,
+       "error" => 3,
+       "fatal" => 4
+     }
+     LEVELS.default_proc = proc do |_, level|
+       puts("Invalid log level: #{level.inspect}")
+       nil
+     end
+
+     LEVELS.each do |level, numeric_level|
+       define_method(:"#{level}?") do
+         local_level.nil? ? super() : local_level <= numeric_level
+       end
+     end
+
+     def local_level
+       Thread.current[:sidekiq_log_level]
+     end
+
+     def local_level=(level)
+       case level
+       when Integer
+         Thread.current[:sidekiq_log_level] = level
+       when Symbol, String
+         Thread.current[:sidekiq_log_level] = LEVELS[level.to_s]
+       when nil
+         Thread.current[:sidekiq_log_level] = nil
+       else
+         raise ArgumentError, "Invalid log level: #{level.inspect}"
+       end
+     end
+
+     def level
+       local_level || super
+     end
+
+     # Change the thread-local level for the duration of the given block.
+     def log_at(level)
+       old_local_level = local_level
+       self.local_level = level
+       yield
+     ensure
+       self.local_level = old_local_level
+     end
+   end
+
+   class Logger < ::Logger
+     include LoggingUtils
+
+     module Formatters
+       class Base < ::Logger::Formatter
+         def tid
+           Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+         end
+
+         def ctx
+           Sidekiq::Context.current
+         end
+
+         def format_context
+           if ctx.any?
+             " " + ctx.compact.map { |k, v|
+               case v
+               when Array
+                 "#{k}=#{v.join(",")}"
+               else
+                 "#{k}=#{v}"
+               end
+             }.join(" ")
+           end
+         end
+       end
+
+       class Pretty < Base
+         def call(severity, time, program_name, message)
+           "#{time.utc.iso8601(3)} pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+         end
+       end
+
+       class WithoutTimestamp < Pretty
+         def call(severity, time, program_name, message)
+           "pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+         end
+       end
+
+       class JSON < Base
+         def call(severity, time, program_name, message)
+           hash = {
+             ts: time.utc.iso8601(3),
+             pid: ::Process.pid,
+             tid: tid,
+             lvl: severity,
+             msg: message
+           }
+           c = ctx
+           hash["ctx"] = c unless c.empty?
+
+           Sidekiq.dump_json(hash) << "\n"
+         end
+       end
+     end
+   end
+ end
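The `Context` module above stores per-thread key/value pairs that every formatter appends to each line (or emits under a `ctx` key in JSON output), and `LoggingUtils#log_at` temporarily overrides the log level on the current thread only. A small usage sketch; the formatter wiring here is illustrative, Sidekiq chooses and installs the formatter itself at boot:

    # Sketch of the thread-local context and level helpers defined in this file.
    require "sidekiq"

    logger = Sidekiq::Logger.new($stdout)
    logger.formatter = Sidekiq::Logger::Formatters::JSON.new

    Sidekiq::Context.with(class: "HardJob", jid: "abc123") do
      logger.info("starting")   # JSON line includes "ctx":{"class":"HardJob","jid":"abc123"}

      logger.log_at(:debug) do
        logger.debug("verbose") # emitted: the thread-local level wins over the global one
      end
    end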
data/lib/sidekiq/manager.rb
@@ -1,12 +1,9 @@
  # frozen_string_literal: true
- require 'sidekiq/util'
- require 'sidekiq/processor'
- require 'sidekiq/fetch'
- require 'thread'
- require 'set'

- module Sidekiq
+ require "sidekiq/processor"
+ require "set"

+ module Sidekiq
    ##
    # The Manager is the central coordination point in Sidekiq, controlling
    # the lifecycle of the Processors.
@@ -22,46 +19,38 @@ module Sidekiq
    # the shutdown process. The other tasks are performed by other threads.
    #
    class Manager
-     include Util
+     include Sidekiq::Component

      attr_reader :workers
-     attr_reader :options
+     attr_reader :capsule

-     def initialize(options={})
-       logger.debug { options.inspect }
-       @options = options
-       @count = options[:concurrency] || 25
+     def initialize(capsule)
+       @config = @capsule = capsule
+       @count = capsule.concurrency
        raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1

        @done = false
        @workers = Set.new
+       @plock = Mutex.new
        @count.times do
-         @workers << Processor.new(self)
+         @workers << Processor.new(@config, &method(:processor_result))
        end
-       @plock = Mutex.new
      end

      def start
-       @workers.each do |x|
-         x.start
-       end
+       @workers.each(&:start)
      end

      def quiet
        return if @done
        @done = true

-       logger.info { "Terminating quiet workers" }
-       @workers.each { |x| x.terminate }
-       fire_event(:quiet, reverse: true)
+       logger.info { "Terminating quiet threads for #{capsule.name} capsule" }
+       @workers.each(&:terminate)
      end

-     # hack for quicker development / testing environment #2774
-     PAUSE_TIME = STDOUT.tty? ? 0.1 : 0.5
-
      def stop(deadline)
        quiet
-       fire_event(:shutdown, reverse: true)

        # some of the shutdown events can be async,
        # we don't have any way to know when they're done but
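Note that Manager now wraps a single Capsule (data/lib/sidekiq/capsule.rb in the file list) instead of the global options hash, and the Launcher boots one Manager per configured capsule. A hedged configuration sketch based on the capsule API documented for Sidekiq 7.x; the capsule name and queue names are illustrative:

    # Sketch: a second capsule gets its own queues, concurrency and Manager.
    # Typically placed in the Sidekiq initializer.
    Sidekiq.configure_server do |config|
      config.concurrency = 10            # the implicit "default" capsule

      config.capsule("single-threaded") do |cap|
        cap.concurrency = 1              # one Processor thread in this capsule
        cap.queues = %w[unsafe]          # only fetches from the "unsafe" queue
      end
    end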
@@ -69,29 +58,20 @@ module Sidekiq
        sleep PAUSE_TIME
        return if @workers.empty?

-       logger.info { "Pausing to allow workers to finish..." }
-       remaining = deadline - Time.now
-       while remaining > PAUSE_TIME
-         return if @workers.empty?
-         sleep PAUSE_TIME
-         remaining = deadline - Time.now
-       end
+       logger.info { "Pausing to allow jobs to finish..." }
+       wait_for(deadline) { @workers.empty? }
        return if @workers.empty?

        hard_shutdown
+     ensure
+       capsule.stop
      end

-     def processor_stopped(processor)
-       @plock.synchronize do
-         @workers.delete(processor)
-       end
-     end
-
-     def processor_died(processor, reason)
+     def processor_result(processor, reason = nil)
        @plock.synchronize do
          @workers.delete(processor)
          unless @done
-           p = Processor.new(self)
+           p = Processor.new(@config, &method(:processor_result))
            @workers << p
            p.start
          end
@@ -105,7 +85,7 @@ module Sidekiq
      private

      def hard_shutdown
-       # We've reached the timeout and we still have busy workers.
+       # We've reached the timeout and we still have busy threads.
        # They must die but their jobs shall live on.
        cleanup = nil
        @plock.synchronize do
@@ -113,25 +93,42 @@ module Sidekiq
        end

        if cleanup.size > 0
-         jobs = cleanup.map {|p| p.job }.compact
+         jobs = cleanup.map { |p| p.job }.compact

-         logger.warn { "Terminating #{cleanup.size} busy worker threads" }
-         logger.warn { "Work still in progress #{jobs.inspect}" }
+         logger.warn { "Terminating #{cleanup.size} busy threads" }
+         logger.debug { "Jobs still in progress #{jobs.inspect}" }

          # Re-enqueue unfinished jobs
          # NOTE: You may notice that we may push a job back to redis before
-         # the worker thread is terminated. This is ok because Sidekiq's
+         # the thread is terminated. This is ok because Sidekiq's
          # contract says that jobs are run AT LEAST once. Process termination
          # is delayed until we're certain the jobs are back in Redis because
          # it is worse to lose a job than to run it twice.
-         strategy = (@options[:fetch] || Sidekiq::BasicFetch)
-         strategy.bulk_requeue(jobs, @options)
+         capsule.fetcher.bulk_requeue(jobs)
        end

        cleanup.each do |processor|
          processor.kill
        end
+
+       # when this method returns, we immediately call `exit` which may not give
+       # the remaining threads time to run `ensure` blocks, etc. We pause here up
+       # to 3 seconds to give threads a minimal amount of time to run `ensure` blocks.
+       deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
+       wait_for(deadline) { @workers.empty? }
      end

+     # hack for quicker development / testing environment #2774
+     PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
+
+     # Wait for the orblock to be true or the deadline passed.
+     def wait_for(deadline, &condblock)
+       remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+       while remaining > PAUSE_TIME
+         return if condblock.call
+         sleep PAUSE_TIME
+         remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+       end
+     end
    end
  end
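The `bulk_requeue` path in `hard_shutdown` means a job killed at the shutdown deadline is pushed back to Redis and runs again after restart: delivery is at-least-once, so job code should tolerate re-execution. A hypothetical illustration (the `Order` model and `charged?` guard are not part of Sidekiq):

    # Hypothetical idempotent job: a requeued execution becomes a safe no-op.
    class ChargeOrderJob
      include Sidekiq::Job

      def perform(order_id)
        order = Order.find(order_id)
        return if order.charged?   # guard against the duplicate run after a hard shutdown

        order.charge!
      end
    end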