sidekiq 5.1.1 → 6.5.9


Files changed (126)
  1. checksums.yaml +5 -5
  2. data/Changes.md +507 -1
  3. data/LICENSE +3 -3
  4. data/README.md +24 -35
  5. data/bin/sidekiq +27 -3
  6. data/bin/sidekiqload +80 -68
  7. data/bin/sidekiqmon +8 -0
  8. data/lib/generators/sidekiq/job_generator.rb +57 -0
  9. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  10. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  11. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  12. data/lib/sidekiq/api.rb +537 -286
  13. data/lib/sidekiq/cli.rb +243 -240
  14. data/lib/sidekiq/client.rb +82 -85
  15. data/lib/sidekiq/component.rb +65 -0
  16. data/lib/sidekiq/delay.rb +9 -7
  17. data/lib/sidekiq/extensions/action_mailer.rb +13 -22
  18. data/lib/sidekiq/extensions/active_record.rb +13 -10
  19. data/lib/sidekiq/extensions/class_methods.rb +14 -11
  20. data/lib/sidekiq/extensions/generic_proxy.rb +7 -5
  21. data/lib/sidekiq/fetch.rb +50 -40
  22. data/lib/sidekiq/job.rb +13 -0
  23. data/lib/sidekiq/job_logger.rb +36 -9
  24. data/lib/sidekiq/job_retry.rb +143 -97
  25. data/lib/sidekiq/job_util.rb +71 -0
  26. data/lib/sidekiq/launcher.rb +185 -85
  27. data/lib/sidekiq/logger.rb +156 -0
  28. data/lib/sidekiq/manager.rb +41 -43
  29. data/lib/sidekiq/metrics/deploy.rb +47 -0
  30. data/lib/sidekiq/metrics/query.rb +153 -0
  31. data/lib/sidekiq/metrics/shared.rb +94 -0
  32. data/lib/sidekiq/metrics/tracking.rb +134 -0
  33. data/lib/sidekiq/middleware/chain.rb +102 -46
  34. data/lib/sidekiq/middleware/current_attributes.rb +63 -0
  35. data/lib/sidekiq/middleware/i18n.rb +7 -7
  36. data/lib/sidekiq/middleware/modules.rb +21 -0
  37. data/lib/sidekiq/monitor.rb +133 -0
  38. data/lib/sidekiq/paginator.rb +28 -16
  39. data/lib/sidekiq/processor.rb +156 -98
  40. data/lib/sidekiq/rails.rb +48 -42
  41. data/lib/sidekiq/redis_client_adapter.rb +154 -0
  42. data/lib/sidekiq/redis_connection.rb +109 -51
  43. data/lib/sidekiq/ring_buffer.rb +29 -0
  44. data/lib/sidekiq/scheduled.rb +133 -41
  45. data/lib/sidekiq/sd_notify.rb +149 -0
  46. data/lib/sidekiq/systemd.rb +24 -0
  47. data/lib/sidekiq/testing/inline.rb +6 -5
  48. data/lib/sidekiq/testing.rb +72 -62
  49. data/lib/sidekiq/transaction_aware_client.rb +45 -0
  50. data/lib/sidekiq/version.rb +2 -1
  51. data/lib/sidekiq/web/action.rb +15 -11
  52. data/lib/sidekiq/web/application.rb +127 -76
  53. data/lib/sidekiq/web/csrf_protection.rb +180 -0
  54. data/lib/sidekiq/web/helpers.rb +133 -96
  55. data/lib/sidekiq/web/router.rb +23 -19
  56. data/lib/sidekiq/web.rb +69 -109
  57. data/lib/sidekiq/worker.rb +268 -102
  58. data/lib/sidekiq.rb +175 -66
  59. data/sidekiq.gemspec +23 -23
  60. data/web/assets/images/apple-touch-icon.png +0 -0
  61. data/web/assets/javascripts/application.js +112 -61
  62. data/web/assets/javascripts/chart.min.js +13 -0
  63. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  64. data/web/assets/javascripts/dashboard.js +65 -91
  65. data/web/assets/javascripts/graph.js +16 -0
  66. data/web/assets/javascripts/metrics.js +262 -0
  67. data/web/assets/stylesheets/application-dark.css +143 -0
  68. data/web/assets/stylesheets/application-rtl.css +0 -4
  69. data/web/assets/stylesheets/application.css +120 -232
  70. data/web/assets/stylesheets/bootstrap.css +2 -2
  71. data/web/locales/ar.yml +9 -2
  72. data/web/locales/de.yml +14 -2
  73. data/web/locales/el.yml +43 -19
  74. data/web/locales/en.yml +14 -1
  75. data/web/locales/es.yml +21 -5
  76. data/web/locales/fr.yml +10 -3
  77. data/web/locales/ja.yml +14 -1
  78. data/web/locales/lt.yml +83 -0
  79. data/web/locales/pl.yml +4 -4
  80. data/web/locales/pt-br.yml +27 -9
  81. data/web/locales/ru.yml +4 -0
  82. data/web/locales/vi.yml +83 -0
  83. data/web/locales/zh-cn.yml +36 -11
  84. data/web/locales/zh-tw.yml +32 -7
  85. data/web/views/_footer.erb +4 -1
  86. data/web/views/_job_info.erb +3 -2
  87. data/web/views/_nav.erb +4 -18
  88. data/web/views/_poll_link.erb +2 -5
  89. data/web/views/_summary.erb +7 -7
  90. data/web/views/busy.erb +61 -22
  91. data/web/views/dashboard.erb +23 -14
  92. data/web/views/dead.erb +3 -3
  93. data/web/views/layout.erb +4 -2
  94. data/web/views/metrics.erb +69 -0
  95. data/web/views/metrics_for_job.erb +87 -0
  96. data/web/views/morgue.erb +9 -6
  97. data/web/views/queue.erb +24 -10
  98. data/web/views/queues.erb +11 -3
  99. data/web/views/retries.erb +14 -7
  100. data/web/views/retry.erb +3 -3
  101. data/web/views/scheduled.erb +5 -2
  102. metadata +62 -135
  103. data/.github/contributing.md +0 -32
  104. data/.github/issue_template.md +0 -11
  105. data/.gitignore +0 -13
  106. data/.travis.yml +0 -14
  107. data/3.0-Upgrade.md +0 -70
  108. data/4.0-Upgrade.md +0 -53
  109. data/5.0-Upgrade.md +0 -56
  110. data/COMM-LICENSE +0 -95
  111. data/Ent-Changes.md +0 -210
  112. data/Gemfile +0 -8
  113. data/Pro-2.0-Upgrade.md +0 -138
  114. data/Pro-3.0-Upgrade.md +0 -44
  115. data/Pro-4.0-Upgrade.md +0 -35
  116. data/Pro-Changes.md +0 -716
  117. data/Rakefile +0 -8
  118. data/bin/sidekiqctl +0 -99
  119. data/code_of_conduct.md +0 -50
  120. data/lib/generators/sidekiq/worker_generator.rb +0 -49
  121. data/lib/sidekiq/core_ext.rb +0 -1
  122. data/lib/sidekiq/exception_handler.rb +0 -29
  123. data/lib/sidekiq/logging.rb +0 -122
  124. data/lib/sidekiq/middleware/server/active_record.rb +0 -22
  125. data/lib/sidekiq/middleware/server/active_record_cache.rb +0 -11
  126. data/lib/sidekiq/util.rb +0 -66

data/lib/sidekiq/scheduled.rb

@@ -1,35 +1,63 @@
 # frozen_string_literal: true
-require 'sidekiq'
-require 'sidekiq/util'
-require 'sidekiq/api'
+
+require "sidekiq"
+require "sidekiq/component"

 module Sidekiq
   module Scheduled
-    SETS = %w(retry schedule)
+    SETS = %w[retry schedule]

     class Enq
-      def enqueue_jobs(now=Time.now.to_f.to_s, sorted_sets=SETS)
+      LUA_ZPOPBYSCORE = <<~LUA
+        local key, now = KEYS[1], ARGV[1]
+        local jobs = redis.call("zrangebyscore", key, "-inf", now, "limit", 0, 1)
+        if jobs[1] then
+          redis.call("zrem", key, jobs[1])
+          return jobs[1]
+        end
+      LUA
+
+      def initialize
+        @done = false
+        @lua_zpopbyscore_sha = nil
+      end
+
+      def enqueue_jobs(sorted_sets = SETS)
         # A job's "score" in Redis is the time at which it should be processed.
         # Just check Redis for the set of jobs with a timestamp before now.
         Sidekiq.redis do |conn|
           sorted_sets.each do |sorted_set|
-            # Get the next item in the queue if it's score (time to execute) is <= now.
+            # Get next item in the queue with score (time to execute) <= now.
             # We need to go through the list one at a time to reduce the risk of something
             # going wrong between the time jobs are popped from the scheduled queue and when
             # they are pushed onto a work queue and losing the jobs.
-            while job = conn.zrangebyscore(sorted_set, '-inf'.freeze, now, :limit => [0, 1]).first do
-
-              # Pop item off the queue and add it to the work queue. If the job can't be popped from
-              # the queue, it's because another process already popped it so we can move on to the
-              # next one.
-              if conn.zrem(sorted_set, job)
-                Sidekiq::Client.push(Sidekiq.load_json(job))
-                Sidekiq::Logging.logger.debug { "enqueued #{sorted_set}: #{job}" }
-              end
+            while !@done && (job = zpopbyscore(conn, keys: [sorted_set], argv: [Time.now.to_f.to_s]))
+              Sidekiq::Client.push(Sidekiq.load_json(job))
+              Sidekiq.logger.debug { "enqueued #{sorted_set}: #{job}" }
             end
           end
         end
       end
+
+      def terminate
+        @done = true
+      end
+
+      private
+
+      def zpopbyscore(conn, keys: nil, argv: nil)
+        if @lua_zpopbyscore_sha.nil?
+          raw_conn = conn.respond_to?(:redis) ? conn.redis : conn
+          @lua_zpopbyscore_sha = raw_conn.script(:load, LUA_ZPOPBYSCORE)
+        end
+
+        conn.evalsha(@lua_zpopbyscore_sha, keys, argv)
+      rescue RedisConnection.adapter::CommandError => e
+        raise unless e.message.start_with?("NOSCRIPT")
+
+        @lua_zpopbyscore_sha = nil
+        retry
+      end
     end

     ##
@@ -38,20 +66,24 @@ module Sidekiq
     # just pops the job back onto its original queue so the
     # workers can pick it up like any other job.
     class Poller
-      include Util
+      include Sidekiq::Component

       INITIAL_WAIT = 10

-      def initialize
-        @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
+      def initialize(options)
+        @config = options
+        @enq = (options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
         @sleeper = ConnectionPool::TimedStack.new
         @done = false
         @thread = nil
+        @count_calls = 0
       end

       # Shut down this instance, will pause until the thread is dead.
       def terminate
         @done = true
+        @enq.terminate if @enq.respond_to?(:terminate)
+
         if @thread
           t = @thread
           @thread = nil
@@ -61,26 +93,24 @@ module Sidekiq
       end

       def start
-        @thread ||= safe_thread("scheduler") do
+        @thread ||= safe_thread("scheduler") {
           initial_wait

-          while !@done
+          until @done
             enqueue
             wait
           end
-          Sidekiq.logger.info("Scheduler exiting...")
-        end
+          logger.info("Scheduler exiting...")
+        }
       end

       def enqueue
-        begin
-          @enq.enqueue_jobs
-        rescue => ex
-          # Most likely a problem with redis networking.
-          # Punt and try again at the next interval
-          logger.error ex.message
-          handle_exception(ex)
-        end
+        @enq.enqueue_jobs
+      rescue => ex
+        # Most likely a problem with redis networking.
+        # Punt and try again at the next interval
+        logger.error ex.message
+        handle_exception(ex)
       end

       private
@@ -97,9 +127,37 @@ module Sidekiq
         sleep 5
       end

-      # Calculates a random interval that is ±50% the desired average.
       def random_poll_interval
-        poll_interval_average * rand + poll_interval_average.to_f / 2
+        # We want one Sidekiq process to schedule jobs every N seconds. We have M processes
+        # and **don't** want to coordinate.
+        #
+        # So in N*M second timespan, we want each process to schedule once. The basic loop is:
+        #
+        # * sleep a random amount within that N*M timespan
+        # * wake up and schedule
+        #
+        # We want to avoid one edge case: imagine a set of 2 processes, scheduling every 5 seconds,
+        # so N*M = 10. Each process decides to randomly sleep 8 seconds, now we've failed to meet
+        # that 5 second average. Thankfully each schedule cycle will sleep randomly so the next
+        # iteration could see each process sleep for 1 second, undercutting our average.
+        #
+        # So below 10 processes, we special case and ensure the processes sleep closer to the average.
+        # In the example above, each process should schedule every 10 seconds on average. We special
+        # case smaller clusters to add 50% so they would sleep somewhere between 5 and 15 seconds.
+        # As we run more processes, the scheduling interval average will approach an even spread
+        # between 0 and poll interval so we don't need this artifical boost.
+        #
+        count = process_count
+        interval = poll_interval_average(count)
+
+        if count < 10
+          # For small clusters, calculate a random interval that is ±50% the desired average.
+          interval * rand + interval.to_f / 2
+        else
+          # With 10+ processes, we should have enough randomness to get decent polling
+          # across the entire timespan
+          interval * rand
+        end
       end

       # We do our best to tune the poll interval to the size of the active Sidekiq
@@ -115,31 +173,65 @@ module Sidekiq
       # the same time: the thundering herd problem.
       #
       # We only do this if poll_interval_average is unset (the default).
-      def poll_interval_average
-        Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval
+      def poll_interval_average(count)
+        @config[:poll_interval_average] || scaled_poll_interval(count)
       end

       # Calculates an average poll interval based on the number of known Sidekiq processes.
       # This minimizes a single point of failure by dispersing check-ins but without taxing
       # Redis if you run many Sidekiq processes.
-      def scaled_poll_interval
-        pcount = Sidekiq::ProcessSet.new.size
+      def scaled_poll_interval(process_count)
+        process_count * @config[:average_scheduled_poll_interval]
+      end
+
+      def process_count
+        pcount = Sidekiq.redis { |conn| conn.scard("processes") }
         pcount = 1 if pcount == 0
-        pcount * Sidekiq.options[:average_scheduled_poll_interval]
+        pcount
+      end
+
+      # A copy of Sidekiq::ProcessSet#cleanup because server
+      # should never depend on sidekiq/api.
+      def cleanup
+        # dont run cleanup more than once per minute
+        return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) }
+
+        count = 0
+        Sidekiq.redis do |conn|
+          procs = conn.sscan_each("processes").to_a
+          heartbeats = conn.pipelined { |pipeline|
+            procs.each do |key|
+              pipeline.hget(key, "info")
+            end
+          }
+
+          # the hash named key has an expiry of 60 seconds.
+          # if it's not found, that means the process has not reported
+          # in to Redis and probably died.
+          to_prune = procs.select.with_index { |proc, i|
+            heartbeats[i].nil?
+          }
+          count = conn.srem("processes", to_prune) unless to_prune.empty?
+        end
+        count
       end

       def initial_wait
-        # Have all processes sleep between 5-15 seconds. 10 seconds
-        # to give time for the heartbeat to register (if the poll interval is going to be calculated by the number
+        # Have all processes sleep between 5-15 seconds. 10 seconds to give time for
+        # the heartbeat to register (if the poll interval is going to be calculated by the number
         # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time.
         total = 0
-        total += INITIAL_WAIT unless Sidekiq.options[:poll_interval_average]
+        total += INITIAL_WAIT unless @config[:poll_interval_average]
         total += (5 * rand)

         @sleeper.pop(total)
       rescue Timeout::Error
+      ensure
+        # periodically clean out the `processes` set in Redis which can collect
+        # references to dead processes over time. The process count affects how
+        # often we scan for scheduled jobs.
+        cleanup
       end
-
     end
   end
 end
data/lib/sidekiq/sd_notify.rb (new file)

@@ -0,0 +1,149 @@
+# frozen_string_literal: true
+
+# The MIT License
+#
+# Copyright (c) 2017, 2018, 2019, 2020 Agis Anastasopoulos
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+# This is a copy of https://github.com/agis/ruby-sdnotify as of commit a7d52ee
+# The only changes made was "rehoming" it within the Sidekiq module to avoid
+# namespace collisions and applying standard's code formatting style.
+
+require "socket"
+
+# SdNotify is a pure-Ruby implementation of sd_notify(3). It can be used to
+# notify systemd about state changes. Methods of this package are no-op on
+# non-systemd systems (eg. Darwin).
+#
+# The API maps closely to the original implementation of sd_notify(3),
+# therefore be sure to check the official man pages prior to using SdNotify.
+#
+# @see https://www.freedesktop.org/software/systemd/man/sd_notify.html
+module Sidekiq
+  module SdNotify
+    # Exception raised when there's an error writing to the notification socket
+    class NotifyError < RuntimeError; end
+
+    READY = "READY=1"
+    RELOADING = "RELOADING=1"
+    STOPPING = "STOPPING=1"
+    STATUS = "STATUS="
+    ERRNO = "ERRNO="
+    MAINPID = "MAINPID="
+    WATCHDOG = "WATCHDOG=1"
+    FDSTORE = "FDSTORE=1"
+
+    def self.ready(unset_env = false)
+      notify(READY, unset_env)
+    end
+
+    def self.reloading(unset_env = false)
+      notify(RELOADING, unset_env)
+    end
+
+    def self.stopping(unset_env = false)
+      notify(STOPPING, unset_env)
+    end
+
+    # @param status [String] a custom status string that describes the current
+    #   state of the service
+    def self.status(status, unset_env = false)
+      notify("#{STATUS}#{status}", unset_env)
+    end
+
+    # @param errno [Integer]
+    def self.errno(errno, unset_env = false)
+      notify("#{ERRNO}#{errno}", unset_env)
+    end
+
+    # @param pid [Integer]
+    def self.mainpid(pid, unset_env = false)
+      notify("#{MAINPID}#{pid}", unset_env)
+    end
+
+    def self.watchdog(unset_env = false)
+      notify(WATCHDOG, unset_env)
+    end
+
+    def self.fdstore(unset_env = false)
+      notify(FDSTORE, unset_env)
+    end
+
+    # @return [Boolean] true if the service manager expects watchdog keep-alive
+    #   notification messages to be sent from this process.
+    #
+    # If the $WATCHDOG_USEC environment variable is set,
+    # and the $WATCHDOG_PID variable is unset or set to the PID of the current
+    # process
+    #
+    # @note Unlike sd_watchdog_enabled(3), this method does not mutate the
+    #   environment.
+    def self.watchdog?
+      wd_usec = ENV["WATCHDOG_USEC"]
+      wd_pid = ENV["WATCHDOG_PID"]
+
+      return false unless wd_usec
+
+      begin
+        wd_usec = Integer(wd_usec)
+      rescue
+        return false
+      end
+
+      return false if wd_usec <= 0
+      return true if !wd_pid || wd_pid == $$.to_s
+
+      false
+    end
+
+    # Notify systemd with the provided state, via the notification socket, if
+    # any.
+    #
+    # Generally this method will be used indirectly through the other methods
+    # of the library.
+    #
+    # @param state [String]
+    # @param unset_env [Boolean]
+    #
+    # @return [Fixnum, nil] the number of bytes written to the notification
+    #   socket or nil if there was no socket to report to (eg. the program wasn't
+    #   started by systemd)
+    #
+    # @raise [NotifyError] if there was an error communicating with the systemd
+    #   socket
+    #
+    # @see https://www.freedesktop.org/software/systemd/man/sd_notify.html
+    def self.notify(state, unset_env = false)
+      sock = ENV["NOTIFY_SOCKET"]
+
+      return nil unless sock
+
+      ENV.delete("NOTIFY_SOCKET") if unset_env
+
+      begin
+        Addrinfo.unix(sock, :DGRAM).connect do |s|
+          s.close_on_exec = true
+          s.write(state)
+        end
+      rescue => e
+        raise NotifyError, "#{e.class}: #{e.message}", e.backtrace
+      end
+    end
+  end
+end
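
Sidekiq::SdNotify above is a vendored copy of the ruby-sdnotify gem: each helper writes a datagram to the socket named by $NOTIFY_SOCKET and is a no-op when that variable is absent. A hedged sketch of how a process started by a systemd unit with Type=notify might report its lifecycle (illustrative only, not Sidekiq's own boot sequence):

    require "sidekiq/sd_notify"

    # Every call silently returns nil when NOTIFY_SOCKET is unset,
    # so the same code runs unchanged outside systemd.
    Sidekiq::SdNotify.status("booting")   # STATUS= line shown by `systemctl status`
    # ... load the application, connect to Redis, etc. ...
    Sidekiq::SdNotify.ready               # READY=1: systemd marks the unit as started

    at_exit do
      Sidekiq::SdNotify.stopping          # STOPPING=1: a clean shutdown is in progress
    end
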
data/lib/sidekiq/systemd.rb (new file)

@@ -0,0 +1,24 @@
+#
+# Sidekiq's systemd integration allows Sidekiq to inform systemd:
+# 1. when it has successfully started
+# 2. when it is starting shutdown
+# 3. periodically for a liveness check with a watchdog thread
+#
+module Sidekiq
+  def self.start_watchdog
+    usec = Integer(ENV["WATCHDOG_USEC"])
+    return Sidekiq.logger.error("systemd Watchdog too fast: " + usec) if usec < 1_000_000
+
+    sec_f = usec / 1_000_000.0
+    # "It is recommended that a daemon sends a keep-alive notification message
+    # to the service manager every half of the time returned here."
+    ping_f = sec_f / 2
+    Sidekiq.logger.info "Pinging systemd watchdog every #{ping_f.round(1)} sec"
+    Thread.new do
+      loop do
+        sleep ping_f
+        Sidekiq::SdNotify.watchdog
+      end
+    end
+  end
+end
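
data/lib/sidekiq/systemd.rb builds on SdNotify: Sidekiq.start_watchdog reads $WATCHDOG_USEC and pings WATCHDOG=1 from a background thread at half that interval, as sd_notify(3) recommends. A hedged sketch of wiring the two together at boot; the WatchdogSec value in the comment is an assumption, not something Sidekiq configures:

    require "sidekiq/sd_notify"
    require "sidekiq/systemd"

    # Only start the pinger when systemd asked for keep-alives, i.e. the unit
    # sets WatchdogSec (systemd then exports WATCHDOG_USEC in microseconds,
    # e.g. 10_000_000 for WatchdogSec=10) and WATCHDOG_PID targets this process.
    Sidekiq.start_watchdog if Sidekiq::SdNotify.watchdog?
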
data/lib/sidekiq/testing/inline.rb

@@ -1,9 +1,10 @@
 # frozen_string_literal: true
-require 'sidekiq/testing'
+
+require "sidekiq/testing"

 ##
 # The Sidekiq inline infrastructure overrides perform_async so that it
-# actually calls perform instead. This allows workers to be run inline in a
+# actually calls perform instead. This allows jobs to be run inline in a
 # testing environment.
 #
 # This is similar to `Resque.inline = true` functionality.
@@ -14,8 +15,8 @@ require 'sidekiq/testing'
 #
 #   $external_variable = 0
 #
-#   class ExternalWorker
-#     include Sidekiq::Worker
+#   class ExternalJob
+#     include Sidekiq::Job
 #
 #     def perform
 #       $external_variable = 1
@@ -23,7 +24,7 @@ require 'sidekiq/testing'
 #     end
 #   end
 #
-#   ExternalWorker.perform_async
+#   ExternalJob.perform_async
 #   assert_equal 1, $external_variable
 #
 Sidekiq::Testing.inline!