sidekiq 5.2.4 → 7.2.4

Files changed (153)
  1. checksums.yaml +4 -4
  2. data/Changes.md +672 -8
  3. data/LICENSE.txt +9 -0
  4. data/README.md +48 -51
  5. data/bin/multi_queue_bench +271 -0
  6. data/bin/sidekiq +22 -3
  7. data/bin/sidekiqload +213 -115
  8. data/bin/sidekiqmon +11 -0
  9. data/lib/generators/sidekiq/job_generator.rb +57 -0
  10. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  11. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  12. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  13. data/lib/sidekiq/api.rb +623 -352
  14. data/lib/sidekiq/capsule.rb +127 -0
  15. data/lib/sidekiq/cli.rb +214 -229
  16. data/lib/sidekiq/client.rb +127 -102
  17. data/lib/sidekiq/component.rb +68 -0
  18. data/lib/sidekiq/config.rb +287 -0
  19. data/lib/sidekiq/deploy.rb +62 -0
  20. data/lib/sidekiq/embedded.rb +61 -0
  21. data/lib/sidekiq/fetch.rb +49 -42
  22. data/lib/sidekiq/job.rb +374 -0
  23. data/lib/sidekiq/job_logger.rb +33 -7
  24. data/lib/sidekiq/job_retry.rb +157 -108
  25. data/lib/sidekiq/job_util.rb +107 -0
  26. data/lib/sidekiq/launcher.rb +206 -106
  27. data/lib/sidekiq/logger.rb +131 -0
  28. data/lib/sidekiq/manager.rb +43 -46
  29. data/lib/sidekiq/metrics/query.rb +156 -0
  30. data/lib/sidekiq/metrics/shared.rb +95 -0
  31. data/lib/sidekiq/metrics/tracking.rb +140 -0
  32. data/lib/sidekiq/middleware/chain.rb +113 -56
  33. data/lib/sidekiq/middleware/current_attributes.rb +95 -0
  34. data/lib/sidekiq/middleware/i18n.rb +7 -7
  35. data/lib/sidekiq/middleware/modules.rb +21 -0
  36. data/lib/sidekiq/monitor.rb +146 -0
  37. data/lib/sidekiq/paginator.rb +28 -16
  38. data/lib/sidekiq/processor.rb +126 -117
  39. data/lib/sidekiq/rails.rb +52 -38
  40. data/lib/sidekiq/redis_client_adapter.rb +111 -0
  41. data/lib/sidekiq/redis_connection.rb +41 -112
  42. data/lib/sidekiq/ring_buffer.rb +29 -0
  43. data/lib/sidekiq/scheduled.rb +112 -50
  44. data/lib/sidekiq/sd_notify.rb +149 -0
  45. data/lib/sidekiq/systemd.rb +24 -0
  46. data/lib/sidekiq/testing/inline.rb +6 -5
  47. data/lib/sidekiq/testing.rb +91 -90
  48. data/lib/sidekiq/transaction_aware_client.rb +51 -0
  49. data/lib/sidekiq/version.rb +3 -1
  50. data/lib/sidekiq/web/action.rb +20 -11
  51. data/lib/sidekiq/web/application.rb +202 -80
  52. data/lib/sidekiq/web/csrf_protection.rb +183 -0
  53. data/lib/sidekiq/web/helpers.rb +165 -114
  54. data/lib/sidekiq/web/router.rb +23 -19
  55. data/lib/sidekiq/web.rb +68 -107
  56. data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
  57. data/lib/sidekiq.rb +92 -182
  58. data/sidekiq.gemspec +25 -16
  59. data/web/assets/images/apple-touch-icon.png +0 -0
  60. data/web/assets/javascripts/application.js +152 -61
  61. data/web/assets/javascripts/base-charts.js +106 -0
  62. data/web/assets/javascripts/chart.min.js +13 -0
  63. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  64. data/web/assets/javascripts/dashboard-charts.js +182 -0
  65. data/web/assets/javascripts/dashboard.js +35 -293
  66. data/web/assets/javascripts/metrics.js +298 -0
  67. data/web/assets/stylesheets/application-dark.css +147 -0
  68. data/web/assets/stylesheets/application-rtl.css +10 -93
  69. data/web/assets/stylesheets/application.css +124 -522
  70. data/web/assets/stylesheets/bootstrap.css +1 -1
  71. data/web/locales/ar.yml +71 -65
  72. data/web/locales/cs.yml +62 -62
  73. data/web/locales/da.yml +60 -53
  74. data/web/locales/de.yml +65 -53
  75. data/web/locales/el.yml +43 -24
  76. data/web/locales/en.yml +86 -66
  77. data/web/locales/es.yml +70 -54
  78. data/web/locales/fa.yml +65 -65
  79. data/web/locales/fr.yml +83 -62
  80. data/web/locales/gd.yml +99 -0
  81. data/web/locales/he.yml +65 -64
  82. data/web/locales/hi.yml +59 -59
  83. data/web/locales/it.yml +53 -53
  84. data/web/locales/ja.yml +75 -64
  85. data/web/locales/ko.yml +52 -52
  86. data/web/locales/lt.yml +83 -0
  87. data/web/locales/nb.yml +61 -61
  88. data/web/locales/nl.yml +52 -52
  89. data/web/locales/pl.yml +45 -45
  90. data/web/locales/pt-br.yml +83 -55
  91. data/web/locales/pt.yml +51 -51
  92. data/web/locales/ru.yml +68 -63
  93. data/web/locales/sv.yml +53 -53
  94. data/web/locales/ta.yml +60 -60
  95. data/web/locales/uk.yml +62 -61
  96. data/web/locales/ur.yml +64 -64
  97. data/web/locales/vi.yml +83 -0
  98. data/web/locales/zh-cn.yml +43 -16
  99. data/web/locales/zh-tw.yml +42 -8
  100. data/web/views/_footer.erb +18 -3
  101. data/web/views/_job_info.erb +21 -4
  102. data/web/views/_metrics_period_select.erb +12 -0
  103. data/web/views/_nav.erb +1 -1
  104. data/web/views/_paging.erb +2 -0
  105. data/web/views/_poll_link.erb +3 -6
  106. data/web/views/_summary.erb +7 -7
  107. data/web/views/busy.erb +79 -29
  108. data/web/views/dashboard.erb +48 -18
  109. data/web/views/dead.erb +3 -3
  110. data/web/views/filtering.erb +7 -0
  111. data/web/views/layout.erb +3 -1
  112. data/web/views/metrics.erb +91 -0
  113. data/web/views/metrics_for_job.erb +59 -0
  114. data/web/views/morgue.erb +14 -15
  115. data/web/views/queue.erb +33 -24
  116. data/web/views/queues.erb +19 -5
  117. data/web/views/retries.erb +16 -17
  118. data/web/views/retry.erb +3 -3
  119. data/web/views/scheduled.erb +17 -15
  120. metadata +71 -72
  121. data/.github/contributing.md +0 -32
  122. data/.github/issue_template.md +0 -11
  123. data/.gitignore +0 -15
  124. data/.travis.yml +0 -17
  125. data/3.0-Upgrade.md +0 -70
  126. data/4.0-Upgrade.md +0 -53
  127. data/5.0-Upgrade.md +0 -56
  128. data/Appraisals +0 -9
  129. data/COMM-LICENSE +0 -95
  130. data/Ent-Changes.md +0 -225
  131. data/Gemfile +0 -29
  132. data/LICENSE +0 -9
  133. data/Pro-2.0-Upgrade.md +0 -138
  134. data/Pro-3.0-Upgrade.md +0 -44
  135. data/Pro-4.0-Upgrade.md +0 -35
  136. data/Pro-Changes.md +0 -752
  137. data/Rakefile +0 -9
  138. data/bin/sidekiqctl +0 -237
  139. data/code_of_conduct.md +0 -50
  140. data/gemfiles/rails_4.gemfile +0 -31
  141. data/gemfiles/rails_5.gemfile +0 -31
  142. data/lib/generators/sidekiq/worker_generator.rb +0 -49
  143. data/lib/sidekiq/core_ext.rb +0 -1
  144. data/lib/sidekiq/delay.rb +0 -42
  145. data/lib/sidekiq/exception_handler.rb +0 -29
  146. data/lib/sidekiq/extensions/action_mailer.rb +0 -57
  147. data/lib/sidekiq/extensions/active_record.rb +0 -40
  148. data/lib/sidekiq/extensions/class_methods.rb +0 -40
  149. data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
  150. data/lib/sidekiq/logging.rb +0 -122
  151. data/lib/sidekiq/middleware/server/active_record.rb +0 -23
  152. data/lib/sidekiq/util.rb +0 -66
  153. data/lib/sidekiq/worker.rb +0 -215

data/lib/sidekiq/redis_connection.rb
@@ -1,111 +1,50 @@
 # frozen_string_literal: true
-require 'connection_pool'
-require 'redis'
-require 'uri'
+
+require "connection_pool"
+require "uri"
+require "sidekiq/redis_client_adapter"
 
 module Sidekiq
-  class RedisConnection
+  module RedisConnection
     class << self
+      def create(options = {})
+        symbolized_options = options.transform_keys(&:to_sym)
+        symbolized_options[:url] ||= determine_redis_provider
 
-      def create(options={})
-        options.keys.each do |key|
-          options[key.to_sym] = options.delete(key)
-        end
-
-        options[:id] = "Sidekiq-#{Sidekiq.server? ? "server" : "client"}-PID-#{$$}" if !options.has_key?(:id)
-        options[:url] ||= determine_redis_provider
-
-        size = if options[:size]
-                 options[:size]
-               elsif Sidekiq.server?
-                 Sidekiq.options[:concurrency] + 5
-               elsif ENV['RAILS_MAX_THREADS']
-                 Integer(ENV['RAILS_MAX_THREADS'])
-               else
-                 5
-               end
+        logger = symbolized_options.delete(:logger)
+        logger&.info { "Sidekiq #{Sidekiq::VERSION} connecting to Redis with options #{scrub(symbolized_options)}" }
 
-        verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server?
+        raise "Sidekiq 7+ does not support Redis protocol 2" if symbolized_options[:protocol] == 2
+        size = symbolized_options.delete(:size) || 5
+        pool_timeout = symbolized_options.delete(:pool_timeout) || 1
+        pool_name = symbolized_options.delete(:pool_name)
 
-        pool_timeout = options[:pool_timeout] || 1
-        log_info(options)
-
-        ConnectionPool.new(:timeout => pool_timeout, :size => size) do
-          build_client(options)
+        redis_config = Sidekiq::RedisClientAdapter.new(symbolized_options)
+        ConnectionPool.new(timeout: pool_timeout, size: size, name: pool_name) do
+          redis_config.new_client
         end
       end
 
      private
 
-      # Sidekiq needs a lot of concurrent Redis connections.
-      #
-      # We need a connection for each Processor.
-      # We need a connection for Pro's real-time change listener
-      # We need a connection to various features to call Redis every few seconds:
-      #   - the process heartbeat.
-      #   - enterprise's leader election
-      #   - enterprise's cron support
-      def verify_sizing(size, concurrency)
-        raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size <= concurrency
-      end
-
-      def build_client(options)
-        namespace = options[:namespace]
-
-        client = Redis.new client_opts(options)
-        if namespace
-          begin
-            require 'redis/namespace'
-            Redis::Namespace.new(namespace, :redis => client)
-          rescue LoadError
-            Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile." \
-                                 "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.")
-            exit(-127)
-          end
-        else
-          client
-        end
-      end
-
-      def client_opts(options)
-        opts = options.dup
-        if opts[:namespace]
-          opts.delete(:namespace)
-        end
-
-        if opts[:network_timeout]
-          opts[:timeout] = opts[:network_timeout]
-          opts.delete(:network_timeout)
-        end
-
-        opts[:driver] ||= Redis::Connection.drivers.last || 'ruby'
-
-        # Issue #3303, redis-rb will silently retry an operation.
-        # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
-        # is performed twice but I believe this is much, much rarer
-        # than the reconnect silently fixing a problem; we keep it
-        # on by default.
-        opts[:reconnect_attempts] ||= 1
-
-        opts
-      end
-
-      def log_info(options)
-        # Don't log Redis AUTH password
+      def scrub(options)
         redacted = "REDACTED"
-        scrubbed_options = options.dup
+
+        # Deep clone so we can muck with these options all we want and exclude
+        # params from dump-and-load that may contain objects that Marshal is
+        # unable to safely dump.
+        keys = options.keys - [:logger, :ssl_params]
+        scrubbed_options = Marshal.load(Marshal.dump(options.slice(*keys)))
         if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
          uri.password = redacted
           scrubbed_options[:url] = uri.to_s
         end
-        if scrubbed_options[:password]
-          scrubbed_options[:password] = redacted
-        end
-        if Sidekiq.server?
-          Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with redis options #{scrubbed_options}")
-        else
-          Sidekiq.logger.debug("#{Sidekiq::NAME} client with redis options #{scrubbed_options}")
+        scrubbed_options[:password] = redacted if scrubbed_options[:password]
+        scrubbed_options[:sentinel_password] = redacted if scrubbed_options[:sentinel_password]
+        scrubbed_options[:sentinels]&.each do |sentinel|
+          sentinel[:password] = redacted if sentinel[:password]
         end
+        scrubbed_options
       end
 
       def determine_redis_provider
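
Not part of the diff, just a sketch of what the new private scrub helper above does with credential-bearing options, assuming sidekiq 7.x is installed (the `send` call is only there to reach the private method; exact hash inspect format depends on your Ruby version):

    require "sidekiq"
    require "sidekiq/redis_connection"

    opts = {url: "redis://:s3cret@localhost:6379/0", password: "s3cret"}
    # scrub deep-copies the options and masks credentials, so the boot log
    # line prints REDACTED instead of the real password.
    puts Sidekiq::RedisConnection.send(:scrub, opts)
    # => {url: "redis://:REDACTED@localhost:6379/0", password: "REDACTED"}
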
@@ -115,30 +54,20 @@ module Sidekiq
         #   REDIS_PROVIDER=MY_REDIS_URL
         # and Sidekiq will find your custom URL variable with no custom
         # initialization code at all.
-        p = ENV['REDIS_PROVIDER']
-        if p && p =~ /\:/
-          Sidekiq.logger.error <<-EOM
-
-#################################################################################
-
-  REDIS_PROVIDER should be set to the **name** of the variable which contains the Redis URL, not a URL itself.
-  Platforms like Heroku sell addons that publish a *_URL variable. You tell Sidekiq with REDIS_PROVIDER, e.g.:
-
-  REDIS_PROVIDER=REDISTOGO_URL
-  REDISTOGO_URL=redis://somehost.example.com:6379/4
-
-  Use REDIS_URL if you wish to point Sidekiq to a URL directly.
-
-  This configuration error will crash starting in Sidekiq 5.3.
-
-#################################################################################
-          EOM
+        #
+        p = ENV["REDIS_PROVIDER"]
+        if p && p =~ /:/
+          raise <<~EOM
+            REDIS_PROVIDER should be set to the name of the variable which contains the Redis URL, not a URL itself.
+            Platforms like Heroku will sell addons that publish a *_URL variable. You need to tell Sidekiq with REDIS_PROVIDER, e.g.:
+
+            REDISTOGO_URL=redis://somehost.example.com:6379/4
+            REDIS_PROVIDER=REDISTOGO_URL
+          EOM
         end
-        ENV[
-          ENV['REDIS_PROVIDER'] || 'REDIS_URL'
-        ]
-      end
 
+        ENV[p.to_s] || ENV["REDIS_URL"]
+      end
     end
   end
 end
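
The upshot for applications: connection pool sizing is no longer derived from concurrency here (that now lives in the new Config/Capsule layer), and options such as :namespace or redis-rb driver settings are gone. A minimal sketch of passing Redis options through the usual configure hooks (the keys shown are the ones the new create/scrub code handles; adjust for your app):

    # config/initializers/sidekiq.rb -- sketch, not lifted from the gem docs
    redis_opts = {url: ENV.fetch("REDIS_URL", "redis://localhost:6379/0")}

    Sidekiq.configure_server do |config|
      config.redis = redis_opts            # handed through to RedisConnection.create
    end

    Sidekiq.configure_client do |config|
      # :size and :pool_timeout are stripped off for ConnectionPool;
      # everything else goes to redis-client via RedisClientAdapter.
      config.redis = redis_opts.merge(size: 5, pool_timeout: 1)
    end
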

data/lib/sidekiq/ring_buffer.rb
@@ -0,0 +1,29 @@
+require "forwardable"
+
+module Sidekiq
+  class RingBuffer
+    include Enumerable
+    extend Forwardable
+    def_delegators :@buf, :[], :each, :size
+
+    def initialize(size, default = 0)
+      @size = size
+      @buf = Array.new(size, default)
+      @index = 0
+    end
+
+    def <<(element)
+      @buf[@index % @size] = element
+      @index += 1
+      element
+    end
+
+    def buffer
+      @buf
+    end
+
+    def reset(default = 0)
+      @buf.fill(default)
+    end
+  end
+end
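
A quick behavioural sketch of the new buffer, based only on the code above (writes wrap around in place once the buffer is full):

    require "sidekiq/ring_buffer"

    rb = Sidekiq::RingBuffer.new(3)
    5.times { |i| rb << i }
    rb.buffer   # => [3, 4, 2] -- the two oldest slots were overwritten
    rb.size     # => 3, delegated to the underlying Array
    rb.each { |n| puts n }
    rb.reset    # refill every slot with the default (0)
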

data/lib/sidekiq/scheduled.rb
@@ -1,35 +1,66 @@
 # frozen_string_literal: true
-require 'sidekiq'
-require 'sidekiq/util'
-require 'sidekiq/api'
+
+require "sidekiq"
+require "sidekiq/component"
 
 module Sidekiq
   module Scheduled
-    SETS = %w(retry schedule)
+    SETS = %w[retry schedule]
 
     class Enq
-      def enqueue_jobs(now=Time.now.to_f.to_s, sorted_sets=SETS)
+      include Sidekiq::Component
+
+      LUA_ZPOPBYSCORE = <<~LUA
+        local key, now = KEYS[1], ARGV[1]
+        local jobs = redis.call("zrange", key, "-inf", now, "byscore", "limit", 0, 1)
+        if jobs[1] then
+          redis.call("zrem", key, jobs[1])
+          return jobs[1]
+        end
+      LUA
+
+      def initialize(container)
+        @config = container
+        @client = Sidekiq::Client.new(config: container)
+        @done = false
+        @lua_zpopbyscore_sha = nil
+      end
+
+      def enqueue_jobs(sorted_sets = SETS)
         # A job's "score" in Redis is the time at which it should be processed.
         # Just check Redis for the set of jobs with a timestamp before now.
-        Sidekiq.redis do |conn|
+        redis do |conn|
           sorted_sets.each do |sorted_set|
-            # Get the next item in the queue if it's score (time to execute) is <= now.
+            # Get next item in the queue with score (time to execute) <= now.
             # We need to go through the list one at a time to reduce the risk of something
             # going wrong between the time jobs are popped from the scheduled queue and when
             # they are pushed onto a work queue and losing the jobs.
-            while job = conn.zrangebyscore(sorted_set, '-inf', now, :limit => [0, 1]).first do
-
-              # Pop item off the queue and add it to the work queue. If the job can't be popped from
-              # the queue, it's because another process already popped it so we can move on to the
-              # next one.
-              if conn.zrem(sorted_set, job)
-                Sidekiq::Client.push(Sidekiq.load_json(job))
-                Sidekiq::Logging.logger.debug { "enqueued #{sorted_set}: #{job}" }
-              end
+            while !@done && (job = zpopbyscore(conn, keys: [sorted_set], argv: [Time.now.to_f.to_s]))
+              @client.push(Sidekiq.load_json(job))
+              logger.debug { "enqueued #{sorted_set}: #{job}" }
             end
           end
         end
       end
+
+      def terminate
+        @done = true
+      end
+
+      private
+
+      def zpopbyscore(conn, keys: nil, argv: nil)
+        if @lua_zpopbyscore_sha.nil?
+          @lua_zpopbyscore_sha = conn.script(:load, LUA_ZPOPBYSCORE)
+        end
+
+        conn.call("EVALSHA", @lua_zpopbyscore_sha, keys.size, *keys, *argv)
+      rescue RedisClient::CommandError => e
+        raise unless e.message.start_with?("NOSCRIPT")
+
+        @lua_zpopbyscore_sha = nil
+        retry
+      end
     end
 
     ##
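
For context on what Enq#enqueue_jobs above drains: scheduled and retried jobs sit in the "schedule"/"retry" sorted sets with their run-at time as the score. A sketch using a hypothetical job class and the standard client API (not code from this diff):

    require "sidekiq"

    class HardJob
      include Sidekiq::Job   # Sidekiq::Worker remains as an alias in 7.x

      def perform(name)
        puts "working on #{name}"
      end
    end

    # Both calls ZADD the JSON payload to the "schedule" set with the target
    # timestamp as its score; the Poller's Enq later pops it atomically via
    # LUA_ZPOPBYSCORE and pushes it onto the real queue.
    HardJob.perform_in(300, "in five minutes")
    HardJob.perform_at(Time.now + 3600, "in an hour")
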
@@ -38,49 +69,47 @@ module Sidekiq
     # just pops the job back onto its original queue so the
     # workers can pick it up like any other job.
     class Poller
-      include Util
+      include Sidekiq::Component
 
       INITIAL_WAIT = 10
 
-      def initialize
-        @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
+      def initialize(config)
+        @config = config
+        @enq = (config[:scheduled_enq] || Sidekiq::Scheduled::Enq).new(config)
         @sleeper = ConnectionPool::TimedStack.new
         @done = false
         @thread = nil
+        @count_calls = 0
       end
 
       # Shut down this instance, will pause until the thread is dead.
       def terminate
         @done = true
-        if @thread
-          t = @thread
-          @thread = nil
-          @sleeper << 0
-          t.value
-        end
+        @enq.terminate
+
+        @sleeper << 0
+        @thread&.value
       end
 
       def start
-        @thread ||= safe_thread("scheduler") do
+        @thread ||= safe_thread("scheduler") {
           initial_wait
 
-          while !@done
+          until @done
            enqueue
             wait
           end
-          Sidekiq.logger.info("Scheduler exiting...")
-        end
+          logger.info("Scheduler exiting...")
+        }
       end
 
       def enqueue
-        begin
-          @enq.enqueue_jobs
-        rescue => ex
-          # Most likely a problem with redis networking.
-          # Punt and try again at the next interval
-          logger.error ex.message
-          handle_exception(ex)
-        end
+        @enq.enqueue_jobs
+      rescue => ex
+        # Most likely a problem with redis networking.
+        # Punt and try again at the next interval
+        logger.error ex.message
+        handle_exception(ex)
       end
 
       private
@@ -115,15 +144,18 @@
         # In the example above, each process should schedule every 10 seconds on average. We special
         # case smaller clusters to add 50% so they would sleep somewhere between 5 and 15 seconds.
         # As we run more processes, the scheduling interval average will approach an even spread
-        # between 0 and poll interval so we don't need this artifical boost.
+        # between 0 and poll interval so we don't need this artificial boost.
         #
-        if process_count < 10
+        count = process_count
+        interval = poll_interval_average(count)
+
+        if count < 10
           # For small clusters, calculate a random interval that is ±50% the desired average.
-          poll_interval_average * rand + poll_interval_average.to_f / 2
+          interval * rand + interval.to_f / 2
         else
           # With 10+ processes, we should have enough randomness to get decent polling
           # across the entire timespan
-          poll_interval_average * rand
+          interval * rand
         end
       end
 
@@ -140,35 +172,65 @@ module Sidekiq
       # the same time: the thundering herd problem.
       #
       # We only do this if poll_interval_average is unset (the default).
-      def poll_interval_average
-        Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval
+      def poll_interval_average(count)
+        @config[:poll_interval_average] || scaled_poll_interval(count)
       end
 
       # Calculates an average poll interval based on the number of known Sidekiq processes.
       # This minimizes a single point of failure by dispersing check-ins but without taxing
       # Redis if you run many Sidekiq processes.
-      def scaled_poll_interval
-        process_count * Sidekiq.options[:average_scheduled_poll_interval]
+      def scaled_poll_interval(process_count)
+        process_count * @config[:average_scheduled_poll_interval]
       end
 
       def process_count
-        pcount = Sidekiq::ProcessSet.new.size
+        pcount = Sidekiq.redis { |conn| conn.scard("processes") }
         pcount = 1 if pcount == 0
         pcount
       end
 
+      # A copy of Sidekiq::ProcessSet#cleanup because server
+      # should never depend on sidekiq/api.
+      def cleanup
+        # dont run cleanup more than once per minute
+        return 0 unless redis { |conn| conn.set("process_cleanup", "1", "NX", "EX", "60") }
+
+        count = 0
+        redis do |conn|
+          procs = conn.sscan("processes").to_a
+          heartbeats = conn.pipelined { |pipeline|
+            procs.each do |key|
+              pipeline.hget(key, "info")
+            end
+          }
+
+          # the hash named key has an expiry of 60 seconds.
+          # if it's not found, that means the process has not reported
+          # in to Redis and probably died.
+          to_prune = procs.select.with_index { |proc, i|
+            heartbeats[i].nil?
+          }
+          count = conn.srem("processes", to_prune) unless to_prune.empty?
+        end
+        count
+      end
+
       def initial_wait
-        # Have all processes sleep between 5-15 seconds. 10 seconds
-        # to give time for the heartbeat to register (if the poll interval is going to be calculated by the number
+        # Have all processes sleep between 5-15 seconds. 10 seconds to give time for
+        # the heartbeat to register (if the poll interval is going to be calculated by the number
        # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time.
         total = 0
-        total += INITIAL_WAIT unless Sidekiq.options[:poll_interval_average]
+        total += INITIAL_WAIT unless @config[:poll_interval_average]
         total += (5 * rand)
 
         @sleeper.pop(total)
       rescue Timeout::Error
+      ensure
+        # periodically clean out the `processes` set in Redis which can collect
+        # references to dead processes over time. The process count affects how
+        # often we scan for scheduled jobs.
+        cleanup
       end
-
     end
   end
 end
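
To make the polling math above concrete, a standalone arithmetic sketch (5 seconds is Sidekiq's default average_scheduled_poll_interval; no Sidekiq install needed to run this):

    average_scheduled_poll_interval = 5          # Sidekiq default, in seconds
    process_count = 3                            # e.g. SCARD processes => 3

    # scaled_poll_interval
    interval = process_count * average_scheduled_poll_interval   # => 15

    # random_poll_interval, small-cluster branch (fewer than 10 processes):
    # sleep somewhere in [7.5, 22.5), averaging 15s per process, so the
    # 3-process cluster still checks the schedule roughly every 5 seconds.
    sleep_for = interval * rand + interval.to_f / 2
    puts format("next poll in %.1f sec", sleep_for)
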

data/lib/sidekiq/sd_notify.rb
@@ -0,0 +1,149 @@
+# frozen_string_literal: true
+
+# The MIT License
+#
+# Copyright (c) 2017, 2018, 2019, 2020 Agis Anastasopoulos
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+# This is a copy of https://github.com/agis/ruby-sdnotify as of commit a7d52ee
+# The only changes made was "rehoming" it within the Sidekiq module to avoid
+# namespace collisions and applying standard's code formatting style.
+
+require "socket"
+
+# SdNotify is a pure-Ruby implementation of sd_notify(3). It can be used to
+# notify systemd about state changes. Methods of this package are no-op on
+# non-systemd systems (eg. Darwin).
+#
+# The API maps closely to the original implementation of sd_notify(3),
+# therefore be sure to check the official man pages prior to using SdNotify.
+#
+# @see https://www.freedesktop.org/software/systemd/man/sd_notify.html
+module Sidekiq
+  module SdNotify
+    # Exception raised when there's an error writing to the notification socket
+    class NotifyError < RuntimeError; end
+
+    READY = "READY=1"
+    RELOADING = "RELOADING=1"
+    STOPPING = "STOPPING=1"
+    STATUS = "STATUS="
+    ERRNO = "ERRNO="
+    MAINPID = "MAINPID="
+    WATCHDOG = "WATCHDOG=1"
+    FDSTORE = "FDSTORE=1"
+
+    def self.ready(unset_env = false)
+      notify(READY, unset_env)
+    end
+
+    def self.reloading(unset_env = false)
+      notify(RELOADING, unset_env)
+    end
+
+    def self.stopping(unset_env = false)
+      notify(STOPPING, unset_env)
+    end
+
+    # @param status [String] a custom status string that describes the current
+    #   state of the service
+    def self.status(status, unset_env = false)
+      notify("#{STATUS}#{status}", unset_env)
+    end
+
+    # @param errno [Integer]
+    def self.errno(errno, unset_env = false)
+      notify("#{ERRNO}#{errno}", unset_env)
+    end
+
+    # @param pid [Integer]
+    def self.mainpid(pid, unset_env = false)
+      notify("#{MAINPID}#{pid}", unset_env)
+    end
+
+    def self.watchdog(unset_env = false)
+      notify(WATCHDOG, unset_env)
+    end
+
+    def self.fdstore(unset_env = false)
+      notify(FDSTORE, unset_env)
+    end
+
+    # @return [Boolean] true if the service manager expects watchdog keep-alive
+    #   notification messages to be sent from this process.
+    #
+    # If the $WATCHDOG_USEC environment variable is set,
+    # and the $WATCHDOG_PID variable is unset or set to the PID of the current
+    # process
+    #
+    # @note Unlike sd_watchdog_enabled(3), this method does not mutate the
+    #   environment.
+    def self.watchdog?
+      wd_usec = ENV["WATCHDOG_USEC"]
+      wd_pid = ENV["WATCHDOG_PID"]
+
+      return false unless wd_usec
+
+      begin
+        wd_usec = Integer(wd_usec)
+      rescue
+        return false
+      end
+
+      return false if wd_usec <= 0
+      return true if !wd_pid || wd_pid == $$.to_s
+
+      false
+    end
+
+    # Notify systemd with the provided state, via the notification socket, if
+    # any.
+    #
+    # Generally this method will be used indirectly through the other methods
+    # of the library.
+    #
+    # @param state [String]
+    # @param unset_env [Boolean]
+    #
+    # @return [Fixnum, nil] the number of bytes written to the notification
+    #   socket or nil if there was no socket to report to (eg. the program wasn't
+    #   started by systemd)
+    #
+    # @raise [NotifyError] if there was an error communicating with the systemd
+    #   socket
+    #
+    # @see https://www.freedesktop.org/software/systemd/man/sd_notify.html
+    def self.notify(state, unset_env = false)
+      sock = ENV["NOTIFY_SOCKET"]
+
+      return nil unless sock
+
+      ENV.delete("NOTIFY_SOCKET") if unset_env
+
+      begin
+        Addrinfo.unix(sock, :DGRAM).connect do |s|
+          s.close_on_exec = true
+          s.write(state)
+        end
+      rescue => e
+        raise NotifyError, "#{e.class}: #{e.message}", e.backtrace
+      end
+    end
+  end
+end
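
A usage sketch of the vendored helper; every call is a no-op unless systemd started the process and exported NOTIFY_SOCKET:

    require "sidekiq/sd_notify"

    Sidekiq::SdNotify.ready                    # READY=1: startup finished
    Sidekiq::SdNotify.status("3 jobs running") # free-form line shown by `systemctl status`
    Sidekiq::SdNotify.watchdog if Sidekiq::SdNotify.watchdog?  # keep-alive ping
    Sidekiq::SdNotify.stopping                 # STOPPING=1: shutdown has begun
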

data/lib/sidekiq/systemd.rb
@@ -0,0 +1,24 @@
+#
+# Sidekiq's systemd integration allows Sidekiq to inform systemd:
+#  1. when it has successfully started
+#  2. when it is starting shutdown
+#  3. periodically for a liveness check with a watchdog thread
+#
+module Sidekiq
+  def self.start_watchdog
+    usec = Integer(ENV["WATCHDOG_USEC"])
+    return Sidekiq.logger.error("systemd Watchdog too fast: " + usec) if usec < 1_000_000
+
+    sec_f = usec / 1_000_000.0
+    # "It is recommended that a daemon sends a keep-alive notification message
+    # to the service manager every half of the time returned here."
+    ping_f = sec_f / 2
+    Sidekiq.logger.info "Pinging systemd watchdog every #{ping_f.round(1)} sec"
+    Thread.new do
+      loop do
+        sleep ping_f
+        Sidekiq::SdNotify.watchdog
+      end
+    end
+  end
+end
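
For a sense of the arithmetic in start_watchdog: a unit file with WatchdogSec=10 makes systemd export WATCHDOG_USEC=10000000, so the ping thread wakes every 5 seconds (standalone sketch of the same calculation):

    usec = 10_000_000            # WATCHDOG_USEC for WatchdogSec=10
    sec_f = usec / 1_000_000.0   # => 10.0 seconds
    ping_f = sec_f / 2           # => 5.0; sd_notify(3) recommends pinging at half the timeout
    puts "Pinging systemd watchdog every #{ping_f.round(1)} sec"
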

data/lib/sidekiq/testing/inline.rb
@@ -1,9 +1,10 @@
 # frozen_string_literal: true
-require 'sidekiq/testing'
+
+require "sidekiq/testing"
 
 ##
 # The Sidekiq inline infrastructure overrides perform_async so that it
-# actually calls perform instead. This allows workers to be run inline in a
+# actually calls perform instead. This allows jobs to be run inline in a
 # testing environment.
 #
 # This is similar to `Resque.inline = true` functionality.
@@ -14,8 +15,8 @@ require 'sidekiq/testing'
 #
 #   $external_variable = 0
 #
-#   class ExternalWorker
-#     include Sidekiq::Worker
+#   class ExternalJob
+#     include Sidekiq::Job
 #
 #     def perform
 #       $external_variable = 1
@@ -23,7 +24,7 @@ require 'sidekiq/testing'
 #   end
 #
 #   assert_equal 0, $external_variable
-#   ExternalWorker.perform_async
+#   ExternalJob.perform_async
 #   assert_equal 1, $external_variable
 #
 Sidekiq::Testing.inline!
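
Beyond the global switch above, Sidekiq::Testing can also be toggled per block, which is handy in a single spec (sketch; assumes the ExternalJob class from the comment example is defined):

    require "sidekiq/testing"

    Sidekiq::Testing.fake!        # default test mode: jobs queue up in memory

    ExternalJob.perform_async
    ExternalJob.jobs.size         # => 1, nothing has run yet

    Sidekiq::Testing.inline! do
      ExternalJob.perform_async   # runs synchronously inside the block
    end

    Sidekiq::Testing.fake?        # => true again once the block exits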