sidekiq 6.1.0

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of Sidekiq might be problematic. Click here for more details.

Files changed (127) hide show
  1. checksums.yaml +7 -0
  2. data/.circleci/config.yml +71 -0
  3. data/.github/contributing.md +32 -0
  4. data/.github/issue_template.md +11 -0
  5. data/.gitignore +13 -0
  6. data/.standard.yml +20 -0
  7. data/3.0-Upgrade.md +70 -0
  8. data/4.0-Upgrade.md +53 -0
  9. data/5.0-Upgrade.md +56 -0
  10. data/6.0-Upgrade.md +72 -0
  11. data/COMM-LICENSE +97 -0
  12. data/Changes.md +1718 -0
  13. data/Ent-2.0-Upgrade.md +37 -0
  14. data/Ent-Changes.md +269 -0
  15. data/Gemfile +24 -0
  16. data/Gemfile.lock +208 -0
  17. data/LICENSE +9 -0
  18. data/Pro-2.0-Upgrade.md +138 -0
  19. data/Pro-3.0-Upgrade.md +44 -0
  20. data/Pro-4.0-Upgrade.md +35 -0
  21. data/Pro-5.0-Upgrade.md +25 -0
  22. data/Pro-Changes.md +790 -0
  23. data/README.md +94 -0
  24. data/Rakefile +10 -0
  25. data/bin/sidekiq +42 -0
  26. data/bin/sidekiqload +157 -0
  27. data/bin/sidekiqmon +8 -0
  28. data/code_of_conduct.md +50 -0
  29. data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
  30. data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
  31. data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
  32. data/lib/generators/sidekiq/worker_generator.rb +57 -0
  33. data/lib/sidekiq.rb +262 -0
  34. data/lib/sidekiq/api.rb +960 -0
  35. data/lib/sidekiq/cli.rb +401 -0
  36. data/lib/sidekiq/client.rb +263 -0
  37. data/lib/sidekiq/delay.rb +41 -0
  38. data/lib/sidekiq/exception_handler.rb +27 -0
  39. data/lib/sidekiq/extensions/action_mailer.rb +47 -0
  40. data/lib/sidekiq/extensions/active_record.rb +43 -0
  41. data/lib/sidekiq/extensions/class_methods.rb +43 -0
  42. data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
  43. data/lib/sidekiq/fetch.rb +82 -0
  44. data/lib/sidekiq/job_logger.rb +63 -0
  45. data/lib/sidekiq/job_retry.rb +262 -0
  46. data/lib/sidekiq/launcher.rb +206 -0
  47. data/lib/sidekiq/logger.rb +165 -0
  48. data/lib/sidekiq/manager.rb +135 -0
  49. data/lib/sidekiq/middleware/chain.rb +160 -0
  50. data/lib/sidekiq/middleware/i18n.rb +40 -0
  51. data/lib/sidekiq/monitor.rb +133 -0
  52. data/lib/sidekiq/paginator.rb +47 -0
  53. data/lib/sidekiq/processor.rb +280 -0
  54. data/lib/sidekiq/rails.rb +50 -0
  55. data/lib/sidekiq/redis_connection.rb +146 -0
  56. data/lib/sidekiq/scheduled.rb +173 -0
  57. data/lib/sidekiq/sd_notify.rb +149 -0
  58. data/lib/sidekiq/systemd.rb +24 -0
  59. data/lib/sidekiq/testing.rb +344 -0
  60. data/lib/sidekiq/testing/inline.rb +30 -0
  61. data/lib/sidekiq/util.rb +67 -0
  62. data/lib/sidekiq/version.rb +5 -0
  63. data/lib/sidekiq/web.rb +213 -0
  64. data/lib/sidekiq/web/action.rb +93 -0
  65. data/lib/sidekiq/web/application.rb +357 -0
  66. data/lib/sidekiq/web/csrf_protection.rb +153 -0
  67. data/lib/sidekiq/web/helpers.rb +333 -0
  68. data/lib/sidekiq/web/router.rb +101 -0
  69. data/lib/sidekiq/worker.rb +244 -0
  70. data/sidekiq.gemspec +20 -0
  71. data/web/assets/images/favicon.ico +0 -0
  72. data/web/assets/images/logo.png +0 -0
  73. data/web/assets/images/status.png +0 -0
  74. data/web/assets/javascripts/application.js +95 -0
  75. data/web/assets/javascripts/dashboard.js +296 -0
  76. data/web/assets/stylesheets/application-dark.css +133 -0
  77. data/web/assets/stylesheets/application-rtl.css +246 -0
  78. data/web/assets/stylesheets/application.css +1158 -0
  79. data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
  80. data/web/assets/stylesheets/bootstrap.css +5 -0
  81. data/web/locales/ar.yml +81 -0
  82. data/web/locales/cs.yml +78 -0
  83. data/web/locales/da.yml +68 -0
  84. data/web/locales/de.yml +81 -0
  85. data/web/locales/el.yml +68 -0
  86. data/web/locales/en.yml +83 -0
  87. data/web/locales/es.yml +70 -0
  88. data/web/locales/fa.yml +80 -0
  89. data/web/locales/fr.yml +78 -0
  90. data/web/locales/he.yml +79 -0
  91. data/web/locales/hi.yml +75 -0
  92. data/web/locales/it.yml +69 -0
  93. data/web/locales/ja.yml +83 -0
  94. data/web/locales/ko.yml +68 -0
  95. data/web/locales/lt.yml +83 -0
  96. data/web/locales/nb.yml +77 -0
  97. data/web/locales/nl.yml +68 -0
  98. data/web/locales/pl.yml +59 -0
  99. data/web/locales/pt-br.yml +68 -0
  100. data/web/locales/pt.yml +67 -0
  101. data/web/locales/ru.yml +78 -0
  102. data/web/locales/sv.yml +68 -0
  103. data/web/locales/ta.yml +75 -0
  104. data/web/locales/uk.yml +76 -0
  105. data/web/locales/ur.yml +80 -0
  106. data/web/locales/vi.yml +83 -0
  107. data/web/locales/zh-cn.yml +68 -0
  108. data/web/locales/zh-tw.yml +68 -0
  109. data/web/views/_footer.erb +20 -0
  110. data/web/views/_job_info.erb +89 -0
  111. data/web/views/_nav.erb +52 -0
  112. data/web/views/_paging.erb +23 -0
  113. data/web/views/_poll_link.erb +7 -0
  114. data/web/views/_status.erb +4 -0
  115. data/web/views/_summary.erb +40 -0
  116. data/web/views/busy.erb +101 -0
  117. data/web/views/dashboard.erb +75 -0
  118. data/web/views/dead.erb +34 -0
  119. data/web/views/layout.erb +41 -0
  120. data/web/views/morgue.erb +78 -0
  121. data/web/views/queue.erb +55 -0
  122. data/web/views/queues.erb +38 -0
  123. data/web/views/retries.erb +83 -0
  124. data/web/views/retry.erb +34 -0
  125. data/web/views/scheduled.erb +57 -0
  126. data/web/views/scheduled_job_info.erb +8 -0
  127. metadata +212 -0
@@ -0,0 +1,262 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "sidekiq/version"
4
+ fail "Sidekiq #{Sidekiq::VERSION} does not support Ruby versions below 2.5.0." if RUBY_PLATFORM != "java" && Gem::Version.new(RUBY_VERSION) < Gem::Version.new("2.5.0")
5
+
6
+ require "sidekiq/logger"
7
+ require "sidekiq/client"
8
+ require "sidekiq/worker"
9
+ require "sidekiq/redis_connection"
10
+ require "sidekiq/delay"
11
+
12
+ require "json"
13
+
14
+ module Sidekiq
15
+ NAME = "Sidekiq"
16
+ LICENSE = "See LICENSE and the LGPL-3.0 for licensing details."
17
+
18
+ DEFAULTS = {
19
+ queues: [],
20
+ labels: [],
21
+ concurrency: 10,
22
+ require: ".",
23
+ strict: true,
24
+ environment: nil,
25
+ timeout: 25,
26
+ poll_interval_average: nil,
27
+ average_scheduled_poll_interval: 5,
28
+ error_handlers: [],
29
+ death_handlers: [],
30
+ lifecycle_events: {
31
+ startup: [],
32
+ quiet: [],
33
+ shutdown: [],
34
+ heartbeat: []
35
+ },
36
+ dead_max_jobs: 10_000,
37
+ dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
38
+ reloader: proc { |&block| block.call }
39
+ }
40
+
41
+ DEFAULT_WORKER_OPTIONS = {
42
+ "retry" => true,
43
+ "queue" => "default"
44
+ }
45
+
46
+ FAKE_INFO = {
47
+ "redis_version" => "9.9.9",
48
+ "uptime_in_days" => "9999",
49
+ "connected_clients" => "9999",
50
+ "used_memory_human" => "9P",
51
+ "used_memory_peak_human" => "9P"
52
+ }
53
+
54
+ def self.❨╯°□°❩╯︵┻━┻
55
+ puts "Calm down, yo."
56
+ end
57
+
58
+ def self.options
59
+ @options ||= DEFAULTS.dup
60
+ end
61
+
62
+ def self.options=(opts)
63
+ @options = opts
64
+ end
65
+
66
+ ##
67
+ # Configuration for Sidekiq server, use like:
68
+ #
69
+ # Sidekiq.configure_server do |config|
70
+ # config.redis = { :namespace => 'myapp', :size => 25, :url => 'redis://myhost:8877/0' }
71
+ # config.server_middleware do |chain|
72
+ # chain.add MyServerHook
73
+ # end
74
+ # end
75
+ def self.configure_server
76
+ yield self if server?
77
+ end
78
+
79
+ ##
80
+ # Configuration for Sidekiq client, use like:
81
+ #
82
+ # Sidekiq.configure_client do |config|
83
+ # config.redis = { :namespace => 'myapp', :size => 1, :url => 'redis://myhost:8877/0' }
84
+ # end
85
+ def self.configure_client
86
+ yield self unless server?
87
+ end
88
+
89
+ def self.server?
90
+ defined?(Sidekiq::CLI)
91
+ end
92
+
93
# Yield a Redis connection from the shared pool.
#
# Retries the block exactly once on READONLY/NOREPLICAS errors so a
# failover to a replica is transparent to callers.
#
# @raise [ArgumentError] if no block is given
def self.redis
  raise ArgumentError, "requires a block" unless block_given?
  redis_pool.with do |conn|
    retryable = true
    begin
      yield conn
    rescue Redis::BaseError => ex
      # 2550 Failover can cause the server to become a replica, need
      # to disconnect and reopen the socket to get back to the primary.
      # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
      # match? avoids allocating MatchData and matches the style used in redis_info.
      if retryable && /READONLY|NOREPLICAS/.match?(ex.message)
        conn.disconnect!
        retryable = false
        retry
      end
      raise
    end
  end
end
112
+
113
# Return the Redis server INFO hash, or FAKE_INFO when the INFO
# command has (probably) been renamed/disabled (#2850).
def self.redis_info
  redis do |conn|
    begin
      # redis-namespace 2.0 no longer forwards admin commands, so reach
      # through to the raw client when a namespace wrapper is present.
      conn.respond_to?(:namespace) ? conn.redis.info : conn.info
    rescue Redis::CommandError => ex
      # 2850 return fake version when INFO command has (probably) been renamed
      raise unless ex.message.include?("unknown command")
      FAKE_INFO
    end
  end
end
128
+
129
+ def self.redis_pool
130
+ @redis ||= Sidekiq::RedisConnection.create
131
+ end
132
+
133
+ def self.redis=(hash)
134
+ @redis = if hash.is_a?(ConnectionPool)
135
+ hash
136
+ else
137
+ Sidekiq::RedisConnection.create(hash)
138
+ end
139
+ end
140
+
141
+ def self.client_middleware
142
+ @client_chain ||= Middleware::Chain.new
143
+ yield @client_chain if block_given?
144
+ @client_chain
145
+ end
146
+
147
+ def self.server_middleware
148
+ @server_chain ||= default_server_middleware
149
+ yield @server_chain if block_given?
150
+ @server_chain
151
+ end
152
+
153
+ def self.default_server_middleware
154
+ Middleware::Chain.new
155
+ end
156
+
157
+ def self.default_worker_options=(hash)
158
+ # stringify
159
+ @default_worker_options = default_worker_options.merge(hash.transform_keys(&:to_s))
160
+ end
161
+
162
+ def self.default_worker_options
163
+ defined?(@default_worker_options) ? @default_worker_options : DEFAULT_WORKER_OPTIONS
164
+ end
165
+
166
+ ##
167
+ # Death handlers are called when all retries for a job have been exhausted and
168
+ # the job dies. It's the notification to your application
169
+ # that this job will not succeed without manual intervention.
170
+ #
171
+ # Sidekiq.configure_server do |config|
172
+ # config.death_handlers << ->(job, ex) do
173
+ # end
174
+ # end
175
+ def self.death_handlers
176
+ options[:death_handlers]
177
+ end
178
+
179
# Parse a JSON payload into Ruby data (hashes keyed by strings).
def self.load_json(string)
  ::JSON.parse(string)
end
182
+
183
# Serialize a Ruby object to its JSON representation.
def self.dump_json(object)
  ::JSON.generate(object)
end
186
+
187
# Memoized default log formatter. Heroku dynos (DYNO env var set)
# already timestamp every log line, so skip our own timestamp there.
def self.log_formatter
  @log_formatter ||= begin
    formatter_class =
      if ENV["DYNO"]
        Sidekiq::Logger::Formatters::WithoutTimestamp
      else
        Sidekiq::Logger::Formatters::Pretty
      end
    formatter_class.new
  end
end
194
+
195
+ def self.log_formatter=(log_formatter)
196
+ @log_formatter = log_formatter
197
+ logger.formatter = log_formatter
198
+ end
199
+
200
+ def self.logger
201
+ @logger ||= Sidekiq::Logger.new(STDOUT, level: Logger::INFO)
202
+ end
203
+
204
# Install a custom logger. Passing nil silences logging by raising the
# current logger's level to FATAL rather than removing it entirely.
def self.logger=(new_logger)
  if new_logger.nil?
    self.logger.level = Logger::FATAL
    return self.logger
  end

  # Mix in Sidekiq's level helpers so the custom logger responds to them.
  new_logger.extend(Sidekiq::LoggingUtils)

  @logger = new_logger
end
214
+
215
+ def self.pro?
216
+ defined?(Sidekiq::Pro)
217
+ end
218
+
219
+ # How frequently Redis should be checked by a random Sidekiq process for
220
+ # scheduled and retriable jobs. Each individual process will take turns by
221
+ # waiting some multiple of this value.
222
+ #
223
+ # See sidekiq/scheduled.rb for an in-depth explanation of this value
224
+ def self.average_scheduled_poll_interval=(interval)
225
+ options[:average_scheduled_poll_interval] = interval
226
+ end
227
+
228
+ # Register a proc to handle any error which occurs within the Sidekiq process.
229
+ #
230
+ # Sidekiq.configure_server do |config|
231
+ # config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
232
+ # end
233
+ #
234
+ # The default error handler logs errors to Sidekiq.logger.
235
+ def self.error_handlers
236
+ options[:error_handlers]
237
+ end
238
+
239
+ # Register a block to run at a point in the Sidekiq lifecycle.
240
+ # :startup, :quiet or :shutdown are valid events.
241
+ #
242
+ # Sidekiq.configure_server do |config|
243
+ # config.on(:shutdown) do
244
+ # puts "Goodbye cruel world!"
245
+ # end
246
+ # end
247
# Register a block to run at a point in the Sidekiq lifecycle.
# :startup, :quiet or :shutdown are valid events.
#
# @raise [ArgumentError] if event is not a Symbol or not a known event
def self.on(event, &block)
  raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
  handlers = options[:lifecycle_events]
  raise ArgumentError, "Invalid event name: #{event}" unless handlers.key?(event)
  handlers[event] << block
end
252
+
253
+ # We are shutting down Sidekiq but what about workers that
254
+ # are working on some long job? This error is
255
+ # raised in workers that have not finished within the hard
256
+ # timeout limit. This is needed to rollback db transactions,
257
+ # otherwise Ruby's Thread#kill will commit. See #377.
258
+ # DO NOT RESCUE THIS ERROR IN YOUR WORKERS
259
+ class Shutdown < Interrupt; end
260
+ end
261
+
262
+ require "sidekiq/rails" if defined?(::Rails::Engine)
@@ -0,0 +1,960 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "sidekiq"
4
+
5
+ require "zlib"
6
+ require "base64"
7
+
8
+ module Sidekiq
9
+ class Stats
10
+ def initialize
11
+ fetch_stats!
12
+ end
13
+
14
+ def processed
15
+ stat :processed
16
+ end
17
+
18
+ def failed
19
+ stat :failed
20
+ end
21
+
22
+ def scheduled_size
23
+ stat :scheduled_size
24
+ end
25
+
26
+ def retry_size
27
+ stat :retry_size
28
+ end
29
+
30
+ def dead_size
31
+ stat :dead_size
32
+ end
33
+
34
+ def enqueued
35
+ stat :enqueued
36
+ end
37
+
38
+ def processes_size
39
+ stat :processes_size
40
+ end
41
+
42
+ def workers_size
43
+ stat :workers_size
44
+ end
45
+
46
+ def default_queue_latency
47
+ stat :default_queue_latency
48
+ end
49
+
50
+ def queues
51
+ Sidekiq::Stats::Queues.new.lengths
52
+ end
53
+
54
+ def fetch_stats!
55
+ pipe1_res = Sidekiq.redis { |conn|
56
+ conn.pipelined do
57
+ conn.get("stat:processed")
58
+ conn.get("stat:failed")
59
+ conn.zcard("schedule")
60
+ conn.zcard("retry")
61
+ conn.zcard("dead")
62
+ conn.scard("processes")
63
+ conn.lrange("queue:default", -1, -1)
64
+ end
65
+ }
66
+
67
+ processes = Sidekiq.redis { |conn|
68
+ conn.sscan_each("processes").to_a
69
+ }
70
+
71
+ queues = Sidekiq.redis { |conn|
72
+ conn.sscan_each("queues").to_a
73
+ }
74
+
75
+ pipe2_res = Sidekiq.redis { |conn|
76
+ conn.pipelined do
77
+ processes.each { |key| conn.hget(key, "busy") }
78
+ queues.each { |queue| conn.llen("queue:#{queue}") }
79
+ end
80
+ }
81
+
82
+ s = processes.size
83
+ workers_size = pipe2_res[0...s].sum(&:to_i)
84
+ enqueued = pipe2_res[s..-1].sum(&:to_i)
85
+
86
+ default_queue_latency = if (entry = pipe1_res[6].first)
87
+ job = begin
88
+ Sidekiq.load_json(entry)
89
+ rescue
90
+ {}
91
+ end
92
+ now = Time.now.to_f
93
+ thence = job["enqueued_at"] || now
94
+ now - thence
95
+ else
96
+ 0
97
+ end
98
+ @stats = {
99
+ processed: pipe1_res[0].to_i,
100
+ failed: pipe1_res[1].to_i,
101
+ scheduled_size: pipe1_res[2],
102
+ retry_size: pipe1_res[3],
103
+ dead_size: pipe1_res[4],
104
+ processes_size: pipe1_res[5],
105
+
106
+ default_queue_latency: default_queue_latency,
107
+ workers_size: workers_size,
108
+ enqueued: enqueued
109
+ }
110
+ end
111
+
112
+ def reset(*stats)
113
+ all = %w[failed processed]
114
+ stats = stats.empty? ? all : all & stats.flatten.compact.map(&:to_s)
115
+
116
+ mset_args = []
117
+ stats.each do |stat|
118
+ mset_args << "stat:#{stat}"
119
+ mset_args << 0
120
+ end
121
+ Sidekiq.redis do |conn|
122
+ conn.mset(*mset_args)
123
+ end
124
+ end
125
+
126
+ private
127
+
128
+ def stat(s)
129
+ @stats[s]
130
+ end
131
+
132
+ class Queues
133
+ def lengths
134
+ Sidekiq.redis do |conn|
135
+ queues = conn.sscan_each("queues").to_a
136
+
137
+ lengths = conn.pipelined {
138
+ queues.each do |queue|
139
+ conn.llen("queue:#{queue}")
140
+ end
141
+ }
142
+
143
+ array_of_arrays = queues.zip(lengths).sort_by { |_, size| -size }
144
+ Hash[array_of_arrays]
145
+ end
146
+ end
147
+ end
148
+
149
+ class History
150
+ def initialize(days_previous, start_date = nil)
151
+ @days_previous = days_previous
152
+ @start_date = start_date || Time.now.utc.to_date
153
+ end
154
+
155
+ def processed
156
+ @processed ||= date_stat_hash("processed")
157
+ end
158
+
159
+ def failed
160
+ @failed ||= date_stat_hash("failed")
161
+ end
162
+
163
+ private
164
+
165
# Build a {"YYYY-MM-DD" => count} hash for the given stat over the
# configured window, newest date first. Days with no counter are 0.
def date_stat_hash(stat)
  dates = @start_date.downto(@start_date - @days_previous + 1).map { |date|
    date.strftime("%Y-%m-%d")
  }
  keys = dates.map { |datestr| "stat:#{stat}:#{datestr}" }

  stat_hash = {}
  begin
    Sidekiq.redis do |conn|
      conn.mget(keys).zip(dates).each do |value, date|
        stat_hash[date] = value ? value.to_i : 0
      end
    end
  rescue Redis::CommandError
    # mget will trigger a CROSSSLOT error when run against a Cluster
    # TODO Someone want to add Cluster support?
  end

  stat_hash
end
186
+ end
187
+ end
188
+
189
+ ##
190
+ # Encapsulates a queue within Sidekiq.
191
+ # Allows enumeration of all jobs within the queue
192
+ # and deletion of jobs.
193
+ #
194
+ # queue = Sidekiq::Queue.new("mailer")
195
+ # queue.each do |job|
196
+ # job.klass # => 'MyWorker'
197
+ # job.args # => [1, 2, 3]
198
+ # job.delete if job.jid == 'abcdef1234567890'
199
+ # end
200
+ #
201
+ class Queue
202
+ include Enumerable
203
+
204
+ ##
205
+ # Return all known queues within Redis.
206
+ #
207
+ def self.all
208
+ Sidekiq.redis { |c| c.sscan_each("queues").to_a }.sort.map { |q| Sidekiq::Queue.new(q) }
209
+ end
210
+
211
+ attr_reader :name
212
+
213
+ def initialize(name = "default")
214
+ @name = name.to_s
215
+ @rname = "queue:#{name}"
216
+ end
217
+
218
+ def size
219
+ Sidekiq.redis { |con| con.llen(@rname) }
220
+ end
221
+
222
+ # Sidekiq Pro overrides this
223
+ def paused?
224
+ false
225
+ end
226
+
227
##
# Calculates this queue's latency, the difference in seconds since the oldest
# job in the queue was enqueued.
#
# @return Float
def latency
  oldest = Sidekiq.redis { |conn| conn.lrange(@rname, -1, -1) }.first
  return 0 unless oldest

  payload = Sidekiq.load_json(oldest)
  now = Time.now.to_f
  # An entry without enqueued_at contributes zero latency.
  now - (payload["enqueued_at"] || now)
end
242
+
243
# Enumerate jobs in this queue, fetching 50 at a time.
# The window start is shifted by the number of jobs deleted mid-iteration
# so entries are not skipped as the list shrinks.
def each
  initial_size = size
  deleted_size = 0
  page = 0
  page_size = 50

  loop do
    range_start = page * page_size - deleted_size
    range_end = range_start + page_size - 1
    entries = Sidekiq.redis { |conn| conn.lrange(@rname, range_start, range_end) }
    break if entries.empty?

    page += 1
    entries.each { |entry| yield Job.new(entry, @name) }
    deleted_size = initial_size - size
  end
end
263
+
264
+ ##
265
+ # Find the job with the given JID within this queue.
266
+ #
267
+ # This is a slow, inefficient operation. Do not use under
268
+ # normal conditions. Sidekiq Pro contains a faster version.
269
+ def find_job(jid)
270
+ detect { |j| j.jid == jid }
271
+ end
272
+
273
+ def clear
274
+ Sidekiq.redis do |conn|
275
+ conn.multi do
276
+ conn.unlink(@rname)
277
+ conn.srem("queues", name)
278
+ end
279
+ end
280
+ end
281
+ alias_method :💣, :clear
282
+ end
283
+
284
+ ##
285
+ # Encapsulates a pending job within a Sidekiq queue or
286
+ # sorted set.
287
+ #
288
+ # The job should be considered immutable but may be
289
+ # removed from the queue via Job#delete.
290
+ #
291
+ class Job
292
+ attr_reader :item
293
+ attr_reader :value
294
+
295
+ def initialize(item, queue_name = nil)
296
+ @args = nil
297
+ @value = item
298
+ @item = item.is_a?(Hash) ? item : parse(item)
299
+ @queue = queue_name || @item["queue"]
300
+ end
301
+
302
+ def parse(item)
303
+ Sidekiq.load_json(item)
304
+ rescue JSON::ParserError
305
+ # If the job payload in Redis is invalid JSON, we'll load
306
+ # the item as an empty hash and store the invalid JSON as
307
+ # the job 'args' for display in the Web UI.
308
+ @invalid = true
309
+ @args = [item]
310
+ {}
311
+ end
312
+
313
+ def klass
314
+ self["class"]
315
+ end
316
+
317
+ def display_class
318
+ # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
319
+ @klass ||= case klass
320
+ when /\ASidekiq::Extensions::Delayed/
321
+ safe_load(args[0], klass) do |target, method, _|
322
+ "#{target}.#{method}"
323
+ end
324
+ when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
325
+ job_class = @item["wrapped"] || args[0]
326
+ if job_class == "ActionMailer::DeliveryJob" || job_class == "ActionMailer::MailDeliveryJob"
327
+ # MailerClass#mailer_method
328
+ args[0]["arguments"][0..1].join("#")
329
+ else
330
+ job_class
331
+ end
332
+ else
333
+ klass
334
+ end
335
+ end
336
+
337
+ def display_args
338
+ # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
339
+ @display_args ||= case klass
340
+ when /\ASidekiq::Extensions::Delayed/
341
+ safe_load(args[0], args) do |_, _, arg|
342
+ arg
343
+ end
344
+ when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
345
+ job_args = self["wrapped"] ? args[0]["arguments"] : []
346
+ if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob"
347
+ # remove MailerClass, mailer_method and 'deliver_now'
348
+ job_args.drop(3)
349
+ elsif (self["wrapped"] || args[0]) == "ActionMailer::MailDeliveryJob"
350
+ # remove MailerClass, mailer_method and 'deliver_now'
351
+ job_args.drop(3).first["args"]
352
+ else
353
+ job_args
354
+ end
355
+ else
356
+ if self["encrypt"]
357
+ # no point in showing 150+ bytes of random garbage
358
+ args[-1] = "[encrypted data]"
359
+ end
360
+ args
361
+ end
362
+ end
363
+
364
+ def args
365
+ @args || @item["args"]
366
+ end
367
+
368
+ def jid
369
+ self["jid"]
370
+ end
371
+
372
+ def enqueued_at
373
+ self["enqueued_at"] ? Time.at(self["enqueued_at"]).utc : nil
374
+ end
375
+
376
+ def created_at
377
+ Time.at(self["created_at"] || self["enqueued_at"] || 0).utc
378
+ end
379
+
380
+ def tags
381
+ self["tags"] || []
382
+ end
383
+
384
+ def error_backtrace
385
+ # Cache nil values
386
+ if defined?(@error_backtrace)
387
+ @error_backtrace
388
+ else
389
+ value = self["error_backtrace"]
390
+ @error_backtrace = value && uncompress_backtrace(value)
391
+ end
392
+ end
393
+
394
+ attr_reader :queue
395
+
396
+ def latency
397
+ now = Time.now.to_f
398
+ now - (@item["enqueued_at"] || @item["created_at"] || now)
399
+ end
400
+
401
+ ##
402
+ # Remove this job from the queue.
403
+ def delete
404
+ count = Sidekiq.redis { |conn|
405
+ conn.lrem("queue:#{@queue}", 1, @value)
406
+ }
407
+ count != 0
408
+ end
409
+
410
+ def [](name)
411
+ # nil will happen if the JSON fails to parse.
412
+ # We don't guarantee Sidekiq will work with bad job JSON but we should
413
+ # make a best effort to minimize the damage.
414
+ @item ? @item[name] : nil
415
+ end
416
+
417
+ private
418
+
419
+ def safe_load(content, default)
420
+ yield(*YAML.load(content))
421
+ rescue => ex
422
+ # #1761 in dev mode, it's possible to have jobs enqueued which haven't been loaded into
423
+ # memory yet so the YAML can't be loaded.
424
+ Sidekiq.logger.warn "Unable to load YAML: #{ex.message}" unless Sidekiq.options[:environment] == "development"
425
+ default
426
+ end
427
+
428
+ def uncompress_backtrace(backtrace)
429
+ if backtrace.is_a?(Array)
430
+ # Handle old jobs with raw Array backtrace format
431
+ backtrace
432
+ else
433
+ decoded = Base64.decode64(backtrace)
434
+ uncompressed = Zlib::Inflate.inflate(decoded)
435
+ begin
436
+ Sidekiq.load_json(uncompressed)
437
+ rescue
438
+ # Handle old jobs with marshalled backtrace format
439
+ # TODO Remove in 7.x
440
+ Marshal.load(uncompressed)
441
+ end
442
+ end
443
+ end
444
+ end
445
+
446
+ class SortedEntry < Job
447
+ attr_reader :score
448
+ attr_reader :parent
449
+
450
+ def initialize(parent, score, item)
451
+ super(item)
452
+ @score = score
453
+ @parent = parent
454
+ end
455
+
456
+ def at
457
+ Time.at(score).utc
458
+ end
459
+
460
+ def delete
461
+ if @value
462
+ @parent.delete_by_value(@parent.name, @value)
463
+ else
464
+ @parent.delete_by_jid(score, jid)
465
+ end
466
+ end
467
+
468
+ def reschedule(at)
469
+ Sidekiq.redis do |conn|
470
+ conn.zincrby(@parent.name, at.to_f - @score, Sidekiq.dump_json(@item))
471
+ end
472
+ end
473
+
474
+ def add_to_queue
475
+ remove_job do |message|
476
+ msg = Sidekiq.load_json(message)
477
+ Sidekiq::Client.push(msg)
478
+ end
479
+ end
480
+
481
+ def retry
482
+ remove_job do |message|
483
+ msg = Sidekiq.load_json(message)
484
+ msg["retry_count"] -= 1 if msg["retry_count"]
485
+ Sidekiq::Client.push(msg)
486
+ end
487
+ end
488
+
489
+ ##
490
+ # Place job in the dead set
491
+ def kill
492
+ remove_job do |message|
493
+ DeadSet.new.kill(message)
494
+ end
495
+ end
496
+
497
+ def error?
498
+ !!item["error_class"]
499
+ end
500
+
501
+ private
502
+
503
+ def remove_job
504
+ Sidekiq.redis do |conn|
505
+ results = conn.multi {
506
+ conn.zrangebyscore(parent.name, score, score)
507
+ conn.zremrangebyscore(parent.name, score, score)
508
+ }.first
509
+
510
+ if results.size == 1
511
+ yield results.first
512
+ else
513
+ # multiple jobs with the same score
514
+ # find the one with the right JID and push it
515
+ matched, nonmatched = results.partition { |message|
516
+ if message.index(jid)
517
+ msg = Sidekiq.load_json(message)
518
+ msg["jid"] == jid
519
+ else
520
+ false
521
+ end
522
+ }
523
+
524
+ msg = matched.first
525
+ yield msg if msg
526
+
527
+ # push the rest back onto the sorted set
528
+ conn.multi do
529
+ nonmatched.each do |message|
530
+ conn.zadd(parent.name, score.to_f.to_s, message)
531
+ end
532
+ end
533
+ end
534
+ end
535
+ end
536
+ end
537
+
538
+ class SortedSet
539
+ include Enumerable
540
+
541
+ attr_reader :name
542
+
543
+ def initialize(name)
544
+ @name = name
545
+ @_size = size
546
+ end
547
+
548
+ def size
549
+ Sidekiq.redis { |c| c.zcard(name) }
550
+ end
551
+
552
+ def scan(match, count = 100)
553
+ return to_enum(:scan, match, count) unless block_given?
554
+
555
+ match = "*#{match}*" unless match.include?("*")
556
+ Sidekiq.redis do |conn|
557
+ conn.zscan_each(name, match: match, count: count) do |entry, score|
558
+ yield SortedEntry.new(self, score, entry)
559
+ end
560
+ end
561
+ end
562
+
563
+ def clear
564
+ Sidekiq.redis do |conn|
565
+ conn.unlink(name)
566
+ end
567
+ end
568
+ alias_method :💣, :clear
569
+ end
570
+
571
+ class JobSet < SortedSet
572
+ def schedule(timestamp, message)
573
+ Sidekiq.redis do |conn|
574
+ conn.zadd(name, timestamp.to_f.to_s, Sidekiq.dump_json(message))
575
+ end
576
+ end
577
+
578
+ def each
579
+ initial_size = @_size
580
+ offset_size = 0
581
+ page = -1
582
+ page_size = 50
583
+
584
+ loop do
585
+ range_start = page * page_size + offset_size
586
+ range_end = range_start + page_size - 1
587
+ elements = Sidekiq.redis { |conn|
588
+ conn.zrange name, range_start, range_end, with_scores: true
589
+ }
590
+ break if elements.empty?
591
+ page -= 1
592
+ elements.reverse_each do |element, score|
593
+ yield SortedEntry.new(self, score, element)
594
+ end
595
+ offset_size = initial_size - @_size
596
+ end
597
+ end
598
+
599
+ ##
600
+ # Fetch jobs that match a given time or Range. Job ID is an
601
+ # optional second argument.
602
+ def fetch(score, jid = nil)
603
+ begin_score, end_score =
604
+ if score.is_a?(Range)
605
+ [score.first, score.last]
606
+ else
607
+ [score, score]
608
+ end
609
+
610
+ elements = Sidekiq.redis { |conn|
611
+ conn.zrangebyscore(name, begin_score, end_score, with_scores: true)
612
+ }
613
+
614
+ elements.each_with_object([]) do |element, result|
615
+ data, job_score = element
616
+ entry = SortedEntry.new(self, job_score, data)
617
+ result << entry if jid.nil? || entry.jid == jid
618
+ end
619
+ end
620
+
621
+ ##
622
+ # Find the job with the given JID within this sorted set.
623
+ # This is a slower O(n) operation. Do not use for app logic.
624
+ def find_job(jid)
625
+ Sidekiq.redis do |conn|
626
+ conn.zscan_each(name, match: "*#{jid}*", count: 100) do |entry, score|
627
+ job = JSON.parse(entry)
628
+ matched = job["jid"] == jid
629
+ return SortedEntry.new(self, score, entry) if matched
630
+ end
631
+ end
632
+ nil
633
+ end
634
+
635
+ def delete_by_value(name, value)
636
+ Sidekiq.redis do |conn|
637
+ ret = conn.zrem(name, value)
638
+ @_size -= 1 if ret
639
+ ret
640
+ end
641
+ end
642
+
643
+ def delete_by_jid(score, jid)
644
+ Sidekiq.redis do |conn|
645
+ elements = conn.zrangebyscore(name, score, score)
646
+ elements.each do |element|
647
+ if element.index(jid)
648
+ message = Sidekiq.load_json(element)
649
+ if message["jid"] == jid
650
+ ret = conn.zrem(name, element)
651
+ @_size -= 1 if ret
652
+ break ret
653
+ end
654
+ end
655
+ end
656
+ end
657
+ end
658
+
659
+ alias_method :delete, :delete_by_jid
660
+ end
661
+
662
+ ##
663
+ # Allows enumeration of scheduled jobs within Sidekiq.
664
+ # Based on this, you can search/filter for jobs. Here's an
665
+ # example where I'm selecting all jobs of a certain type
666
+ # and deleting them from the schedule queue.
667
+ #
668
+ # r = Sidekiq::ScheduledSet.new
669
+ # r.select do |scheduled|
670
+ # scheduled.klass == 'Sidekiq::Extensions::DelayedClass' &&
671
+ # scheduled.args[0] == 'User' &&
672
+ # scheduled.args[1] == 'setup_new_subscriber'
673
+ # end.map(&:delete)
674
+ class ScheduledSet < JobSet
675
+ def initialize
676
+ super "schedule"
677
+ end
678
+ end
679
+
680
+ ##
681
+ # Allows enumeration of retries within Sidekiq.
682
+ # Based on this, you can search/filter for jobs. Here's an
683
+ # example where I'm selecting all jobs of a certain type
684
+ # and deleting them from the retry queue.
685
+ #
686
+ # r = Sidekiq::RetrySet.new
687
+ # r.select do |retri|
688
+ # retri.klass == 'Sidekiq::Extensions::DelayedClass' &&
689
+ # retri.args[0] == 'User' &&
690
+ # retri.args[1] == 'setup_new_subscriber'
691
+ # end.map(&:delete)
692
+ class RetrySet < JobSet
693
+ def initialize
694
+ super "retry"
695
+ end
696
+
697
+ def retry_all
698
+ each(&:retry) while size > 0
699
+ end
700
+
701
+ def kill_all
702
+ each(&:kill) while size > 0
703
+ end
704
+ end
705
+
706
+ ##
707
+ # Allows enumeration of dead jobs within Sidekiq.
708
+ #
709
+ class DeadSet < JobSet
710
+ def initialize
711
+ super "dead"
712
+ end
713
+
714
# Add a job payload to the dead set, pruning entries older than the
# configured timeout and beyond the configured max size, then invoke
# the registered death handlers (unless opts[:notify_failure] is false).
#
# @return true
def kill(message, opts = {})
  now = Time.now.to_f
  Sidekiq.redis do |conn|
    conn.multi do
      conn.zadd(name, now.to_s, message)
      conn.zremrangebyscore(name, "-inf", now - self.class.timeout)
      conn.zremrangebyrank(name, 0, - self.class.max_jobs)
    end
  end

  if opts[:notify_failure] != false
    job = Sidekiq.load_json(message)
    error = RuntimeError.new("Job killed by API")
    error.set_backtrace(caller)
    Sidekiq.death_handlers.each { |handler| handler.call(job, error) }
  end
  true
end
734
+
735
+ def retry_all
736
+ each(&:retry) while size > 0
737
+ end
738
+
739
+ def self.max_jobs
740
+ Sidekiq.options[:dead_max_jobs]
741
+ end
742
+
743
+ def self.timeout
744
+ Sidekiq.options[:dead_timeout_in_seconds]
745
+ end
746
+ end
747
+
748
+ ##
749
+ # Enumerates the set of Sidekiq processes which are actively working
750
+ # right now. Each process sends a heartbeat to Redis every 5 seconds
751
+ # so this set should be relatively accurate, barring network partitions.
752
+ #
753
+ # Yields a Sidekiq::Process.
754
+ #
755
  class ProcessSet
    include Enumerable

    # clean_plz: when true (the default), prune dead process records
    # from Redis as part of construction.
    def initialize(clean_plz = true)
      cleanup if clean_plz
    end

    # Cleans up dead processes recorded in Redis.
    # Returns the number of processes cleaned.
    def cleanup
      count = 0
      Sidekiq.redis do |conn|
        procs = conn.sscan_each("processes").to_a.sort
        # One round trip for all heartbeats; the pipelined result array
        # is ordered the same as `procs`.
        heartbeats = conn.pipelined {
          procs.each do |key|
            conn.hget(key, "info")
          end
        }

        # the hash named key has an expiry of 60 seconds.
        # if it's not found, that means the process has not reported
        # in to Redis and probably died.
        to_prune = procs.select.with_index { |proc, i|
          heartbeats[i].nil?
        }
        count = conn.srem("processes", to_prune) unless to_prune.empty?
      end
      count
    end

    # Yields a Sidekiq::Process for each live process, built from the
    # "info" JSON plus the current busy/beat/quiet heartbeat fields.
    def each
      result = Sidekiq.redis { |conn|
        procs = conn.sscan_each("processes").to_a.sort

        # We're making a tradeoff here between consuming more memory instead of
        # making more roundtrips to Redis, but if you have hundreds or thousands of workers,
        # you'll be happier this way
        conn.pipelined do
          procs.each do |key|
            conn.hmget(key, "info", "busy", "beat", "quiet")
          end
        end
      }

      result.each do |info, busy, at_s, quiet|
        # If a process is stopped between when we query Redis for `procs` and
        # when we query for `result`, we will have an item in `result` that is
        # composed of `nil` values.
        next if info.nil?

        hash = Sidekiq.load_json(info)
        yield Process.new(hash.merge("busy" => busy.to_i, "beat" => at_s.to_f, "quiet" => quiet))
      end
    end

    # This method is not guaranteed accurate since it does not prune the set
    # based on current heartbeat. #each does that and ensures the set only
    # contains Sidekiq processes which have sent a heartbeat within the last
    # 60 seconds.
    def size
      Sidekiq.redis { |conn| conn.scard("processes") }
    end

    # Returns the identity of the current cluster leader or "" if no leader.
    # This is a Sidekiq Enterprise feature, will always return "" in Sidekiq
    # or Sidekiq Pro.
    def leader
      @leader ||= begin
        x = Sidekiq.redis { |c| c.get("dear-leader") }
        # need a non-falsy value so we can memoize
        x ||= ""
        x
      end
    end
  end
830
+
831
+ #
832
+ # Sidekiq::Process represents an active Sidekiq process talking with Redis.
833
+ # Each process has a set of attributes which look like this:
834
+ #
835
+ # {
836
+ # 'hostname' => 'app-1.example.com',
837
+ # 'started_at' => <process start time>,
838
+ # 'pid' => 12345,
839
+ # 'tag' => 'myapp'
840
+ # 'concurrency' => 25,
841
+ # 'queues' => ['default', 'low'],
842
+ # 'busy' => 10,
843
+ # 'beat' => <last heartbeat>,
844
+ # 'identity' => <unique string identifying the process>,
845
+ # }
846
+ class Process
847
+ def initialize(hash)
848
+ @attribs = hash
849
+ end
850
+
851
+ def tag
852
+ self["tag"]
853
+ end
854
+
855
+ def labels
856
+ Array(self["labels"])
857
+ end
858
+
859
+ def [](key)
860
+ @attribs[key]
861
+ end
862
+
863
+ def identity
864
+ self["identity"]
865
+ end
866
+
867
+ def quiet!
868
+ signal("TSTP")
869
+ end
870
+
871
+ def stop!
872
+ signal("TERM")
873
+ end
874
+
875
+ def dump_threads
876
+ signal("TTIN")
877
+ end
878
+
879
+ def stopping?
880
+ self["quiet"] == "true"
881
+ end
882
+
883
+ private
884
+
885
+ def signal(sig)
886
+ key = "#{identity}-signals"
887
+ Sidekiq.redis do |c|
888
+ c.multi do
889
+ c.lpush(key, sig)
890
+ c.expire(key, 60)
891
+ end
892
+ end
893
+ end
894
+ end
895
+
896
+ ##
897
+ # A worker is a thread that is currently processing a job.
898
+ # Programmatic access to the current active worker set.
899
+ #
900
+ # WARNING WARNING WARNING
901
+ #
902
+ # This is live data that can change every millisecond.
903
+ # If you call #size => 5 and then expect #each to be
904
+ # called 5 times, you're going to have a bad time.
905
+ #
906
+ # workers = Sidekiq::Workers.new
907
+ # workers.size => 2
908
+ # workers.each do |process_id, thread_id, work|
909
+ # # process_id is a unique identifier per Sidekiq process
910
+ # # thread_id is a unique identifier per thread
911
+ # # work is a Hash which looks like:
912
+ # # { 'queue' => name, 'run_at' => timestamp, 'payload' => msg }
913
+ # # run_at is an epoch Integer.
914
+ # end
915
+ #
916
+ class Workers
917
+ include Enumerable
918
+
919
+ def each
920
+ Sidekiq.redis do |conn|
921
+ procs = conn.sscan_each("processes").to_a
922
+ procs.sort.each do |key|
923
+ valid, workers = conn.pipelined {
924
+ conn.exists?(key)
925
+ conn.hgetall("#{key}:workers")
926
+ }
927
+ next unless valid
928
+ workers.each_pair do |tid, json|
929
+ hsh = Sidekiq.load_json(json)
930
+ p = hsh["payload"]
931
+ # avoid breaking API, this is a side effect of the JSON optimization in #4316
932
+ hsh["payload"] = Sidekiq.load_json(p) if p.is_a?(String)
933
+ yield key, tid, hsh
934
+ end
935
+ end
936
+ end
937
+ end
938
+
939
+ # Note that #size is only as accurate as Sidekiq's heartbeat,
940
+ # which happens every 5 seconds. It is NOT real-time.
941
+ #
942
+ # Not very efficient if you have lots of Sidekiq
943
+ # processes but the alternative is a global counter
944
+ # which can easily get out of sync with crashy processes.
945
+ def size
946
+ Sidekiq.redis do |conn|
947
+ procs = conn.sscan_each("processes").to_a
948
+ if procs.empty?
949
+ 0
950
+ else
951
+ conn.pipelined {
952
+ procs.each do |key|
953
+ conn.hget(key, "busy")
954
+ end
955
+ }.sum(&:to_i)
956
+ end
957
+ end
958
+ end
959
+ end
960
+ end