sidekiq 5.2.4 → 7.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/Changes.md +672 -8
- data/LICENSE.txt +9 -0
- data/README.md +48 -51
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiq +22 -3
- data/bin/sidekiqload +213 -115
- data/bin/sidekiqmon +11 -0
- data/lib/generators/sidekiq/job_generator.rb +57 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +623 -352
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +214 -229
- data/lib/sidekiq/client.rb +127 -102
- data/lib/sidekiq/component.rb +68 -0
- data/lib/sidekiq/config.rb +287 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +49 -42
- data/lib/sidekiq/job.rb +374 -0
- data/lib/sidekiq/job_logger.rb +33 -7
- data/lib/sidekiq/job_retry.rb +157 -108
- data/lib/sidekiq/job_util.rb +107 -0
- data/lib/sidekiq/launcher.rb +206 -106
- data/lib/sidekiq/logger.rb +131 -0
- data/lib/sidekiq/manager.rb +43 -46
- data/lib/sidekiq/metrics/query.rb +156 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +140 -0
- data/lib/sidekiq/middleware/chain.rb +113 -56
- data/lib/sidekiq/middleware/current_attributes.rb +95 -0
- data/lib/sidekiq/middleware/i18n.rb +7 -7
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +146 -0
- data/lib/sidekiq/paginator.rb +28 -16
- data/lib/sidekiq/processor.rb +126 -117
- data/lib/sidekiq/rails.rb +52 -38
- data/lib/sidekiq/redis_client_adapter.rb +111 -0
- data/lib/sidekiq/redis_connection.rb +41 -112
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +112 -50
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing/inline.rb +6 -5
- data/lib/sidekiq/testing.rb +91 -90
- data/lib/sidekiq/transaction_aware_client.rb +51 -0
- data/lib/sidekiq/version.rb +3 -1
- data/lib/sidekiq/web/action.rb +20 -11
- data/lib/sidekiq/web/application.rb +202 -80
- data/lib/sidekiq/web/csrf_protection.rb +183 -0
- data/lib/sidekiq/web/helpers.rb +165 -114
- data/lib/sidekiq/web/router.rb +23 -19
- data/lib/sidekiq/web.rb +68 -107
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +92 -182
- data/sidekiq.gemspec +25 -16
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/javascripts/application.js +152 -61
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +182 -0
- data/web/assets/javascripts/dashboard.js +35 -293
- data/web/assets/javascripts/metrics.js +298 -0
- data/web/assets/stylesheets/application-dark.css +147 -0
- data/web/assets/stylesheets/application-rtl.css +10 -93
- data/web/assets/stylesheets/application.css +124 -522
- data/web/assets/stylesheets/bootstrap.css +1 -1
- data/web/locales/ar.yml +71 -65
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -53
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +86 -66
- data/web/locales/es.yml +70 -54
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +83 -62
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +75 -64
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +83 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +68 -63
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +18 -3
- data/web/views/_job_info.erb +21 -4
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +1 -1
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +79 -29
- data/web/views/dashboard.erb +48 -18
- data/web/views/dead.erb +3 -3
- data/web/views/filtering.erb +7 -0
- data/web/views/layout.erb +3 -1
- data/web/views/metrics.erb +91 -0
- data/web/views/metrics_for_job.erb +59 -0
- data/web/views/morgue.erb +14 -15
- data/web/views/queue.erb +33 -24
- data/web/views/queues.erb +19 -5
- data/web/views/retries.erb +16 -17
- data/web/views/retry.erb +3 -3
- data/web/views/scheduled.erb +17 -15
- metadata +71 -72
- data/.github/contributing.md +0 -32
- data/.github/issue_template.md +0 -11
- data/.gitignore +0 -15
- data/.travis.yml +0 -17
- data/3.0-Upgrade.md +0 -70
- data/4.0-Upgrade.md +0 -53
- data/5.0-Upgrade.md +0 -56
- data/Appraisals +0 -9
- data/COMM-LICENSE +0 -95
- data/Ent-Changes.md +0 -225
- data/Gemfile +0 -29
- data/LICENSE +0 -9
- data/Pro-2.0-Upgrade.md +0 -138
- data/Pro-3.0-Upgrade.md +0 -44
- data/Pro-4.0-Upgrade.md +0 -35
- data/Pro-Changes.md +0 -752
- data/Rakefile +0 -9
- data/bin/sidekiqctl +0 -237
- data/code_of_conduct.md +0 -50
- data/gemfiles/rails_4.gemfile +0 -31
- data/gemfiles/rails_5.gemfile +0 -31
- data/lib/generators/sidekiq/worker_generator.rb +0 -49
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/delay.rb +0 -42
- data/lib/sidekiq/exception_handler.rb +0 -29
- data/lib/sidekiq/extensions/action_mailer.rb +0 -57
- data/lib/sidekiq/extensions/active_record.rb +0 -40
- data/lib/sidekiq/extensions/class_methods.rb +0 -40
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
- data/lib/sidekiq/util.rb +0 -66
- data/lib/sidekiq/worker.rb +0 -215
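The generator and library renames in the list above (worker_generator.rb → job_generator.rb, the worker.* templates → job.*, lib/sidekiq/worker.rb → lib/sidekiq/job.rb plus worker_compatibility_alias.rb) reflect the worker → job naming change in this release. A minimal sketch of a job class in the 7.x style; the class name, queue and arguments are illustrative, and Sidekiq::Worker remains available as a compatibility alias:

```ruby
# frozen_string_literal: true
require "sidekiq"

class HardJob
  include Sidekiq::Job          # 7.x spelling; `include Sidekiq::Worker` still works
  sidekiq_options queue: "default", retry: 5

  def perform(user_id)
    # do the actual work for user_id here
  end
end

# Enqueue now or in the future (assumes Redis is reachable).
HardJob.perform_async(1234)
HardJob.perform_in(5 * 60, 1234)
```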
data/lib/sidekiq/api.rb
CHANGED
This file, Sidekiq's Data API, is substantially rewritten between 5.2.4 and 7.2.4. In file order:

- Require statements switch to double quotes, and the file now also requires "zlib", "set" and "sidekiq/metrics/query".
- The internal RedisScanner module (a hand-rolled SSCAN cursor loop) is deleted; callers use the Redis client's cursor support directly, e.g. conn.sscan("queues").to_a. Throughout the file, pipelined and MULTI calls now pass an explicit block argument (|pipeline|, |transaction|) instead of reusing the outer connection.
- A new file-level comment states that the Data API is a Ruby object model on top of Sidekiq's runtime data in Redis, that it should never be used for business logic in application code, and that the Sidekiq server process never uses it, because all server-side data manipulation talks to Redis directly for performance.
- Sidekiq::Stats is documented and split into a fast and a slow path. #initialize calls the new #fetch_stats_fast! (O(1) Redis calls), which pipelines GET stat:processed and stat:failed, ZCARD on schedule, retry and dead, SCARD on processes and LINDEX queue:default -1, and fills processed, failed, scheduled_size, retry_size, dead_size, processes_size and default_queue_latency. The new #fetch_stats_slow! (O(number of processes + number of queues) Redis calls) scans the processes and queues sets, pipelines HGET <process> busy and LLEN queue:<name>, and fills workers_size and enqueued; #fetch_stats! runs both. The private #stat helper now falls back to the slow path lazily and raises ArgumentError for an unknown stat.
- Stats#queues zips the scanned queue names with pipelined LLEN results and returns a Hash sorted by size, largest first; the old Stats::Queues helper class is removed. Stats#reset builds its list of resettable counters from %w[failed processed].
- Sidekiq::Stats::History#initialize gains a pool: keyword and validates days_previous (between 1 and 5 * 365, since only five years of data are kept in Redis). #date_stat_hash now builds the date strings with Date#downto and reads all counters with a single MGET; the old rescue of Redis::CommandError (the CROSSSLOT note for Redis Cluster) is dropped.
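A hedged usage sketch of the reworked Stats API described above; the method names come from this file, the sample values are illustrative, and a reachable Redis is assumed:

```ruby
require "sidekiq"

stats = Sidekiq::Stats.new     # runs the O(1) fetch_stats_fast! path
stats.processed                # lifetime processed-job counter
stats.default_queue_latency    # seconds since the oldest "default" job was enqueued
stats.enqueued                 # first access to a "slow" stat triggers fetch_stats_slow!
stats.queues                   # => {"default" => 12, "mailers" => 3}, largest first

# Daily counters, at most five years back.
Sidekiq::Stats::History.new(7).processed  # => {"2024-05-01" => 120, ...}
```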
- Sidekiq::Queue: the class comment notes that queue data is real-time and changes in Redis moment by moment. Queue.all is documented (returns Array<Sidekiq::Queue>) and implemented as Sidekiq.redis { |c| c.sscan("queues").to_a }.sort.map { |q| Sidekiq::Queue.new(q) }. #initialize defaults to "default", and #size, #paused? and #latency gain doc comments; #latency reads the oldest entry with LINDEX @rname -1 and falls back to "now" when enqueued_at is missing. #each pages through the list with an explicit loop and yields JobRecord objects; #find_job is flagged as a slow, inefficient operation; #clear runs UNLINK plus SREM queues inside a MULTI block and returns true; a new #as_json returns {name: name}.
- The queued-job wrapper class is now Sidekiq::JobRecord (the Sidekiq::Job name is used for the job mixin elsewhere in this release). It documents its item (parsed Hash), value (raw Redis string) and queue readers, and:
  - #display_class honors a "display_class" attribute and unwraps ActiveJob's SidekiqAdapter::JobWrapper, rendering ActionMailer::DeliveryJob / MailDeliveryJob as MailerClass#mailer_method.
  - #display_args deserializes wrapped ActiveJob arguments, drops the mailer class, mailer method and "deliver_now" prefix for ActionMailer jobs, and shows "[encrypted data]" in place of the last argument of encrypted jobs.
  - New readers: #bid, #tags (defaults to []) and #error_backtrace, which lazily Base64-decodes and Zlib-inflates the stored backtrace; #enqueued_at now returns nil when the attribute is missing, and #[] is documented as access to arbitrary attributes in the job hash.
  - New private helpers deserialize_argument and serialized_global_id? strip ActiveJob's "_aj_"-prefixed bookkeeping keys and resolve "_aj_globalid" references; uncompress_backtrace handles the backtrace decoding.
- Sidekiq::SortedEntry (a JobRecord subclass) documents that its score is the timestamp associated with the job — the run time in the scheduled set, or the expiry in the dead set — and now coerces the score with Float().
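A short sketch of the Queue / JobRecord surface summarized above; queue and job-class names are illustrative and a reachable Redis is assumed:

```ruby
require "sidekiq"

Sidekiq::Queue.all.each do |q|
  puts "#{q.name}: #{q.size} jobs, #{q.latency.round(1)}s latency"
end

queue = Sidekiq::Queue.new("default")
queue.each do |job|                        # yields Sidekiq::JobRecord objects
  puts [job.jid, job.display_class, job.display_args].inspect
  job.delete if job.klass == "ObsoleteJob" # linear scan; not for hot paths
end
queue.clear                                # UNLINK the list and deregister the queue
```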
- Sidekiq::SortedEntry#reschedule(at) is implemented with ZINCRBY, shifting the existing score by at.to_f - @score; #add_to_queue, #retry (which decrements retry_count when present) and #kill (move the job into the dead set) gain doc comments, and #error? now checks item["error_class"]. The private #remove_job pairs ZRANGE ... "BYSCORE" with ZREMRANGEBYSCORE inside MULTI, partitions the results by JID and re-adds the non-matching payloads.
- Sidekiq::SortedSet is documented as the base class for all of Sidekiq's sorted sets. It gains #scan(match, count = 100), a ZSCAN-backed enumerator that yields SortedEntry objects (see Redis's SCAN documentation for the matching semantics); #clear now uses UNLINK and returns true, and a new #as_json returns {name: name}.
- Sidekiq::JobSet is documented as the base class for the sorted sets that hold jobs (Pro and Enterprise add non-job sets such as Batches). #schedule(timestamp, job) is documented; #each pages with an explicit loop, asks for "withscores" and walks each page with reverse_each; #fetch(score, jid = nil) accepts a Time or a Range and queries with ZRANGE ... "BYSCORE" "withscores"; #find_job(jid) is rebuilt on ZSCAN and flagged as a slow O(n) operation; #delete_by_jid locates the payload via ZRANGE ... "BYSCORE" before removing it.
- ScheduledSet, RetrySet and DeadSet now pass their set names ("schedule", "retry", "dead") to super explicitly, and their class comments point to the API wiki page instead of the old Sidekiq::Extensions::DelayedClass examples. RetrySet#retry_all and #kill_all collapse to each(&:retry) / each(&:kill) while size > 0. DeadSet#kill(message, opts = {}) trims the set with ZREMRANGEBYSCORE and ZREMRANGEBYRANK using Sidekiq::Config::DEFAULTS[:dead_timeout_in_seconds] and [:dead_max_jobs]; dead jobs that nobody fixes are dropped after six months by default.
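A hedged sketch of the sorted-set classes covered above; the job class name and JID are illustrative and a reachable Redis is assumed:

```ruby
require "sidekiq"

retries = Sidekiq::RetrySet.new
# ZSCAN-backed filtering; yields Sidekiq::SortedEntry objects.
retries.scan("WelcomeEmailJob") do |entry|
  entry.retry if entry.klass == "WelcomeEmailJob"
end
retries.retry_all                 # drain the whole retry set back onto its queues

dead = Sidekiq::DeadSet.new
dead.find_job("0123456789abcdef0123")&.delete  # slow O(n) lookup by JID
dead.clear                                     # the 💣 alias does the same
```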
- DeadSet#kill now notifies Sidekiq.default_configuration.death_handlers (previously Sidekiq.death_handlers), and the DeadSet.max_jobs / DeadSet.timeout class methods, which read Sidekiq.options, are removed.
- Sidekiq::ProcessSet: the class comment notes that each process heartbeats to Redis every 5 seconds. A new ProcessSet.[](identity) looks up a single process with SISMEMBER plus HMGET of info, busy, beat, quiet, rss and rtt_us and returns a Sidekiq::Process or nil. #cleanup is rate-limited to once per minute via SET process_cleanup NX EX 60 and prunes processes whose info hash has expired. #each pipelines HMGET for every process, skips entries that disappear between the two queries and yields Process objects enriched with busy, beat, quiet, rss and rtt_us. New #total_concurrency and #total_rss_in_kb (aliased to #total_rss) sum concurrency and RSS across the cluster, and #leader memoizes "" when there is no leader.
- Sidekiq::Process gains #queues, #weights, #version and #embedded?. #quiet! (TSTP) and #stop! (TERM) are documented as asynchronous — the process reacts within a few seconds, and shutdown honors the configured :timeout, 25 seconds by default — and both raise when called on an embedded process. #dump_threads (TTIN) and #stopping? are documented, and the private #signal uses an explicit MULTI block for its LPUSH plus EXPIRE 60.
- Sidekiq::Workers is renamed Sidekiq::WorkSet, since "worker" had become ambiguous; Workers remains as an alias. #each now reads the per-process "<identity>:work" hashes (previously "<identity>:workers"), sorts the results by run_at and yields Sidekiq::Work objects; #size sums the pipelined busy counters; a new #find_work_by_jid(jid) performs a slow O(n) search for a running job.
- The new Sidekiq::Work class represents a currently executing job and exposes #process_id, #thread_id, #queue, #run_at, #payload and #job (a JobRecord built from the payload); hash-style access through #[] still works but emits a deprecation warning.
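A hedged sketch of the process and work introspection API described above; identities and output are illustrative and a reachable Redis is assumed:

```ruby
require "sidekiq"

processes = Sidekiq::ProcessSet.new
processes.each do |process|
  puts "#{process.identity} v#{process.version} busy=#{process["busy"]} rss=#{process["rss"]}kb"
  # process.quiet! unless process.embedded?   # asynchronous TSTP, takes a few seconds
end
puts "threads across the cluster: #{processes.total_concurrency}"

Sidekiq::WorkSet.new.each do |process_id, thread_id, work|
  puts "#{process_id} #{thread_id} #{work.queue} #{work.job.display_class} since #{work.run_at}"
end
```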