sidekiq 6.5.6 → 6.5.7
Potentially problematic release: this version of sidekiq was flagged as possibly problematic.
- checksums.yaml +4 -4
- data/Changes.md +6 -0
- data/bin/sidekiqload +2 -2
- data/lib/sidekiq/api.rb +14 -1
- data/lib/sidekiq/scheduled.rb +42 -8
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web/helpers.rb +1 -1
- data/lib/sidekiq/worker.rb +4 -1
- data/web/locales/ja.yml +7 -0
- data/web/locales/zh-cn.yml +36 -11
- data/web/locales/zh-tw.yml +32 -7
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a46b127af053d0d8e34960975ba328973778e74e5e11c972b7ed90ad3ff37e81
+  data.tar.gz: b879598b3c9219e52b2c61ecc33fc03670a779a9afc3c5651f99240021391551
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b30b0d0ddf63bf32cf9dadce9090112fcc0a26f3aa255fa49b784197a22ccc49280c323a7cbcccbd64fc7fb78b0273824c763bf7fc06dfcc0f310562099597de
+  data.tar.gz: e987d3347e7bd83a7abda98c72b1f9c79f7a75686e1895a4ad6e937c8eb3350a90fc03f010ea7c781b5a3609e5e255c2773cffda3e4c4570470ef8f1ed108316
data/Changes.md
CHANGED
@@ -2,6 +2,12 @@
 
 [Sidekiq Changes](https://github.com/mperham/sidekiq/blob/main/Changes.md) | [Sidekiq Pro Changes](https://github.com/mperham/sidekiq/blob/main/Pro-Changes.md) | [Sidekiq Enterprise Changes](https://github.com/mperham/sidekiq/blob/main/Ent-Changes.md)
 
+6.5.7
+----------
+
+- Updates for JA and ZH locales
+- Further optimizations for scheduled polling [#5513]
+
 6.5.6
 ----------
 
data/bin/sidekiqload
CHANGED
@@ -89,7 +89,7 @@ def Process.rss
   `ps -o rss= -p #{Process.pid}`.chomp.to_i
 end
 
-iter =
+iter = 10
 count = 10_000
 
 iter.times do
@@ -139,7 +139,7 @@ begin
   events.clear
 
   with_latency(Integer(ENV.fetch("LATENCY", "1"))) do
-    launcher = Sidekiq::Launcher.new(Sidekiq
+    launcher = Sidekiq::Launcher.new(Sidekiq)
     launcher.run
 
     while readable_io = IO.select([self_read])
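For reference, the `Process.rss` helper visible in the hunk context shells out to `ps` to read the process's resident set size. A standalone sketch of that one-liner (assumes a POSIX `ps`; units are kilobytes on Linux/macOS, and `process_rss_kb` is an illustrative name, not part of bin/sidekiqload):

# Sketch of the RSS helper shown in the hunk context above.
def process_rss_kb(pid = Process.pid)
  `ps -o rss= -p #{pid}`.chomp.to_i
end

puts "current RSS: #{process_rss_kb} KB"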
data/lib/sidekiq/api.rb
CHANGED
@@ -11,6 +11,17 @@ if ENV["SIDEKIQ_METRICS_BETA"]
   require "sidekiq/metrics/query"
 end
 
+#
+# Sidekiq's Data API provides a Ruby object model on top
+# of Sidekiq's runtime data in Redis. This API should never
+# be used within application code for business logic.
+#
+# The Sidekiq server process never uses this API: all data
+# manipulation is done directly for performance reasons to
+# ensure we are using Redis as efficiently as possible at
+# every callsite.
+#
+
 module Sidekiq
   # Retrieve runtime statistics from Redis regarding
   # this Sidekiq cluster.
@@ -893,10 +904,12 @@ module Sidekiq
     # :nodoc:
     # @api private
     def cleanup
+      # dont run cleanup more than once per minute
       return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) }
+
       count = 0
       Sidekiq.redis do |conn|
-        procs = conn.sscan_each("processes").to_a
+        procs = conn.sscan_each("processes").to_a
         heartbeats = conn.pipelined { |pipeline|
           procs.each do |key|
             pipeline.hget(key, "info")
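The new guard in ProcessSet#cleanup uses a Redis SET with NX/EX as a cheap once-per-minute lock: only the caller that manages to create the process_cleanup key gets true back and does the scan, and the key expires after 60 seconds. A minimal sketch of that pattern with redis-rb (the expensive_cleanup! method is a hypothetical placeholder for the pruning work):

require "redis"

def expensive_cleanup!
  # placeholder for the real work: scanning the "processes" set and
  # pruning entries whose heartbeat hash has expired
end

redis = Redis.new
# SET process_cleanup 1 NX EX 60 succeeds only if the key does not exist,
# so at most one process per minute runs the cleanup; the rest skip it.
if redis.set("process_cleanup", "1", nx: true, ex: 60)
  expensive_cleanup!
end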
data/lib/sidekiq/scheduled.rb
CHANGED
@@ -147,13 +147,16 @@ module Sidekiq
         # As we run more processes, the scheduling interval average will approach an even spread
         # between 0 and poll interval so we don't need this artifical boost.
         #
-
+        count = process_count
+        interval = poll_interval_average(count)
+
+        if count < 10
           # For small clusters, calculate a random interval that is ±50% the desired average.
-
+          interval * rand + interval.to_f / 2
         else
           # With 10+ processes, we should have enough randomness to get decent polling
           # across the entire timespan
-
+          interval * rand
         end
       end
 
@@ -170,14 +173,14 @@ module Sidekiq
       # the same time: the thundering herd problem.
      #
      # We only do this if poll_interval_average is unset (the default).
-      def poll_interval_average
-        @config[:poll_interval_average]
+      def poll_interval_average(count)
+        @config[:poll_interval_average] || scaled_poll_interval(count)
       end
 
       # Calculates an average poll interval based on the number of known Sidekiq processes.
       # This minimizes a single point of failure by dispersing check-ins but without taxing
       # Redis if you run many Sidekiq processes.
-      def scaled_poll_interval
+      def scaled_poll_interval(process_count)
         process_count * @config[:average_scheduled_poll_interval]
       end
 
@@ -187,9 +190,35 @@ module Sidekiq
         pcount
       end
 
+      # A copy of Sidekiq::ProcessSet#cleanup because server
+      # should never depend on sidekiq/api.
+      def cleanup
+        # dont run cleanup more than once per minute
+        return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) }
+
+        count = 0
+        Sidekiq.redis do |conn|
+          procs = conn.sscan_each("processes").to_a
+          heartbeats = conn.pipelined { |pipeline|
+            procs.each do |key|
+              pipeline.hget(key, "info")
+            end
+          }
+
+          # the hash named key has an expiry of 60 seconds.
+          # if it's not found, that means the process has not reported
+          # in to Redis and probably died.
+          to_prune = procs.select.with_index { |proc, i|
+            heartbeats[i].nil?
+          }
+          count = conn.srem("processes", to_prune) unless to_prune.empty?
+        end
+        count
+      end
+
       def initial_wait
-        # Have all processes sleep between 5-15 seconds.
-        #
+        # Have all processes sleep between 5-15 seconds. 10 seconds to give time for
+        # the heartbeat to register (if the poll interval is going to be calculated by the number
         # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time.
         total = 0
         total += INITIAL_WAIT unless @config[:poll_interval_average]
@@ -197,6 +226,11 @@ module Sidekiq
 
         @sleeper.pop(total)
       rescue Timeout::Error
+      ensure
+        # periodically clean out the `processes` set in Redis which can collect
+        # references to dead processes over time. The process count affects how
+        # often we scan for scheduled jobs.
+        cleanup
       end
     end
   end
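The polling change above is plain arithmetic: the average interval scales with the number of live processes, and each poller then sleeps a random amount inside (or, for small clusters, centered on) that window, so the cluster as a whole still checks the scheduled set at roughly the configured average rate. A standalone sketch of that math, using 15 seconds as an illustrative average_scheduled_poll_interval (the real value comes from the config; the names below mirror the diff but are not Sidekiq's public API):

AVERAGE_SCHEDULED_POLL_INTERVAL = 15

def scaled_poll_interval(process_count)
  # N processes => target average of N * 15s per process, i.e. the cluster
  # as a whole polls roughly every 15s
  process_count * AVERAGE_SCHEDULED_POLL_INTERVAL
end

def random_poll_interval(process_count)
  interval = scaled_poll_interval(process_count)
  if process_count < 10
    # small clusters: random value within ±50% of the average
    interval * rand + interval.to_f / 2
  else
    # 10+ processes: a plain 0..interval spread is random enough
    interval * rand
  end
end

# e.g. 3 processes => average 45s, each poller sleeps somewhere in 22.5..67.5s
p random_poll_interval(3)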
data/lib/sidekiq/version.rb
CHANGED
data/lib/sidekiq/web/helpers.rb
CHANGED
@@ -155,7 +155,7 @@ module Sidekiq
       @sorted_processes ||= begin
         return processes unless processes.all? { |p| p["hostname"] }
 
-        split_characters = /[._-]
+        split_characters = /[._-]+/
 
         padding = processes.flat_map { |p| p["hostname"].split(split_characters) }.map(&:size).max
 
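For context, sorted_processes splits each hostname on runs of dots, underscores and dashes (the /[._-]+/ from the fix) and pads the pieces so that, for example, worker-3 sorts ahead of worker-12. A small sketch of that idea on made-up hostnames; only the split and padding lines appear in the hunk above, and the rjust-based sort key is an illustrative assumption:

split_characters = /[._-]+/
hostnames = ["worker-12.example", "worker-3.example"] # hypothetical

# longest segment across all hostnames, used as the pad width
padding = hostnames.flat_map { |h| h.split(split_characters) }.map(&:size).max

keys = hostnames.map { |h|
  h.split(split_characters).map { |part| part.rjust(padding, "0") }.join
}
p keys.sort
# => ["0worker0000003example", "0worker0000012example"] (3 sorts before 12)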
data/lib/sidekiq/worker.rb
CHANGED
@@ -340,7 +340,7 @@ module Sidekiq
       # Legal options:
       #
       # queue - use a named queue for this Worker, default 'default'
-      # retry - enable
+      # retry - enable retries via JobRetry, *true* to use the default
       #   or *Integer* count
       # backtrace - whether to save any error backtrace in the retry payload to display in web UI,
       #   can be true, false or an integer number of lines to save, default *false*
@@ -348,6 +348,9 @@ module Sidekiq
       #
       # In practice, any option is allowed. This is the main mechanism to configure the
       # options for a specific job.
+      #
+      # These options will be saved into the serialized job when enqueued by
+      # the client.
       def sidekiq_options(opts = {})
         super
       end
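The added doc comment is worth underlining: options set via sidekiq_options are copied into each job's serialized payload at enqueue time, so changing them later only affects newly pushed jobs. A brief usage sketch (the HardJob class and its option values are hypothetical):

require "sidekiq"

class HardJob
  include Sidekiq::Worker
  # these options are merged into the job hash when the client pushes it
  sidekiq_options queue: "critical", retry: 3, backtrace: 10

  def perform(user_id)
    # do the work
  end
end

HardJob.perform_async(123) # payload includes "queue"=>"critical", "retry"=>3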
data/web/locales/ja.yml
CHANGED
data/web/locales/zh-cn.yml
CHANGED
@@ -7,6 +7,7 @@ zh-cn: # <---- change this to your locale code
   Realtime: 实时
   History: 历史记录
   Busy: 执行中
+  Utilization: 利用率
   Processed: 已处理
   Failed: 已失败
   Scheduled: 已计划
@@ -17,15 +18,15 @@ zh-cn: # <---- change this to your locale code
   StopPolling: 停止轮询
   Queue: 队列
   Class: 类别
-  Job:
+  Job: 任务
   Arguments: 参数
   Extras: 额外的
   Started: 已开始
   ShowAll: 显示全部
-  CurrentMessagesInQueue: 目前在<span class='title'>%{queue}</span
+  CurrentMessagesInQueue: 目前在<span class='title'>%{queue}</span>的任务
   Delete: 删除
   AddToQueue: 添加至队列
-  AreYouSureDeleteJob:
+  AreYouSureDeleteJob: 你确定要删除这个任务么?
   AreYouSureDeleteQueue: 你确定要删除%{queue}这个队列?
   Queues: 队列
   Size: 容量
@@ -33,20 +34,22 @@ zh-cn: # <---- change this to your locale code
   NextRetry: 下次重试
   RetryCount: 重试次數
   RetryNow: 现在重试
+  Kill: 终止
   LastRetry: 最后一次重试
   OriginallyFailed: 原本已失败
   AreYouSure: 你确定?
-  DeleteAll:
-  RetryAll:
+  DeleteAll: 全部删除
+  RetryAll: 全部重试
+  KillAll: 全部终止
   NoRetriesFound: 沒有发现可重试
   Error: 错误
   ErrorClass: 错误类别
   ErrorMessage: 错误消息
-  ErrorBacktrace:
+  ErrorBacktrace: 错误细节
   GoBack: ← 返回
-  NoScheduledFound:
+  NoScheduledFound: 沒有发现计划任务
   When: 当
-  ScheduledJobs:
+  ScheduledJobs: 计划任务
   idle: 闲置
   active: 活动中
   Version: 版本
@@ -59,10 +62,32 @@ zh-cn: # <---- change this to your locale code
   ThreeMonths: 三个月
   SixMonths: 六个月
   Failures: 失败
-  DeadJobs:
-  NoDeadJobsFound:
+  DeadJobs: 已停滞任务
+  NoDeadJobsFound: 沒有发现任何已停滞的任务
   Dead: 已停滞
+  Process: 进程
   Processes: 处理中
+  Name: 名称
   Thread: 线程
   Threads: 线程
-  Jobs:
+  Jobs: 任务
+  Paused: 已暫停
+  Stop: 強制暫停
+  Quiet: 暫停
+  StopAll: 全部強制暫停
+  QuietAll: 全部暫停
+  PollingInterval: 輪詢週期
+  Plugins: 套件
+  NotYetEnqueued: 尚未進入佇列
+  CreatedAt: 建立時間
+  BackToApp: 回首頁
+  Latency: 延時
+  Pause: 暫停
+  Unpause: 取消暂停
+  Metrics: 指标
+  NoDataFound: 无数据
+  TotalExecutionTime: 总执行时间
+  AvgExecutionTime: 平均执行时间
+  Context: 上下文
+  Bucket: 桶
+  NoJobMetricsFound: 无任务相关指标数据
data/web/locales/zh-tw.yml
CHANGED
@@ -7,6 +7,7 @@ zh-tw: # <---- change this to your locale code
   Realtime: 即時
   History: 歷史資料
   Busy: 忙碌
+  Utilization: 使用率
   Processed: 已處理
   Failed: 已失敗
   Scheduled: 已排程
@@ -25,26 +26,28 @@ zh-tw: # <---- change this to your locale code
   CurrentMessagesInQueue: 目前在<span class='title'>%{queue}</span>的工作
   Delete: 刪除
   AddToQueue: 增加至佇列
-  AreYouSureDeleteJob:
-  AreYouSureDeleteQueue:
+  AreYouSureDeleteJob: 確定要刪除這個工作嗎?
+  AreYouSureDeleteQueue: 確定要刪除%{queue}佇列?這會刪除佇列裡的所有工作,佇列將會在有新工作時重新出現。
   Queues: 佇列
   Size: 容量
   Actions: 動作
   NextRetry: 下次重試
   RetryCount: 重試次數
   RetryNow: 馬上重試
+  Kill: 取消
   LastRetry: 最後一次重試
   OriginallyFailed: 原本已失敗
   AreYouSure: 你確定?
-  DeleteAll:
-  RetryAll:
-
+  DeleteAll: 全部刪除
+  RetryAll: 全部重試
+  KillAll: 全部取消
+  NoRetriesFound: 找無可重試的工作
   Error: 錯誤
   ErrorClass: 錯誤類別
   ErrorMessage: 錯誤訊息
-  ErrorBacktrace:
+  ErrorBacktrace: 詳細錯誤訊息
   GoBack: ← 返回
-  NoScheduledFound:
+  NoScheduledFound: 找無已排程的工作
   When: 當
   ScheduledJobs: 已排程的工作
   idle: 閒置
@@ -62,7 +65,29 @@ zh-tw: # <---- change this to your locale code
   DeadJobs: 停滯工作
   NoDeadJobsFound: 沒有發現任何停滯的工作
   Dead: 停滯
+  Process: 程序
   Processes: 處理中
+  Name: 名稱
   Thread: 執行緒
   Threads: 執行緒
   Jobs: 工作
+  Paused: 已暫停
+  Stop: 強制暫停
+  Quiet: 暫停
+  StopAll: 全部強制暫停
+  QuietAll: 全部暫停
+  PollingInterval: 輪詢週期
+  Plugins: 套件
+  NotYetEnqueued: 尚未進入佇列
+  CreatedAt: 建立時間
+  BackToApp: 回首頁
+  Latency: 延時
+  Pause: 暫停
+  Unpause: 取消暫停
+  Metrics: 計量
+  NoDataFound: 找無資料
+  TotalExecutionTime: 總執行時間
+  AvgExecutionTime: 平均執行時間
+  Context: 上下文
+  Bucket: 桶
+  NoJobMetricsFound: 找無工作相關計量資料
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: sidekiq
 version: !ruby/object:Gem::Version
-  version: 6.5.6
+  version: 6.5.7
 platform: ruby
 authors:
 - Mike Perham
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2022-
+date: 2022-09-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: redis