sidekiq 6.0.5 → 6.1.2
This diff shows the changes between two publicly released versions of the package, as they appear in its public registry, and is provided for informational purposes only.
Potentially problematic release.
This version of sidekiq might be problematic.
- checksums.yaml +4 -4
- data/.github/ISSUE_TEMPLATE/bug_report.md +20 -0
- data/.github/workflows/ci.yml +41 -0
- data/Changes.md +53 -0
- data/Ent-Changes.md +26 -1
- data/Gemfile +1 -1
- data/Gemfile.lock +96 -112
- data/Pro-Changes.md +30 -1
- data/README.md +2 -6
- data/bin/sidekiq +26 -2
- data/lib/sidekiq.rb +10 -8
- data/lib/sidekiq/api.rb +7 -4
- data/lib/sidekiq/cli.rb +15 -10
- data/lib/sidekiq/client.rb +17 -10
- data/lib/sidekiq/extensions/action_mailer.rb +3 -2
- data/lib/sidekiq/extensions/active_record.rb +4 -3
- data/lib/sidekiq/extensions/class_methods.rb +5 -4
- data/lib/sidekiq/fetch.rb +20 -20
- data/lib/sidekiq/job_logger.rb +1 -1
- data/lib/sidekiq/launcher.rb +32 -5
- data/lib/sidekiq/logger.rb +7 -7
- data/lib/sidekiq/manager.rb +4 -4
- data/lib/sidekiq/middleware/chain.rb +1 -1
- data/lib/sidekiq/monitor.rb +2 -2
- data/lib/sidekiq/processor.rb +4 -4
- data/lib/sidekiq/rails.rb +16 -18
- data/lib/sidekiq/redis_connection.rb +18 -13
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing.rb +1 -1
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web.rb +16 -8
- data/lib/sidekiq/web/application.rb +4 -6
- data/lib/sidekiq/web/csrf_protection.rb +158 -0
- data/lib/sidekiq/web/helpers.rb +3 -6
- data/lib/sidekiq/web/router.rb +2 -4
- data/lib/sidekiq/worker.rb +4 -7
- data/sidekiq.gemspec +1 -2
- data/web/assets/javascripts/application.js +25 -27
- data/web/assets/stylesheets/application-dark.css +142 -124
- data/web/assets/stylesheets/application.css +7 -6
- data/web/locales/fr.yml +2 -2
- data/web/locales/lt.yml +83 -0
- data/web/locales/pl.yml +4 -4
- data/web/locales/ru.yml +4 -0
- data/web/locales/vi.yml +83 -0
- data/web/views/busy.erb +2 -2
- data/web/views/layout.erb +1 -1
- data/web/views/morgue.erb +1 -1
- data/web/views/queues.erb +1 -1
- data/web/views/retries.erb +1 -1
- data/web/views/scheduled.erb +1 -1
- metadata +15 -24
- data/.circleci/config.yml +0 -61
- data/.github/issue_template.md +0 -11
data/Pro-Changes.md
CHANGED
@@ -2,7 +2,36 @@
 
 [Sidekiq Changes](https://github.com/mperham/sidekiq/blob/master/Changes.md) | [Sidekiq Pro Changes](https://github.com/mperham/sidekiq/blob/master/Pro-Changes.md) | [Sidekiq Enterprise Changes](https://github.com/mperham/sidekiq/blob/master/Ent-Changes.md)
 
-Please see [
+Please see [sidekiq.org](https://sidekiq.org/) for more details and how to buy.
+
+5.2.0
+---------
+
+- The Sidekiq Pro and Enterprise gem servers now `bundle install` much faster with **Bundler 2.2+** [#4158]
+- Fix issue with reliable push and multiple shards [#4669]
+- Fix Pro memory leak due to fetch refactoring in Sidekiq 6.1 [#4652]
+- Gracefully handle poison pill jobs [#4633]
+- Remove support for multi-shard batches [#4642]
+- Rename `Sidekiq::Rack::BatchStatus` to `Sidekiq::Pro::BatchStatus` [#4655]
+
+5.1.1
+---------
+
+- Fix broken basic fetcher [#4616]
+
+5.1.0
+---------
+
+- Remove old Statsd metrics with `WorkerName` in the name [#4377]
+```
+job.WorkerName.count -> job.count with tag worker:WorkerName
+job.WorkerName.perform -> job.perform with tag worker:WorkerName
+job.WorkerName.failure -> job.failure with tag worker:WorkerName
+```
+- Remove `concurrent-ruby` gem dependency [#4586]
+- Update `constantize` for batch callbacks. [#4469]
+- Add queue tag to `jobs.recovered.fetch` metric [#4594]
+- Refactor Pro's fetch infrastructure [#4602]
 
 5.0.1
 ---------
data/README.md
CHANGED
@@ -2,11 +2,7 @@ Sidekiq
 ==============
 
 [](https://rubygems.org/gems/sidekiq)
-
-[](https://codeclimate.com/github/mperham/sidekiq/coverage)
-[](https://circleci.com/gh/mperham/sidekiq/tree/master)
-[](https://gitter.im/mperham/sidekiq)
-
+
 
 Simple, efficient background processing for Ruby.
 
@@ -94,4 +90,4 @@ Please see [LICENSE](https://github.com/mperham/sidekiq/blob/master/LICENSE) for
 Author
 -----------------
 
-Mike Perham, [@
+Mike Perham, [@getajobmike](https://twitter.com/getajobmike) / [@sidekiq](https://twitter.com/sidekiq), [https://www.mikeperham.com](https://www.mikeperham.com) / [https://www.contribsys.com](https://www.contribsys.com)
data/bin/sidekiq
CHANGED
@@ -6,13 +6,37 @@ $TESTING = false
 
 require_relative '../lib/sidekiq/cli'
 
+def integrate_with_systemd
+  return unless ENV["NOTIFY_SOCKET"]
+
+  Sidekiq.configure_server do |config|
+    Sidekiq.logger.info "Enabling systemd notification integration"
+    require "sidekiq/sd_notify"
+    config.on(:startup) do
+      Sidekiq::SdNotify.ready
+    end
+    config.on(:shutdown) do
+      Sidekiq::SdNotify.stopping
+    end
+    Sidekiq.start_watchdog if Sidekiq::SdNotify.watchdog?
+  end
+end
+
 begin
   cli = Sidekiq::CLI.instance
   cli.parse
+
+  integrate_with_systemd
+
   cli.run
 rescue => e
   raise e if $DEBUG
-
-
+  if Sidekiq.error_handlers.length == 0
+    STDERR.puts e.message
+    STDERR.puts e.backtrace.join("\n")
+  else
+    cli.handle_exception e
+  end
+
   exit 1
 end
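The new `integrate_with_systemd` helper only activates when systemd sets `NOTIFY_SOCKET`, and it is built on the public `config.on` lifecycle hooks. The same hooks can be used from an application initializer; the sketch below is only an illustration of that pattern (the file path and log messages are assumptions, not part of this diff):

```ruby
# config/initializers/sidekiq.rb -- a minimal sketch of the lifecycle-hook
# pattern bin/sidekiq now uses for its systemd integration.
require "sidekiq"

Sidekiq.configure_server do |config|
  config.on(:startup) do
    # Fires once the server process has booted, the point where SdNotify.ready is sent.
    Sidekiq.logger.info "worker booted"
  end

  config.on(:shutdown) do
    # Fires during orderly shutdown, the point where SdNotify.stopping is sent.
    Sidekiq.logger.info "worker stopping"
  end
end
```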
data/lib/sidekiq.rb
CHANGED
@@ -20,6 +20,7 @@ module Sidekiq
     labels: [],
     concurrency: 10,
     require: ".",
+    strict: true,
     environment: nil,
     timeout: 25,
     poll_interval_average: nil,
@@ -30,16 +31,16 @@ module Sidekiq
       startup: [],
       quiet: [],
       shutdown: [],
-      heartbeat: []
+      heartbeat: []
     },
     dead_max_jobs: 10_000,
     dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
-    reloader: proc { |&block| block.call }
+    reloader: proc { |&block| block.call }
   }
 
   DEFAULT_WORKER_OPTIONS = {
     "retry" => true,
-    "queue" => "default"
+    "queue" => "default"
   }
 
   FAKE_INFO = {
@@ -47,7 +48,7 @@ module Sidekiq
     "uptime_in_days" => "9999",
     "connected_clients" => "9999",
     "used_memory_human" => "9P",
-    "used_memory_peak_human" => "9P"
+    "used_memory_peak_human" => "9P"
   }
 
   def self.❨╯°□°❩╯︵┻━┻
@@ -95,10 +96,11 @@ module Sidekiq
       retryable = true
       begin
         yield conn
-      rescue Redis::
+      rescue Redis::BaseError => ex
         # 2550 Failover can cause the server to become a replica, need
         # to disconnect and reopen the socket to get back to the primary.
-        if
+        # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+        if retryable && ex.message =~ /READONLY|NOREPLICAS/
           conn.disconnect!
           retryable = false
           retry
@@ -154,7 +156,7 @@ module Sidekiq
 
   def self.default_worker_options=(hash)
     # stringify
-    @default_worker_options = default_worker_options.merge(
+    @default_worker_options = default_worker_options.merge(hash.transform_keys(&:to_s))
   end
 
   def self.default_worker_options
@@ -196,7 +198,7 @@ module Sidekiq
   end
 
   def self.logger
-    @logger ||= Sidekiq::Logger.new(
+    @logger ||= Sidekiq::Logger.new($stdout, level: Logger::INFO)
   end
 
   def self.logger=(logger)
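Two public entry points touched here are `Sidekiq.default_worker_options=`, which now stringifies keys with `Hash#transform_keys`, and `Sidekiq.redis`, whose failover retry now also matches `NOREPLICAS`. A hedged usage sketch, assuming a reachable Redis with the conventional `queue:default` list:

```ruby
require "sidekiq"

# Symbol keys are acceptable; 6.1 stringifies them via Hash#transform_keys.
Sidekiq.default_worker_options = { retry: 3, backtrace: true }

# Sidekiq.redis yields a pooled connection; the block is retried once when the
# server replies READONLY or NOREPLICAS after a failover.
Sidekiq.redis { |conn| puts conn.llen("queue:default") }
```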
data/lib/sidekiq/api.rb
CHANGED
@@ -105,7 +105,7 @@ module Sidekiq
 
         default_queue_latency: default_queue_latency,
         workers_size: workers_size,
-        enqueued: enqueued
+        enqueued: enqueued
       }
     end
 
@@ -916,12 +916,13 @@ module Sidekiq
   class Workers
     include Enumerable
 
-    def each
+    def each(&block)
+      results = []
       Sidekiq.redis do |conn|
         procs = conn.sscan_each("processes").to_a
         procs.sort.each do |key|
           valid, workers = conn.pipelined {
-            conn.exists(key)
+            conn.exists?(key)
             conn.hgetall("#{key}:workers")
           }
           next unless valid
@@ -930,10 +931,12 @@ module Sidekiq
             p = hsh["payload"]
             # avoid breaking API, this is a side effect of the JSON optimization in #4316
             hsh["payload"] = Sidekiq.load_json(p) if p.is_a?(String)
-
+            results << [key, tid, hsh]
           end
         end
       end
+
+      results.sort_by { |(_, _, hsh)| hsh["run_at"] }.each(&block)
     end
 
     # Note that #size is only as accurate as Sidekiq's heartbeat,
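`Sidekiq::Workers#each` now buffers the work entries and yields them sorted by `run_at` instead of streaming them straight from Redis. Iteration still yields the process identity, thread id, and work hash, so an existing caller along these lines should behave the same (a sketch, assuming at least one busy process):

```ruby
require "sidekiq/api"

# Each entry is [process_identity, thread_id, work]; 6.1 yields them ordered
# by the job's run_at timestamp.
Sidekiq::Workers.new.each do |process_id, thread_id, work|
  payload = work["payload"]
  puts "#{process_id} #{thread_id}: #{payload["class"]} since #{Time.at(work["run_at"])}"
end
```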
data/lib/sidekiq/cli.rb
CHANGED
@@ -33,8 +33,9 @@ module Sidekiq
     # Code within this method is not tested because it alters
     # global process state irreversibly. PRs which improve the
     # test coverage of Sidekiq::CLI are welcomed.
-    def run
-
+    def run(boot_app: true)
+      boot_application if boot_app
+
       if environment == "development" && $stdout.tty? && Sidekiq.log_formatter.is_a?(Sidekiq::Logger::Formatters::Pretty)
         print_banner
       end
@@ -43,7 +44,7 @@ module Sidekiq
       self_read, self_write = IO.pipe
       sigs = %w[INT TERM TTIN TSTP]
       # USR1 and USR2 don't work on the JVM
-      sigs << "USR2"
+      sigs << "USR2" if Sidekiq.pro? && !jruby?
       sigs.each do |sig|
         trap sig do
           self_write.puts(sig)
@@ -54,7 +55,7 @@ module Sidekiq
 
       logger.info "Running in #{RUBY_DESCRIPTION}"
       logger.info Sidekiq::LICENSE
-      logger.info "Upgrade to Sidekiq Pro for more features and support:
+      logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
 
       # touch the connection pool so it is created before we
       # fire startup and start multithreading.
@@ -163,7 +164,7 @@ module Sidekiq
             Sidekiq.logger.warn "<no backtrace available>"
           end
         end
-      }
+      }
     }
     UNHANDLED_SIGNAL_HANDLER = ->(cli) { Sidekiq.logger.info "No signal handler registered, ignoring" }
     SIGNAL_HANDLERS.default = UNHANDLED_SIGNAL_HANDLER
@@ -185,8 +186,8 @@ module Sidekiq
       # See #984 for discussion.
       # APP_ENV is now the preferred ENV term since it is not tech-specific.
      # Both Sinatra 2.0+ and Sidekiq support this term.
-      #
-      @environment = cli_env || ENV["APP_ENV"] || ENV["
+      # RAILS_ENV and RACK_ENV are there for legacy support.
+      @environment = cli_env || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
     end
 
     def symbolize_keys_deep!(hash)
@@ -228,8 +229,7 @@ module Sidekiq
       opts = parse_config(opts[:config_file]).merge(opts) if opts[:config_file]
 
       # set defaults
-      opts[:queues] = ["default"] if opts[:queues].nil?
-      opts[:strict] = true if opts[:strict].nil?
+      opts[:queues] = ["default"] if opts[:queues].nil?
       opts[:concurrency] = Integer(ENV["RAILS_MAX_THREADS"]) if opts[:concurrency].nil? && ENV["RAILS_MAX_THREADS"]
 
       # merge with defaults
@@ -240,7 +240,7 @@ module Sidekiq
       Sidekiq.options
     end
 
-    def
+    def boot_application
       ENV["RACK_ENV"] = ENV["RAILS_ENV"] = environment
 
       if File.directory?(options[:require])
@@ -368,6 +368,8 @@ module Sidekiq
       end
 
       opts = opts.merge(opts.delete(environment.to_sym) || {})
+      opts.delete(:strict)
+
       parse_queues(opts, opts.delete(:queues) || [])
 
       opts
@@ -379,6 +381,7 @@ module Sidekiq
 
     def parse_queue(opts, queue, weight = nil)
       opts[:queues] ||= []
+      opts[:strict] = true if opts[:strict].nil?
       raise ArgumentError, "queues: #{queue} cannot be defined twice" if opts[:queues].include?(queue)
       [weight.to_i, 1].max.times { opts[:queues] << queue }
       opts[:strict] = false if weight.to_i > 0
@@ -389,3 +392,5 @@ module Sidekiq
     end
   end
 end
+
+require "sidekiq/systemd"
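The strict-ordering default moves out of the top-level option setup and into `parse_queue`, which also expands queue weights into repeated entries. The standalone sketch below re-implements just that expansion for illustration; `expand_queue` is not Sidekiq's method name and the duplicate-queue check is omitted:

```ruby
# Illustration of the weight expansion done by Sidekiq::CLI#parse_queue:
# a weight of 3 repeats the queue three times and disables strict ordering.
def expand_queue(opts, queue, weight = nil)
  opts[:queues] ||= []
  opts[:strict] = true if opts[:strict].nil?
  [weight.to_i, 1].max.times { opts[:queues] << queue }
  opts[:strict] = false if weight.to_i > 0
  opts
end

opts = {}
expand_queue(opts, "critical", 3)
expand_queue(opts, "default")
p opts # => {:queues=>["critical", "critical", "critical", "default"], :strict=>false}
```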
data/lib/sidekiq/client.rb
CHANGED
@@ -90,16 +90,17 @@
     # Returns an array of the of pushed jobs' jids. The number of jobs pushed can be less
     # than the number given if the middleware stopped processing for one or more jobs.
     def push_bulk(items)
-
-
-
+      args = items["args"]
+      raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless args.is_a?(Array) && args.all?(Array)
+      return [] if args.empty? # no jobs to push
 
       at = items.delete("at")
       raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all?(Numeric))
+      raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
 
       normed = normalize_item(items)
-      payloads =
-      copy = normed.merge("args" =>
+      payloads = args.map.with_index { |job_args, index|
+        copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
         copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
 
         result = process_single(items["class"], copy)
@@ -218,16 +219,20 @@
       end
     end
 
+    def validate(item)
+      raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
+      raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
+      raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
+      raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
+      raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+    end
+
     def normalize_item(item)
       # 6.0.0 push_bulk bug, #4321
       # TODO Remove after a while...
       item.delete("at") if item.key?("at") && item["at"].nil?
 
-
-      raise(ArgumentError, "Job args must be an Array") unless item["args"].is_a?(Array)
-      raise(ArgumentError, "Job class must be either a Class or String representation of the class name") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
-      raise(ArgumentError, "Job 'at' must be a Numeric timestamp") if item.key?("at") && !item["at"].is_a?(Numeric)
-      raise(ArgumentError, "Job tags must be an Array") if item["tags"] && !item["tags"].is_a?(Array)
+      validate(item)
       # raise(ArgumentError, "Arguments must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices") unless JSON.load(JSON.dump(item['args'])) == item['args']
 
       # merge in the default sidekiq_options for the item's class and/or wrapped element
@@ -236,6 +241,8 @@
       defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?("get_sidekiq_options")
       item = defaults.merge(item)
 
+      raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
+
       item["class"] = item["class"].to_s
       item["queue"] = item["queue"].to_s
       item["jid"] ||= SecureRandom.hex(12)
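`push_bulk` now checks up front that `"args"` is an Array of Arrays, returns `[]` for an empty batch, and requires a per-job `"at"` Array to match the size of `"args"`. A hedged usage sketch; `HardWorker` is a placeholder worker class, not part of this diff:

```ruby
require "sidekiq"

class HardWorker
  include Sidekiq::Worker
  def perform(user_id); end
end

# One argument Array per job; anything else now raises ArgumentError,
# and an empty "args" Array simply returns [].
jids = Sidekiq::Client.push_bulk("class" => HardWorker, "args" => [[1], [2], [3]])

# Optional per-job scheduling: the "at" Array must match the size of "args".
Sidekiq::Client.push_bulk(
  "class" => HardWorker,
  "args" => [[4], [5]],
  "at" => [Time.now.to_f + 60, Time.now.to_f + 120]
)
```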
data/lib/sidekiq/extensions/action_mailer.rb
CHANGED
@@ -5,9 +5,10 @@ require "sidekiq/extensions/generic_proxy"
 module Sidekiq
   module Extensions
     ##
-    # Adds
-    # delivery to Sidekiq.
+    # Adds +delay+, +delay_for+ and +delay_until+ methods to ActionMailer to offload arbitrary email
+    # delivery to Sidekiq.
     #
+    # @example
     #    UserMailer.delay.send_welcome_email(new_user)
     #    UserMailer.delay_for(5.days).send_welcome_email(new_user)
     #    UserMailer.delay_until(5.days.from_now).send_welcome_email(new_user)
data/lib/sidekiq/extensions/active_record.rb
CHANGED
@@ -5,10 +5,11 @@ require "sidekiq/extensions/generic_proxy"
 module Sidekiq
   module Extensions
     ##
-    # Adds
-    # execution to Sidekiq.
+    # Adds +delay+, +delay_for+ and +delay_until+ methods to ActiveRecord to offload instance method
+    # execution to Sidekiq.
     #
-    #
+    # @example
+    #    User.recent_signups.each { |user| user.delay.mark_as_awesome }
     #
     # Please note, this is not recommended as this will serialize the entire
     # object to Redis. Your Sidekiq jobs should pass IDs, not entire instances.
data/lib/sidekiq/extensions/class_methods.rb
CHANGED
@@ -5,11 +5,12 @@ require "sidekiq/extensions/generic_proxy"
 module Sidekiq
   module Extensions
     ##
-    # Adds
-    # execution to Sidekiq.
+    # Adds `delay`, `delay_for` and `delay_until` methods to all Classes to offload class method
+    # execution to Sidekiq.
     #
-    #
-    #
+    # @example
+    #    User.delay.delete_inactive
+    #    Wikipedia.delay.download_changes_for(Date.today)
     #
     class DelayedClass
       include Sidekiq::Worker
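The delay extensions documented in these files are not loaded by default on Sidekiq 5 and 6; they have to be switched on explicitly. A minimal sketch, assuming a conventional application initializer:

```ruby
# config/initializers/sidekiq.rb
require "sidekiq"

# Opt in to the delay extensions described in the comments above.
Sidekiq::Extensions.enable_delay!

# Afterwards class methods can be deferred, e.g. User.delay.delete_inactive.
# Arguments are serialized with YAML, so prefer passing simple IDs over objects.
```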
data/lib/sidekiq/fetch.rb
CHANGED
@@ -25,8 +25,10 @@ module Sidekiq
     }
 
     def initialize(options)
-
-      @
+      raise ArgumentError, "missing queue list" unless options[:queues]
+      @options = options
+      @strictly_ordered_queues = !!@options[:strict]
+      @queues = @options[:queues].map { |q| "queue:#{q}" }
       if @strictly_ordered_queues
         @queues.uniq!
         @queues << TIMEOUT
@@ -38,24 +40,7 @@ module Sidekiq
       UnitOfWork.new(*work) if work
     end
 
-
-    # configured queue weights. By default Redis#brpop returns
-    # data from the first queue that has pending elements. We
-    # recreate the queue command each time we invoke Redis#brpop
-    # to honor weights and avoid queue starvation.
-    def queues_cmd
-      if @strictly_ordered_queues
-        @queues
-      else
-        queues = @queues.shuffle!.uniq
-        queues << TIMEOUT
-        queues
-      end
-    end
-
-    # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
-    # an instance method will make it async to the Fetcher actor
-    def self.bulk_requeue(inprogress, options)
+    def bulk_requeue(inprogress, options)
       return if inprogress.empty?
 
       Sidekiq.logger.debug { "Re-queueing terminated jobs" }
@@ -76,5 +61,20 @@ module Sidekiq
     rescue => ex
       Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
     end
+
+    # Creating the Redis#brpop command takes into account any
+    # configured queue weights. By default Redis#brpop returns
+    # data from the first queue that has pending elements. We
+    # recreate the queue command each time we invoke Redis#brpop
+    # to honor weights and avoid queue starvation.
+    def queues_cmd
+      if @strictly_ordered_queues
+        @queues
+      else
+        queues = @queues.shuffle!.uniq
+        queues << TIMEOUT
+        queues
+      end
+    end
   end
 end
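`queues_cmd` rebuilds the BRPOP argument list on every fetch: with weighted queues it shuffles the expanded list and de-duplicates it, so heavier queues tend to be polled first without starving the rest. A standalone sketch of that selection strategy (plain Ruby, not Sidekiq's class):

```ruby
# A queue listed N times is roughly N times as likely to be polled first.
weighted = ["queue:critical"] * 3 + ["queue:default"]

firsts = Hash.new(0)
10_000.times { firsts[weighted.shuffle.uniq.first] += 1 }

p firsts # roughly 3:1 in favour of "queue:critical"
```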