sidekiq-fairplay 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.github/workflows/ci.yml +76 -0
- data/.gitignore +37 -0
- data/.standard.yml +9 -0
- data/Gemfile +3 -0
- data/LICENSE +21 -0
- data/README.md +191 -0
- data/Rakefile +7 -0
- data/bin/console +7 -0
- data/bin/setup +6 -0
- data/gemfiles/sidekiq_7.gemfile +5 -0
- data/gemfiles/sidekiq_8.gemfile +5 -0
- data/lib/sidekiq/fairplay/config.rb +29 -0
- data/lib/sidekiq/fairplay/middleware.rb +54 -0
- data/lib/sidekiq/fairplay/planner.rb +102 -0
- data/lib/sidekiq/fairplay/redis.rb +149 -0
- data/lib/sidekiq/fairplay/version.rb +5 -0
- data/lib/sidekiq/fairplay.rb +67 -0
- data/sidekiq-fairplay.gemspec +37 -0
- data/spec/sidekiq/fairplay_spec.rb +301 -0
- data/spec/spec_helper.rb +63 -0
- metadata +230 -0
data/lib/sidekiq/fairplay/redis.rb
ADDED
@@ -0,0 +1,149 @@
module Sidekiq
  module Fairplay
    class Redis
      LUA = {}
      SHAS = {}

      LUA[:push_tenant_job] = <<~LUA
        local queue_key = KEYS[1]
        local counts_key = KEYS[2]

        local tenant_id = ARGV[1]
        local job_payload = ARGV[2]

        redis.call('RPUSH', queue_key, job_payload)
        redis.call('HINCRBY', counts_key, tenant_id, 1)

        -- Clean up after a week of inactivity
        redis.call('EXPIRE', queue_key, 604800)
        redis.call('EXPIRE', counts_key, 604800)

        return 1
      LUA

      LUA[:pop_tenant_job] = <<~LUA
        local queue_key = KEYS[1]
        local counts_key = KEYS[2]

        local tenant_id = ARGV[1]

        local popped = redis.call('LPOP', queue_key)
        if not popped then
          return 0
        end

        local newcount = redis.call('HINCRBY', counts_key, tenant_id, -1)
        if newcount <= 0 then
          redis.call('HDEL', counts_key, tenant_id)
        end

        return 1
      LUA

      LUA[:release_planner_lock] = <<~LUA
        if redis.call('get', KEYS[1]) == ARGV[1] then
          redis.call('del', KEYS[1])
        end
      LUA

      def self.bootstrap_scripts
        Sidekiq.redis do |conn|
          LUA.each_with_object(SHAS) do |(name, lua), memo|
            memo[name] = conn.call("SCRIPT", "LOAD", lua)
          end
        end
      end

      def push_tenant_job(job_class, tenant_id, payload)
        script_call(
          :push_tenant_job,
          [tenant_queue_redis_key(job_class.name, tenant_id), tenant_counts_redis_key(job_class.name)],
          [tenant_id.to_s, payload.to_s]
        )
      end

      def pop_tenant_job(job_class, tenant_id)
        script_call(
          :pop_tenant_job,
          [tenant_queue_redis_key(job_class.name, tenant_id), tenant_counts_redis_key(job_class.name)],
          [tenant_id.to_s]
        )
      end

      def peek_tenant(job_class, tenant_id)
        redis_call(:lindex, tenant_queue_redis_key(job_class.name, tenant_id), 0)
      end

      def tenant_counts(job_class)
        redis_call(:hgetall, tenant_counts_redis_key(job_class.name)) || {}
      end

      def with_planner_lock(job_class, jid)
        return false unless try_acquire_planner_lock(job_class, jid)

        begin
          yield
        ensure
          release_planner_lock(job_class, jid)
        end

        true
      end

      def try_acquire_planner_lock(job_class, jid)
        key = execute_lock_redis_key(job_class.name)
        ttl = job_class.sidekiq_fairplay_options_hash[:planner_lock_ttl].to_i

        !!redis_call(:set, key, jid.to_s, nx: true, ex: ttl)
      end

      def release_planner_lock(job_class, jid)
        script_call(:release_planner_lock, [execute_lock_redis_key(job_class.name)], [jid.to_s])
      end

      def planner_enqueued_recently?(job_class)
        key = enqueue_lock_redis_key(job_class.name)
        window = job_class.sidekiq_fairplay_options_hash[:enqueue_interval].to_i

        redis_call(:set, key, "1", nx: true, ex: window) ? false : true
      end

      private

      def tenant_counts_redis_key(job_class_name)
        ns("#{job_class_name.underscore}:tenant_counts")
      end

      def tenant_queue_redis_key(job_class_name, tenant_id)
        ns("#{job_class_name.underscore}:tenant_queue:#{tenant_id}")
      end

      def enqueue_lock_redis_key(job_class_name)
        ns("#{job_class_name.underscore}:enqueue_lock")
      end

      def execute_lock_redis_key(job_class_name)
        ns("#{job_class_name.underscore}:execute_lock")
      end

      def ns(key)
        "fairplay:#{key}"
      end

      def redis_call(command, *args, **kwargs)
        Sidekiq.redis { |connection| connection.call(command.to_s.upcase, *args, **kwargs) }
      end

      def script_call(name, keys, args)
        self.class.bootstrap_scripts if SHAS.length != LUA.length

        redis_call(:evalsha, SHAS[name], keys.size, *keys, *args)
      rescue RedisClient::CommandError => e
        raise unless /NOSCRIPT/.match?(e.message)

        self.class.bootstrap_scripts
        retry
      end
    end
  end
end
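
For orientation, a minimal sketch of how these helpers fit together. It is not part of the released files: direct use of Sidekiq::Fairplay::Redis is an assumption for illustration (inside the gem the Middleware and Planner are the intended callers), the payload format is a guess, and FairplayJob is the example worker defined in the spec further down.

  # Illustrative sketch only, not from the gem.
  fairplay_redis = Sidekiq::Fairplay::Redis.new

  # Park a payload on the tenant's list (RPUSH) and bump its counter (HINCRBY).
  fairplay_redis.push_tenant_job(FairplayJob, "t1", {"args" => ["t1", "a"]}.to_json)

  # Per-tenant counts of parked jobs, read from the counts hash (HGETALL).
  fairplay_redis.tenant_counts(FairplayJob) # => e.g. {"t1" => "1"}

  # Remove one parked job for the tenant; the Lua script also decrements the counter.
  fairplay_redis.pop_tenant_job(FairplayJob, "t1")
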
data/lib/sidekiq/fairplay.rb
ADDED
@@ -0,0 +1,67 @@
require "active_support"
require "active_support/inflector"
require "active_support/core_ext/string"
require "active_support/configurable"
require "active_support/core_ext/numeric/time"
require "sidekiq"
require "sidekiq/api"

require "sidekiq/fairplay/version"

module Sidekiq
  module Fairplay
    autoload :Config, "sidekiq/fairplay/config"
    autoload :Redis, "sidekiq/fairplay/redis"
    autoload :Middleware, "sidekiq/fairplay/middleware"
    autoload :Planner, "sidekiq/fairplay/planner"

    class << self
      attr_writer :logger

      def logger
        @logger ||= Sidekiq.logger
      end
    end
  end
end

module Sidekiq
  module Fairplay
    module Job
      def self.included(base)
        base.extend(ClassMethods)
      end

      module ClassMethods
        def sidekiq_fairplay_options(opts = {})
          opts = opts.compact

          unless opts.key?(:enqueue_jobs) && opts.key?(:enqueue_interval)
            raise ArgumentError, "You must specify how many jobs to enqueue and how often."
          end

          unless opts.key?(:tenant_key) && opts[:tenant_key].respond_to?(:call)
            raise ArgumentError, "You must provide the tenant_key lambda."
          end

          @sidekiq_fairplay_options = default_fairplay_options.merge(opts)
        end

        def sidekiq_fairplay_options_hash
          @sidekiq_fairplay_options || default_fairplay_options
        end

        private

        def default_fairplay_options
          {
            latency_threshold: Sidekiq::Fairplay::Config.default_latency_threshold,
            planner_queue: Sidekiq::Fairplay::Config.default_planner_queue,
            planner_lock_ttl: Sidekiq::Fairplay::Config.default_planner_lock_ttl,
            tenant_weights: Sidekiq::Fairplay::Config.default_tenant_weights
          }
        end
      end
    end
  end
end
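
The Job mixin above is what a worker opts into. A minimal declaration sketch, mirroring the options exercised in the spec file further down; NotificationJob and the numeric values are illustrative, not gem defaults.

  # Illustrative sketch only, not from the gem.
  class NotificationJob
    include Sidekiq::Job
    include Sidekiq::Fairplay::Job

    sidekiq_fairplay_options \
      enqueue_interval: 30,   # how often the planner releases jobs (required)
      enqueue_jobs: 100,      # how many jobs to release per planner run (required)
      tenant_key: ->(tenant_id, *_args) { tenant_id },                  # required lambda
      tenant_weights: ->(tenant_ids) { tenant_ids.to_h { |t| [t, 1] } } # optional

    def perform(tenant_id, payload)
      # ...
    end
  end
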
data/sidekiq-fairplay.gemspec
ADDED
@@ -0,0 +1,37 @@
lib = File.expand_path("lib", __dir__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require "sidekiq/fairplay/version"

Gem::Specification.new do |spec|
  spec.name = "sidekiq-fairplay"
  spec.version = Sidekiq::Fairplay::VERSION
  spec.authors = ["Alexander Baygeldin"]
  spec.email = ["a.baygeldin@gmail.com"]
  spec.summary = <<~SUMMARY
    Make Sidekiq play fair — dynamic job prioritization for multi-tenant apps.
  SUMMARY
  spec.homepage = "http://github.com/baygeldin/sidekiq-fairplay"
  spec.license = "MIT"

  spec.files = `git ls-files -z`.split("\x0")
  spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]

  spec.required_ruby_version = ">= 3.4.0"

  spec.add_development_dependency "bundler", "~> 2.0"
  spec.add_development_dependency "pry", "~> 0.15"
  spec.add_development_dependency "rake", "~> 13.0"
  spec.add_development_dependency "rspec", "~> 3.0"
  spec.add_development_dependency "rspec-sidekiq", "~> 5.0"
  spec.add_development_dependency "standard", "~> 1.0"
  spec.add_development_dependency "standard-performance", "~> 1.0"
  spec.add_development_dependency "standard-rspec", "~> 0.3"
  spec.add_development_dependency "simplecov", "~> 0.22"
  spec.add_development_dependency "timecop", "~> 0.9"

  spec.add_dependency "activesupport", "~> 7.0"
  spec.add_runtime_dependency "sidekiq", "~> 7.0"

  spec.metadata["rubygems_mfa_required"] = "true"
end
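
For installation, the gemspec implies the usual Bundler entry; the version constraint below is illustrative. Note the Ruby >= 3.4.0 requirement and the sidekiq ~> 7.0 / activesupport ~> 7.0 runtime dependencies declared above.

  # Gemfile (illustrative)
  gem "sidekiq-fairplay", "~> 0.0.1"
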
data/spec/sidekiq/fairplay_spec.rb
ADDED
@@ -0,0 +1,301 @@
require "spec_helper"

class RegularJob
  include Sidekiq::Job

  def perform(foo)
  end
end

class FairplayJob
  include Sidekiq::Job
  include Sidekiq::Fairplay::Job

  def perform(tenant_key, foo)
  end
end

RSpec.describe Sidekiq::Fairplay do
  before do
    FairplayJob.sidekiq_fairplay_options \
      enqueue_interval:,
      enqueue_jobs:,
      planner_queue:,
      planner_lock_ttl:,
      latency_threshold:,
      tenant_key:,
      tenant_weights:
  end

  let(:enqueue_interval) { 1 }
  let(:enqueue_jobs) { 10 }
  let(:planner_queue) { "default" }
  let(:planner_lock_ttl) { 60 }
  let(:latency_threshold) { 60 }
  let(:tenant_key) { ->(tenant_key, *_args) { tenant_key } }
  let(:tenant_weights) { ->(tenant_keys) { tenant_keys.to_h { |tid| [tid, 1] } } }

  describe "fairness (probabilistic)" do
    # Seed Ruby's PRNG for deterministic results
    around do |example|
      prev = srand(1234)
      example.run
    ensure
      srand(prev)
    end

    let(:enqueue_jobs) { 1000 }
    let(:tenant_weights) do
      ->(tenant_ids) do
        mapping = {"t1" => 1, "t2" => 3, "t3" => 6}
        tenant_ids.to_h { |tid| [tid, mapping.fetch(tid, 1)] }
      end
    end

    it "enqueues approximately proportional to weights" do
      enqueue_jobs.times do |i|
        FairplayJob.perform_async("t1", "a#{i}")
        FairplayJob.perform_async("t2", "b#{i}")
        FairplayJob.perform_async("t3", "c#{i}")
      end

      Sidekiq::Fairplay::Planner.new.perform("FairplayJob")

      expect(FairplayJob).to have_enqueued_sidekiq_job.exactly(enqueue_jobs)

      jobs_per_tenant = FairplayJob.jobs.each_with_object(Hash.new(0)) do |job, memo|
        memo[job["args"].first] += 1
      end

      expected_jobs_per_tenant = {"t1" => 100, "t2" => 300, "t3" => 600}
      tolerance = 0.25 # 25% tolerance to avoid flakiness across Ruby versions

      expected_jobs_per_tenant.each do |tid, exp|
        low = (exp * (1 - tolerance)).floor
        high = (exp * (1 + tolerance)).ceil

        expect(jobs_per_tenant[tid]).to be_between(low, high).inclusive
      end
    end
  end

  describe "basic functionality" do
    it "intercepts fairplay jobs and enqueues them later" do
      FairplayJob.perform_async("t1", "a")
      FairplayJob.perform_async("t2", "b")
      FairplayJob.perform_async("t3", "c")

      expect(FairplayJob).not_to have_enqueued_sidekiq_job
      expect(Sidekiq::Fairplay::Planner)
        .to have_enqueued_sidekiq_job("FairplayJob")
        .exactly(1)
        .immediately

      Sidekiq::Fairplay::Planner.perform_one

      expect(FairplayJob).to have_enqueued_sidekiq_job.exactly(3)
      expect(FairplayJob).to have_enqueued_sidekiq_job("t1", "a")
      expect(FairplayJob).to have_enqueued_sidekiq_job("t2", "b")
      expect(FairplayJob).to have_enqueued_sidekiq_job("t3", "c")
    end

    context "with custom planner queue" do
      let(:planner_queue) { "whatever" }

      it "enqueues the planner job on the configured queue" do
        FairplayJob.perform_async("t1", "a")

        Sidekiq::Fairplay::Planner.perform_one

        expect(Sidekiq::Fairplay::Planner)
          .to have_enqueued_sidekiq_job("FairplayJob")
          .on(planner_queue)
          .in(enqueue_interval.to_i)

        expect(FairplayJob)
          .to have_enqueued_sidekiq_job("t1", "a")
          .on("default") # default queue for FairplayJob
      end
    end

    context "when latency threshold exceeded" do
      let(:queue) { instance_double(Sidekiq::Queue) }

      before do
        allow(Sidekiq::Queue).to receive(:new).and_return(queue)
        allow(queue).to receive(:latency).and_return(latency_threshold.to_i + 1)
      end

      it "reschedules the planner without enqueuing jobs" do
        FairplayJob.perform_async("t1", "a")

        Sidekiq::Fairplay::Planner.perform_one

        expect(FairplayJob).not_to have_enqueued_sidekiq_job
        expect(Sidekiq::Fairplay::Planner)
          .to have_enqueued_sidekiq_job("FairplayJob")
          .in(enqueue_interval.to_i)
      end
    end

    context "with custom weights" do
      let(:tenant_weights) do
        ->(tenant_ids) do
          tenant_ids.to_h do |tid|
            [tid, (tid == "t1") ? 1 : 0]
          end
        end
      end

      it "uses weights to prefer specific tenant" do
        FairplayJob.perform_async("t1", "a")
        FairplayJob.perform_async("t2", "b")

        Sidekiq::Fairplay::Planner.perform_one

        expect(FairplayJob).to have_enqueued_sidekiq_job.exactly(1)
        expect(FairplayJob).to have_enqueued_sidekiq_job("t1", "a")
        expect(FairplayJob).not_to have_enqueued_sidekiq_job("t2", "b")
      end
    end

    context "when too many jobs in the queue" do
      let(:enqueue_jobs) { 1 }

      it "respects the enqueue_jobs limit" do
        FairplayJob.perform_async("t1", "a")
        FairplayJob.perform_async("t1", "b")

        Sidekiq::Fairplay::Planner.perform_one

        expect(FairplayJob).to have_enqueued_sidekiq_job.exactly(1)
        expect(FairplayJob)
          .to have_enqueued_sidekiq_job("t1", "a")
          .or have_enqueued_sidekiq_job("t1", "b")
      end
    end
  end

  describe "edge cases" do
    it "ignores unknown job class" do
      Sidekiq::Fairplay::Planner.new.perform("UnknownJob")

      expect(Sidekiq::Fairplay::Planner).not_to have_enqueued_sidekiq_job
      expect(FairplayJob).not_to have_enqueued_sidekiq_job
    end

    it "has no effect on regular jobs" do
      RegularJob.perform_async("foo")

      expect(RegularJob).to have_enqueued_sidekiq_job("foo")
      expect(Sidekiq::Fairplay::Planner).not_to have_enqueued_sidekiq_job
    end

    it "has no effect on scheduled jobs" do
      FairplayJob.perform_in(5, "t1", "a")

      expect(FairplayJob).to have_enqueued_sidekiq_job("t1", "a").in(5)
      expect(Sidekiq::Fairplay::Planner).not_to have_enqueued_sidekiq_job
    end

    context "with zero weights for all tenants" do
      let(:tenant_weights) do
        ->(tenant_ids) { tenant_ids.to_h { |tid| [tid, 0] } }
      end

      it "enqueues no jobs" do
        FairplayJob.perform_async("t1", "a")
        FairplayJob.perform_async("t2", "b")

        Sidekiq::Fairplay::Planner.perform_one

        expect(FairplayJob).not_to have_enqueued_sidekiq_job
      end
    end
  end

  describe "errors" do
    let(:tenant_key) { ->(_tid, *_args) {} }

    it "raises when tenant key resolves to nil" do
      tenant_key

      expect { FairplayJob.perform_async("t1", "a") }
        .to raise_error(ArgumentError, /tenant key cannot be nil/)
    end
  end

  describe "implementation details" do
    it "reschedules planning for the next interval" do
      FairplayJob.perform_async("t1", "a")

      Sidekiq::Fairplay::Planner.perform_one

      expect(Sidekiq::Fairplay::Planner)
        .to have_enqueued_sidekiq_job("FairplayJob")
        .in(enqueue_interval.to_i)
    end

    context "when planner_lock_ttl is being held" do
      let(:planner_lock_ttl) { 42 }

      before do
        redis = Sidekiq::Fairplay::Redis.new
        redis.try_acquire_planner_lock(FairplayJob, "some_jid")
      end

      it "blocks planning until the TTL expires" do
        FairplayJob.perform_async("t1", "a")

        Sidekiq::Fairplay::Planner.perform_one

        expect(FairplayJob).not_to have_enqueued_sidekiq_job
        expect(Sidekiq::Fairplay::Planner)
          .to have_enqueued_sidekiq_job("FairplayJob")
          .in(enqueue_interval.to_i)
      end
    end

    context "when tenant_key and tenant_weights refer to class methods" do
      before do
        class << FairplayJob
          def static_tenant_key(tid, *_args) = tid
          def static_tenant_weights(tids) = tids.to_h { |tid| [tid, 1] }
        end
      end

      let(:tenant_key) { ->(tid, *args) { static_tenant_key(tid, *args) } }
      let(:tenant_weights) { ->(tids) { static_tenant_weights(tids) } }

      it "works as expected" do
        FairplayJob.perform_async("t1", "a")
        FairplayJob.perform_async("t2", "b")

        Sidekiq::Fairplay::Planner.perform_one

        expect(FairplayJob).to have_enqueued_sidekiq_job.exactly(2)
        expect(FairplayJob).to have_enqueued_sidekiq_job("t1", "a")
        expect(FairplayJob).to have_enqueued_sidekiq_job("t2", "b")
      end
    end

    context "when using ActiveSupport::Duration" do
      let(:enqueue_interval) { 1.minute }
      let(:latency_threshold) { 1.hour }
      let(:planner_lock_ttl) { 10.seconds }

      it "handles durations correctly" do
        FairplayJob.perform_async("t1", "a")

        Sidekiq::Fairplay::Planner.perform_one

        expect(Sidekiq::Fairplay::Planner)
          .to have_enqueued_sidekiq_job("FairplayJob")
          .in(enqueue_interval.to_i)

        expect(FairplayJob).to have_enqueued_sidekiq_job.exactly(1)
        expect(FairplayJob).to have_enqueued_sidekiq_job("t1", "a")
      end
    end
  end
end
data/spec/spec_helper.rb
ADDED
@@ -0,0 +1,63 @@
$LOAD_PATH << "." unless $LOAD_PATH.include?(".")

require "rubygems"
require "bundler/setup"
require "timecop"
require "simplecov"

require "sidekiq"
require "rspec-sidekiq"
require "sidekiq/fairplay"
require "pry"

SimpleCov.start do
  add_filter "spec"
end

Sidekiq::Fairplay.logger = nil

Sidekiq.configure_client do |config|
  config.redis = {db: 1}
  config.logger = nil

  config.client_middleware do |chain|
    chain.add Sidekiq::Fairplay::Middleware
  end
end

Sidekiq.configure_server do |config|
  config.redis = {db: 1}
  config.logger = nil

  config.client_middleware do |chain|
    chain.add Sidekiq::Fairplay::Middleware
  end
end

RSpec::Sidekiq.configure do |config|
  config.clear_all_enqueued_jobs = true
  config.warn_when_jobs_not_processed_by_sidekiq = false
end

RSpec.configure do |config|
  config.order = :random
  config.run_all_when_everything_filtered = true
  config.example_status_persistence_file_path = "spec/examples.txt"

  config.before do
    Sidekiq.redis do |conn|
      keys = conn.call("KEYS", "fairplay*")
      keys.each { |key| conn.call("DEL", key) }
    end
  end

  config.before do
    Timecop.freeze
  end

  config.after do
    Timecop.return
  end
end

$LOAD_PATH << File.join(File.dirname(__FILE__), "..", "lib")
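
Outside the test suite, the same client middleware registration shown in spec_helper.rb would live in the application's own Sidekiq configuration; a sketch assuming a conventional Rails initializer path:

  # config/initializers/sidekiq.rb (hypothetical path, mirroring spec_helper.rb)
  Sidekiq.configure_client do |config|
    config.client_middleware do |chain|
      chain.add Sidekiq::Fairplay::Middleware
    end
  end

  Sidekiq.configure_server do |config|
    # Jobs enqueued from inside workers also pass through the client middleware chain.
    config.client_middleware do |chain|
      chain.add Sidekiq::Fairplay::Middleware
    end
  end
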