sidekiq-fairplay 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.github/workflows/ci.yml +76 -0
- data/.gitignore +37 -0
- data/.standard.yml +9 -0
- data/Gemfile +3 -0
- data/LICENSE +21 -0
- data/README.md +191 -0
- data/Rakefile +7 -0
- data/bin/console +7 -0
- data/bin/setup +6 -0
- data/gemfiles/sidekiq_7.gemfile +5 -0
- data/gemfiles/sidekiq_8.gemfile +5 -0
- data/lib/sidekiq/fairplay/config.rb +29 -0
- data/lib/sidekiq/fairplay/middleware.rb +54 -0
- data/lib/sidekiq/fairplay/planner.rb +102 -0
- data/lib/sidekiq/fairplay/redis.rb +149 -0
- data/lib/sidekiq/fairplay/version.rb +5 -0
- data/lib/sidekiq/fairplay.rb +67 -0
- data/sidekiq-fairplay.gemspec +37 -0
- data/spec/sidekiq/fairplay_spec.rb +301 -0
- data/spec/spec_helper.rb +63 -0
- metadata +230 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA256:
|
3
|
+
metadata.gz: 8da6198834317f72485926227a1a0798f26fa2cb89506248b8430f36445c3d8e
|
4
|
+
data.tar.gz: 1b09fd7ff17a68822a77d2944a22f9913a3048660e591aebb0fcdcf71ef69b8f
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: 74bfa63effdd8815a733acb146f7acd5bfa6203eef103c22ba79f1b170fc3de9085b52cd6086b37d86d32d7da2110af2896e8ce562f191648c47c6e924b33292
|
7
|
+
data.tar.gz: f06564822787aa2e4c01072a450f12dc7ba78539457bef069ed6c38708d8989bf03b1881b4bd9a26e3e0ae2ffa3002bf455bcc959b403424cafc019728be98fb
|
@@ -0,0 +1,76 @@
|
|
1
|
+
name: CI
|
2
|
+
|
3
|
+
on:
|
4
|
+
push:
|
5
|
+
branches: [ main, master ]
|
6
|
+
pull_request:
|
7
|
+
branches: [ main, master ]
|
8
|
+
|
9
|
+
jobs:
|
10
|
+
test:
|
11
|
+
name: Test (Ruby ${{ matrix.ruby }}, Sidekiq ${{ matrix.sidekiq }})
|
12
|
+
runs-on: ubuntu-latest
|
13
|
+
timeout-minutes: 15
|
14
|
+
strategy:
|
15
|
+
fail-fast: false
|
16
|
+
matrix:
|
17
|
+
ruby: ['3.4']
|
18
|
+
sidekiq: ['7', '8']
|
19
|
+
|
20
|
+
services:
|
21
|
+
redis:
|
22
|
+
image: redis:7-alpine
|
23
|
+
ports:
|
24
|
+
- 6379:6379
|
25
|
+
options: >-
|
26
|
+
--health-cmd "redis-cli ping || exit 1"
|
27
|
+
--health-interval 10s
|
28
|
+
--health-timeout 5s
|
29
|
+
--health-retries 5
|
30
|
+
|
31
|
+
steps:
|
32
|
+
- name: Checkout
|
33
|
+
uses: actions/checkout@v4
|
34
|
+
|
35
|
+
- name: Set up Ruby
|
36
|
+
uses: ruby/setup-ruby@v1
|
37
|
+
with:
|
38
|
+
ruby-version: ${{ matrix.ruby }}
|
39
|
+
bundler-cache: true
|
40
|
+
|
41
|
+
- name: Select Gemfile for Sidekiq ${{ matrix.sidekiq }}
|
42
|
+
run: |
|
43
|
+
export BUNDLE_GEMFILE="gemfiles/sidekiq_${{ matrix.sidekiq }}.gemfile"
|
44
|
+
echo "BUNDLE_GEMFILE=$BUNDLE_GEMFILE" >> $GITHUB_ENV
|
45
|
+
|
46
|
+
- name: Bundle install
|
47
|
+
run: bundle install --jobs 4 --retry 3
|
48
|
+
|
49
|
+
- name: Run specs
|
50
|
+
env:
|
51
|
+
REDIS_URL: redis://localhost:6379/1
|
52
|
+
run: |
|
53
|
+
bundle exec rspec --format progress
|
54
|
+
|
55
|
+
- uses: qltysh/qlty-action/coverage@v2
|
56
|
+
if: matrix.sidekiq == '8' && matrix.ruby == '3.4'
|
57
|
+
with:
|
58
|
+
token: ${{secrets.QLTY_COVERAGE_TOKEN}}
|
59
|
+
files: coverage/.resultset.json
|
60
|
+
|
61
|
+
lint:
|
62
|
+
name: Lint (standardrb)
|
63
|
+
runs-on: ubuntu-latest
|
64
|
+
continue-on-error: true
|
65
|
+
steps:
|
66
|
+
- name: Checkout
|
67
|
+
uses: actions/checkout@v4
|
68
|
+
|
69
|
+
- name: Set up Ruby
|
70
|
+
uses: ruby/setup-ruby@v1
|
71
|
+
with:
|
72
|
+
ruby-version: '3.4'
|
73
|
+
bundler-cache: true
|
74
|
+
|
75
|
+
- name: Run StandardRB
|
76
|
+
run: bundle exec standardrb
|
data/.gitignore
ADDED
@@ -0,0 +1,37 @@
|
|
1
|
+
*.gem
|
2
|
+
*.rbc
|
3
|
+
/.config
|
4
|
+
/coverage/
|
5
|
+
/InstalledFiles
|
6
|
+
/pkg/
|
7
|
+
/spec/reports/
|
8
|
+
/spec/examples.txt
|
9
|
+
/test/tmp/
|
10
|
+
/test/version_tmp/
|
11
|
+
/tmp/
|
12
|
+
spec/examples.txt
|
13
|
+
.byebug_history
|
14
|
+
|
15
|
+
## Documentation cache and generated files
|
16
|
+
/.yardoc/
|
17
|
+
/_yardoc/
|
18
|
+
/doc/
|
19
|
+
/rdoc/
|
20
|
+
|
21
|
+
## Environment normalization
|
22
|
+
/.bundle/
|
23
|
+
/vendor/bundle
|
24
|
+
/lib/bundler/man/
|
25
|
+
|
26
|
+
# System files
|
27
|
+
.DS_Store
|
28
|
+
|
29
|
+
# Editors
|
30
|
+
.vscode
|
31
|
+
.ruby-lsp
|
32
|
+
|
33
|
+
# Unnecessary for Ruby gems
|
34
|
+
Gemfile.lock
|
35
|
+
.ruby-version
|
36
|
+
.ruby-gemset
|
37
|
+
.tool-versions
|
data/.standard.yml
ADDED
data/Gemfile
ADDED
data/LICENSE
ADDED
@@ -0,0 +1,21 @@
|
|
1
|
+
MIT License
|
2
|
+
|
3
|
+
Copyright (c) 2025 Alexander Baygeldin
|
4
|
+
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
7
|
+
in the Software without restriction, including without limitation the rights
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
10
|
+
furnished to do so, subject to the following conditions:
|
11
|
+
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
13
|
+
copies or substantial portions of the Software.
|
14
|
+
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21
|
+
SOFTWARE.
|
data/README.md
ADDED
@@ -0,0 +1,191 @@
|
|
1
|
+
# sidekiq-fairplay
|
2
|
+
|
3
|
+
[](https://github.com/baygeldin/sidekiq-fairplay/actions/workflows/ci.yml)
|
4
|
+
[](https://rubygems.org/gems/sidekiq-fairplay)
|
5
|
+
[](https://qlty.sh/gh/baygeldin/projects/sidekiq-fairplay)
|
6
|
+
[](https://qlty.sh/gh/baygeldin/projects/sidekiq-fairplay)
|
7
|
+
|
8
|
+
> [!NOTE]
|
9
|
+
> This gem is a reference implementation of the approach I describe in my EuRuKo 2025 talk *“Prioritization justice: lessons from making background jobs fair at scale”*.
|
10
|
+
> While the approach itself is battle-tested in production in a real multi-tenant app with lots of users, the gem is not (yet). So, use at your own peril 🫣
|
11
|
+
|
12
|
+
Are you treating your users fairly? They could be stuck in the queue while a greedy user monopolizes your workers—and you might not even know it! This gem implements fair background job prioritization for Sidekiq: instead of letting a single noisy tenant hog your queues, `sidekiq‑fairplay` enqueues jobs in balanced rounds, using dynamically calculated tenant weights. It works especially well in multi‑tenant apps, where you want *fairness* even when some tenants are "needier" than others.
|
13
|
+
|
14
|
+
Take a look at the most basic example below: it intercepts all jobs you try to enqueue (e.g., via `HeavyJob.perform_async`) and slowly releases them into the main queue in batches of 100 jobs every minute, ensuring no tenant is forgotten.
|
15
|
+
|
16
|
+
```ruby
|
17
|
+
class HeavyJob
|
18
|
+
include Sidekiq::Job
|
19
|
+
include Sidekiq::Fairplay::Job
|
20
|
+
|
21
|
+
sidekiq_fairplay_options(
|
22
|
+
enqueue_interval: 1.minute,
|
23
|
+
enqueue_jobs: 100,
|
24
|
+
tenant_key: ->(user_id, _foo) { user_id }
|
25
|
+
)
|
26
|
+
|
27
|
+
def perform(user_id, foo)
|
28
|
+
# do heavy work
|
29
|
+
end
|
30
|
+
end
|
31
|
+
```
|
32
|
+
|
33
|
+
<a href="https://evilmartians.com/?utm_source=sidekiq-fair_tenant">
|
34
|
+
<picture>
|
35
|
+
<source
|
36
|
+
media="(prefers-color-scheme: dark)"
|
37
|
+
srcset="https://evilmartians.com/badges/sponsored-by-evil-martians_v2.0_for-dark-bg@2x.png"
|
38
|
+
>
|
39
|
+
<img
|
40
|
+
src="https://evilmartians.com/badges/sponsored-by-evil-martians_v2.0@2x.png"
|
41
|
+
alt="Sponsored by Evil Martians"
|
42
|
+
width="236"
|
43
|
+
height="54"
|
44
|
+
>
|
45
|
+
</picture>
|
46
|
+
</a>
|
47
|
+
|
48
|
+
## Requirements
|
49
|
+
- Ruby >= 3.4
|
50
|
+
- Sidekiq >= 7
|
51
|
+
|
52
|
+
## Installation
|
53
|
+
|
54
|
+
Add to your Gemfile and bundle:
|
55
|
+
|
56
|
+
```ruby
|
57
|
+
gem 'sidekiq-fairplay'
|
58
|
+
```
|
59
|
+
|
60
|
+
Configure the client middleware on both client and server:
|
61
|
+
|
62
|
+
```ruby
|
63
|
+
Sidekiq.configure_client do |config|
|
64
|
+
config.client_middleware do |chain|
|
65
|
+
chain.add Sidekiq::Fairplay::Middleware
|
66
|
+
end
|
67
|
+
end
|
68
|
+
|
69
|
+
Sidekiq.configure_server do |config|
|
70
|
+
config.client_middleware do |chain|
|
71
|
+
chain.add Sidekiq::Fairplay::Middleware
|
72
|
+
end
|
73
|
+
end
|
74
|
+
```
|
75
|
+
|
76
|
+
## API
|
77
|
+
|
78
|
+
In the following example you can see all of the available configuration parameters and their meaning:
|
79
|
+
|
80
|
+
```ruby
|
81
|
+
class HeavyJob
|
82
|
+
include Sidekiq::Job
|
83
|
+
include Sidekiq::Fairplay::Job
|
84
|
+
|
85
|
+
sidekiq_options queue: :heavy_stuff
|
86
|
+
|
87
|
+
sidekiq_fairplay_options(
|
88
|
+
# How often the planner tries to enqueue more jobs into `heavy_stuff` (in seconds).
|
89
|
+
# It should be large enough for the planner job to finish executing in that time.
|
90
|
+
enqueue_interval: 60,
|
91
|
+
|
92
|
+
# How many jobs the planner tries to enqueue every `enqueue_interval`.
|
93
|
+
# If the jobs are processed faster than the planner enqueues them, increase this number.
|
94
|
+
enqueue_jobs: 100,
|
95
|
+
|
96
|
+
# Tenant ID extraction from the job arguments. It's required and it should return a string.
|
97
|
+
# It is called in the client middleware (i.e. every time you call `SomeWorker.perform_async`).
|
98
|
+
tenant_key: ->(tenant_id, *_args) { tenant_id },
|
99
|
+
|
100
|
+
# Tenant weights extraction. It accepts a list of tenants who currently have jobs waiting to be enqueued.
|
101
|
+
# It should return a hash with keys being tenant IDs and values being their respective weights/priorities.
|
102
|
+
# It's called during the planning and it should be able to execute within `enqueue_interval`.
|
103
|
+
tenant_weights: ->(tenant_ids) { tenant_ids.to_h { |tid| [tid, 1] } },
|
104
|
+
|
105
|
+
# A *very* important parameter to control backpressure and avoid flooding the queue (in seconds).
|
106
|
+
# If the latency of `heavy_stuff` is larger than this number, the planner will skip a beat.
|
107
|
+
latency_threshold: 60,
|
108
|
+
|
109
|
+
# The queue in which the planner job should be executing.
|
110
|
+
planner_queue: 'default',
|
111
|
+
|
112
|
+
# For how long should the planner job hold the lock (in seconds).
|
113
|
+
# This is a protection against accidentally running multiple planners at the same time.
|
114
|
+
planner_lock_ttl: 60,
|
115
|
+
)
|
116
|
+
|
117
|
+
def perform(tenant_id, foo)
|
118
|
+
# do heavy work
|
119
|
+
end
|
120
|
+
end
|
121
|
+
```
|
122
|
+
|
123
|
+
## Configuration
|
124
|
+
You can specify some of the default values in `sidekiq.yml`:
|
125
|
+
|
126
|
+
```yaml
|
127
|
+
fairplay:
|
128
|
+
:default_latency_threshold: 60
|
129
|
+
:default_planner_queue: default
|
130
|
+
:default_planner_lock_ttl: 60
|
131
|
+
```
|
132
|
+
|
133
|
+
Or directly in the code:
|
134
|
+
```ruby
|
135
|
+
Sidekiq::Fairplay::Config.default_latency_threshold = 60
|
136
|
+
Sidekiq::Fairplay::Config.default_planner_queue = 'default'
|
137
|
+
Sidekiq::Fairplay::Config.default_planner_lock_ttl = 60
|
138
|
+
Sidekiq::Fairplay::Config.default_tenant_weights = ->(tenant_ids) { tenant_ids.to_h { |tid| [tid, 1] } }
|
139
|
+
```
|
140
|
+
|
141
|
+
## How it works
|
142
|
+
|
143
|
+
At a high level, `sidekiq-fairplay` introduces **virtual per-tenant queues**. Instead of enqueuing jobs directly into Sidekiq, each job first goes into its tenant's queue. Then, at regular intervals, a special planner job (`Sidekiq::Fairplay::Planner`) runs. The planner decides which jobs to promote from tenant queues into the main Sidekiq queue—while keeping things fair.
|
144
|
+
|
145
|
+
### Backpressure
|
146
|
+
|
147
|
+
Without backpressure, we’d just dump all jobs from tenant queues into the main queue and end up back at square one (high latency and unhappy users). To avoid that, the planner checks queue latency before enqueuing. If latency is already high, it waits. This ensures that **new tenants arriving later still get a chance** to have their jobs processed, even if older tenants are sitting on mountains of unprocessed work.
|
148
|
+
|
149
|
+
### Dynamic weights
|
150
|
+
|
151
|
+
We keep track of how many jobs are waiting to be enqueued for each tenant. Only tenants with pending work are passed to your `tenant_weights` callback, so your calculations can stay efficient. The callback returns weights: larger numbers mean more jobs get promoted to the main queue. So, weight `10` > weight `1` (just like [Sidekiq’s built-in queue weights](https://github.com/sidekiq/sidekiq/wiki/Advanced-Options#queues)).
|
152
|
+
|
153
|
+
From there, you can apply **your own prioritization logic**—for example:
|
154
|
+
- Favor paying customers over freeloaders.
|
155
|
+
- Cool down tenants who've just had a large batch processed.
|
156
|
+
- Balance "needy" vs. "quiet" tenants.
|
157
|
+
|
158
|
+
### Reliability
|
159
|
+
|
160
|
+
All operations—pushing jobs into tenant queues, pulling them out—are performed atomically in Redis using Lua scripts. This guarantees **consistent state** with a single round-trip. However, if a network failure or process crash happens after a job is enqueued into the main queue but before it’s dropped from its tenant queue, that job may be processed twice. In other words, `sidekiq-fairplay` provides **at-least-once delivery semantics**.
|
161
|
+
|
162
|
+
### Concurrency
|
163
|
+
|
164
|
+
We use two simple Redis-backed distributed locks:
|
165
|
+
|
166
|
+
1. **Planner deduplication lock**
|
167
|
+
- Ensures only one planner per job class is enqueued within `enqueue_interval`.
|
168
|
+
- This is needed to avoid flooding Sidekiq with duplicate jobs.
|
169
|
+
|
170
|
+
2. **Planner execution lock**
|
171
|
+
- Ensures only one planner per job class runs at a time.
|
172
|
+
- Not strictly necessary (the first lock already prevents most issues), but adds safety.
|
173
|
+
|
174
|
+
⚠️ Note: If a planner takes longer than its `planner_lock_ttl`, multiple planners may run concurrently.
|
175
|
+
It's not the end of the world, but it means you probably should **optimize your `tenant_weights` logic** and/or increase the `enqueue_interval`.
|
176
|
+
|
177
|
+
## Troubleshooting
|
178
|
+
|
179
|
+
If you use Sidekiq Pro with `reliable_scheduler!` enabled, keep in mind that it bypasses client middlewares. This essentially means that all jobs scheduled via `perform_in`/`perform_at` will bypass the planner and go directly into the main queue.
|
180
|
+
|
181
|
+
## Development
|
182
|
+
|
183
|
+
After checking out the repo, run `bin/setup` to install dependencies. To execute the test suite simply run `rake`.
|
184
|
+
|
185
|
+
## Contributing
|
186
|
+
|
187
|
+
Bug reports and pull requests are welcome on GitHub at https://github.com/baygeldin/sidekiq-fairplay.
|
188
|
+
|
189
|
+
## License
|
190
|
+
|
191
|
+
The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
|
data/Rakefile
ADDED
data/bin/console
ADDED
data/bin/setup
ADDED
@@ -0,0 +1,29 @@
|
|
1
|
+
module Sidekiq
  module Fairplay
    # Global gem configuration with sensible fallbacks.
    #
    # Each accessor resolves in order: an explicitly assigned value,
    # then the `fairplay:` section of Sidekiq's own configuration
    # (e.g. from sidekiq.yml), then a hardcoded default.
    module Config
      include ActiveSupport::Configurable

      # Raw options hash read from Sidekiq's configuration
      # (the `fairplay:` key). Empty hash when nothing is configured.
      def self.options
        Sidekiq.default_configuration[:fairplay] || {}
      end

      # Maximum acceptable target-queue latency before the planner skips a round.
      config_accessor :default_latency_threshold do
        options[:default_latency_threshold] || 60 # seconds
      end

      # Queue the planner job itself is pushed to.
      config_accessor :default_planner_queue do
        options[:default_planner_queue] || "default"
      end

      # TTL of the planner's distributed execution lock.
      config_accessor :default_planner_lock_ttl do
        options[:default_planner_lock_ttl] || 60 # seconds
      end

      # Weight callback used when a job class does not supply its own.
      # By default, all tenants have equal weight.
      config_accessor :default_tenant_weights do
        options[:default_tenant_weights] ||
          ->(tenant_ids) { tenant_ids.to_h { |tenant_id| [tenant_id, 1] } }
      end
    end
  end
end
|
@@ -0,0 +1,54 @@
|
|
1
|
+
module Sidekiq
  module Fairplay
    # Sidekiq client middleware that intercepts pushes of fairplay-enabled
    # jobs and parks them in per-tenant virtual queues in Redis. The planner
    # job later promotes them into the real Sidekiq queue in fair batches.
    class Middleware
      # Client middleware entry point. Returning nil short-circuits the push;
      # calling `yield` lets Sidekiq enqueue the job as usual.
      def call(job_class, job, _queue, _redis_pool)
        resolved = job_class.is_a?(String) ? constantize(job_class) : job_class
        return nil unless resolved

        fairplay_enabled = resolved.respond_to?(:sidekiq_fairplay_options_hash)
        opts = fairplay_enabled ? resolved.sidekiq_fairplay_options_hash : {}
        job_args = job["args"] || []

        # Jobs scheduled via `perform_in`/`perform_at` (including the planner itself)
        # are allowed to schedule as usual: when Sidekiq later moves them from the
        # scheduled set onto the main queue, they pass through this middleware again.
        # NOTE: `reliable_scheduler` in Sidekiq Pro skips client middlewares, so such
        # jobs will be enqueued bypassing the planner.
        return yield if job.key?("at")

        # Deduplicate planner jobs: skip enqueuing a planner for a target job
        # class if one was already enqueued recently.
        if resolved == Sidekiq::Fairplay::Planner
          target = constantize(job_args.first)
          return nil unless target
          return nil if redis.planner_enqueued_recently?(target)

          return yield
        end

        # Jobs without fairplay options configured are pushed straight through.
        return yield unless fairplay_enabled

        # Jobs already promoted by the planner carry this marker; let them through.
        return yield if job["fairplay_enqueued_at"]

        tenant_id = resolved.instance_exec(*job_args, &opts[:tenant_key])
        raise ArgumentError, "sidekiq-fairplay: tenant key cannot be nil" if tenant_id.nil?

        # Park the job in its tenant's virtual queue and kick off a planner round.
        redis.push_tenant_job(resolved, tenant_id, job_args.to_json)

        ::Sidekiq::Fairplay::Planner.set(queue: opts[:planner_queue]).perform_async(resolved.name)

        nil # short-circuit job execution
      end

      private

      # Resolve a class name to a constant; nil when it is unknown.
      def constantize(name)
        name.constantize
      rescue NameError
        nil
      end

      def redis
        @redis ||= Sidekiq::Fairplay::Redis.new
      end
    end
  end
end
|
@@ -0,0 +1,102 @@
|
|
1
|
+
require "json"
require "sidekiq/api"

module Sidekiq
  module Fairplay
    # Periodic job that promotes jobs from per-tenant virtual queues into the
    # main Sidekiq queue, sampling tenants proportionally to their weights.
    class Planner
      include Sidekiq::Job

      # The planner reschedules itself every round; retries would only create duplicates.
      sidekiq_options retry: false

      # @param job_class_name [String] name of the fairplay-enabled job class to plan for
      def perform(job_class_name)
        @job_class = constantize(job_class_name)
        return unless job_class&.respond_to?(:sidekiq_fairplay_options_hash)

        # Always schedule the next planning round first, so planning keeps
        # running even if this round is skipped or fails.
        planner = self.class.set(queue: options[:planner_queue])
        planner.perform_in(options[:enqueue_interval].to_i, job_class.name)

        # Backpressure: if the target queue is already lagging, skip this beat.
        return if job_queue.latency > options[:latency_threshold].to_i

        redis.with_planner_lock(job_class, Sidekiq::Context.current["jid"]) do
          enqueue_more_jobs!
        end
      end

      private

      attr_reader :job_class

      # Promote up to `enqueue_jobs` jobs, drawing tenants at random from a
      # weighted sampling bag so higher-weight tenants get more slots.
      def enqueue_more_jobs!
        counts = fetch_tenant_counts
        return if counts.empty?

        weighted_tenant_ids = build_weighted_tenant_ids(counts.keys)
        return if weighted_tenant_ids.empty?

        pushed = 0

        while pushed < options[:enqueue_jobs]
          tid = weighted_tenant_ids.sample
          break unless tid && enqueue_job_for_tenant(tid)

          counts[tid] -= 1
          pushed += 1

          # Remove tenants whose backlog is exhausted from the sampling bag.
          weighted_tenant_ids.reject! { it == tid } if counts[tid] <= 0
        end
      end

      # Pending-job counts per tenant, restricted to tenants with work waiting.
      def fetch_tenant_counts
        redis.tenant_counts(job_class)
          .transform_values(&:to_i)
          .select { |_tid, count| count.positive? }
      end

      # Build a sampling bag with tenant IDs repeated proportionally to their
      # weights. Tenants with a non-positive weight are effectively excluded.
      def build_weighted_tenant_ids(tenant_ids)
        weights = job_class.instance_exec(tenant_ids, &options[:tenant_weights])

        tenant_ids.each_with_object([]) do |tid, memo|
          weights[tid].to_i.times { memo << tid }
        end
      end

      # Promote a single job for the given tenant into the main queue.
      # Returns true on success, false when there was nothing to promote
      # or the push failed.
      def enqueue_job_for_tenant(tid)
        job_payload = redis.peek_tenant(job_class, tid)
        # Guard against a race: a concurrent planner (possible when a round
        # outlives `planner_lock_ttl`) may have drained this tenant's queue
        # between counting and peeking — without this check, JSON.parse(nil)
        # would raise TypeError and abort the whole round.
        return false if job_payload.nil?

        ok = Sidekiq::Client.push(
          "class" => job_class,
          "queue" => job_queue.name,
          "args" => JSON.parse(job_payload),
          "fairplay_enqueued_at" => Time.now.to_i
        )
        return false unless ok

        # Only remove the job from the tenant queue if it was successfully enqueued,
        # so that we don't lose a job if the process is killed or in case of a network issue.
        # However, this may lead to a job being processed more than once (at-least-once).
        redis.pop_tenant_job(job_class, tid)

        true
      end

      # Resolve a class name to a constant; nil when it is unknown.
      def constantize(name)
        name.constantize
      rescue NameError
        nil
      end

      def options
        @options ||= job_class.sidekiq_fairplay_options_hash
      end

      # The Sidekiq queue the target job class executes in.
      def job_queue
        @job_queue ||= Sidekiq::Queue.new(job_class.get_sidekiq_options["queue"] || "default")
      end

      def redis
        @redis ||= Sidekiq::Fairplay::Redis.new
      end
    end
  end
end
|