sbmt-outbox 5.0.4 → 6.0.1
This diff reflects the content of publicly available package versions as released to their respective public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +56 -7
- data/app/interactors/sbmt/outbox/process_item.rb +2 -1
- data/app/interactors/sbmt/outbox/retry_strategies/base.rb +15 -0
- data/app/interactors/sbmt/outbox/retry_strategies/compacted_log.rb +2 -32
- data/app/interactors/sbmt/outbox/retry_strategies/exponential_backoff.rb +3 -5
- data/app/interactors/sbmt/outbox/retry_strategies/latest_available.rb +39 -0
- data/app/interactors/sbmt/outbox/retry_strategies/no_delay.rb +13 -0
- data/app/models/sbmt/outbox/base_item.rb +9 -8
- data/app/models/sbmt/outbox/base_item_config.rb +11 -2
- data/config/initializers/yabeda.rb +32 -5
- data/lib/generators/helpers/migration.rb +2 -2
- data/lib/sbmt/outbox/cli.rb +50 -7
- data/lib/sbmt/outbox/engine.rb +25 -0
- data/lib/sbmt/outbox/logger.rb +6 -0
- data/lib/sbmt/outbox/v1/thread_pool.rb +110 -0
- data/lib/sbmt/outbox/v1/throttler.rb +54 -0
- data/lib/sbmt/outbox/v1/worker.rb +231 -0
- data/lib/sbmt/outbox/v2/box_processor.rb +148 -0
- data/lib/sbmt/outbox/v2/poll_throttler/base.rb +43 -0
- data/lib/sbmt/outbox/v2/poll_throttler/composite.rb +42 -0
- data/lib/sbmt/outbox/v2/poll_throttler/fixed_delay.rb +28 -0
- data/lib/sbmt/outbox/v2/poll_throttler/noop.rb +17 -0
- data/lib/sbmt/outbox/v2/poll_throttler/rate_limited.rb +45 -0
- data/lib/sbmt/outbox/v2/poll_throttler/redis_queue_size.rb +46 -0
- data/lib/sbmt/outbox/v2/poll_throttler/redis_queue_time_lag.rb +45 -0
- data/lib/sbmt/outbox/v2/poll_throttler.rb +49 -0
- data/lib/sbmt/outbox/v2/poller.rb +180 -0
- data/lib/sbmt/outbox/v2/processor.rb +109 -0
- data/lib/sbmt/outbox/v2/redis_job.rb +42 -0
- data/lib/sbmt/outbox/v2/tasks/base.rb +48 -0
- data/lib/sbmt/outbox/v2/tasks/default.rb +17 -0
- data/lib/sbmt/outbox/v2/tasks/poll.rb +34 -0
- data/lib/sbmt/outbox/v2/tasks/process.rb +31 -0
- data/lib/sbmt/outbox/v2/thread_pool.rb +152 -0
- data/lib/sbmt/outbox/v2/throttler.rb +13 -0
- data/lib/sbmt/outbox/v2/worker.rb +52 -0
- data/lib/sbmt/outbox/version.rb +1 -1
- data/lib/sbmt/outbox.rb +16 -2
- metadata +40 -4
- data/lib/sbmt/outbox/thread_pool.rb +0 -108
- data/lib/sbmt/outbox/throttler.rb +0 -52
- data/lib/sbmt/outbox/worker.rb +0 -233
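Taken together, the list shows the shape of the 6.0 rewrite: the previous worker stack (`thread_pool.rb`, `throttler.rb`, `worker.rb`) is removed from the top-level namespace and re-added under `v1/`, while a new `v2/` polling/processing pipeline (poller, processor, Redis job queue, and pluggable poll throttlers) is introduced alongside it. The hunks below reproduce a selection of the added files.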
data/lib/sbmt/outbox/v1/throttler.rb

```diff
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+module Sbmt
+  module Outbox
+    module V1
+      # Based on https://github.com/Shopify/limiter/blob/master/lib/limiter/rate_queue.rb
+      # We cannot use that gem because we have to support Ruby 2.5,
+      # but Shopify's limiter requires minimum Ruby 2.6
+      class Throttler
+        def initialize(limit: nil, interval: nil)
+          @limit = limit
+          @interval = interval
+          @map = (0...@limit).map { |i| base_time + (gap * i) }
+          @index = 0
+          @mutex = Mutex.new
+        end
+
+        def wait
+          time = nil
+
+          @mutex.synchronize do
+            time = @map[@index]
+
+            sleep_until(time + @interval)
+
+            @map[@index] = now
+            @index = (@index + 1) % @limit
+          end
+
+          time
+        end
+
+        private
+
+        def sleep_until(time)
+          period = time - now
+          sleep(period) if period > 0
+        end
+
+        def base_time
+          now - @interval
+        end
+
+        def gap
+          @interval.to_f / @limit.to_f
+        end
+
+        def now
+          Process.clock_gettime(Process::CLOCK_MONOTONIC)
+        end
+      end
+    end
+  end
+end
```
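For orientation: the class above is a thread-safe rate limiter over a monotonic clock. It pre-seeds a ring of `@limit` timestamps spaced `gap` seconds apart, and each `#wait` sleeps until its slot is at least `@interval` old before recording the current time and advancing. A minimal usage sketch (the loop and parameter values are illustrative, not from the gem):

```ruby
require "sbmt/outbox/v1/throttler"

# Allow at most 5 returns from #wait per second, shared across threads.
throttler = Sbmt::Outbox::V1::Throttler.new(limit: 5, interval: 1.0)

10.times do |i|
  throttler.wait # blocks when the last 5 calls all happened within the interval
  puts "tick #{i}"
end
```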
data/lib/sbmt/outbox/v1/worker.rb

```diff
@@ -0,0 +1,231 @@
+# frozen_string_literal: true
+
+require "redlock"
+require "sbmt/outbox/v1/thread_pool"
+
+module Sbmt
+  module Outbox
+    module V1
+      class Worker
+        Job = Struct.new(
+          :item_class,
+          :partition,
+          :buckets,
+          :log_tags,
+          :yabeda_labels,
+          :resource_key,
+          :resource_path,
+          keyword_init: true
+        )
+
+        delegate :config,
+          :logger,
+          :batch_process_middlewares,
+          to: "Sbmt::Outbox"
+        delegate :stop, to: :thread_pool
+        delegate :general_timeout, :cutoff_timeout, :batch_size, to: "Sbmt::Outbox.config.process_items"
+        delegate :job_counter,
+          :job_execution_runtime,
+          :item_execution_runtime,
+          :job_items_counter,
+          :job_timeout_counter,
+          to: "Yabeda.box_worker"
+
+        def initialize(boxes:, concurrency: 10)
+          self.queue = Queue.new
+          build_jobs(boxes).each { |job| queue << job }
+          self.thread_pool = ThreadPool.new { queue.pop }
+          self.concurrency = [concurrency, queue.size].min
+          self.thread_workers = {}
+          init_redis
+        end
+
+        def start
+          raise "Outbox is already started" if started
+          self.started = true
+          self.thread_workers = {}
+
+          thread_pool.start(concurrency: concurrency) do |worker_number, job|
+            touch_thread_worker!
+            result = ThreadPool::PROCESSED
+            logger.with_tags(**job.log_tags.merge(worker: worker_number)) do
+              lock_manager.lock("#{job.resource_path}:lock", general_timeout * 1000) do |locked|
+                labels = job.yabeda_labels
+
+                if locked
+                  job_execution_runtime.measure(labels) do
+                    ::Rails.application.executor.wrap do
+                      safe_process_job(job, worker_number, labels)
+                    end
+                  end
+                else
+                  result = ThreadPool::SKIPPED
+                  logger.log_info("Skip processing already locked #{job.resource_key}")
+                end
+
+                job_counter.increment(labels.merge(state: locked ? "processed" : "skipped"), by: 1)
+              end
+            end
+
+            result
+          ensure
+            queue << job
+          end
+        rescue => e
+          Outbox.error_tracker.error(e)
+          raise
+        ensure
+          self.started = false
+        end
+
+        def ready?
+          started && thread_workers.any?
+        end
+
+        def alive?
+          return false unless started
+
+          deadline = Time.current - general_timeout
+          thread_workers.all? do |_worker_number, time|
+            deadline < time
+          end
+        end
+
+        private
+
+        attr_accessor :queue, :thread_pool, :concurrency, :lock_manager, :redis, :thread_workers, :started
+
+        def init_redis
+          self.redis = ConnectionPool::Wrapper.new(size: concurrency) { RedisClientFactory.build(config.redis) }
+
+          client = if Gem::Version.new(Redlock::VERSION) >= Gem::Version.new("2.0.0")
+            redis
+          else
+            ConnectionPool::Wrapper.new(size: concurrency) { Redis.new(config.redis) }
+          end
+
+          self.lock_manager = Redlock::Client.new([client], retry_count: 0)
+        end
+
+        def build_jobs(boxes)
+          res = boxes.map do |item_class|
+            partitions = (0...item_class.config.partition_size).to_a
+            partitions.map do |partition|
+              buckets = item_class.partition_buckets.fetch(partition)
+              resource_key = "#{item_class.box_name}/#{partition}"
+
+              Job.new(
+                item_class: item_class,
+                partition: partition,
+                buckets: buckets,
+                log_tags: {
+                  box_type: item_class.box_type,
+                  box_name: item_class.box_name,
+                  box_partition: partition,
+                  trace_id: nil
+                },
+                yabeda_labels: {
+                  type: item_class.box_type,
+                  name: item_class.box_name,
+                  partition: partition
+                },
+                resource_key: resource_key,
+                resource_path: "sbmt/outbox/worker/#{resource_key}"
+              )
+            end
+          end.flatten
+
+          res.shuffle! if Outbox.config.worker.shuffle_jobs
+          res
+        end
+
+        def touch_thread_worker!
+          thread_workers[thread_pool.worker_number] = Time.current
+        end
+
+        def safe_process_job(job, worker_number, labels)
+          middlewares = Middleware::Builder.new(batch_process_middlewares)
+
+          middlewares.call(job) do
+            start_id ||= redis.call("GETDEL", "#{job.resource_path}:last_id").to_i + 1
+            logger.log_info("Start processing #{job.resource_key} from id #{start_id}")
+            process_job_with_timeouts(job, start_id, labels)
+          end
+        rescue => e
+          log_fatal(e, job, worker_number)
+          track_fatal(e, job)
+        end
+
+        def process_job_with_timeouts(job, start_id, labels)
+          count = 0
+          last_id = nil
+          lock_timer = Cutoff.new(general_timeout)
+          requeue_timer = Cutoff.new(cutoff_timeout)
+
+          process_job(job, start_id, labels) do |item|
+            job_items_counter.increment(labels, by: 1)
+            last_id = item.id
+            count += 1
+            lock_timer.checkpoint!
+            requeue_timer.checkpoint!
+          end
+
+          logger.log_info("Finish processing #{job.resource_key} at id #{last_id}")
+        rescue Cutoff::CutoffExceededError
+          job_timeout_counter.increment(labels, by: 1)
+
+          msg = if lock_timer.exceeded?
+            "Lock timeout"
+          elsif requeue_timer.exceeded?
+            redis.call("SET", "#{job.resource_path}:last_id", last_id, "EX", general_timeout) if last_id
+            "Requeue timeout"
+          end
+          raise "Unknown timer has been timed out" unless msg
+
+          logger.log_info("#{msg} while processing #{job.resource_key} at id #{last_id}")
+        end
+
+        def process_job(job, start_id, labels)
+          Outbox.database_switcher.use_slave do
+            item_class = job.item_class
+
+            scope = item_class
+              .for_processing
+              .select(:id)
+
+            if item_class.has_attribute?(:bucket)
+              scope = scope.where(bucket: job.buckets)
+            elsif job.partition > 0
+              raise "Could not filter by partition #{job.resource_key}"
+            end
+
+            scope.find_each(start: start_id, batch_size: batch_size) do |item|
+              touch_thread_worker!
+              item_execution_runtime.measure(labels) do
+                Outbox.database_switcher.use_master do
+                  ProcessItem.call(job.item_class, item.id)
+                end
+                yield item
+              end
+            end
+          end
+        end
+
+        def log_fatal(e, job, worker_number)
+          backtrace = e.backtrace.join("\n") if e.respond_to?(:backtrace)
+
+          logger.log_error(
+            "Failed processing #{job.resource_key} with error: #{e.class} #{e.message}",
+            backtrace: backtrace
+          )
+        end
+
+        def track_fatal(e, job)
+          job_counter.increment(job.yabeda_labels.merge(state: "failed"))
+
+          Outbox.error_tracker.error(e, **job.log_tags)
+        end
+      end
+    end
+  end
+end
```
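The worker above builds one `Job` per (box, partition) pair, feeds them into a shared queue, and lets each pool thread pop a job, take a Redlock lease on it, process a batch of items, and requeue it (the `ensure queue << job` inside the block). A hedged boot sketch; `MyOutboxItem` is a placeholder for an application-defined outbox item class:

```ruby
# MyOutboxItem stands in for a real outbox item model.
worker = Sbmt::Outbox::V1::Worker.new(boxes: [MyOutboxItem], concurrency: 4)

Thread.new { worker.start }   # #start blocks until the pool is stopped
sleep 0.1 until worker.ready?
# ... later, e.g. from a signal handler:
worker.stop                   # delegated to the thread pool
```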
data/lib/sbmt/outbox/v2/box_processor.rb

```diff
@@ -0,0 +1,148 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/throttler"
+require "sbmt/outbox/v2/thread_pool"
+require "sbmt/outbox/v2/tasks/default"
+
+module Sbmt
+  module Outbox
+    module V2
+      class BoxProcessor
+        delegate :config, :logger, to: "Sbmt::Outbox"
+        delegate :box_worker, to: "Yabeda"
+        attr_reader :started, :threads_count, :worker_name
+
+        def initialize(boxes:, threads_count:, name: "abstract_worker", redis: nil)
+          @threads_count = threads_count
+          @worker_name = name
+
+          @queue = build_task_queue(boxes)
+          @thread_pool = ThreadPool.new(
+            concurrency: threads_count,
+            name: "#{name}_thread_pool"
+          ) do
+            queue.pop
+          end
+
+          @started = false
+
+          init_redis(redis)
+        end
+
+        def start
+          raise "#{worker_name} is already started" if started
+          @started = true
+
+          thread_pool.start do |worker_number, scheduled_task|
+            result = ThreadPool::PROCESSED
+            last_result = Thread.current[:last_polling_result]
+
+            throttling_res = throttle(worker_number, scheduled_task, last_result)
+            next ThreadPool::SKIPPED if throttling_res&.value_or(nil) == Sbmt::Outbox::V2::Throttler::SKIP_STATUS
+
+            lock_task(scheduled_task) do |locked_task|
+              base_labels = scheduled_task.yabeda_labels.merge(worker_name: worker_name)
+              if locked_task
+                labels = base_labels.merge(locked_task.yabeda_labels)
+                box_worker.job_execution_runtime.measure(labels) do
+                  ::Rails.application.executor.wrap do
+                    logger.with_tags(**locked_task.log_tags) do
+                      result = safe_process_task(worker_number, locked_task)
+                    end
+                  end
+                end
+              else
+                result = ThreadPool::SKIPPED
+              end
+
+              box_worker.job_counter.increment(base_labels.merge(state: locked_task ? "processed" : "skipped"), by: 1)
+            end
+
+            Thread.current[:last_polling_result] = result || ThreadPool::PROCESSED
+          ensure
+            queue << scheduled_task
+          end
+        rescue => e
+          Outbox.error_tracker.error(e)
+          raise
+        end
+
+        def stop
+          @started = false
+          @thread_pool.stop
+        end
+
+        def ready?
+          started && @thread_pool.running?
+        end
+
+        def alive?(timeout)
+          return false unless ready?
+
+          @thread_pool.alive?(timeout)
+        end
+
+        def safe_process_task(worker_number, task)
+          process_task(worker_number, task)
+        rescue => e
+          log_fatal(e, task)
+          track_fatal(e, task)
+        end
+
+        def throttle(_worker_number, _scheduled_task, _result)
+          # noop by default
+          # IMPORTANT: method is called from thread-pool, i.e. code must be thread-safe
+        end
+
+        def process_task(_worker_number, _task)
+          raise NotImplementedError, "Implement #process_task for Sbmt::Outbox::V2::BoxProcessor"
+        end
+
+        private
+
+        attr_accessor :queue, :thread_pool, :redis, :lock_manager
+
+        def init_redis(redis)
+          self.redis = redis || ConnectionPool::Wrapper.new(size: threads_count) { RedisClientFactory.build(config.redis) }
+
+          client = if Gem::Version.new(Redlock::VERSION) >= Gem::Version.new("2.0.0")
+            self.redis
+          else
+            ConnectionPool::Wrapper.new(size: threads_count) { Redis.new(config.redis) }
+          end
+
+          self.lock_manager = Redlock::Client.new([client], retry_count: 0)
+        end
+
+        def lock_task(scheduled_task)
+          # by default there's no locking
+          yield scheduled_task
+        end
+
+        def build_task_queue(boxes)
+          scheduled_tasks = boxes.map do |item_class|
+            Tasks::Default.new(item_class: item_class, worker_name: worker_name)
+          end
+
+          scheduled_tasks.shuffle!
+
+          Queue.new.tap { |queue| scheduled_tasks.each { |task| queue << task } }
+        end
+
+        def log_fatal(e, task)
+          backtrace = e.backtrace.join("\n") if e.respond_to?(:backtrace)
+
+          logger.log_error(
+            "Failed processing #{task} with error: #{e.class} #{e.message}",
+            backtrace: backtrace
+          )
+        end
+
+        def track_fatal(e, task)
+          box_worker.job_counter.increment(task.yabeda_labels.merge(state: "failed"))
+          Outbox.error_tracker.error(e, **task.log_tags)
+        end
+      end
+    end
+  end
+end
```
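`BoxProcessor` is the template base for v2 workers: it owns the task queue, thread pool, Redis/Redlock setup, and metrics, while subclasses override `#process_task` (and optionally `#throttle` and `#lock_task`). A minimal hypothetical subclass, just to show the contract:

```ruby
# Hypothetical subclass for illustration: it only logs each task.
class LoggingProcessor < Sbmt::Outbox::V2::BoxProcessor
  def process_task(_worker_number, task)
    Sbmt::Outbox.logger.log_info("would process #{task}")
    Sbmt::Outbox::V2::ThreadPool::PROCESSED
  end
end

# MyOutboxItem is again a placeholder item class; #start blocks.
LoggingProcessor.new(boxes: [MyOutboxItem], threads_count: 2, name: "logging").start
```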
data/lib/sbmt/outbox/v2/poll_throttler/base.rb

```diff
@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/throttler"
+
+module Sbmt
+  module Outbox
+    module V2
+      module PollThrottler
+        class Base < Outbox::DryInteractor
+          delegate :poll_throttling_counter, :poll_throttling_runtime, to: "Yabeda.box_worker"
+
+          def call(worker_num, poll_task, task_result)
+            with_metrics(poll_task) do
+              wait(worker_num, poll_task, task_result)
+            end
+          end
+
+          def wait(_worker_num, _poll_task, _task_result)
+            raise NotImplementedError, "Implement #wait for Sbmt::Outbox::PollThrottler::Base"
+          end
+
+          private
+
+          def with_metrics(poll_task, &block)
+            tags = metric_tags(poll_task)
+            result = nil
+
+            poll_throttling_runtime.measure(tags) do
+              result = yield
+              poll_throttling_counter.increment(tags.merge(status: result.value_or(result.failure)), by: 1)
+            end
+
+            result
+          end
+
+          def metric_tags(poll_task)
+            poll_task.yabeda_labels.merge(throttler: self.class.name)
+          end
+        end
+      end
+    end
+  end
+end
```
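`Base` fixes the throttler contract: `#call` wraps the subclass's `#wait` in Yabeda runtime/counter metrics, and `#wait` must return a dry-monads result (`Success` carrying one of the `Throttler` status constants, or `Failure`). A hypothetical custom throttler, to show how little a subclass needs:

```ruby
# Hypothetical: skip polling entirely during a nightly maintenance window.
class MaintenanceWindowThrottler < Sbmt::Outbox::V2::PollThrottler::Base
  def wait(_worker_num, _poll_task, _task_result)
    if (0..3).cover?(Time.current.hour)
      Success(Sbmt::Outbox::V2::Throttler::SKIP_STATUS) # poller skips this cycle
    else
      Success(Sbmt::Outbox::V2::Throttler::NOOP_STATUS) # poll as usual
    end
  end
end
```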
data/lib/sbmt/outbox/v2/poll_throttler/composite.rb

```diff
@@ -0,0 +1,42 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/poll_throttler/base"
+
+module Sbmt
+  module Outbox
+    module V2
+      module PollThrottler
+        class Composite < Base
+          attr_reader :throttlers
+
+          def initialize(throttlers:)
+            super()
+
+            @throttlers = throttlers
+          end
+
+          def call(worker_num, poll_task, task_result)
+            # each throttler delays the polling thread by its own rules,
+            # i.e. the resulting delay is the sum of each throttler's delays
+            results = @throttlers.map do |t|
+              res = t.call(worker_num, poll_task, task_result)
+
+              return res if res.success? && res.value! == Sbmt::Outbox::V2::Throttler::SKIP_STATUS
+              return res if res.failure?
+
+              res
+            end
+
+            throttled(results) || Success(Sbmt::Outbox::V2::Throttler::NOOP_STATUS)
+          end
+
+          private
+
+          def throttled(results)
+            results.find { |res| res.success? && res.value! == Sbmt::Outbox::V2::Throttler::THROTTLE_STATUS }
+          end
+        end
+      end
+    end
+  end
+end
```
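Note the control flow: `Composite` short-circuits on the first `SKIP_STATUS` or `Failure`, otherwise runs every member (so their sleeps add up) and reports `THROTTLE_STATUS` if any member throttled. An illustrative composition (parameter values are arbitrary):

```ruby
throttler = Sbmt::Outbox::V2::PollThrottler::Composite.new(
  throttlers: [
    Sbmt::Outbox::V2::PollThrottler::RateLimited.new(limit: 10, interval: 1),
    Sbmt::Outbox::V2::PollThrottler::FixedDelay.new(delay: 0.5)
  ]
)

# worker_num, poll_task and task_result are supplied by the poller loop.
throttler.call(worker_num, poll_task, task_result)
```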
data/lib/sbmt/outbox/v2/poll_throttler/fixed_delay.rb

```diff
@@ -0,0 +1,28 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/poll_throttler/base"
+require "sbmt/outbox/v2/thread_pool"
+
+module Sbmt
+  module Outbox
+    module V2
+      module PollThrottler
+        class FixedDelay < Base
+          def initialize(delay:)
+            super()
+
+            @delay = delay
+          end
+
+          def wait(worker_num, poll_task, task_result)
+            return Success(Sbmt::Outbox::V2::Throttler::NOOP_STATUS) unless task_result == Sbmt::Outbox::V2::ThreadPool::PROCESSED
+
+            sleep(@delay)
+
+            Success(Sbmt::Outbox::V2::Throttler::THROTTLE_STATUS)
+          end
+        end
+      end
+    end
+  end
+end
```
data/lib/sbmt/outbox/v2/poll_throttler/noop.rb

```diff
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/poll_throttler/base"
+
+module Sbmt
+  module Outbox
+    module V2
+      module PollThrottler
+        class Noop < Base
+          def wait(worker_num, poll_task, _task_result)
+            Success(Sbmt::Outbox::V2::Throttler::NOOP_STATUS)
+          end
+        end
+      end
+    end
+  end
+end
```
data/lib/sbmt/outbox/v2/poll_throttler/rate_limited.rb

```diff
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/poll_throttler/base"
+require "ruby-limiter"
+
+module Sbmt
+  module Outbox
+    module V2
+      module PollThrottler
+        class RateLimited < Base
+          attr_reader :queues
+
+          def initialize(limit: nil, interval: nil, balanced: true)
+            @limit = limit
+            @interval = interval
+            @balanced = balanced
+            @queues = {}
+            @mutex = Mutex.new
+          end
+
+          def wait(_worker_num, poll_task, _task_result)
+            queue_for(poll_task).shift
+
+            Success(Sbmt::Outbox::V2::Throttler::THROTTLE_STATUS)
+          end
+
+          private
+
+          def queue_for(task)
+            key = task.item_class.box_name
+            return @queues[key] if @queues.key?(key)
+
+            @mutex.synchronize do
+              return @queues[key] if @queues.key?(key)
+
+              @queues[key] = Limiter::RateQueue.new(
+                @limit, interval: @interval, balanced: @balanced
+              )
+            end
+          end
+        end
+      end
+    end
+  end
+end
```
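`#queue_for` uses double-checked locking to create at most one `Limiter::RateQueue` per box: the unlocked read is the fast path, and the re-check under the mutex closes the race between two threads that both miss it. The pattern in isolation, as a generic sketch:

```ruby
# Generic double-checked memoization, mirroring #queue_for above.
class LazyRegistry
  def initialize
    @items = {}
    @mutex = Mutex.new
  end

  def fetch(key)
    return @items[key] if @items.key?(key)   # fast path, no lock taken

    @mutex.synchronize do
      return @items[key] if @items.key?(key) # re-check under the lock
      @items[key] = yield                    # build exactly once
    end
  end
end
```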
data/lib/sbmt/outbox/v2/poll_throttler/redis_queue_size.rb

```diff
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/poll_throttler/base"
+
+module Sbmt
+  module Outbox
+    module V2
+      module PollThrottler
+        class RedisQueueSize < Base
+          delegate :redis_job_queue_size, to: "Yabeda.box_worker"
+
+          def initialize(redis:, min_size: -1, max_size: 100, delay: 0)
+            super()
+
+            @redis = redis
+            @min_size = min_size
+            @max_size = max_size
+            @delay = delay
+          end
+
+          def wait(worker_num, poll_task, _task_result)
+            # LLEN is O(1)
+            queue_size = @redis.call("LLEN", poll_task.redis_queue).to_i
+            redis_job_queue_size.set(metric_tags(poll_task), queue_size)
+
+            if queue_size < @min_size
+              # just throttle (not skip) and wait until the job queue size becomes acceptable
+              sleep(@delay)
+              return Success(Sbmt::Outbox::V2::Throttler::THROTTLE_STATUS)
+            end
+
+            if queue_size > @max_size
+              # completely skip poll-cycle if job queue is oversized
+              sleep(@delay)
+              return Success(Sbmt::Outbox::V2::Throttler::SKIP_STATUS)
+            end
+
+            Success(Sbmt::Outbox::V2::Throttler::NOOP_STATUS)
+          rescue => ex
+            Failure(ex.message)
+          end
+        end
+      end
+    end
+  end
+end
```
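One configuration note: `LLEN` never returns a negative number, so with the default `min_size: -1` the throttle branch can never fire; both thresholds only take effect when set explicitly. For example, with `min_size: 10, max_size: 100`, a queue of 3 throttles (sleep, then poll again), 50 polls normally, and 150 skips the cycle.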
data/lib/sbmt/outbox/v2/poll_throttler/redis_queue_time_lag.rb

```diff
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/poll_throttler/base"
+require "sbmt/outbox/v2/redis_job"
+
+module Sbmt
+  module Outbox
+    module V2
+      module PollThrottler
+        class RedisQueueTimeLag < Base
+          delegate :redis_job_queue_time_lag, to: "Yabeda.box_worker"
+
+          def initialize(redis:, min_lag: 5, delay: 0)
+            super()
+
+            @redis = redis
+            @min_lag = min_lag
+            @delay = delay
+          end
+
+          def wait(worker_num, poll_task, _task_result)
+            # LINDEX is O(1) for first/last element
+            oldest_job = @redis.call("LINDEX", poll_task.redis_queue, -1)
+            return Success(Sbmt::Outbox::V2::Throttler::NOOP_STATUS) if oldest_job.nil?
+
+            job = RedisJob.deserialize!(oldest_job)
+            time_lag = Time.current.to_i - job.timestamp
+
+            redis_job_queue_time_lag.set(metric_tags(poll_task), time_lag)
+
+            if time_lag <= @min_lag
+              sleep(@delay)
+              return Success(Sbmt::Outbox::V2::Throttler::SKIP_STATUS)
+            end
+
+            Success(Sbmt::Outbox::V2::Throttler::NOOP_STATUS)
+          rescue => ex
+            # noop, just skip any redis / serialization errors
+            Failure(ex.message)
+          end
+        end
+      end
+    end
+  end
+end
```