sbmt-outbox 5.0.1 → 6.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +68 -9
- data/app/interactors/sbmt/outbox/process_item.rb +2 -1
- data/app/interactors/sbmt/outbox/retry_strategies/base.rb +15 -0
- data/app/interactors/sbmt/outbox/retry_strategies/compacted_log.rb +2 -32
- data/app/interactors/sbmt/outbox/retry_strategies/exponential_backoff.rb +3 -5
- data/app/interactors/sbmt/outbox/retry_strategies/latest_available.rb +39 -0
- data/app/interactors/sbmt/outbox/retry_strategies/no_delay.rb +13 -0
- data/app/models/sbmt/outbox/base_item.rb +9 -8
- data/app/models/sbmt/outbox/base_item_config.rb +23 -4
- data/config/initializers/yabeda.rb +32 -5
- data/lib/generators/helpers/migration.rb +2 -2
- data/lib/sbmt/outbox/cli.rb +50 -7
- data/lib/sbmt/outbox/engine.rb +26 -0
- data/lib/sbmt/outbox/logger.rb +6 -0
- data/lib/sbmt/outbox/v1/thread_pool.rb +110 -0
- data/lib/sbmt/outbox/v1/throttler.rb +54 -0
- data/lib/sbmt/outbox/v1/worker.rb +231 -0
- data/lib/sbmt/outbox/v2/box_processor.rb +148 -0
- data/lib/sbmt/outbox/v2/poll_throttler/base.rb +43 -0
- data/lib/sbmt/outbox/v2/poll_throttler/composite.rb +42 -0
- data/lib/sbmt/outbox/v2/poll_throttler/fixed_delay.rb +28 -0
- data/lib/sbmt/outbox/v2/poll_throttler/noop.rb +17 -0
- data/lib/sbmt/outbox/v2/poll_throttler/rate_limited.rb +24 -0
- data/lib/sbmt/outbox/v2/poll_throttler/redis_queue_size.rb +46 -0
- data/lib/sbmt/outbox/v2/poll_throttler/redis_queue_time_lag.rb +45 -0
- data/lib/sbmt/outbox/v2/poll_throttler.rb +49 -0
- data/lib/sbmt/outbox/v2/poller.rb +180 -0
- data/lib/sbmt/outbox/v2/processor.rb +101 -0
- data/lib/sbmt/outbox/v2/redis_job.rb +42 -0
- data/lib/sbmt/outbox/v2/tasks/base.rb +48 -0
- data/lib/sbmt/outbox/v2/tasks/default.rb +17 -0
- data/lib/sbmt/outbox/v2/tasks/poll.rb +34 -0
- data/lib/sbmt/outbox/v2/tasks/process.rb +31 -0
- data/lib/sbmt/outbox/v2/thread_pool.rb +152 -0
- data/lib/sbmt/outbox/v2/throttler.rb +13 -0
- data/lib/sbmt/outbox/v2/worker.rb +52 -0
- data/lib/sbmt/outbox/version.rb +1 -1
- data/lib/sbmt/outbox.rb +16 -2
- metadata +41 -5
- data/lib/sbmt/outbox/thread_pool.rb +0 -108
- data/lib/sbmt/outbox/throttler.rb +0 -52
- data/lib/sbmt/outbox/worker.rb +0 -233
data/lib/sbmt/outbox/v2/poll_throttler/redis_queue_size.rb
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/poll_throttler/base"
+
+module Sbmt
+  module Outbox
+    module V2
+      module PollThrottler
+        class RedisQueueSize < Base
+          delegate :redis_job_queue_size, to: "Yabeda.box_worker"
+
+          def initialize(redis:, min_size: -1, max_size: 100, delay: 0)
+            super()
+
+            @redis = redis
+            @min_size = min_size
+            @max_size = max_size
+            @delay = delay
+          end
+
+          def wait(worker_num, poll_task, _task_result)
+            # LLEN is O(1)
+            queue_size = @redis.call("LLEN", poll_task.redis_queue).to_i
+            redis_job_queue_size.set(metric_tags(poll_task), queue_size)
+
+            if queue_size < @min_size
+              # just throttle (not skip) to wait for the job queue size to become acceptable
+              sleep(@delay)
+              return Success(Sbmt::Outbox::V2::Throttler::THROTTLE_STATUS)
+            end
+
+            if queue_size > @max_size
+              # completely skip the poll cycle if the job queue is oversized
+              sleep(@delay)
+              return Success(Sbmt::Outbox::V2::Throttler::SKIP_STATUS)
+            end
+
+            Success(Sbmt::Outbox::V2::Throttler::NOOP_STATUS)
+          rescue => ex
+            Failure(ex.message)
+          end
+        end
+      end
+    end
+  end
+end
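
Condensed, this throttler makes a three-way decision on the queue length; note that with the default min_size: -1 the lower bound can never fire (LLEN is never negative), so out of the box only the upper bound applies. A minimal stand-alone sketch of the rule — the queue_size_status helper is hypothetical, not part of the gem:

    # Hypothetical helper illustrating RedisQueueSize's decision rule.
    def queue_size_status(queue_size, min_size:, max_size:)
      return :throttle if queue_size < min_size # backlog too small: delay, then poll anyway
      return :skip     if queue_size > max_size # job queue oversized: delay and skip this poll cycle
      :noop                                     # within bounds: poll at full speed
    end

    queue_size_status(3,   min_size: 10, max_size: 100) # => :throttle
    queue_size_status(50,  min_size: 10, max_size: 100) # => :noop
    queue_size_status(250, min_size: 10, max_size: 100) # => :skip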
data/lib/sbmt/outbox/v2/poll_throttler/redis_queue_time_lag.rb
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/poll_throttler/base"
+require "sbmt/outbox/v2/redis_job"
+
+module Sbmt
+  module Outbox
+    module V2
+      module PollThrottler
+        class RedisQueueTimeLag < Base
+          delegate :redis_job_queue_time_lag, to: "Yabeda.box_worker"
+
+          def initialize(redis:, min_lag: 5, delay: 0)
+            super()
+
+            @redis = redis
+            @min_lag = min_lag
+            @delay = delay
+          end
+
+          def wait(worker_num, poll_task, _task_result)
+            # LINDEX is O(1) for first/last element
+            oldest_job = @redis.call("LINDEX", poll_task.redis_queue, -1)
+            return Success(Sbmt::Outbox::V2::Throttler::NOOP_STATUS) if oldest_job.nil?
+
+            job = RedisJob.deserialize!(oldest_job)
+            time_lag = Time.current.to_i - job.timestamp
+
+            redis_job_queue_time_lag.set(metric_tags(poll_task), time_lag)
+
+            if time_lag <= @min_lag
+              sleep(@delay)
+              return Success(Sbmt::Outbox::V2::Throttler::SKIP_STATUS)
+            end
+
+            Success(Sbmt::Outbox::V2::Throttler::NOOP_STATUS)
+          rescue => ex
+            # noop, just skip any redis / serialization errors
+            Failure(ex.message)
+          end
+        end
+      end
+    end
+  end
+end
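
Because the poller LPUSHes jobs and the processor BRPOPs them, the tail element ("LINDEX <queue> -1") is the oldest queued job; its age decides whether a poll cycle is worth running at all. A hypothetical helper condensing that rule:

    # Hypothetical helper illustrating RedisQueueTimeLag's decision rule.
    def time_lag_status(oldest_timestamp, min_lag:, now: Time.now.to_i)
      return :noop if oldest_timestamp.nil?  # empty queue: poll normally
      lag = now - oldest_timestamp
      lag <= min_lag ? :skip : :noop         # queue still fresh: skip this poll cycle
    end

    time_lag_status(nil, min_lag: 5)                 # => :noop
    time_lag_status(Time.now.to_i - 2,  min_lag: 5)  # => :skip
    time_lag_status(Time.now.to_i - 30, min_lag: 5)  # => :noop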
data/lib/sbmt/outbox/v2/poll_throttler.rb
@@ -0,0 +1,49 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/poll_throttler/base"
+require "sbmt/outbox/v2/poll_throttler/composite"
+require "sbmt/outbox/v2/poll_throttler/rate_limited"
+require "sbmt/outbox/v2/poll_throttler/fixed_delay"
+require "sbmt/outbox/v2/poll_throttler/noop"
+require "sbmt/outbox/v2/poll_throttler/redis_queue_size"
+require "sbmt/outbox/v2/poll_throttler/redis_queue_time_lag"
+
+module Sbmt
+  module Outbox
+    module V2
+      module PollThrottler
+        POLL_TACTICS = %w[noop default low-priority aggressive]
+
+        def self.build(tactic, redis, poller_config)
+          raise "WARN: invalid poller poll tactic provided: #{tactic}, available options: #{POLL_TACTICS}" unless POLL_TACTICS.include?(tactic)
+
+          if tactic == "default"
+            # composite of RateLimited & RedisQueueSize (upper bound only):
+            # optimal polling performance for most cases
+            Composite.new(throttlers: [
+              RedisQueueSize.new(redis: redis, max_size: poller_config.max_queue_size, delay: poller_config.queue_delay),
+              RateLimited.new(limit: poller_config.rate_limit, interval: poller_config.rate_interval)
+            ])
+          elsif tactic == "low-priority"
+            # composite of RateLimited & RedisQueueSize (with lower & upper bounds) & RedisQueueTimeLag:
+            # delays polling depending on the min job queue size threshold
+            # and on the age of the oldest item in the redis queue;
+            # optimal polling performance for a low-intensity data flow
+            Composite.new(throttlers: [
+              RedisQueueSize.new(redis: redis, min_size: poller_config.min_queue_size, max_size: poller_config.max_queue_size, delay: poller_config.queue_delay),
+              RedisQueueTimeLag.new(redis: redis, min_lag: poller_config.min_queue_timelag, delay: poller_config.queue_delay),
+              RateLimited.new(limit: poller_config.rate_limit, interval: poller_config.rate_interval)
+            ])
+          elsif tactic == "aggressive"
+            # throttles only by the max job queue size: maximum polling throughput,
+            # optimal for a high-intensity data flow
+            RedisQueueSize.new(redis: redis, max_size: poller_config.max_queue_size, delay: poller_config.queue_delay)
+          elsif tactic == "noop"
+            # no-op, for testing purposes
+            Noop.new
+          end
+        end
+      end
+    end
+  end
+end
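
For illustration, the factory can be driven directly. The PollerConfig Struct below is a hypothetical stand-in for the engine's real poller configuration (the field names simply mirror the accessors called above), and RedisClient from the redis-client gem is assumed as the connection class:

    require "redis_client"
    require "sbmt/outbox/v2/poll_throttler"

    # Hypothetical stand-in for Sbmt::Outbox.poller_config.
    PollerConfig = Struct.new(
      :max_queue_size, :min_queue_size, :min_queue_timelag,
      :queue_delay, :rate_limit, :rate_interval,
      keyword_init: true
    )

    config = PollerConfig.new(
      max_queue_size: 100, min_queue_size: 10, min_queue_timelag: 5,
      queue_delay: 0.1, rate_limit: 60, rate_interval: 60
    )

    redis = RedisClient.new(url: "redis://127.0.0.1:6379")
    throttler = Sbmt::Outbox::V2::PollThrottler.build("default", redis, config)

    # An unknown tactic raises:
    # Sbmt::Outbox::V2::PollThrottler.build("bogus", redis, config) # => RuntimeError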
data/lib/sbmt/outbox/v2/poller.rb
@@ -0,0 +1,180 @@
+# frozen_string_literal: true
+
+require "redlock"
+require "sbmt/outbox/v2/box_processor"
+require "sbmt/outbox/v2/redis_job"
+require "sbmt/outbox/v2/poll_throttler"
+require "sbmt/outbox/v2/tasks/poll"
+
+module Sbmt
+  module Outbox
+    module V2
+      class Poller < BoxProcessor
+        delegate :poller_config, :logger, to: "Sbmt::Outbox"
+        delegate :box_worker, to: "Yabeda"
+        attr_reader :partitions_count, :lock_timeout, :regular_items_batch_size, :retryable_items_batch_size, :max_buffer_size, :max_batch_size, :throttler
+
+        def initialize(
+          boxes,
+          partitions_count: nil,
+          threads_count: nil,
+          lock_timeout: nil,
+          regular_items_batch_size: nil,
+          retryable_items_batch_size: nil,
+          throttler_tactic: nil,
+          redis: nil
+        )
+          @partitions_count = partitions_count || poller_config.concurrency
+          @lock_timeout = lock_timeout || poller_config.general_timeout
+
+          @regular_items_batch_size = regular_items_batch_size || poller_config.regular_items_batch_size
+          @retryable_items_batch_size = retryable_items_batch_size || poller_config.retryable_items_batch_size
+          @max_buffer_size = @regular_items_batch_size + @retryable_items_batch_size
+          @max_batch_size = @regular_items_batch_size
+
+          super(boxes: boxes, threads_count: threads_count || poller_config.threads_count, name: "poller", redis: redis)
+
+          @throttler = PollThrottler.build(throttler_tactic || poller_config.tactic || "default", self.redis, poller_config)
+        end
+
+        def throttle(worker_number, poll_task, result)
+          throttler.call(worker_number, poll_task, result)
+        end
+
+        def process_task(_worker_number, task)
+          poll(task)
+        end
+
+        private
+
+        def build_task_queue(boxes)
+          scheduled_tasks = boxes.map do |item_class|
+            schedule_concurrency = (0...partitions_count).to_a
+            schedule_concurrency.map do |partition|
+              buckets = item_class.calc_bucket_partitions(partitions_count).fetch(partition)
+
+              Tasks::Poll.new(
+                item_class: item_class,
+                worker_name: worker_name,
+                partition: partition,
+                buckets: buckets
+              )
+            end
+          end.flatten
+
+          scheduled_tasks.shuffle!
+          Queue.new.tap { |queue| scheduled_tasks.each { |task| queue << task } }
+        end
+
+        def lock_task(poll_task)
+          lock_manager.lock("#{poll_task.resource_path}:lock", lock_timeout * 1000) do |locked|
+            lock_status = locked ? "locked" : "skipped"
+            logger.log_debug("poller: lock for #{poll_task}: #{lock_status}")
+
+            yield(locked ? poll_task : nil)
+          end
+        end
+
+        def poll(task)
+          lock_timer = Cutoff.new(lock_timeout)
+          last_id = 0
+
+          box_worker.item_execution_runtime.measure(task.yabeda_labels) do
+            Outbox.database_switcher.use_slave do
+              result = fetch_items(task) do |item|
+                box_worker.job_items_counter.increment(task.yabeda_labels)
+
+                last_id = item.id
+                lock_timer.checkpoint!
+              end
+
+              logger.log_debug("poll task #{task}: fetched buckets:#{result.keys.count}, items:#{result.values.sum(0) { |ids| ids.count }}")
+
+              push_to_redis(task, result) if result.present?
+            end
+          end
+        rescue Cutoff::CutoffExceededError
+          box_worker.job_timeout_counter.increment(task.yabeda_labels)
+          logger.log_info("Lock timeout while processing #{task.resource_key} at id #{last_id}")
+        end
+
+        def fetch_items(task)
+          regular_count = 0
+          retryable_count = 0
+
+          # a single buffer to preserve item positions
+          poll_buffer = {}
+
+          fetch_items_with_retries(task, max_batch_size).each do |item|
+            if item.errors_count > 0
+              # skip if the retryable buffer capacity limit is reached
+              next if retryable_count >= retryable_items_batch_size
+
+              poll_buffer[item.bucket] ||= []
+              poll_buffer[item.bucket] << item.id
+
+              retryable_count += 1
+            else
+              poll_buffer[item.bucket] ||= []
+              poll_buffer[item.bucket] << item.id
+
+              regular_count += 1
+            end
+
+            yield(item)
+          end
+
+          box_worker.batches_per_poll_counter.increment(task.yabeda_labels)
+
+          return {} if poll_buffer.blank?
+
+          # regular items have priority over retryable ones
+          return poll_buffer if regular_count >= regular_items_batch_size
+
+          # additionally poll regular items only when the retryable buffer capacity limit is reached
+          # and no regular items were found
+          if retryable_count >= retryable_items_batch_size && regular_count == 0
+            fetch_regular_items(task, regular_items_batch_size).each do |item|
+              poll_buffer[item.bucket] ||= []
+              poll_buffer[item.bucket] << item.id
+
+              yield(item)
+            end
+            box_worker.batches_per_poll_counter.increment(task.yabeda_labels)
+          end
+
+          poll_buffer
+        end
+
+        def fetch_items_with_retries(task, limit)
+          task.item_class
+            .for_processing
+            .where(bucket: task.buckets)
+            .order(id: :asc)
+            .limit(limit)
+            .select(:id, :bucket, :errors_count)
+        end
+
+        def fetch_regular_items(task, limit)
+          task.item_class
+            .for_processing
+            .where(bucket: task.buckets, errors_count: 0)
+            .order(id: :asc)
+            .limit(limit)
+            .select(:id, :bucket)
+        end
+
+        def push_to_redis(poll_task, ids_per_bucket)
+          redis.pipelined do |conn|
+            ids_per_bucket.each do |bucket, ids|
+              redis_job = RedisJob.new(bucket, ids)
+
+              logger.log_debug("pushing job to redis, items count: #{ids.count}: #{redis_job}")
+              conn.call("LPUSH", poll_task.redis_queue, redis_job.serialize)
+            end
+          end
+        end
+      end
+    end
+  end
+end
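
To make the hand-off concrete: fetch_items returns a {bucket => [ids]} buffer, and push_to_redis turns each bucket into one serialized RedisJob pushed onto the box's "<box_name>:job_queue" list. A sketch of that final step in isolation, assuming the gem is loaded (bucket names and ids are made up):

    require "sbmt/outbox/v2/redis_job"

    # {bucket => ordered item ids}, as produced by Poller#fetch_items
    poll_buffer = {"0" => [101, 105], "3" => [102]}

    poll_buffer.each do |bucket, ids|
      job = Sbmt::Outbox::V2::RedisJob.new(bucket, ids)
      # each entry becomes one list element, e.g. "0:1700000000:101,105"
      puts job.serialize
    end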
data/lib/sbmt/outbox/v2/processor.rb
@@ -0,0 +1,101 @@
+# frozen_string_literal: true
+
+require "redlock"
+require "sbmt/outbox/v2/box_processor"
+require "sbmt/outbox/v2/redis_job"
+require "sbmt/outbox/v2/tasks/process"
+
+module Sbmt
+  module Outbox
+    module V2
+      class Processor < BoxProcessor
+        delegate :processor_config, :batch_process_middlewares, :logger, to: "Sbmt::Outbox"
+        attr_reader :lock_timeout, :brpop_delay
+
+        def initialize(
+          boxes,
+          threads_count: nil,
+          lock_timeout: nil,
+          brpop_delay: nil,
+          redis: nil
+        )
+          @lock_timeout = lock_timeout || processor_config.general_timeout
+          @brpop_delay = brpop_delay || processor_config.brpop_delay
+
+          super(boxes: boxes, threads_count: threads_count || processor_config.threads_count, name: "processor", redis: redis)
+        end
+
+        def process_task(_worker_number, task)
+          middlewares = Middleware::Builder.new(batch_process_middlewares)
+          middlewares.call(task) { process(task) }
+        end
+
+        private
+
+        def build_task_queue(boxes)
+          # queue size is: boxes_count * threads_count
+          # to simplify scheduling per box
+          tasks = boxes.map do |item_class|
+            (0...threads_count)
+              .to_a
+              .map { Tasks::Base.new(item_class: item_class, worker_name: worker_name) }
+          end.flatten
+
+          tasks.shuffle!
+          Queue.new.tap { |queue| tasks.each { |task| queue << task } }
+        end
+
+        def lock_task(scheduled_task)
+          redis_job = fetch_redis_job(scheduled_task)
+          return yield(nil) if redis_job.blank?
+
+          processor_task = Tasks::Process.new(
+            item_class: scheduled_task.item_class,
+            worker_name: worker_name,
+            bucket: redis_job.bucket,
+            ids: redis_job.ids
+          )
+          lock_manager.lock("#{processor_task.resource_path}:lock", lock_timeout * 1000) do |locked|
+            lock_status = locked ? "locked" : "skipped"
+            logger.log_debug("processor: lock for #{processor_task}: #{lock_status}")
+
+            yield(locked ? processor_task : nil)
+          end
+        end
+
+        def process(task)
+          lock_timer = Cutoff.new(lock_timeout)
+          last_id = 0
+
+          box_worker.item_execution_runtime.measure(task.yabeda_labels) do
+            Outbox.database_switcher.use_master do
+              task.ids.each do |id|
+                ProcessItem.call(task.item_class, id, worker_version: task.yabeda_labels[:worker_version])
+
+                box_worker.job_items_counter.increment(task.yabeda_labels)
+                last_id = id
+                lock_timer.checkpoint!
+              end
+            end
+          end
+        rescue Cutoff::CutoffExceededError
+          box_worker.job_timeout_counter.increment(task.yabeda_labels)
+          logger.log_info("Lock timeout while processing #{task.resource_key} at id #{last_id}")
+        end
+
+        def fetch_redis_job(scheduled_task)
+          _queue, result = redis.blocking_call(redis_block_timeout, "BRPOP", "#{scheduled_task.item_class.box_name}:job_queue", brpop_delay)
+          return if result.blank?
+
+          RedisJob.deserialize!(result)
+        rescue => ex
+          logger.log_error("error while fetching redis job: #{ex.message}")
+        end
+
+        def redis_block_timeout
+          redis.read_timeout + brpop_delay
+        end
+      end
+    end
+  end
+end
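
One subtlety in fetch_redis_job: BRPOP may block server-side for up to brpop_delay seconds, so redis_block_timeout presumably pads the client-side timeout with the client's own read_timeout, keeping the socket read from giving up while the server is still legitimately blocking. Illustrative arithmetic:

    read_timeout = 1.0 # seconds: the redis client's socket read timeout
    brpop_delay  = 2.0 # seconds: how long BRPOP may block on the server
    redis_block_timeout = read_timeout + brpop_delay # => 3.0, passed to blocking_call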
data/lib/sbmt/outbox/v2/redis_job.rb
@@ -0,0 +1,42 @@
+# frozen_string_literal: true
+
+module Sbmt
+  module Outbox
+    module V2
+      class RedisJob
+        attr_reader :bucket, :timestamp, :ids
+
+        GENERIC_SEPARATOR = ":"
+        IDS_SEPARATOR = ","
+
+        def initialize(bucket, ids, timestamp = Time.current.to_i)
+          @bucket = bucket
+          @ids = ids
+          @timestamp = timestamp
+        end
+
+        def to_s
+          serialize
+        end
+
+        def serialize
+          [bucket, timestamp, ids.join(IDS_SEPARATOR)].join(GENERIC_SEPARATOR)
+        end
+
+        def self.deserialize!(value)
+          raise "invalid data type: string is required" unless value.is_a?(String)
+
+          bucket, ts_utc, ids_str, _ = value.split(GENERIC_SEPARATOR)
+          raise "invalid data format: bucket or ids are not valid" if bucket.blank? || ts_utc.blank? || ids_str.blank?
+
+          ts = ts_utc.to_i
+
+          ids = ids_str.split(IDS_SEPARATOR).map(&:to_i)
+          raise "invalid data format: IDs are empty" if ids.blank?
+
+          new(bucket, ids, ts)
+        end
+      end
+    end
+  end
+end
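
A round-trip through the wire format, for illustration (the timestamp is arbitrary). Since deserialize! splits on ":", bucket values must not contain the separator themselves:

    require "sbmt/outbox/v2/redis_job"

    job = Sbmt::Outbox::V2::RedisJob.new("4", [17, 18, 19], 1_700_000_000)
    payload = job.serialize # => "4:1700000000:17,18,19"

    restored = Sbmt::Outbox::V2::RedisJob.deserialize!(payload)
    restored.bucket    # => "4"
    restored.timestamp # => 1700000000
    restored.ids       # => [17, 18, 19]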
data/lib/sbmt/outbox/v2/tasks/base.rb
@@ -0,0 +1,48 @@
+# frozen_string_literal: true
+
+module Sbmt
+  module Outbox
+    module V2
+      module Tasks
+        class Base
+          attr_reader :item_class, :worker_name, :worker_version, :log_tags, :yabeda_labels
+
+          def initialize(item_class:, worker_name:, worker_version: 2)
+            @item_class = item_class
+            @worker_name = worker_name
+            @worker_version = worker_version
+
+            @log_tags = {
+              box_type: item_class.box_type,
+              box_name: item_class.box_name,
+              worker_name: worker_name,
+              worker_version: worker_version
+            }
+
+            @yabeda_labels = {
+              type: item_class.box_type,
+              name: metric_safe(item_class.box_name),
+              worker_version: 2,
+              worker_name: worker_name
+            }
+          end
+
+          def to_h
+            result = {}
+            instance_variables.each do |iv|
+              iv = iv.to_s[1..]
+              result[iv.to_sym] = instance_variable_get(:"@#{iv}")
+            end
+            result
+          end
+
+          private
+
+          def metric_safe(str)
+            str.tr("/", "-")
+          end
+        end
+      end
+    end
+  end
+end
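
metric_safe exists because box names can be namespaced paths such as "billing/order_items"; replacing slashes keeps the name usable as a metrics label value (presumably for the Yabeda backend). For example:

    "billing/order_items".tr("/", "-") # => "billing-order_items"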
data/lib/sbmt/outbox/v2/tasks/default.rb
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/tasks/base"
+
+module Sbmt
+  module Outbox
+    module V2
+      module Tasks
+        class Default < Base
+          def to_s
+            "#{item_class.box_type}/#{item_class.box_name}"
+          end
+        end
+      end
+    end
+  end
+end
data/lib/sbmt/outbox/v2/tasks/poll.rb
@@ -0,0 +1,34 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/tasks/base"
+
+module Sbmt
+  module Outbox
+    module V2
+      module Tasks
+        class Poll < Base
+          attr_reader :partition, :buckets, :resource_key, :resource_path, :redis_queue
+
+          def initialize(item_class:, worker_name:, partition:, buckets:)
+            super(item_class: item_class, worker_name: worker_name)
+
+            @partition = partition
+            @buckets = buckets
+
+            @resource_key = "#{item_class.box_name}:#{partition}"
+            @resource_path = "sbmt:outbox:#{worker_name}:#{resource_key}"
+            @redis_queue = "#{item_class.box_name}:job_queue"
+
+            @log_tags = log_tags.merge(box_partition: partition)
+
+            @yabeda_labels = yabeda_labels.merge(partition: partition)
+          end
+
+          def to_s
+            resource_path
+          end
+        end
+      end
+    end
+  end
+end
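
For a concrete sense of the naming scheme, here is a sketch using a hypothetical minimal item class in place of a real outbox model (the Struct below only provides the two accessors Tasks::Base reads):

    require "sbmt/outbox/v2/tasks/poll"

    # Hypothetical stand-in for an outbox item class.
    item_class = Struct.new(:box_type, :box_name).new(:outbox, "my_box")

    task = Sbmt::Outbox::V2::Tasks::Poll.new(
      item_class: item_class, worker_name: "poller", partition: 1, buckets: ["1", "5"]
    )
    task.resource_key  # => "my_box:1"
    task.resource_path # => "sbmt:outbox:poller:my_box:1" (redlock locks "...:lock")
    task.redis_queue   # => "my_box:job_queue"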
data/lib/sbmt/outbox/v2/tasks/process.rb
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+require "sbmt/outbox/v2/tasks/base"
+
+module Sbmt
+  module Outbox
+    module V2
+      module Tasks
+        class Process < Base
+          attr_reader :partition, :bucket, :ids, :resource_key, :resource_path
+
+          def initialize(item_class:, worker_name:, bucket:, ids:)
+            super(item_class: item_class, worker_name: worker_name)
+
+            @bucket = bucket
+            @ids = ids
+
+            @resource_key = "#{item_class.box_name}:#{bucket}"
+            @resource_path = "sbmt:outbox:#{worker_name}:#{resource_key}"
+
+            @log_tags = log_tags.merge(bucket: bucket)
+          end
+
+          def to_s
+            resource_path
+          end
+        end
+      end
+    end
+  end
+end