sbmt-outbox 5.0.4 → 6.0.1

Files changed (43)
  1. checksums.yaml +4 -4
  2. data/README.md +56 -7
  3. data/app/interactors/sbmt/outbox/process_item.rb +2 -1
  4. data/app/interactors/sbmt/outbox/retry_strategies/base.rb +15 -0
  5. data/app/interactors/sbmt/outbox/retry_strategies/compacted_log.rb +2 -32
  6. data/app/interactors/sbmt/outbox/retry_strategies/exponential_backoff.rb +3 -5
  7. data/app/interactors/sbmt/outbox/retry_strategies/latest_available.rb +39 -0
  8. data/app/interactors/sbmt/outbox/retry_strategies/no_delay.rb +13 -0
  9. data/app/models/sbmt/outbox/base_item.rb +9 -8
  10. data/app/models/sbmt/outbox/base_item_config.rb +11 -2
  11. data/config/initializers/yabeda.rb +32 -5
  12. data/lib/generators/helpers/migration.rb +2 -2
  13. data/lib/sbmt/outbox/cli.rb +50 -7
  14. data/lib/sbmt/outbox/engine.rb +25 -0
  15. data/lib/sbmt/outbox/logger.rb +6 -0
  16. data/lib/sbmt/outbox/v1/thread_pool.rb +110 -0
  17. data/lib/sbmt/outbox/v1/throttler.rb +54 -0
  18. data/lib/sbmt/outbox/v1/worker.rb +231 -0
  19. data/lib/sbmt/outbox/v2/box_processor.rb +148 -0
  20. data/lib/sbmt/outbox/v2/poll_throttler/base.rb +43 -0
  21. data/lib/sbmt/outbox/v2/poll_throttler/composite.rb +42 -0
  22. data/lib/sbmt/outbox/v2/poll_throttler/fixed_delay.rb +28 -0
  23. data/lib/sbmt/outbox/v2/poll_throttler/noop.rb +17 -0
  24. data/lib/sbmt/outbox/v2/poll_throttler/rate_limited.rb +45 -0
  25. data/lib/sbmt/outbox/v2/poll_throttler/redis_queue_size.rb +46 -0
  26. data/lib/sbmt/outbox/v2/poll_throttler/redis_queue_time_lag.rb +45 -0
  27. data/lib/sbmt/outbox/v2/poll_throttler.rb +49 -0
  28. data/lib/sbmt/outbox/v2/poller.rb +180 -0
  29. data/lib/sbmt/outbox/v2/processor.rb +109 -0
  30. data/lib/sbmt/outbox/v2/redis_job.rb +42 -0
  31. data/lib/sbmt/outbox/v2/tasks/base.rb +48 -0
  32. data/lib/sbmt/outbox/v2/tasks/default.rb +17 -0
  33. data/lib/sbmt/outbox/v2/tasks/poll.rb +34 -0
  34. data/lib/sbmt/outbox/v2/tasks/process.rb +31 -0
  35. data/lib/sbmt/outbox/v2/thread_pool.rb +152 -0
  36. data/lib/sbmt/outbox/v2/throttler.rb +13 -0
  37. data/lib/sbmt/outbox/v2/worker.rb +52 -0
  38. data/lib/sbmt/outbox/version.rb +1 -1
  39. data/lib/sbmt/outbox.rb +16 -2
  40. metadata +40 -4
  41. data/lib/sbmt/outbox/thread_pool.rb +0 -108
  42. data/lib/sbmt/outbox/throttler.rb +0 -52
  43. data/lib/sbmt/outbox/worker.rb +0 -233
data/lib/sbmt/outbox/v2/poll_throttler.rb
@@ -0,0 +1,49 @@
+ # frozen_string_literal: true
+
+ require "sbmt/outbox/v2/poll_throttler/base"
+ require "sbmt/outbox/v2/poll_throttler/composite"
+ require "sbmt/outbox/v2/poll_throttler/rate_limited"
+ require "sbmt/outbox/v2/poll_throttler/fixed_delay"
+ require "sbmt/outbox/v2/poll_throttler/noop"
+ require "sbmt/outbox/v2/poll_throttler/redis_queue_size"
+ require "sbmt/outbox/v2/poll_throttler/redis_queue_time_lag"
+
+ module Sbmt
+   module Outbox
+     module V2
+       module PollThrottler
+         POLL_TACTICS = %w[noop default low-priority aggressive]
+
+         def self.build(tactic, redis, poller_config)
+           raise "WARN: invalid poller poll tactic provided: #{tactic}, available options: #{POLL_TACTICS}" unless POLL_TACTICS.include?(tactic)
+
+           if tactic == "default"
+             # composite of RateLimited & RedisQueueSize (upper bound only)
+             # optimal polling performance for most cases
+             Composite.new(throttlers: [
+               RedisQueueSize.new(redis: redis, max_size: poller_config.max_queue_size, delay: poller_config.queue_delay),
+               RateLimited.new(limit: poller_config.rate_limit, interval: poller_config.rate_interval)
+             ])
+           elsif tactic == "low-priority"
+             # composite of RateLimited & RedisQueueSize (with lower & upper bounds) & RedisQueueTimeLag,
+             # delays polling depending on min job queue size threshold
+             # and also by min redis queue oldest item lag
+             # optimal polling performance for low-intensity data flow
+             Composite.new(throttlers: [
+               RedisQueueSize.new(redis: redis, min_size: poller_config.min_queue_size, max_size: poller_config.max_queue_size, delay: poller_config.queue_delay),
+               RedisQueueTimeLag.new(redis: redis, min_lag: poller_config.min_queue_timelag, delay: poller_config.queue_delay),
+               RateLimited.new(limit: poller_config.rate_limit, interval: poller_config.rate_interval)
+             ])
+           elsif tactic == "aggressive"
+             # throttles only by max job queue size, max polling performance
+             # optimal polling performance for high-intensity data flow
+             RedisQueueSize.new(redis: redis, max_size: poller_config.max_queue_size, delay: poller_config.queue_delay)
+           elsif tactic == "noop"
+             # no-op, for testing purposes
+             Noop.new
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/sbmt/outbox/v2/poller.rb
@@ -0,0 +1,180 @@
+ # frozen_string_literal: true
+
+ require "redlock"
+ require "sbmt/outbox/v2/box_processor"
+ require "sbmt/outbox/v2/redis_job"
+ require "sbmt/outbox/v2/poll_throttler"
+ require "sbmt/outbox/v2/tasks/poll"
+
+ module Sbmt
+   module Outbox
+     module V2
+       class Poller < BoxProcessor
+         delegate :poller_config, :logger, to: "Sbmt::Outbox"
+         delegate :box_worker, to: "Yabeda"
+         attr_reader :partitions_count, :lock_timeout, :regular_items_batch_size, :retryable_items_batch_size, :max_buffer_size, :max_batch_size, :throttler
+
+         def initialize(
+           boxes,
+           partitions_count: nil,
+           threads_count: nil,
+           lock_timeout: nil,
+           regular_items_batch_size: nil,
+           retryable_items_batch_size: nil,
+           throttler_tactic: nil,
+           redis: nil
+         )
+           @partitions_count = partitions_count || poller_config.concurrency
+           @lock_timeout = lock_timeout || poller_config.general_timeout
+
+           @regular_items_batch_size = regular_items_batch_size || poller_config.regular_items_batch_size
+           @retryable_items_batch_size = retryable_items_batch_size || poller_config.retryable_items_batch_size
+           @max_buffer_size = @regular_items_batch_size + @retryable_items_batch_size
+           @max_batch_size = @regular_items_batch_size
+
+           super(boxes: boxes, threads_count: threads_count || poller_config.threads_count, name: "poller", redis: redis)
+
+           @throttler = PollThrottler.build(throttler_tactic || poller_config.tactic || "default", self.redis, poller_config)
+         end
+
+         def throttle(worker_number, poll_task, result)
+           throttler.call(worker_number, poll_task, result)
+         end
+
+         def process_task(_worker_number, task)
+           poll(task)
+         end
+
+         private
+
+         def build_task_queue(boxes)
+           scheduled_tasks = boxes.map do |item_class|
+             schedule_concurrency = (0...partitions_count).to_a
+             schedule_concurrency.map do |partition|
+               buckets = item_class.calc_bucket_partitions(partitions_count).fetch(partition)
+
+               Tasks::Poll.new(
+                 item_class: item_class,
+                 worker_name: worker_name,
+                 partition: partition,
+                 buckets: buckets
+               )
+             end
+           end.flatten
+
+           scheduled_tasks.shuffle!
+           Queue.new.tap { |queue| scheduled_tasks.each { |task| queue << task } }
+         end
+
+         def lock_task(poll_task)
+           lock_manager.lock("#{poll_task.resource_path}:lock", lock_timeout * 1000) do |locked|
+             lock_status = locked ? "locked" : "skipped"
+             logger.log_debug("poller: lock for #{poll_task}: #{lock_status}")
+
+             yield(locked ? poll_task : nil)
+           end
+         end
+
+         def poll(task)
+           lock_timer = Cutoff.new(lock_timeout)
+           last_id = 0
+
+           box_worker.item_execution_runtime.measure(task.yabeda_labels) do
+             Outbox.database_switcher.use_slave do
+               result = fetch_items(task) do |item|
+                 box_worker.job_items_counter.increment(task.yabeda_labels)
+
+                 last_id = item.id
+                 lock_timer.checkpoint!
+               end
+
+               logger.log_debug("poll task #{task}: fetched buckets:#{result.keys.count}, items:#{result.values.sum(0) { |ids| ids.count }}")
+
+               push_to_redis(task, result) if result.present?
+             end
+           end
+         rescue Cutoff::CutoffExceededError
+           box_worker.job_timeout_counter.increment(task.yabeda_labels)
+           logger.log_info("Lock timeout while processing #{task.resource_key} at id #{last_id}")
+         end
+
+         def fetch_items(task)
+           regular_count = 0
+           retryable_count = 0
+
+           # single buffer to preserve item's positions
+           poll_buffer = {}
+
+           fetch_items_with_retries(task, max_batch_size).each do |item|
+             if item.errors_count > 0
+               # skip if retryable buffer capacity limit reached
+               next if retryable_count >= retryable_items_batch_size
+
+               poll_buffer[item.bucket] ||= []
+               poll_buffer[item.bucket] << item.id
+
+               retryable_count += 1
+             else
+               poll_buffer[item.bucket] ||= []
+               poll_buffer[item.bucket] << item.id
+
+               regular_count += 1
+             end
+
+             yield(item)
+           end
+
+           box_worker.batches_per_poll_counter.increment(task.yabeda_labels)
+
+           return {} if poll_buffer.blank?
+
+           # regular items have priority over retryable ones
+           return poll_buffer if regular_count >= regular_items_batch_size
+
+           # additionally poll regular items only when retryable buffer capacity limit reached
+           # and no regular items were found
+           if retryable_count >= retryable_items_batch_size && regular_count == 0
+             fetch_regular_items(task, regular_items_batch_size).each do |item|
+               poll_buffer[item.bucket] ||= []
+               poll_buffer[item.bucket] << item.id
+
+               yield(item)
+             end
+             box_worker.batches_per_poll_counter.increment(task.yabeda_labels)
+           end
+
+           poll_buffer
+         end
+
+         def fetch_items_with_retries(task, limit)
+           task.item_class
+             .for_processing
+             .where(bucket: task.buckets)
+             .order(id: :asc)
+             .limit(limit)
+             .select(:id, :bucket, :errors_count)
+         end
+
+         def fetch_regular_items(task, limit)
+           task.item_class
+             .for_processing
+             .where(bucket: task.buckets, errors_count: 0)
+             .order(id: :asc)
+             .limit(limit)
+             .select(:id, :bucket)
+         end
+
+         def push_to_redis(poll_task, ids_per_bucket)
+           redis.pipelined do |conn|
+             ids_per_bucket.each do |bucket, ids|
+               redis_job = RedisJob.new(bucket, ids)
+
+               logger.log_debug("pushing job to redis, items count: #{ids.count}: #{redis_job}")
+               conn.call("LPUSH", poll_task.redis_queue, redis_job.serialize)
+             end
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/sbmt/outbox/v2/processor.rb
@@ -0,0 +1,109 @@
+ # frozen_string_literal: true
+
+ require "redlock"
+ require "sbmt/outbox/v2/box_processor"
+ require "sbmt/outbox/v2/redis_job"
+ require "sbmt/outbox/v2/tasks/process"
+
+ module Sbmt
+   module Outbox
+     module V2
+       class Processor < BoxProcessor
+         delegate :processor_config, :batch_process_middlewares, :logger, to: "Sbmt::Outbox"
+         attr_reader :lock_timeout, :brpop_delay
+
+         REDIS_BRPOP_MIN_DELAY = 0.1
+
+         def initialize(
+           boxes,
+           threads_count: nil,
+           lock_timeout: nil,
+           brpop_delay: nil,
+           redis: nil
+         )
+           @lock_timeout = lock_timeout || processor_config.general_timeout
+           @brpop_delay = brpop_delay || redis_brpop_delay(boxes.count, processor_config.brpop_delay)
+
+           super(boxes: boxes, threads_count: threads_count || processor_config.threads_count, name: "processor", redis: redis)
+         end
+
+         def process_task(_worker_number, task)
+           middlewares = Middleware::Builder.new(batch_process_middlewares)
+           middlewares.call(task) { process(task) }
+         end
+
+         private
+
+         def build_task_queue(boxes)
+           # queue size is: boxes_count * threads_count
+           # to simplify scheduling per box
+           tasks = boxes.map do |item_class|
+             (0...threads_count)
+               .to_a
+               .map { Tasks::Base.new(item_class: item_class, worker_name: worker_name) }
+           end.flatten
+
+           tasks.shuffle!
+           Queue.new.tap { |queue| tasks.each { |task| queue << task } }
+         end
+
+         def lock_task(scheduled_task)
+           redis_job = fetch_redis_job(scheduled_task)
+           return yield(nil) if redis_job.blank?
+
+           processor_task = Tasks::Process.new(
+             item_class: scheduled_task.item_class,
+             worker_name: worker_name,
+             bucket: redis_job.bucket,
+             ids: redis_job.ids
+           )
+           lock_manager.lock("#{processor_task.resource_path}:lock", lock_timeout * 1000) do |locked|
+             lock_status = locked ? "locked" : "skipped"
+             logger.log_debug("processor: lock for #{processor_task}: #{lock_status}")
+
+             yield(locked ? processor_task : nil)
+           end
+         end
+
+         def process(task)
+           lock_timer = Cutoff.new(lock_timeout)
+           last_id = 0
+
+           box_worker.item_execution_runtime.measure(task.yabeda_labels) do
+             Outbox.database_switcher.use_master do
+               task.ids.each do |id|
+                 ProcessItem.call(task.item_class, id, worker_version: task.yabeda_labels[:worker_version])
+
+                 box_worker.job_items_counter.increment(task.yabeda_labels)
+                 last_id = id
+                 lock_timer.checkpoint!
+               end
+             end
+           end
+         rescue Cutoff::CutoffExceededError
+           box_worker.job_timeout_counter.increment(task.yabeda_labels)
+           logger.log_info("Lock timeout while processing #{task.resource_key} at id #{last_id}")
+         end
+
+         def fetch_redis_job(scheduled_task)
+           _queue, result = redis.blocking_call(redis_block_timeout, "BRPOP", "#{scheduled_task.item_class.box_name}:job_queue", brpop_delay)
+           return if result.blank?
+
+           RedisJob.deserialize!(result)
+         rescue => ex
+           logger.log_error("error while fetching redis job: #{ex.message}")
+         end
+
+         def redis_block_timeout
+           redis.read_timeout + brpop_delay
+         end
+
+         def redis_brpop_delay(boxes_count, default_delay)
+           return default_delay if boxes_count == 1
+
+           REDIS_BRPOP_MIN_DELAY
+         end
+       end
+     end
+   end
+ end
data/lib/sbmt/outbox/v2/redis_job.rb
@@ -0,0 +1,42 @@
+ # frozen_string_literal: true
+
+ module Sbmt
+   module Outbox
+     module V2
+       class RedisJob
+         attr_reader :bucket, :timestamp, :ids
+
+         GENERIC_SEPARATOR = ":"
+         IDS_SEPARATOR = ","
+
+         def initialize(bucket, ids, timestamp = Time.current.to_i)
+           @bucket = bucket
+           @ids = ids
+           @timestamp = timestamp
+         end
+
+         def to_s
+           serialize
+         end
+
+         def serialize
+           [bucket, timestamp, ids.join(IDS_SEPARATOR)].join(GENERIC_SEPARATOR)
+         end
+
+         def self.deserialize!(value)
+           raise "invalid data type: string is required" unless value.is_a?(String)
+
+           bucket, ts_utc, ids_str, _ = value.split(GENERIC_SEPARATOR)
+           raise "invalid data format: bucket or ids are not valid" if bucket.blank? || ts_utc.blank? || ids_str.blank?
+
+           ts = ts_utc.to_i
+
+           ids = ids_str.split(IDS_SEPARATOR).map(&:to_i)
+           raise "invalid data format: IDs are empty" if ids.blank?
+
+           new(bucket, ids, ts)
+         end
+       end
+     end
+   end
+ end
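A quick round trip through the wire format defined above (bucket, enqueue timestamp, and comma-separated item ids joined with ":"), as it would look in a console with the gem loaded; the values are arbitrary:

job = Sbmt::Outbox::V2::RedisJob.new("3", [11, 12, 13], 1700000000)
job.serialize
# => "3:1700000000:11,12,13"   # this is what the poller LPUSHes onto "<box_name>:job_queue"

restored = Sbmt::Outbox::V2::RedisJob.deserialize!("3:1700000000:11,12,13")
restored.bucket     # => "3"
restored.timestamp  # => 1700000000
restored.ids        # => [11, 12, 13]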
data/lib/sbmt/outbox/v2/tasks/base.rb
@@ -0,0 +1,48 @@
+ # frozen_string_literal: true
+
+ module Sbmt
+   module Outbox
+     module V2
+       module Tasks
+         class Base
+           attr_reader :item_class, :worker_name, :worker_version, :log_tags, :yabeda_labels
+
+           def initialize(item_class:, worker_name:, worker_version: 2)
+             @item_class = item_class
+             @worker_name = worker_name
+             @worker_version = worker_version
+
+             @log_tags = {
+               box_type: item_class.box_type,
+               box_name: item_class.box_name,
+               worker_name: worker_name,
+               worker_version: worker_version
+             }
+
+             @yabeda_labels = {
+               type: item_class.box_type,
+               name: metric_safe(item_class.box_name),
+               worker_version: 2,
+               worker_name: worker_name
+             }
+           end
+
+           def to_h
+             result = {}
+             instance_variables.each do |iv|
+               iv = iv.to_s[1..]
+               result[iv.to_sym] = instance_variable_get(:"@#{iv}")
+             end
+             result
+           end
+
+           private
+
+           def metric_safe(str)
+             str.tr("/", "-")
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/sbmt/outbox/v2/tasks/default.rb
@@ -0,0 +1,17 @@
+ # frozen_string_literal: true
+
+ require "sbmt/outbox/v2/tasks/base"
+
+ module Sbmt
+   module Outbox
+     module V2
+       module Tasks
+         class Default < Base
+           def to_s
+             "#{item_class.box_type}/#{item_class.box_name}"
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/sbmt/outbox/v2/tasks/poll.rb
@@ -0,0 +1,34 @@
+ # frozen_string_literal: true
+
+ require "sbmt/outbox/v2/tasks/base"
+
+ module Sbmt
+   module Outbox
+     module V2
+       module Tasks
+         class Poll < Base
+           attr_reader :partition, :buckets, :resource_key, :resource_path, :redis_queue
+
+           def initialize(item_class:, worker_name:, partition:, buckets:)
+             super(item_class: item_class, worker_name: worker_name)
+
+             @partition = partition
+             @buckets = buckets
+
+             @resource_key = "#{item_class.box_name}:#{partition}"
+             @resource_path = "sbmt:outbox:#{worker_name}:#{resource_key}"
+             @redis_queue = "#{item_class.box_name}:job_queue"
+
+             @log_tags = log_tags.merge(box_partition: partition)
+
+             @yabeda_labels = yabeda_labels.merge(partition: partition)
+           end
+
+           def to_s
+             resource_path
+           end
+         end
+       end
+     end
+   end
+ end
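To make the key layout concrete: for a hypothetical box whose box_name is "my_outbox_item", a poll task for partition 0 resolves to the keys below. The Struct stand-in is just enough for key construction and is illustrative only; real boxes are the outbox item models:

require "sbmt/outbox/v2/tasks/poll"

# Hypothetical minimal item-class stand-in exposing box_type and box_name
fake_item_class = Struct.new(:box_type, :box_name).new(:outbox, "my_outbox_item")

task = Sbmt::Outbox::V2::Tasks::Poll.new(
  item_class: fake_item_class, worker_name: "poller", partition: 0, buckets: ["0", "4"]
)

task.resource_key   # => "my_outbox_item:0"
task.resource_path  # => "sbmt:outbox:poller:my_outbox_item:0"  (the resource the poller locks via Redlock)
task.redis_queue    # => "my_outbox_item:job_queue"             (the list the poller pushes RedisJobs to)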
data/lib/sbmt/outbox/v2/tasks/process.rb
@@ -0,0 +1,31 @@
+ # frozen_string_literal: true
+
+ require "sbmt/outbox/v2/tasks/base"
+
+ module Sbmt
+   module Outbox
+     module V2
+       module Tasks
+         class Process < Base
+           attr_reader :partition, :bucket, :ids, :resource_key, :resource_path
+
+           def initialize(item_class:, worker_name:, bucket:, ids:)
+             super(item_class: item_class, worker_name: worker_name)
+
+             @bucket = bucket
+             @ids = ids
+
+             @resource_key = "#{item_class.box_name}:#{bucket}"
+             @resource_path = "sbmt:outbox:#{worker_name}:#{resource_key}"
+
+             @log_tags = log_tags.merge(bucket: bucket)
+           end
+
+           def to_s
+             resource_path
+           end
+         end
+       end
+     end
+   end
+ end
data/lib/sbmt/outbox/v2/thread_pool.rb
@@ -0,0 +1,152 @@
+ # frozen_string_literal: true
+
+ module Sbmt
+   module Outbox
+     module V2
+       class ThreadPool
+         delegate :logger, to: "Sbmt::Outbox"
+
+         BREAK = Object.new.freeze
+         SKIPPED = Object.new.freeze
+         PROCESSED = Object.new.freeze
+
+         def initialize(concurrency:, name: "thread_pool", random_startup_delay: true, start_async: true, &block)
+           self.concurrency = concurrency
+           self.name = name
+           self.random_startup_delay = random_startup_delay
+           self.start_async = start_async
+           self.task_source = block
+           self.task_mutex = Mutex.new
+           self.stopped = true
+           self.threads = Concurrent::Array.new
+         end
+
+         def next_task
+           task_mutex.synchronize do
+             return if stopped
+             task = task_source.call
+
+             if task == BREAK
+               self.stopped = true
+               return
+             end
+
+             task
+           end
+         end
+
+         def start
+           self.stopped = false
+
+           mode = start_async ? "async" : "sync"
+           logger.log_info("#{name}: starting #{concurrency} threads in #{mode} mode")
+
+           result = run_threads do |task|
+             logger.with_tags(worker: worker_number) do
+               yield worker_number, task
+             end
+           end
+
+           logger.log_info("#{name}: threads started")
+
+           raise result if result.is_a?(Exception)
+         end
+
+         def stop
+           self.stopped = true
+
+           threads.map(&:join) if start_async
+         ensure
+           stop_threads
+         end
+
+         def running?
+           return false if stopped
+
+           true
+         end
+
+         def alive?(timeout)
+           return false if stopped
+
+           deadline = Time.current - timeout
+           threads.all? do |thread|
+             last_active_at = last_active_at(thread)
+             return false unless last_active_at
+
+             deadline < last_active_at
+           end
+         end
+
+         private
+
+         attr_accessor :concurrency, :name, :random_startup_delay, :task_source, :task_mutex, :stopped, :start_async, :threads
+
+         def touch_worker!
+           self.last_active_at = Time.current
+         end
+
+         def worker_number(thread = Thread.current)
+           thread.thread_variable_get("#{name}_worker_number:#{object_id}")
+         end
+
+         def last_active_at(thread = Thread.current)
+           thread.thread_variable_get("#{name}_last_active_at:#{object_id}")
+         end
+
+         def run_threads
+           exception = nil
+
+           in_threads do |worker_num|
+             self.worker_number = worker_num
+             # We don't want to start all threads at the same time
+             sleep(rand * (worker_num + 1)) if random_startup_delay
+
+             touch_worker!
+
+             until exception
+               task = next_task
+               break unless task
+
+               touch_worker!
+
+               begin
+                 yield task
+               rescue Exception => e # rubocop:disable Lint/RescueException
+                 exception = e
+               end
+             end
+           end
+
+           exception
+         end
+
+         def in_threads
+           Thread.handle_interrupt(Exception => :never) do
+             Thread.handle_interrupt(Exception => :immediate) do
+               concurrency.times do |i|
+                 threads << Thread.new { yield(i) }
+               end
+               threads.map(&:value) unless start_async
+             end
+           ensure
+             stop_threads unless start_async
+           end
+         end
+
+         def stop_threads
+           threads.each(&:kill)
+           threads.clear
+         end
+
+         def worker_number=(num)
+           Thread.current.thread_variable_set("#{name}_worker_number:#{object_id}", num)
+         end
+
+         def last_active_at=(at)
+           Thread.current.thread_variable_set("#{name}_last_active_at:#{object_id}", at)
+         end
+       end
+     end
+   end
+ end
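A minimal usage sketch for the pool above, run inside an app where the engine is loaded (the class delegates logging to Sbmt::Outbox.logger): the constructor block is the task source and may return BREAK to stop, while the block passed to start receives the worker number and the task. Since next_task serializes calls to the task source under a mutex, the queue check/pop pair below is race-free:

require "sbmt/outbox/v2/thread_pool"

queue = Queue.new
5.times { |i| queue << "job-#{i}" }

pool = Sbmt::Outbox::V2::ThreadPool.new(
  concurrency: 2, name: "example", start_async: false, random_startup_delay: false
) do
  queue.empty? ? Sbmt::Outbox::V2::ThreadPool::BREAK : queue.pop
end

# start_async: false blocks until every worker has drained the queue and hit BREAK
pool.start do |worker_number, task|
  puts "worker #{worker_number} processed #{task}"
end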
data/lib/sbmt/outbox/v2/throttler.rb
@@ -0,0 +1,13 @@
+ # frozen_string_literal: true
+
+ module Sbmt
+   module Outbox
+     module V2
+       module Throttler
+         THROTTLE_STATUS = "throttle"
+         SKIP_STATUS = "skip"
+         NOOP_STATUS = "noop"
+       end
+     end
+   end
+ end