karafka 2.2.11 → 2.2.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +2 -4
  3. data/CHANGELOG.md +12 -0
  4. data/Gemfile.lock +13 -13
  5. data/config/locales/errors.yml +3 -1
  6. data/docker-compose.yml +1 -1
  7. data/karafka.gemspec +2 -2
  8. data/lib/karafka/connection/client.rb +77 -11
  9. data/lib/karafka/connection/consumer_group_coordinator.rb +3 -3
  10. data/lib/karafka/connection/listener.rb +30 -7
  11. data/lib/karafka/connection/listeners_batch.rb +6 -1
  12. data/lib/karafka/contracts/config.rb +5 -1
  13. data/lib/karafka/helpers/interval_runner.rb +39 -0
  14. data/lib/karafka/instrumentation/notifications.rb +1 -0
  15. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +1 -9
  16. data/lib/karafka/pro/loader.rb +2 -1
  17. data/lib/karafka/pro/processing/coordinator.rb +12 -6
  18. data/lib/karafka/pro/processing/jobs_queue.rb +109 -0
  19. data/lib/karafka/pro/processing/scheduler.rb +2 -3
  20. data/lib/karafka/pro/processing/strategies/default.rb +2 -0
  21. data/lib/karafka/pro/processing/strategies/lrj/default.rb +9 -0
  22. data/lib/karafka/pro/processing/strategies/vp/default.rb +8 -4
  23. data/lib/karafka/processing/coordinator.rb +13 -7
  24. data/lib/karafka/processing/inline_insights/consumer.rb +2 -0
  25. data/lib/karafka/processing/jobs_queue.rb +41 -13
  26. data/lib/karafka/processing/scheduler.rb +19 -3
  27. data/lib/karafka/processing/strategies/default.rb +2 -0
  28. data/lib/karafka/processing/timed_queue.rb +62 -0
  29. data/lib/karafka/routing/builder.rb +32 -17
  30. data/lib/karafka/routing/subscription_group.rb +11 -6
  31. data/lib/karafka/runner.rb +1 -1
  32. data/lib/karafka/setup/config.rb +13 -1
  33. data/lib/karafka/version.rb +1 -1
  34. data/lib/karafka.rb +0 -1
  35. data.tar.gz.sig +0 -0
  36. metadata +9 -6
  37. metadata.gz.sig +0 -0
data/lib/karafka/pro/processing/jobs_queue.rb ADDED
@@ -0,0 +1,109 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component under a commercial license.
+ # This Karafka component is NOT licensed under LGPL.
+ #
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     module Processing
+       # Enhanced processing queue that provides the ability to build complex work-distribution
+       # schedulers dedicated to particular job types
+       #
+       # Aside from the OSS queue capabilities it allows for jobless locking for advanced schedulers
+       class JobsQueue < Karafka::Processing::JobsQueue
+         attr_accessor :in_processing
+
+         # @return [Karafka::Pro::Processing::JobsQueue]
+         def initialize
+           super
+
+           @in_waiting = Hash.new { |h, k| h[k] = [] }
+
+           @statistics[:waiting] = 0
+         end
+
+         # Method that allows us to lock the queue on a given subscription group without enqueuing a
+         # job. This can be used when building complex schedulers that want to postpone enqueuing
+         # before certain conditions are met.
+         #
+         # @param job [Jobs::Base] job used for locking
+         def lock(job)
+           @mutex.synchronize do
+             group = @in_waiting[job.group_id]
+
+             # This should never happen. Same job should not be locked twice
+             raise(Errors::JobsQueueSynchronizationError, job.group_id) if group.include?(job)
+
+             @statistics[:waiting] += 1
+
+             group << job
+           end
+         end
+
+         # Method for unlocking the given subscription group queue space that was locked with a
+         # given job that was **not** added to the queue but used via `#lock`.
+         #
+         # @param job [Jobs::Base] job that locked the queue
+         def unlock(job)
+           @mutex.synchronize do
+             @statistics[:waiting] -= 1
+
+             return if @in_waiting[job.group_id].delete(job)
+
+             # This should never happen. It means there was a job being unlocked that was never
+             # locked in the first place
+             raise(Errors::JobsQueueSynchronizationError, job.group_id)
+           end
+         end
+
+         # Clears the processing states for a provided group. Useful when a recovery happens and we
+         # need to clean up state but only for a given subscription group.
+         #
+         # @param group_id [String]
+         def clear(group_id)
+           @mutex.synchronize do
+             @in_processing[group_id].clear
+
+             @statistics[:waiting] -= @in_waiting[group_id].size
+             @in_waiting[group_id].clear
+
+             # We unlock it just in case it was blocked when clearing started
+             tick(group_id)
+           end
+         end
+
+         # @param group_id [String]
+         #
+         # @return [Boolean] tells us if we have anything in the processing (or for processing) from
+         #   a given group.
+         def empty?(group_id)
+           @mutex.synchronize do
+             @in_processing[group_id].empty? &&
+               @in_waiting[group_id].empty?
+           end
+         end
+
+         private
+
+         # @param group_id [String] id of the group in which jobs we're interested.
+         # @return [Boolean] should we keep waiting or not
+         # @note We do not wait for non-blocking jobs. Their flow should allow for `poll` running
+         #   as they may exceed `max.poll.interval`
+         def wait?(group_id)
+           !(
+             @in_processing[group_id].all?(&:non_blocking?) &&
+             @in_waiting[group_id].all?(&:non_blocking?)
+           )
+         end
+       end
+     end
+   end
+ end
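
The new `#lock`/`#unlock` pair lets an advanced scheduler hold a subscription group in the waiting state without putting a job on the queue. A minimal sketch of how a custom Pro scheduler might defer enqueuing until some condition is met; `DeferringScheduler`, `@pending` and `ready?` are illustrative names, and it assumes `manage` is invoked by the framework on ticks (as the new no-op `manage` in the FIFO scheduler below suggests), while `lock`, `unlock` and `@queue` come from the classes in this diff:

    # Hypothetical scheduler deferring jobs until ready?(job) holds
    class DeferringScheduler < Karafka::Pro::Processing::Scheduler
      def initialize(queue)
        super
        @pending = []
      end

      def schedule_consumption(jobs_array)
        jobs_array.each do |job|
          if ready?(job)
            @queue << job
          else
            # Keeps the group polling-safe without running the job yet
            @queue.lock(job)
            @pending << job
          end
        end
      end

      # Releases deferred jobs that became ready since the last tick
      def manage
        @pending.select { |job| ready?(job) }.each do |job|
          @queue.unlock(job)
          @queue << job
          @pending.delete(job)
        end
      end
    end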

data/lib/karafka/pro/processing/scheduler.rb CHANGED
@@ -27,10 +27,9 @@ module Karafka
        class Scheduler < ::Karafka::Processing::Scheduler
          # Schedules jobs in the LJF order for consumption
          #
-         # @param queue [Karafka::Processing::JobsQueue] queue where we want to put the jobs
          # @param jobs_array [Array<Karafka::Processing::Jobs::Base>] jobs we want to schedule
          #
-         def schedule_consumption(queue, jobs_array)
+         def schedule_consumption(jobs_array)
            perf_tracker = PerformanceTracker.instance

            ordered = []
@@ -47,7 +46,7 @@ module Karafka
            ordered.map!(&:first)

            ordered.each do |job|
-             queue << job
+             @queue << job
            end
          end

data/lib/karafka/pro/processing/strategies/default.rb CHANGED
@@ -29,6 +29,8 @@ module Karafka

            # No actions needed for the standard flow here
            def handle_before_enqueue
+             Karafka.monitor.instrument('consumer.before_enqueue', caller: self)
+
              nil
            end

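Since `handle_before_enqueue` now publishes a `consumer.before_enqueue` event (also registered in `notifications.rb` per the file list above), it can be observed through the notifications bus. A small sketch using the public `Karafka.monitor.subscribe` API:

    Karafka.monitor.subscribe('consumer.before_enqueue') do |event|
      consumer = event[:caller]
      Karafka.logger.debug("Before enqueue for #{consumer.topic.name}")
    end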

data/lib/karafka/pro/processing/strategies/lrj/default.rb CHANGED
@@ -77,6 +77,15 @@ module Karafka
                  revoked
                end
              end
+
+             # Allows for LRJ to synchronize its work. It may be needed because LRJ can run
+             # lifecycle events like revocation while the LRJ work is running and there may be a
+             # need for a critical section.
+             #
+             # @param block [Proc] block we want to run in a mutex to prevent race-conditions
+             def synchronize(&block)
+               coordinator.shared_mutex.synchronize(&block)
+             end
            end
          end
        end
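
A hedged sketch of how a Long-Running Job consumer might use this new critical section so that `#revoked`, which can fire while `#consume` is still running, does not interleave with the offset bookkeeping; the consumer class and `process` helper are illustrative:

    class LongRunningConsumer < ApplicationConsumer
      def consume
        messages.each do |message|
          process(message) # assumed long-running work

          # Progress marking guarded by the coordinator shared mutex
          synchronize do
            mark_as_consumed(message) unless revoked?
          end
        end
      end
    end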

data/lib/karafka/pro/processing/strategies/vp/default.rb CHANGED
@@ -94,13 +94,15 @@ module Karafka

             # Allows for cross-virtual-partition consumers locks
             #
-            # This is not needed in the non-VP flows because there is always only one consumer
-            # per partition at the same time, so no coordination is needed directly for the
-            # end users
+            # This is not needed in the non-VP flows except LRJ because there is always only one
+            # consumer per partition at the same time, so no coordination is needed directly for
+            # the end users. With LRJ it is needed and provided in the `LRJ::Default` strategy,
+            # because lifecycle events on revocation can run in parallel to the LRJ job as it is
+            # non-blocking.
             #
             # @param block [Proc] block we want to run in a mutex to prevent race-conditions
             def synchronize(&block)
-              coordinator.synchronize(&block)
+              coordinator.shared_mutex.synchronize(&block)
             end

             private
@@ -111,6 +113,8 @@ module Karafka
             # @note This can be done without the mutex, because it happens from the same thread
             #   for all the work (listener thread)
             def handle_before_enqueue
+              super
+
              coordinator.virtual_offset_manager.register(
                messages.map(&:offset)
              )
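
On the user side this lock is what a Virtual Partitions consumer calls to guard state shared between the parallel consumers of one partition. A minimal sketch, assuming JSON payloads with a numeric 'value' field and a process-local aggregate (both illustrative):

    # Hypothetical aggregate shared across virtual partition consumers
    TOTALS = Hash.new(0)

    class EventsConsumer < ApplicationConsumer
      def consume
        # Each virtual partition consumer sums its slice in parallel
        local_sum = messages.payloads.sum { |payload| payload.fetch('value', 0) }

        # Only the cross-consumer mutation runs under the shared mutex
        synchronize { TOTALS[messages.metadata.partition] += local_sum }
      end
    end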

data/lib/karafka/processing/coordinator.rb CHANGED
@@ -162,11 +162,24 @@ module Karafka
        @manual_seek
      end

+     # @param consumer [Object] karafka consumer (normal or pro)
+     # @return [Karafka::Processing::Result] result object which we can use to indicate
+     #   consumption processing state.
+     def consumption(consumer)
+       @consumptions[consumer] ||= Processing::Result.new
+     end
+
      # Allows to run synchronized (locked) code that can operate only from a given thread
      #
      # @param block [Proc] code we want to run in the synchronized mode
+     #
      # @note We check if mutex is not owned already by the current thread so we won't end up with
      #   a deadlock in case user runs coordinated code from inside of his own lock
+     #
+     # @note This is internal and should **not** be used to synchronize user-facing code.
+     #   Otherwise a user could indirectly cause deadlocks or prolonged locks by running their
+     #   logic. It can and should, however, be used for multi-threaded strategy applications
+     #   and other internal operation locks.
      def synchronize(&block)
        if @mutex.owned?
          yield
@@ -174,13 +187,6 @@ module Karafka
          @mutex.synchronize(&block)
        end
      end
-
-     # @param consumer [Object] karafka consumer (normal or pro)
-     # @return [Karafka::Processing::Result] result object which we can use to indicate
-     #   consumption processing state.
-     def consumption(consumer)
-       @consumptions[consumer] ||= Processing::Result.new
-     end
    end
  end
end

data/lib/karafka/processing/inline_insights/consumer.rb CHANGED
@@ -35,6 +35,8 @@ module Karafka

        alias statistics insights
        alias statistics? insights?
+       alias inline_insights insights
+       alias inline_insights? insights?
      end
    end
  end
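
With the aliases in place, the feature can be referenced under its documented name from within a consumer. A sketch; `consumer_lag` is one of the statistics fields librdkafka reports and is shown here as an assumed example:

    class EventsConsumer < ApplicationConsumer
      def consume
        # inline_insights? is now an alias of insights?
        return unless inline_insights?

        Karafka.logger.info("Consumer lag: #{inline_insights['consumer_lag']}")
      end
    end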

data/lib/karafka/processing/jobs_queue.rb CHANGED
@@ -9,6 +9,9 @@ module Karafka
      # on this queue, that's why internally we keep track of processing per group.
      #
      # We work with the assumption that partition data is evenly distributed.
+     #
+     # @note This job queue also keeps track of / understands the number of busy workers. This is
+     #   because we use a single workers pool that can have granular scheduling.
      class JobsQueue
        # @return [Karafka::Processing::JobsQueue]
        def initialize
@@ -21,21 +24,19 @@ module Karafka
          # We cannot use a single semaphore as it could potentially block in listeners that should
          # process with their data and also could unlock when a given group needs to remain locked
          @semaphores = Concurrent::Map.new do |h, k|
-           h.compute_if_absent(k) { Queue.new }
+           # Ruby prior to 3.2 did not have a queue with a timeout on `#pop`, that is why for
+           # those versions we use our custom queue wrapper
+           h.compute_if_absent(k) { RUBY_VERSION < '3.2' ? TimedQueue.new : Queue.new }
          end

+         @concurrency = Karafka::App.config.concurrency
+         @tick_interval = ::Karafka::App.config.internal.tick_interval
          @in_processing = Hash.new { |h, k| h[k] = [] }
+         @statistics = { busy: 0, enqueued: 0 }

          @mutex = Mutex.new
        end

-       # Returns number of jobs that are either enqueued or in processing (but not finished)
-       # @return [Integer] number of elements in the queue
-       # @note Using `#pop` won't decrease this number as only marking job as completed does this
-       def size
-         @in_processing.values.map(&:size).sum
-       end
-
        # Adds the job to the internal main queue, scheduling it for execution in a worker and marks
        # this job as in processing pipeline.
        #
@@ -52,6 +53,16 @@ module Karafka

            group << job

+           # Assume that moving to queue means being picked up immediately not to create stats
+           # race conditions because of pop overhead. If there are workers available, we assume
+           # work is going to be handled as we never reject enqueued jobs
+           if @statistics[:busy] < @concurrency
+             @statistics[:busy] += 1
+           else
+             # If the system is fully loaded, it means this job is indeed enqueued
+             @statistics[:enqueued] += 1
+           end
+
            @queue << job
          end
        end
@@ -77,7 +88,16 @@ module Karafka
        # @param [Jobs::Base] job that was completed
        def complete(job)
          @mutex.synchronize do
+           # We finish one job and if there is another, we pick it up
+           if @statistics[:enqueued].positive?
+             @statistics[:enqueued] -= 1
+           # If there are no more enqueued jobs, we will just be less busy
+           else
+             @statistics[:busy] -= 1
+           end
+
            @in_processing[job.group_id].delete(job)
+
            tick(job.group_id)
          end
        end
@@ -118,11 +138,19 @@ module Karafka
        # jobs from a given group are completed
        #
        # @param group_id [String] id of the group in which jobs we're interested.
+       # @yieldparam [Block] block we want to run before each pop (in case of Ruby pre-3.2) or
+       #   before each pop and on every tick interval.
+       #   This allows us to run extra code that needs to be executed even when we are waiting on
+       #   the work to be finished.
        # @note This method is blocking.
        def wait(group_id)
          # Go doing other things while we cannot process and wait for anyone to finish their work
          # and re-check the wait status
-         @semaphores[group_id].pop while wait?(group_id)
+         while wait?(group_id)
+           yield if block_given?
+
+           @semaphores[group_id].pop(timeout: @tick_interval / 1_000.0)
+         end
        end

        # - `busy` - number of jobs that are currently being processed (active work)
@@ -130,10 +158,10 @@ module Karafka
        #
        # @return [Hash] hash with basic usage statistics of this queue.
        def statistics
-         {
-           busy: size - @queue.size,
-           enqueued: @queue.size
-         }.freeze
+         # Ensures there are no race conditions when returning this data
+         @mutex.synchronize do
+           @statistics.dup.freeze
+         end
        end

        private
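
The accounting above keeps the invariant that at most `concurrency` jobs count as busy and any overflow counts as enqueued. A standalone model of just that logic, assuming a concurrency of 2:

    concurrency = 2
    stats = { busy: 0, enqueued: 0 }

    # Mirrors JobsQueue#<<: a free worker means the job counts as busy right away
    enqueue = lambda do
      if stats[:busy] < concurrency
        stats[:busy] += 1
      else
        stats[:enqueued] += 1
      end
    end

    # Mirrors JobsQueue#complete: a finishing job hands its worker to a queued one
    complete = lambda do
      if stats[:enqueued].positive?
        stats[:enqueued] -= 1
      else
        stats[:busy] -= 1
      end
    end

    3.times { enqueue.call } # stats == { busy: 2, enqueued: 1 }
    complete.call            # stats == { busy: 2, enqueued: 0 }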

data/lib/karafka/processing/scheduler.rb CHANGED
@@ -4,19 +4,35 @@ module Karafka
  module Processing
    # FIFO scheduler for messages coming from various topics and partitions
    class Scheduler
+     # @param queue [Karafka::Processing::JobsQueue] queue where we want to put the jobs
+     def initialize(queue)
+       @queue = queue
+     end
+
      # Schedules jobs in the fifo order
      #
-     # @param queue [Karafka::Processing::JobsQueue] queue where we want to put the jobs
      # @param jobs_array [Array<Karafka::Processing::Jobs::Base>] jobs we want to schedule
-     def schedule_consumption(queue, jobs_array)
+     def schedule_consumption(jobs_array)
        jobs_array.each do |job|
-         queue << job
+         @queue << job
        end
      end

      # Both revocation and shutdown jobs can also run in fifo by default
      alias schedule_revocation schedule_consumption
      alias schedule_shutdown schedule_consumption
+
+     # This scheduler does not have anything to manage as it is a pass-through and has no state
+     def manage
+       nil
+     end
+
+     # This scheduler does not need to be cleared because it is stateless
+     #
+     # @param _group_id [String] Subscription group id
+     def clear(_group_id)
+       nil
+     end
    end
  end
end
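
With the queue injected in the constructor and `manage`/`clear` as explicit extension points, a custom scheduler only has to override the scheduling hooks. A hedged sketch that favors one topic; the class and topic name are illustrative, and it assumes consumption jobs expose `#messages` as the Pro LJF scheduler relies on:

    class PrioritizedScheduler < Karafka::Processing::Scheduler
      def schedule_consumption(jobs_array)
        prioritized, regular = jobs_array.partition do |job|
          job.messages.metadata.topic == 'payments'
        end

        (prioritized + regular).each { |job| @queue << job }
      end
    end

How such a class is plugged in is shown in the `setup/config.rb` part of this diff below.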

data/lib/karafka/processing/strategies/default.rb CHANGED
@@ -78,6 +78,8 @@ module Karafka

          # No actions needed for the standard flow here
          def handle_before_enqueue
+           Karafka.monitor.instrument('consumer.before_enqueue', caller: self)
+
            nil
          end

data/lib/karafka/processing/timed_queue.rb ADDED
@@ -0,0 +1,62 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Processing
+     # Minimal queue with timeout for Ruby 3.1 and lower.
+     #
+     # It is needed because only since 3.2, Ruby has a timeout on `#pop`
+     class TimedQueue
+       include Karafka::Core::Helpers::Time
+
+       def initialize
+         @queue = Queue.new
+         @mutex = Thread::Mutex.new
+         @resource = Thread::ConditionVariable.new
+       end
+
+       # Adds element to the queue
+       #
+       # @param obj [Object] pushes an element onto the queue
+       def push(obj)
+         @mutex.synchronize do
+           @queue << obj
+           @resource.broadcast
+         end
+       end
+
+       alias << push
+
+       # No timeout means waiting up to 31 years
+       #
+       # @param timeout [Integer] max number of seconds to wait on the pop
+       # @return [Object] element taken from the queue or `nil` on timeout
+       #
+       # @note We use timeout in seconds because this is how Ruby 3.2+ works and we want to have
+       #   the same API for newer and older Ruby versions
+       def pop(timeout: 10_000_000_000)
+         deadline = monotonic_now + timeout * 1000
+
+         @mutex.synchronize do
+           loop do
+             return @queue.pop unless @queue.empty?
+             return @queue.pop if @queue.closed?
+
+             to_wait = (deadline - monotonic_now) / 1_000.0
+
+             return nil if to_wait <= 0
+
+             @resource.wait(@mutex, to_wait)
+           end
+         end
+       end
+
+       # Closes the internal queue and releases the lock
+       def close
+         @mutex.synchronize do
+           @queue.close
+           @resource.broadcast
+         end
+       end
+     end
+   end
+ end
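
The wrapper mirrors the Ruby 3.2+ `Queue#pop(timeout:)` contract: timeout in seconds, `nil` returned on expiry. A small usage sketch:

    queue = Karafka::Processing::TimedQueue.new

    # Producer pushes shortly after the consumer starts waiting
    Thread.new do
      sleep(0.1)
      queue << :job
    end

    queue.pop(timeout: 1)    # => :job (push arrives before the deadline)
    queue.pop(timeout: 0.05) # => nil  (nothing pushed within 50ms)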

data/lib/karafka/routing/builder.rb CHANGED
@@ -3,20 +3,25 @@
  module Karafka
    module Routing
      # Builder used as a DSL layer for building consumers and telling them which topics to consume
+     #
+     # @note We lock the access just in case this is used in patterns. The locks here have no
+     #   impact on routing usage unless it is being expanded, so there are no race condition risks.
+     #
      # @example Build a simple (most common) route
      #   consumers do
      #     topic :new_videos do
      #       consumer NewVideosConsumer
      #     end
      #   end
-     class Builder < Concurrent::Array
+     class Builder < Array
        # Empty default per-topic config
        EMPTY_DEFAULTS = ->(_) {}.freeze

        private_constant :EMPTY_DEFAULTS

        def initialize
-         @draws = Concurrent::Array.new
+         @mutex = Mutex.new
+         @draws = []
          @defaults = EMPTY_DEFAULTS
          super
        end
@@ -34,21 +39,23 @@ module Karafka
      #     end
      #   end
      def draw(&block)
-       @draws << block
+       @mutex.synchronize do
+         @draws << block

-       instance_eval(&block)
+         instance_eval(&block)

-       each do |consumer_group|
-         # Validate consumer group settings
-         Contracts::ConsumerGroup.new.validate!(consumer_group.to_h)
+         each do |consumer_group|
+           # Validate consumer group settings
+           Contracts::ConsumerGroup.new.validate!(consumer_group.to_h)

-         # and then its topics settings
-         consumer_group.topics.each do |topic|
-           Contracts::Topic.new.validate!(topic.to_h)
-         end
+           # and then its topics settings
+           consumer_group.topics.each do |topic|
+             Contracts::Topic.new.validate!(topic.to_h)
+           end

-         # Initialize subscription groups after all the routing is done
-         consumer_group.subscription_groups
+           # Initialize subscription groups after all the routing is done
+           consumer_group.subscription_groups
+         end
        end
      end
@@ -61,9 +68,11 @@ module Karafka

      # Clears the builder and the draws memory
      def clear
-       @defaults = EMPTY_DEFAULTS
-       @draws.clear
-       super
+       @mutex.synchronize do
+         @defaults = EMPTY_DEFAULTS
+         @draws.clear
+         super
+       end
      end

      # @param block [Proc] block with per-topic evaluated defaults
@@ -71,7 +80,13 @@
      def defaults(&block)
        return @defaults unless block

-       @defaults = block
+       if @mutex.owned?
+         @defaults = block
+       else
+         @mutex.synchronize do
+           @defaults = block
+         end
+       end
      end

      private
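
The `@mutex.owned?` fast path exists because `defaults` is typically invoked from inside a `draw` block, which already holds the builder mutex. A hedged routing sketch; the names are illustrative and `max_messages` is assumed to be one of the standard per-topic settings:

    KarafkaApp.routes.draw do
      # Runs while draw holds the mutex, hence the owned? branch above
      defaults do
        max_messages 100
      end

      topic :new_videos do
        consumer NewVideosConsumer
      end
    end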

data/lib/karafka/routing/subscription_group.rb CHANGED
@@ -10,19 +10,24 @@ module Karafka
    class SubscriptionGroup
      attr_reader :id, :name, :topics, :kafka, :consumer_group

-     # Numeric for counting groups
-     GROUP_COUNT = Concurrent::AtomicFixnum.new
+     # Lock for generating new ids safely
+     ID_MUTEX = Mutex.new

-     private_constant :GROUP_COUNT
+     private_constant :ID_MUTEX

      class << self
        # Generates new subscription group id that will be used in case of anonymous subscription
        # groups
        # @return [String] hex(6) compatible reproducible id
        def id
-         ::Digest::MD5.hexdigest(
-           GROUP_COUNT.increment.to_s
-         )[0..11]
+         ID_MUTEX.synchronize do
+           @group_counter ||= 0
+           @group_counter += 1
+
+           ::Digest::MD5.hexdigest(
+             @group_counter.to_s
+           )[0..11]
+         end
        end
      end

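Because the id is an MD5 digest of a process-local counter, the sequence stays reproducible across runs, which is what anonymous subscription groups rely on. The first two generated ids, for illustration:

    require 'digest'

    Digest::MD5.hexdigest('1')[0..11] # => "c4ca4238a0b9"
    Digest::MD5.hexdigest('2')[0..11] # => "c81e728d9d4c"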

data/lib/karafka/runner.rb CHANGED
@@ -8,7 +8,7 @@ module Karafka
    def call
      # Despite possibility of having several independent listeners, we aim to have one queue for
      # jobs across all of them and one workers pool for that
-     jobs_queue = Processing::JobsQueue.new
+     jobs_queue = App.config.internal.processing.jobs_queue_class.new

      workers = Processing::WorkersBatch.new(jobs_queue)
      listeners = Connection::ListenersBatch.new(jobs_queue)

data/lib/karafka/setup/config.rb CHANGED
@@ -152,6 +152,17 @@ module Karafka
      # instances
      setting :process, default: Process.new

+     # Interval of "ticking". This is used to define the maximum time between consecutive
+     # polling of the main rdkafka queue. It should also match the smallest
+     # `statistics.interval.ms` value defined in any of the per-kafka settings, so metrics are
+     # published with the desired frequency. It is set to 5 seconds because
+     # `statistics.interval.ms` is also set to five seconds.
+     #
+     # It is NOT allowed to set it to a value lower than 1 second because polling could then
+     # not have enough time to run. It also (indirectly) defines a single poll
+     # max timeout as to allow for frequent enough events polling
+     setting :tick_interval, default: 5_000
+
      # Namespace for CLI related settings
      setting :cli do
        # option contract [Object] cli setup validation contract (in the context of options and
@@ -198,8 +209,9 @@ module Karafka
      end

      setting :processing do
+       setting :jobs_queue_class, default: Processing::JobsQueue
        # option scheduler [Object] scheduler we will be using
-       setting :scheduler, default: Processing::Scheduler.new
+       setting :scheduler_class, default: Processing::Scheduler
        # option jobs_builder [Object] jobs builder we want to use
        setting :jobs_builder, default: Processing::JobsBuilder.new
        # option coordinator [Class] work coordinator we want to use for processing coordination
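
Both the queue and the scheduler are now pluggable classes that Karafka instantiates itself (the scheduler receives the jobs queue, per the `processing/scheduler.rb` change above). A hedged setup sketch reusing the `PrioritizedScheduler` sketched earlier:

    class KarafkaApp < Karafka::App
      setup do |config|
        config.kafka = { 'bootstrap.servers': '127.0.0.1:9092' }

        # Internal settings now take classes, not instances
        config.internal.processing.scheduler_class = PrioritizedScheduler
        config.internal.processing.jobs_queue_class = Karafka::Processing::JobsQueue
      end
    end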

data/lib/karafka/version.rb CHANGED
@@ -3,5 +3,5 @@
  # Main module namespace
  module Karafka
    # Current Karafka version
-   VERSION = '2.2.11'
+   VERSION = '2.2.13'
  end
data/lib/karafka.rb CHANGED
@@ -16,7 +16,6 @@
    singleton
    digest
    zeitwerk
-   concurrent/atomic/atomic_fixnum
  ].each(&method(:require))

  # Karafka framework main namespace
data.tar.gz.sig CHANGED
Binary file