karafka 2.2.12 → 2.2.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/CHANGELOG.md +141 -121
  4. data/Gemfile.lock +10 -10
  5. data/config/locales/errors.yml +2 -1
  6. data/docker-compose.yml +2 -0
  7. data/lib/karafka/admin.rb +109 -3
  8. data/lib/karafka/app.rb +7 -0
  9. data/lib/karafka/base_consumer.rb +23 -30
  10. data/lib/karafka/connection/client.rb +13 -10
  11. data/lib/karafka/connection/consumer_group_coordinator.rb +3 -3
  12. data/lib/karafka/connection/listener.rb +18 -10
  13. data/lib/karafka/connection/listeners_batch.rb +6 -1
  14. data/lib/karafka/contracts/config.rb +2 -1
  15. data/lib/karafka/instrumentation/assignments_tracker.rb +96 -0
  16. data/lib/karafka/instrumentation/callbacks/rebalance.rb +10 -7
  17. data/lib/karafka/instrumentation/logger_listener.rb +0 -9
  18. data/lib/karafka/instrumentation/notifications.rb +6 -3
  19. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +2 -2
  20. data/lib/karafka/pro/instrumentation/performance_tracker.rb +85 -0
  21. data/lib/karafka/pro/loader.rb +3 -2
  22. data/lib/karafka/pro/processing/coordinator.rb +12 -6
  23. data/lib/karafka/pro/processing/jobs_queue.rb +109 -0
  24. data/lib/karafka/pro/processing/schedulers/base.rb +127 -0
  25. data/lib/karafka/pro/processing/schedulers/default.rb +109 -0
  26. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +1 -1
  27. data/lib/karafka/pro/processing/strategies/default.rb +3 -1
  28. data/lib/karafka/pro/processing/strategies/lrj/default.rb +10 -1
  29. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +1 -1
  30. data/lib/karafka/pro/processing/strategies/vp/default.rb +9 -5
  31. data/lib/karafka/processing/coordinator.rb +13 -7
  32. data/lib/karafka/processing/executor.rb +27 -3
  33. data/lib/karafka/processing/executors_buffer.rb +3 -3
  34. data/lib/karafka/processing/jobs/base.rb +19 -2
  35. data/lib/karafka/processing/jobs/consume.rb +3 -3
  36. data/lib/karafka/processing/jobs/idle.rb +5 -0
  37. data/lib/karafka/processing/jobs/revoked.rb +5 -0
  38. data/lib/karafka/processing/jobs/shutdown.rb +5 -0
  39. data/lib/karafka/processing/jobs_queue.rb +45 -17
  40. data/lib/karafka/processing/schedulers/default.rb +41 -0
  41. data/lib/karafka/processing/strategies/base.rb +13 -4
  42. data/lib/karafka/processing/strategies/default.rb +17 -5
  43. data/lib/karafka/processing/worker.rb +4 -1
  44. data/lib/karafka/routing/builder.rb +32 -17
  45. data/lib/karafka/routing/proxy.rb +4 -3
  46. data/lib/karafka/routing/subscription_group.rb +11 -6
  47. data/lib/karafka/routing/topics.rb +1 -1
  48. data/lib/karafka/runner.rb +1 -1
  49. data/lib/karafka/setup/config.rb +5 -1
  50. data/lib/karafka/version.rb +1 -1
  51. data/lib/karafka.rb +0 -1
  52. data.tar.gz.sig +0 -0
  53. metadata +8 -5
  54. metadata.gz.sig +0 -0
  55. data/lib/karafka/pro/performance_tracker.rb +0 -84
  56. data/lib/karafka/pro/processing/scheduler.rb +0 -75
  57. data/lib/karafka/processing/scheduler.rb +0 -22
data/lib/karafka/processing/executor.rb CHANGED
@@ -11,6 +11,15 @@ module Karafka
     #
     # @note Executors are not removed after partition is revoked. They are not that big and will
     #   be re-used in case of a re-claim
+    #
+    # @note Since given consumer can run various operations, executor manages that and its
+    #   lifecycle. There are following types of operations with appropriate before/after, etc:
+    #
+    #   - consume - primary operation related to running user consumption code
+    #   - idle - cleanup job that runs on idle runs where no messages would be passed to the end
+    #     user. This is used for complex flows with filters, etc
+    #   - revoked - runs after the partition was revoked
+    #   - shutdown - runs when process is going to shutdown
     class Executor
       extend Forwardable
 
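The note above lists the four operation types the executor coordinates. As a rough sketch of how the renamed hooks line up for the consume flow (illustrative call order only; the real orchestration lives in the listener, jobs, and worker classes shown further down in this diff):

    # `executor` is assumed to be a Karafka::Processing::Executor instance and
    # `messages` a batch fetched by the listener
    executor.before_schedule_consume(messages) # listener thread, pre-scheduling setup
    # ... the job then passes through the scheduler and the jobs queue ...
    executor.before_consume                    # worker thread, warm-up
    executor.consume                           # runs the user consumption code
    executor.after_consume                     # worker thread, post-processing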
@@ -39,11 +48,11 @@ module Karafka
       end
 
       # Allows us to prepare the consumer in the listener thread prior to the job being send to
-      # the queue. It also allows to run some code that is time sensitive and cannot wait in the
+      # be scheduled. It also allows to run some code that is time sensitive and cannot wait in the
       # queue as it could cause starvation.
       #
       # @param messages [Array<Karafka::Messages::Message>]
-      def before_enqueue(messages)
+      def before_schedule_consume(messages)
         # Recreate consumer with each batch if persistence is not enabled
         # We reload the consumers with each batch instead of relying on some external signals
         # when needed for consistency. That way devs may have it on or off and not in this
@@ -60,7 +69,7 @@ module Karafka
           Time.now
         )
 
-        consumer.on_before_enqueue
+        consumer.on_before_schedule_consume
       end
 
       # Runs setup and warm-up code in the worker prior to running the consumption
@@ -79,6 +88,11 @@ module Karafka
         consumer.on_after_consume
       end
 
+      # Runs the code needed before idle work is scheduled
+      def before_schedule_idle
+        consumer.on_before_schedule_idle
+      end
+
       # Runs consumer idle operations
       # This may include house-keeping or other state management changes that can occur but that
       # not mean there are any new messages available for the end user to process
@@ -96,6 +110,11 @@ module Karafka
         consumer.on_idle
       end
 
+      # Runs code needed before revoked job is scheduled
+      def before_schedule_revoked
+        consumer.on_before_schedule_revoked if @consumer
+      end
+
       # Runs the controller `#revoked` method that should be triggered when a given consumer is
       # no longer needed due to partitions reassignment.
       #
@@ -112,6 +131,11 @@ module Karafka
         consumer.on_revoked if @consumer
       end
 
+      # Runs code needed before shutdown job is scheduled
+      def before_schedule_shutdown
+        consumer.on_before_schedule_shutdown if @consumer
+      end
+
       # Runs the controller `#shutdown` method that should be triggered when a given consumer is
       # no longer needed as we're closing the process.
       #
data/lib/karafka/processing/executors_buffer.rb CHANGED
@@ -54,9 +54,9 @@ module Karafka
       # @yieldparam [Integer] partition number
       # @yieldparam [Executor] given executor
       def each
-        @buffer.each do |_, partitions|
-          partitions.each do |_, executors|
-            executors.each do |_, executor|
+        @buffer.each_value do |partitions|
+          partitions.each_value do |executors|
+            executors.each_value do |executor|
               yield(executor)
             end
           end
data/lib/karafka/processing/jobs/base.rb CHANGED
@@ -20,11 +20,14 @@ module Karafka
           # All jobs are blocking by default and they can release the lock when blocking operations
           # are done (if needed)
           @non_blocking = false
+          @status = :pending
         end
 
-        # When redefined can run any code prior to the job being enqueued
+        # When redefined can run any code prior to the job being scheduled
         # @note This will run in the listener thread and not in the worker
-        def before_enqueue; end
+        def before_schedule
+          raise NotImplementedError, 'Please implement in a subclass'
+        end
 
        # When redefined can run any code that should run before executing the proper code
        def before_call; end
@@ -49,6 +52,20 @@ module Karafka
        def non_blocking?
          @non_blocking
        end
+
+        # @return [Boolean] was this job finished.
+        def finished?
+          @status == :finished
+        end
+
+        # Marks the job as finished. Used by the worker to indicate, that this job is done.
+        #
+        # @note Since the scheduler knows exactly when it schedules jobs and when it keeps them
+        #   pending, we do not need advanced state tracking and the only information from the
+        #   "outside" is whether it was finished or not after it was scheduled for execution.
+        def finish!
+          @status = :finished
+        end
      end
    end
  end
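The job thus carries a deliberately minimal two-state lifecycle (pending/finished). A sketch of how a scheduler could lean on it to release dependent work in order; the method and names below are illustrative, not part of the public API:

    # `queue` is assumed to behave like Karafka::Processing::JobsQueue.
    # The worker calls `finish!` once a job's `call` has completed, so
    # `finished?` is the only signal a scheduler needs from the outside.
    def release_in_order(previous_job, next_job, queue)
      queue << next_job if previous_job.finished?
    end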
data/lib/karafka/processing/jobs/consume.rb CHANGED
@@ -20,9 +20,9 @@ module Karafka
        end
 
        # Runs all the preparation code on the executor that needs to happen before the job is
-        # enqueued.
-        def before_enqueue
-          executor.before_enqueue(@messages)
+        # scheduled.
+        def before_schedule
+          executor.before_schedule_consume(@messages)
        end
 
        # Runs the before consumption preparations on the executor
data/lib/karafka/processing/jobs/idle.rb CHANGED
@@ -14,6 +14,11 @@ module Karafka
          super()
        end
 
+        # Runs code prior to scheduling this idle job
+        def before_schedule
+          executor.before_schedule_idle
+        end
+
        # Run the idle work via the executor
        def call
          executor.idle
data/lib/karafka/processing/jobs/revoked.rb CHANGED
@@ -12,6 +12,11 @@ module Karafka
          super()
        end
 
+        # Runs code prior to scheduling this revoked job
+        def before_schedule
+          executor.before_schedule_revoked
+        end
+
        # Runs the revoking job via an executor.
        def call
          executor.revoked
data/lib/karafka/processing/jobs/shutdown.rb CHANGED
@@ -13,6 +13,11 @@ module Karafka
          super()
        end
 
+        # Runs code prior to scheduling this shutdown job
+        def before_schedule
+          executor.before_schedule_shutdown
+        end
+
        # Runs the shutdown job via an executor.
        def call
          executor.shutdown
data/lib/karafka/processing/jobs_queue.rb CHANGED
@@ -9,6 +9,9 @@ module Karafka
    # on this queue, that's why internally we keep track of processing per group.
    #
    # We work with the assumption, that partitions data is evenly distributed.
+    #
+    # @note This job queue also keeps track / understands number of busy workers. This is because
+    #   we use a single workers poll that can have granular scheduling.
    class JobsQueue
      # @return [Karafka::Processing::JobsQueue]
      def initialize
@@ -20,23 +23,29 @@ module Karafka
        # scheduled by Ruby hundreds of thousands of times per group.
        # We cannot use a single semaphore as it could potentially block in listeners that should
        # process with their data and also could unlock when a given group needs to remain locked
-        @semaphores = Concurrent::Map.new do |h, k|
-          # Ruby prior to 3.2 did not have queue with a timeout on `#pop`, that is why for those
-          # versions we use our custom queue wrapper
-          h.compute_if_absent(k) { RUBY_VERSION < '3.2' ? TimedQueue.new : Queue.new }
-        end
-
+        @semaphores = {}
+        @concurrency = Karafka::App.config.concurrency
        @tick_interval = ::Karafka::App.config.internal.tick_interval
        @in_processing = Hash.new { |h, k| h[k] = [] }
+        @statistics = { busy: 0, enqueued: 0 }
 
        @mutex = Mutex.new
      end
 
-      # Returns number of jobs that are either enqueued or in processing (but not finished)
-      # @return [Integer] number of elements in the queue
-      # @note Using `#pop` won't decrease this number as only marking job as completed does this
-      def size
-        @in_processing.values.map(&:size).sum
+      # Registers given subscription group id in the queue. It is needed so we do not dynamically
+      # create semaphore, hence avoiding potential race conditions
+      #
+      # @param group_id [String]
+      def register(group_id)
+        @mutex.synchronize do
+          # Ruby prior to 3.2 did not have queue with a timeout on `#pop`, that is why for those
+          # versions we use our custom queue wrapper
+          #
+          # Initializes this semaphore from the mutex, so it is never auto-created
+          # Since we always schedule a job before waiting using semaphores, there won't be any
+          # concurrency problems
+          @semaphores[group_id] = RUBY_VERSION < '3.2' ? TimedQueue.new : Queue.new
+        end
      end
 
      # Adds the job to the internal main queue, scheduling it for execution in a worker and marks
  # Adds the job to the internal main queue, scheduling it for execution in a worker and marks
@@ -55,6 +64,16 @@ module Karafka
55
64
 
56
65
  group << job
57
66
 
67
+ # Assume that moving to queue means being picked up immediately not to create stats
68
+ # race conditions because of pop overhead. If there are workers available, we assume
69
+ # work is going to be handled as we never reject enqueued jobs
70
+ if @statistics[:busy] < @concurrency
71
+ @statistics[:busy] += 1
72
+ else
73
+ # If system is fully loaded, it means this job is indeed enqueued
74
+ @statistics[:enqueued] += 1
75
+ end
76
+
58
77
  @queue << job
59
78
  end
60
79
  end
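This bookkeeping maintains one invariant: busy + enqueued equals the number of added but uncompleted jobs, with busy capped at the worker pool size. A toy simulation of the logic above, assuming a concurrency of 5:

    stats = { busy: 0, enqueued: 0 }
    concurrency = 5

    # Mirrors the add/complete branches from the jobs queue
    add = -> { stats[stats[:busy] < concurrency ? :busy : :enqueued] += 1 }
    complete = -> { stats[stats[:enqueued].positive? ? :enqueued : :busy] -= 1 }

    7.times { add.call } # stats => { busy: 5, enqueued: 2 }
    complete.call        # stats => { busy: 5, enqueued: 1 } - a queued job takes the freed slot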
@@ -71,7 +90,7 @@ module Karafka
      # @param group_id [String] id of the group we want to unlock for one tick
      # @note This does not release the wait lock. It just causes a conditions recheck
      def tick(group_id)
-        @semaphores[group_id] << true
+        @semaphores.fetch(group_id) << true
      end
 
      # Marks a given job from a given group as completed. When there are no more jobs from a given
@@ -80,7 +99,16 @@ module Karafka
      # @param [Jobs::Base] job that was completed
      def complete(job)
        @mutex.synchronize do
+          # We finish one job and if there is another, we pick it up
+          if @statistics[:enqueued].positive?
+            @statistics[:enqueued] -= 1
+          # If no more enqueued jobs, we will be just less busy
+          else
+            @statistics[:busy] -= 1
+          end
+
          @in_processing[job.group_id].delete(job)
+
          tick(job.group_id)
        end
      end
@@ -132,7 +160,7 @@ module Karafka
        while wait?(group_id)
          yield if block_given?
 
-          @semaphores[group_id].pop(timeout: @tick_interval / 1_000.0)
+          @semaphores.fetch(group_id).pop(timeout: @tick_interval / 1_000.0)
        end
      end
 
@@ -141,10 +169,10 @@ module Karafka
      #
      # @return [Hash] hash with basic usage statistics of this queue.
      def statistics
-        {
-          busy: size - @queue.size,
-          enqueued: @queue.size
-        }.freeze
+        # Ensures there are no race conditions when returning this data
+        @mutex.synchronize do
+          @statistics.dup.freeze
+        end
      end
 
      private
data/lib/karafka/processing/schedulers/default.rb ADDED
@@ -0,0 +1,41 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    # Namespace for Karafka OSS schedulers
+    module Schedulers
+      # FIFO scheduler for messages coming from various topics and partitions
+      class Default
+        # @param queue [Karafka::Processing::JobsQueue] queue where we want to put the jobs
+        def initialize(queue)
+          @queue = queue
+        end
+
+        # Schedules jobs in the fifo order
+        #
+        # @param jobs_array [Array<Karafka::Processing::Jobs::Base>] jobs we want to schedule
+        def on_schedule_consumption(jobs_array)
+          jobs_array.each do |job|
+            @queue << job
+          end
+        end
+
+        # Both revocation and shutdown jobs can also run in fifo by default
+        alias on_schedule_revocation on_schedule_consumption
+        alias on_schedule_shutdown on_schedule_consumption
+
+        # This scheduler does not have anything to manage as it is a pass through and has no state
+        def on_manage
+          nil
+        end
+
+        # This scheduler does not need to be cleared because it is stateless
+        #
+        # @param _group_id [String] Subscription group id
+        def on_clear(_group_id)
+          nil
+        end
+      end
+    end
+  end
+end
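Since the scheduler is now a class wired through the `internal.processing.scheduler_class` setting (see the setup/config.rb hunk below), swapping the FIFO behavior only requires overriding the `on_*` hooks. A sketch under that assumption; the reversed ordering is purely illustrative:

    class ReversedScheduler < Karafka::Processing::Schedulers::Default
      # Push consumption jobs in reverse arrival order; @queue comes from
      # the inherited initializer
      def on_schedule_consumption(jobs_array)
        jobs_array.reverse_each { |job| @queue << job }
      end
    end

    # Hypothetical wiring via the internal (non-public) setting:
    #   config.internal.processing.scheduler_class = ReversedScheduler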
data/lib/karafka/processing/strategies/base.rb CHANGED
@@ -11,10 +11,19 @@ module Karafka
    module Strategies
      # Base strategy that should be included in each strategy, just to ensure the API
      module Base
-        # What should happen before jobs are enqueued
-        # @note This runs from the listener thread, not recommended to put anything slow here
-        def handle_before_enqueue
-          raise NotImplementedError, 'Implement in a subclass'
+        # Defines all the before schedule handlers for appropriate actions
+        %i[
+          consume
+          idle
+          revoked
+          shutdown
+        ].each do |action|
+          class_eval <<~RUBY, __FILE__, __LINE__ + 1
+            def handle_before_schedule_#{action}
+              # What should happen before scheduling this work
+              raise NotImplementedError, 'Implement in a subclass'
+            end
+          RUBY
        end
 
        # What should happen before we kick in the processing
data/lib/karafka/processing/strategies/default.rb CHANGED
@@ -13,6 +13,23 @@ module Karafka
        # Apply strategy for a non-feature based flow
        FEATURES = %i[].freeze
 
+        # By default on all "before schedule" we just run instrumentation, nothing more
+        %i[
+          consume
+          idle
+          revoked
+          shutdown
+        ].each do |action|
+          class_eval <<~RUBY, __FILE__, __LINE__ + 1
+            # No actions needed for the standard flow here
+            def handle_before_schedule_#{action}
+              Karafka.monitor.instrument('consumer.before_schedule_#{action}', caller: self)
+
+              nil
+            end
+          RUBY
+        end
+
        # Marks message as consumed in an async way.
        #
        # @param message [Messages::Message] last successfully processed message.
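For reference, the metaprogramming above expands into one handler per action; the hand-written equivalent for `consume` would be:

    # No actions needed for the standard flow here
    def handle_before_schedule_consume
      Karafka.monitor.instrument('consumer.before_schedule_consume', caller: self)

      nil
    end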
@@ -76,11 +93,6 @@ module Karafka
          commit_offsets(async: false)
        end
 
-        # No actions needed for the standard flow here
-        def handle_before_enqueue
-          nil
-        end
-
        # Increment number of attempts
        def handle_before_consume
          coordinator.pause_tracker.increment
data/lib/karafka/processing/worker.rb CHANGED
@@ -83,7 +83,10 @@ module Karafka
        )
      ensure
        # job can be nil when the queue is being closed
-        @jobs_queue.complete(job) if job
+        if job
+          @jobs_queue.complete(job)
+          job.finish!
+        end
 
        # Always publish info, that we completed all the work despite its result
        Karafka.monitor.instrument('worker.completed', instrument_details)
data/lib/karafka/routing/builder.rb CHANGED
@@ -3,20 +3,25 @@
 module Karafka
   module Routing
     # Builder used as a DSL layer for building consumers and telling them which topics to consume
+    #
+    # @note We lock the access just in case this is used in patterns. The locks here do not have
+    #   any impact on routing usage unless being expanded, so no race conditions risks.
+    #
     # @example Build a simple (most common) route
     #   consumers do
     #     topic :new_videos do
     #       consumer NewVideosConsumer
     #     end
     #   end
-    class Builder < Concurrent::Array
+    class Builder < Array
      # Empty default per-topic config
      EMPTY_DEFAULTS = ->(_) {}.freeze
 
      private_constant :EMPTY_DEFAULTS
 
      def initialize
-        @draws = Concurrent::Array.new
+        @mutex = Mutex.new
+        @draws = []
        @defaults = EMPTY_DEFAULTS
        super
      end
@@ -34,21 +39,23 @@ module Karafka
      #     end
      #   end
      def draw(&block)
-        @draws << block
+        @mutex.synchronize do
+          @draws << block
 
-        instance_eval(&block)
+          instance_eval(&block)
 
-        each do |consumer_group|
-          # Validate consumer group settings
-          Contracts::ConsumerGroup.new.validate!(consumer_group.to_h)
+          each do |consumer_group|
+            # Validate consumer group settings
+            Contracts::ConsumerGroup.new.validate!(consumer_group.to_h)
 
-          # and then its topics settings
-          consumer_group.topics.each do |topic|
-            Contracts::Topic.new.validate!(topic.to_h)
-          end
+            # and then its topics settings
+            consumer_group.topics.each do |topic|
+              Contracts::Topic.new.validate!(topic.to_h)
+            end
 
-          # Initialize subscription groups after all the routing is done
-          consumer_group.subscription_groups
+            # Initialize subscription groups after all the routing is done
+            consumer_group.subscription_groups
+          end
        end
      end
 
@@ -61,9 +68,11 @@ module Karafka
 
      # Clears the builder and the draws memory
      def clear
-        @defaults = EMPTY_DEFAULTS
-        @draws.clear
-        super
+        @mutex.synchronize do
+          @defaults = EMPTY_DEFAULTS
+          @draws.clear
+          super
+        end
      end
 
      # @param block [Proc] block with per-topic evaluated defaults
@@ -71,7 +80,13 @@ module Karafka
      def defaults(&block)
        return @defaults unless block
 
-        @defaults = block
+        if @mutex.owned?
+          @defaults = block
+        else
+          @mutex.synchronize do
+            @defaults = block
+          end
+        end
      end
 
      private
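The `owned?` branch exists because `draw` holds the mutex while it `instance_eval`s the routing block, and that block may itself call `defaults`. Ruby's Mutex is not reentrant, so an unconditional `synchronize` here would raise ThreadError (recursive locking) on exactly this path:

    # Minimal reproduction of the reentrant call path (illustrative):
    Karafka::App.routes.draw do # draw acquires the builder mutex...
      defaults do               # ...defaults then runs while it is still held,
        # per-topic defaults    # so it must detect ownership instead of relocking
      end
    end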
data/lib/karafka/routing/proxy.rb CHANGED
@@ -10,11 +10,12 @@ module Karafka
      # @param target [Object] target object to which we proxy any DSL call
      # @param defaults [Proc] defaults for target that should be applicable after the proper
      #   proxy context (if needed)
-      # @param block [Proc] block that we want to evaluate in the proxy context
+      # @param block [Proc, nil] block that we want to evaluate in the proxy context or nil if no
+      #   proxy block context for example because whole context is taken from defaults
      def initialize(target, defaults = ->(_) {}, &block)
        @target = target
-        instance_eval(&block)
-        instance_eval(&defaults)
+        instance_eval(&block) if block
+        instance_eval(&defaults) if defaults
      end
 
      # Ruby 2.7.0 to 2.7.2 do not have arg forwarding, so we fallback to the old way
data/lib/karafka/routing/subscription_group.rb CHANGED
@@ -10,19 +10,24 @@ module Karafka
    class SubscriptionGroup
      attr_reader :id, :name, :topics, :kafka, :consumer_group
 
-      # Numeric for counting groups
-      GROUP_COUNT = Concurrent::AtomicFixnum.new
+      # Lock for generating new ids safely
+      ID_MUTEX = Mutex.new
 
-      private_constant :GROUP_COUNT
+      private_constant :ID_MUTEX
 
      class << self
        # Generates new subscription group id that will be used in case of anonymous subscription
        # groups
        # @return [String] hex(6) compatible reproducible id
        def id
-          ::Digest::MD5.hexdigest(
-            GROUP_COUNT.increment.to_s
-          )[0..11]
+          ID_MUTEX.synchronize do
+            @group_counter ||= 0
+            @group_counter += 1
+
+            ::Digest::MD5.hexdigest(
+              @group_counter.to_s
+            )[0..11]
+          end
        end
      end
 
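The generated ids stay reproducible across process runs because the digest input is just an incrementing counter; the mutex only serializes the increment. Inlining the derivation for the first two anonymous groups:

    require 'digest'

    Digest::MD5.hexdigest('1')[0..11] # => "c4ca4238a0b9"
    Digest::MD5.hexdigest('2')[0..11] # => "c81e728d9d4c"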
data/lib/karafka/routing/topics.rb CHANGED
@@ -9,7 +9,7 @@ module Karafka
    include Enumerable
    extend Forwardable
 
-    def_delegators :@accumulator, :[], :size, :empty?, :last, :<<
+    def_delegators :@accumulator, :[], :size, :empty?, :last, :<<, :map!, :sort_by!, :reverse!
 
    # @param topics_array [Array<Karafka::Routing::Topic>] array with topics
    def initialize(topics_array)
data/lib/karafka/runner.rb CHANGED
@@ -8,7 +8,7 @@ module Karafka
    def call
      # Despite possibility of having several independent listeners, we aim to have one queue for
      # jobs across and one workers poll for that
-      jobs_queue = Processing::JobsQueue.new
+      jobs_queue = App.config.internal.processing.jobs_queue_class.new
 
      workers = Processing::WorkersBatch.new(jobs_queue)
      listeners = Connection::ListenersBatch.new(jobs_queue)
data/lib/karafka/setup/config.rb CHANGED
@@ -209,8 +209,9 @@ module Karafka
      end
 
      setting :processing do
+        setting :jobs_queue_class, default: Processing::JobsQueue
        # option scheduler [Object] scheduler we will be using
-        setting :scheduler, default: Processing::Scheduler.new
+        setting :scheduler_class, default: Processing::Schedulers::Default
        # option jobs_builder [Object] jobs builder we want to use
        setting :jobs_builder, default: Processing::JobsBuilder.new
        # option coordinator [Class] work coordinator we want to user for processing coordination
@@ -277,6 +278,9 @@ module Karafka
      # are also configured
      Pro::Loader.post_setup_all(config) if Karafka.pro?
 
+      # Subscribe the assignments tracker so we can always query all current assignments
+      config.monitor.subscribe(Instrumentation::AssignmentsTracker.instance)
+
      Karafka::App.initialized!
    end
 
data/lib/karafka/version.rb CHANGED
@@ -3,5 +3,5 @@
 # Main module namespace
 module Karafka
   # Current Karafka version
-  VERSION = '2.2.12'
+  VERSION = '2.2.14'
 end
data/lib/karafka.rb CHANGED
@@ -16,7 +16,6 @@
   singleton
   digest
   zeitwerk
-  concurrent/atomic/atomic_fixnum
 ].each(&method(:require))
 
 # Karafka framework main namespace
data.tar.gz.sig CHANGED
Binary file