karafka 2.2.13 → 2.3.0.alpha1

Files changed (125)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +38 -12
  4. data/.ruby-version +1 -1
  5. data/CHANGELOG.md +161 -125
  6. data/Gemfile.lock +12 -12
  7. data/README.md +0 -2
  8. data/SECURITY.md +23 -0
  9. data/config/locales/errors.yml +7 -1
  10. data/config/locales/pro_errors.yml +22 -0
  11. data/docker-compose.yml +3 -1
  12. data/karafka.gemspec +2 -2
  13. data/lib/karafka/admin/acl.rb +287 -0
  14. data/lib/karafka/admin.rb +118 -16
  15. data/lib/karafka/app.rb +12 -3
  16. data/lib/karafka/base_consumer.rb +32 -31
  17. data/lib/karafka/cli/base.rb +1 -1
  18. data/lib/karafka/connection/client.rb +94 -84
  19. data/lib/karafka/connection/conductor.rb +28 -0
  20. data/lib/karafka/connection/listener.rb +165 -46
  21. data/lib/karafka/connection/listeners_batch.rb +5 -11
  22. data/lib/karafka/connection/manager.rb +72 -0
  23. data/lib/karafka/connection/messages_buffer.rb +12 -0
  24. data/lib/karafka/connection/proxy.rb +17 -0
  25. data/lib/karafka/connection/status.rb +75 -0
  26. data/lib/karafka/contracts/config.rb +14 -10
  27. data/lib/karafka/contracts/consumer_group.rb +9 -1
  28. data/lib/karafka/contracts/topic.rb +3 -1
  29. data/lib/karafka/errors.rb +13 -0
  30. data/lib/karafka/instrumentation/assignments_tracker.rb +96 -0
  31. data/lib/karafka/instrumentation/callbacks/rebalance.rb +10 -7
  32. data/lib/karafka/instrumentation/logger_listener.rb +3 -9
  33. data/lib/karafka/instrumentation/notifications.rb +19 -9
  34. data/lib/karafka/instrumentation/vendors/appsignal/metrics_listener.rb +31 -28
  35. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +22 -3
  36. data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +15 -12
  37. data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +39 -36
  38. data/lib/karafka/pro/base_consumer.rb +47 -0
  39. data/lib/karafka/pro/connection/manager.rb +300 -0
  40. data/lib/karafka/pro/connection/multiplexing/listener.rb +40 -0
  41. data/lib/karafka/pro/instrumentation/performance_tracker.rb +85 -0
  42. data/lib/karafka/pro/iterator/tpl_builder.rb +1 -1
  43. data/lib/karafka/pro/iterator.rb +1 -6
  44. data/lib/karafka/pro/loader.rb +16 -2
  45. data/lib/karafka/pro/processing/coordinator.rb +2 -1
  46. data/lib/karafka/pro/processing/executor.rb +37 -0
  47. data/lib/karafka/pro/processing/expansions_selector.rb +32 -0
  48. data/lib/karafka/pro/processing/jobs/periodic.rb +41 -0
  49. data/lib/karafka/pro/processing/jobs/periodic_non_blocking.rb +32 -0
  50. data/lib/karafka/pro/processing/jobs_builder.rb +14 -3
  51. data/lib/karafka/pro/processing/offset_metadata/consumer.rb +44 -0
  52. data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +131 -0
  53. data/lib/karafka/pro/processing/offset_metadata/listener.rb +46 -0
  54. data/lib/karafka/pro/processing/schedulers/base.rb +143 -0
  55. data/lib/karafka/pro/processing/schedulers/default.rb +107 -0
  56. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +1 -1
  57. data/lib/karafka/pro/processing/strategies/default.rb +136 -3
  58. data/lib/karafka/pro/processing/strategies/dlq/default.rb +35 -0
  59. data/lib/karafka/pro/processing/strategies/lrj/default.rb +1 -1
  60. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +1 -1
  61. data/lib/karafka/pro/processing/strategies/vp/default.rb +60 -26
  62. data/lib/karafka/pro/processing/virtual_offset_manager.rb +41 -11
  63. data/lib/karafka/pro/routing/features/long_running_job/topic.rb +2 -0
  64. data/lib/karafka/pro/routing/features/multiplexing/config.rb +38 -0
  65. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +114 -0
  66. data/lib/karafka/pro/routing/features/multiplexing/patches/contracts/consumer_group.rb +42 -0
  67. data/lib/karafka/pro/routing/features/multiplexing/proxy.rb +38 -0
  68. data/lib/karafka/pro/routing/features/multiplexing/subscription_group.rb +42 -0
  69. data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +40 -0
  70. data/lib/karafka/pro/routing/features/multiplexing.rb +59 -0
  71. data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +32 -0
  72. data/lib/karafka/pro/routing/features/non_blocking_job.rb +37 -0
  73. data/lib/karafka/pro/routing/features/offset_metadata/config.rb +33 -0
  74. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +42 -0
  75. data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +65 -0
  76. data/lib/karafka/pro/routing/features/offset_metadata.rb +40 -0
  77. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +4 -0
  78. data/lib/karafka/pro/routing/features/patterns/detector.rb +18 -10
  79. data/lib/karafka/pro/routing/features/periodic_job/config.rb +37 -0
  80. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +44 -0
  81. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +94 -0
  82. data/lib/karafka/pro/routing/features/periodic_job.rb +27 -0
  83. data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +1 -0
  84. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +1 -0
  85. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +7 -2
  86. data/lib/karafka/process.rb +5 -3
  87. data/lib/karafka/processing/coordinator.rb +5 -1
  88. data/lib/karafka/processing/executor.rb +43 -13
  89. data/lib/karafka/processing/executors_buffer.rb +22 -7
  90. data/lib/karafka/processing/jobs/base.rb +19 -2
  91. data/lib/karafka/processing/jobs/consume.rb +3 -3
  92. data/lib/karafka/processing/jobs/idle.rb +5 -0
  93. data/lib/karafka/processing/jobs/revoked.rb +5 -0
  94. data/lib/karafka/processing/jobs/shutdown.rb +5 -0
  95. data/lib/karafka/processing/jobs_queue.rb +19 -8
  96. data/lib/karafka/processing/schedulers/default.rb +42 -0
  97. data/lib/karafka/processing/strategies/base.rb +13 -4
  98. data/lib/karafka/processing/strategies/default.rb +23 -7
  99. data/lib/karafka/processing/strategies/dlq.rb +36 -0
  100. data/lib/karafka/processing/worker.rb +4 -1
  101. data/lib/karafka/routing/builder.rb +12 -2
  102. data/lib/karafka/routing/consumer_group.rb +5 -5
  103. data/lib/karafka/routing/features/base.rb +44 -8
  104. data/lib/karafka/routing/features/dead_letter_queue/config.rb +6 -1
  105. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -0
  106. data/lib/karafka/routing/features/dead_letter_queue/topic.rb +9 -2
  107. data/lib/karafka/routing/proxy.rb +4 -3
  108. data/lib/karafka/routing/subscription_group.rb +2 -2
  109. data/lib/karafka/routing/subscription_groups_builder.rb +11 -2
  110. data/lib/karafka/routing/topic.rb +8 -10
  111. data/lib/karafka/routing/topics.rb +1 -1
  112. data/lib/karafka/runner.rb +13 -3
  113. data/lib/karafka/server.rb +5 -9
  114. data/lib/karafka/setup/config.rb +21 -1
  115. data/lib/karafka/status.rb +23 -14
  116. data/lib/karafka/templates/karafka.rb.erb +7 -0
  117. data/lib/karafka/time_trackers/partition_usage.rb +56 -0
  118. data/lib/karafka/version.rb +1 -1
  119. data.tar.gz.sig +0 -0
  120. metadata +47 -13
  121. metadata.gz.sig +0 -0
  122. data/lib/karafka/connection/consumer_group_coordinator.rb +0 -48
  123. data/lib/karafka/pro/performance_tracker.rb +0 -84
  124. data/lib/karafka/pro/processing/scheduler.rb +0 -74
  125. data/lib/karafka/processing/scheduler.rb +0 -38
data/lib/karafka/routing/features/dead_letter_queue/topic.rb CHANGED
@@ -14,12 +14,19 @@ module Karafka
       # @param max_retries [Integer] after how many retries should we move data to dlq
       # @param topic [String, false] where the messages should be moved if failing or false
       #   if we do not want to move it anywhere and just skip
+      # @param independent [Boolean] needs to be true in order for each marking as consumed
+      #   in a retry flow to reset the errors counter
       # @return [Config] defined config
-      def dead_letter_queue(max_retries: DEFAULT_MAX_RETRIES, topic: nil)
+      def dead_letter_queue(
+        max_retries: DEFAULT_MAX_RETRIES,
+        topic: nil,
+        independent: false
+      )
         @dead_letter_queue ||= Config.new(
           active: !topic.nil?,
           max_retries: max_retries,
-          topic: topic
+          topic: topic,
+          independent: independent
         )
       end
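For context, a minimal routing sketch using the extended DSL; the `orders` topic and `OrdersConsumer` are hypothetical, and `independent: true` opts into the per-marking error counter reset described in the new docs above:

class KarafkaApp < Karafka::App
  routes.draw do
    topic :orders do
      consumer OrdersConsumer
      # After 3 failed attempts the message goes to 'orders_dlq'; with independent: true,
      # each marking as consumed during a retry flow resets the errors counter.
      dead_letter_queue(
        topic: 'orders_dlq',
        max_retries: 3,
        independent: true
      )
    end
  end
end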
data/lib/karafka/routing/proxy.rb CHANGED
@@ -10,11 +10,12 @@ module Karafka
     # @param target [Object] target object to which we proxy any DSL call
     # @param defaults [Proc] defaults for target that should be applicable after the proper
     #   proxy context (if needed)
-    # @param block [Proc] block that we want to evaluate in the proxy context
+    # @param block [Proc, nil] block that we want to evaluate in the proxy context or nil if no
+    #   proxy block context for example because whole context is taken from defaults
     def initialize(target, defaults = ->(_) {}, &block)
       @target = target
-      instance_eval(&block)
-      instance_eval(&defaults)
+      instance_eval(&block) if block
+      instance_eval(&defaults) if defaults
     end

     # Ruby 2.7.0 to 2.7.2 do not have arg forwarding, so we fallback to the old way
data/lib/karafka/routing/subscription_group.rb CHANGED
@@ -37,7 +37,8 @@ module Karafka
     # @param topics [Karafka::Routing::Topics] all the topics that share the same key settings
     # @return [SubscriptionGroup] built subscription group
     def initialize(position, topics)
-      @name = topics.first.subscription_group_name
+      @details = topics.first.subscription_group_details
+      @name = @details.fetch(:name)
       @consumer_group = topics.first.consumer_group
       # We include the consumer group id here because we want to have unique ids of subscription
       # groups across the system. Otherwise user could set the same name for multiple
@@ -47,7 +48,6 @@ module Karafka
       @position = position
       @topics = topics
       @kafka = build_kafka
-      freeze
     end

     # @return [String] consumer group id
data/lib/karafka/routing/subscription_groups_builder.rb CHANGED
@@ -19,7 +19,7 @@ module Karafka
         max_messages
         max_wait_time
         initial_offset
-        subscription_group_name
+        subscription_group_details
       ].freeze

       private_constant :DISTRIBUTION_KEYS
@@ -37,7 +37,7 @@ module Karafka
           .group_by(&:first)
           .values
           .map { |value| value.map(&:last) }
-          .map { |topics_array| Routing::Topics.new(topics_array) }
+          .flat_map { |value| expand(value) }
           .map { |grouped_topics| SubscriptionGroup.new(@position += 1, grouped_topics) }
           .tap do |subscription_groups|
             subscription_groups.each do |subscription_group|
@@ -60,6 +60,15 @@ module Karafka

         accu.hash
       end
+
+      # Hook for optional expansion of groups based on subscription group features
+      #
+      # @param topics_array [Array<Routing::Topic>] group of topics that have the same settings
+      #   and can use the same connection
+      # @return [Array<Array<Routing::Topics>>] expanded groups
+      def expand(topics_array)
+        [Routing::Topics.new(topics_array)]
+      end
     end
   end
 end
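The new `expand` hook is the seam that subscription-group-level features (such as the Pro multiplexing files listed above) can override to turn one settings group into several connections. A purely illustrative sketch of tapping into it (the `ExpansionLogger` module is hypothetical, not part of the gem):

module ExpansionLogger
  # Wraps the default expansion and reports how many groups were built for a set of topics
  def expand(topics_array)
    groups = super
    puts "Built #{groups.size} group(s) for: #{topics_array.map(&:name).join(', ')}"
    groups
  end
end

Karafka::Routing::SubscriptionGroupsBuilder.prepend(ExpansionLogger)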
data/lib/karafka/routing/topic.rb CHANGED
@@ -9,7 +9,7 @@ module Karafka
     attr_reader :id, :name, :consumer_group
     attr_writer :consumer

-    attr_accessor :subscription_group_name
+    attr_accessor :subscription_group_details

     # Full subscription group reference can be built only when we have knowledge about the
     # whole routing tree, this is why it is going to be set later on
@@ -46,15 +46,13 @@ module Karafka
     INHERITABLE_ATTRIBUTES.each do |attribute|
       attr_writer attribute

-      define_method attribute do
-        current_value = instance_variable_get(:"@#{attribute}")
+      class_eval <<~RUBY, __FILE__, __LINE__ + 1
+        def #{attribute}
+          return @#{attribute} unless @#{attribute}.nil?

-        return current_value unless current_value.nil?
-
-        value = Karafka::App.config.send(attribute)
-
-        instance_variable_set(:"@#{attribute}", value)
-      end
+          @#{attribute} = Karafka::App.config.send(:#{attribute})
+        end
+      RUBY
     end

     # @return [String] name of subscription that will go to librdkafka
@@ -117,7 +115,7 @@ module Karafka
         active: active?,
         consumer: consumer,
         consumer_group_id: consumer_group.id,
-        subscription_group_name: subscription_group_name
+        subscription_group_details: subscription_group_details
       ).freeze
     end
   end
data/lib/karafka/routing/topics.rb CHANGED
@@ -9,7 +9,7 @@ module Karafka
     include Enumerable
     extend Forwardable

-    def_delegators :@accumulator, :[], :size, :empty?, :last, :<<
+    def_delegators :@accumulator, :[], :size, :empty?, :last, :<<, :map!, :sort_by!, :reverse!

     # @param topics_array [Array<Karafka::Routing::Topic>] array with topics
     def initialize(topics_array)
data/lib/karafka/runner.rb CHANGED
@@ -3,6 +3,11 @@
 module Karafka
   # Class used to run the Karafka listeners in separate threads
   class Runner
+    def initialize
+      @manager = App.config.internal.connection.manager
+      @conductor = App.config.internal.connection.conductor
+    end
+
     # Starts listening on all the listeners asynchronously and handles the jobs queue closing
     # after listeners are done with their work.
     def call
@@ -13,16 +18,21 @@ module Karafka
       workers = Processing::WorkersBatch.new(jobs_queue)
       listeners = Connection::ListenersBatch.new(jobs_queue)

+      # Register all the listeners so they can be started and managed
+      @manager.register(listeners)
+
       workers.each(&:async_call)
-      listeners.each(&:async_call)

       # We aggregate threads here for a supervised shutdown process
       Karafka::Server.workers = workers
       Karafka::Server.listeners = listeners
       Karafka::Server.jobs_queue = jobs_queue

-      # All the listener threads need to finish
-      listeners.each(&:join)
+      until @manager.done?
+        @conductor.wait
+
+        @manager.control
+      end

       # We close the jobs queue only when no listener threads are working.
       # This ensures, that everything was closed prior to us not accepting anymore jobs and that
data/lib/karafka/server.rb CHANGED
@@ -88,7 +88,10 @@ module Karafka
       # their work and if so, we can just return and normal shutdown process will take place
       # We divide it by 1000 because we use time in ms.
       ((timeout / 1_000) * SUPERVISION_CHECK_FACTOR).to_i.times do
-        return if listeners.count(&:alive?).zero? && workers.count(&:alive?).zero?
+        all_listeners_stopped = listeners.all?(&:stopped?)
+        all_workers_stopped = workers.none?(&:alive?)
+
+        return if all_listeners_stopped && all_workers_stopped

         sleep SUPERVISION_SLEEP
       end
@@ -104,7 +107,7 @@ module Karafka

       # We're done waiting, lets kill them!
       workers.each(&:terminate)
-      listeners.each(&:terminate)
+      listeners.active.each(&:terminate)
       # We always need to shutdown clients to make sure we do not force the GC to close consumer.
       # This can cause memory leaks and crashes.
       listeners.each(&:shutdown)
@@ -137,13 +140,6 @@ module Karafka
       # We don't have to safe-guard it with check states as the state transitions work only
       # in one direction
       Karafka::App.quiet!
-
-      # We need one more thread to monitor the process and move to quieted once everything
-      # is quiet and no processing is happening anymore
-      Thread.new do
-        sleep(0.1) until listeners.coordinators.all?(&:finished?)
-        Karafka::App.quieted!
-      end
     end

     private
data/lib/karafka/setup/config.rb CHANGED
@@ -184,8 +184,23 @@ module Karafka

       # Namespace for internal connection related settings
       setting :connection do
+        # Manages starting up and stopping Kafka connections
+        setting :manager, default: Connection::Manager.new
+        # Controls frequency of connections management checks
+        setting :conductor, default: Connection::Conductor.new
+
         # Settings that are altered by our client proxy layer
         setting :proxy do
+          # Committed offsets for given CG query
+          setting :committed do
+            # timeout for this request. For busy or remote clusters, this should be high enough
+            setting :timeout, default: 5_000
+            # How many times should we try to run this call before raising an error
+            setting :max_attempts, default: 3
+            # How long should we wait before next attempt in case of a failure
+            setting :wait_time, default: 1_000
+          end
+
           # Watermark offsets request settings
           setting :query_watermark_offsets do
             # timeout for this request. For busy or remote clusters, this should be high enough
@@ -211,7 +226,7 @@ module Karafka
       setting :processing do
         setting :jobs_queue_class, default: Processing::JobsQueue
         # option scheduler [Object] scheduler we will be using
-        setting :scheduler_class, default: Processing::Scheduler
+        setting :scheduler_class, default: Processing::Schedulers::Default
         # option jobs_builder [Object] jobs builder we want to use
         setting :jobs_builder, default: Processing::JobsBuilder.new
         # option coordinator [Class] work coordinator we want to user for processing coordination
@@ -222,6 +237,8 @@ module Karafka
         setting :strategy_selector, default: Processing::StrategySelector.new
         # option expansions_selector [Object] processing expansions selector to be used
         setting :expansions_selector, default: Processing::ExpansionsSelector.new
+        # option [Class] executor class
+        setting :executor_class, default: Processing::Executor
       end

       # Things related to operating on messages
@@ -278,6 +295,9 @@ module Karafka
         # are also configured
         Pro::Loader.post_setup_all(config) if Karafka.pro?

+        # Subscribe the assignments tracker so we can always query all current assignments
+        config.monitor.subscribe(Instrumentation::AssignmentsTracker.instance)
+
         Karafka::App.initialized!
       end
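A sketch of how the new committed-offsets proxy settings could be tuned during setup; the values below are illustrative, not recommendations:

class KarafkaApp < Karafka::App
  setup do |config|
    config.kafka = { 'bootstrap.servers': '127.0.0.1:9092' }

    # Raise the committed offsets query limits for busy or remote clusters (illustrative values)
    config.internal.connection.proxy.committed.timeout = 10_000
    config.internal.connection.proxy.committed.max_attempts = 5
    config.internal.connection.proxy.committed.wait_time = 2_000
  end
end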
data/lib/karafka/status.rb CHANGED
@@ -42,25 +42,34 @@ module Karafka
     end

     STATES.each do |state, transition|
-      define_method :"#{state}?" do
-        @status == state
-      end
+      class_eval <<~RUBY, __FILE__, __LINE__ + 1
+        def #{state}?
+          @status == :#{state}
+        end
+
+        def #{transition}
+          MUTEX.synchronize do
+            # Do not allow reverse state transitions (we always go one way) or transition to the same
+            # state as currently
+            return if @status && STATES.keys.index(:#{state}) <= STATES.keys.index(@status)

-      define_method transition do
-        MUTEX.synchronize do
-          # Do not allow reverse state transitions (we always go one way) or transition to the same
-          # state as currently
-          return if @status && STATES.keys.index(state) <= STATES.keys.index(@status)
+            @status = :#{state}

-          @status = state
+            # Skip on creation (initializing)
+            # We skip as during this state we do not have yet a monitor
+            return if initializing?

-          # Skip on creation (initializing)
-          # We skip as during this state we do not have yet a monitor
-          return if initializing?
+            # We do not set conductor in the initializer because this status object is created
+            # before the configuration kicks in
+            # We need to signal conductor on each state change as those may be relevant to
+            # listeners operations
+            @conductor ||= Karafka::App.config.internal.connection.conductor
+            @conductor.signal

-          Karafka.monitor.instrument("app.#{state}")
+            Karafka.monitor.instrument("app.#{state}")
+          end
         end
-      end
+      RUBY
     end

     # @return [Boolean] true if we are in any of the status that would indicate we should no longer
data/lib/karafka/templates/karafka.rb.erb CHANGED
@@ -68,3 +68,10 @@ class KarafkaApp < Karafka::App
     end
   end
 end
+
+# Karafka now features a Web UI!
+# Visit the setup documentation to get started and enhance your experience.
+#
+# https://karafka.io/docs/Web-UI-Getting-Started
+#
+# Karafka::Web.enable!
data/lib/karafka/time_trackers/partition_usage.rb ADDED
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+module Karafka
+  module TimeTrackers
+    # Tracker used to keep time reference when we last time dispatched any job related to a given
+    # topic partition.
+    #
+    # We can use it to know when last time a job was scheduled
+    #
+    # @note We do not track revocation as on revocation we clear given topic partition reference
+    #   not to have a potential memory leak
+    #
+    # @note We do not track shutdown jobs as shutdown is finishing the process, so no time
+    #   sensitive operations remain that would use this
+    #
+    # @note We consider partition as active if we scheduled any job related to it within the tick
+    #   interval. This has nothing to do whether a partition is assigned.
+    class PartitionUsage < Base
+      # Creates new partition usage time tracker
+      def initialize
+        super
+
+        @last_usage = Hash.new do |topics_hash, topic_name|
+          topics_hash[topic_name] = Hash.new do |partitions_hash, partition_id|
+            partitions_hash[partition_id] = 0
+          end
+        end
+      end
+
+      # @param topic [String]
+      # @param partition [Integer]
+      # @param interval [Integer] minimum interval
+      # @return [Boolean] was this topic partition active
+      def active?(topic, partition, interval)
+        monotonic_now - @last_usage[topic][partition] < interval
+      end
+
+      # Marks usage of given partition
+      #
+      # @param topic [String]
+      # @param partition [Integer]
+      def track(topic, partition)
+        @last_usage[topic][partition] = monotonic_now
+      end
+
+      # Clears references about given partition. Useful on revocation so we do not store old
+      #   unassigned partitions data
+      #
+      # @param topic [String]
+      # @param partition [Integer]
+      def revoke(topic, partition)
+        @last_usage[topic].delete(partition)
+      end
+    end
+  end
+end
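A minimal usage sketch of the new tracker, assuming it can be instantiated standalone and using a hypothetical 'events' topic, partition 0 and a 5-second interval:

tracker = Karafka::TimeTrackers::PartitionUsage.new

tracker.track('events', 0)           # record that a job was dispatched for events/0
tracker.active?('events', 0, 5_000)  # => true while the last dispatch was within 5s
tracker.revoke('events', 0)          # drop the reference once the partition is revoked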
data/lib/karafka/version.rb CHANGED
@@ -3,5 +3,5 @@
 # Main module namespace
 module Karafka
   # Current Karafka version
-  VERSION = '2.2.13'
+  VERSION = '2.3.0.alpha1'
 end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: karafka
 version: !ruby/object:Gem::Version
-  version: 2.2.13
+  version: 2.3.0.alpha1
 platform: ruby
 authors:
 - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
 AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
 msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
 -----END CERTIFICATE-----
-date: 2023-11-17 00:00:00.000000000 Z
+date: 2024-01-15 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: karafka-core
@@ -43,27 +43,27 @@ dependencies:
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 2.2.7
+      version: 2.3.0.alpha1
   - - "<"
     - !ruby/object:Gem::Version
-      version: 2.3.0
+      version: 2.4.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 2.2.7
+      version: 2.3.0.alpha1
   - - "<"
     - !ruby/object:Gem::Version
-      version: 2.3.0
+      version: 2.4.0
 - !ruby/object:Gem::Dependency
   name: waterdrop
   requirement: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 2.6.11
+      version: 2.6.12
   - - "<"
     - !ruby/object:Gem::Version
       version: 3.0.0
@@ -73,7 +73,7 @@ dependencies:
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 2.6.11
+      version: 2.6.12
   - - "<"
     - !ruby/object:Gem::Version
       version: 3.0.0
@@ -123,6 +123,7 @@ files:
 - LICENSE-COMM
 - LICENSE-LGPL
 - README.md
+- SECURITY.md
 - bin/benchmarks
 - bin/create_token
 - bin/integrations
@@ -151,6 +152,7 @@ files:
 - lib/karafka/active_job/job_extensions.rb
 - lib/karafka/active_job/job_options_contract.rb
 - lib/karafka/admin.rb
+- lib/karafka/admin/acl.rb
 - lib/karafka/app.rb
 - lib/karafka/base_consumer.rb
 - lib/karafka/cli.rb
@@ -162,14 +164,16 @@ files:
 - lib/karafka/cli/server.rb
 - lib/karafka/cli/topics.rb
 - lib/karafka/connection/client.rb
-- lib/karafka/connection/consumer_group_coordinator.rb
+- lib/karafka/connection/conductor.rb
 - lib/karafka/connection/listener.rb
 - lib/karafka/connection/listeners_batch.rb
+- lib/karafka/connection/manager.rb
 - lib/karafka/connection/messages_buffer.rb
 - lib/karafka/connection/pauses_manager.rb
 - lib/karafka/connection/proxy.rb
 - lib/karafka/connection/raw_messages_buffer.rb
 - lib/karafka/connection/rebalance_manager.rb
+- lib/karafka/connection/status.rb
 - lib/karafka/contracts.rb
 - lib/karafka/contracts/base.rb
 - lib/karafka/contracts/config.rb
@@ -183,6 +187,7 @@ files:
 - lib/karafka/helpers/colorize.rb
 - lib/karafka/helpers/interval_runner.rb
 - lib/karafka/helpers/multi_delegator.rb
+- lib/karafka/instrumentation/assignments_tracker.rb
 - lib/karafka/instrumentation/callbacks/error.rb
 - lib/karafka/instrumentation/callbacks/rebalance.rb
 - lib/karafka/instrumentation/callbacks/statistics.rb
@@ -216,10 +221,13 @@ files:
 - lib/karafka/pro/active_job/consumer.rb
 - lib/karafka/pro/active_job/dispatcher.rb
 - lib/karafka/pro/active_job/job_options_contract.rb
+- lib/karafka/pro/base_consumer.rb
 - lib/karafka/pro/cleaner.rb
 - lib/karafka/pro/cleaner/errors.rb
 - lib/karafka/pro/cleaner/messages/message.rb
 - lib/karafka/pro/cleaner/messages/messages.rb
+- lib/karafka/pro/connection/manager.rb
+- lib/karafka/pro/connection/multiplexing/listener.rb
 - lib/karafka/pro/contracts/base.rb
 - lib/karafka/pro/contracts/server_cli_options.rb
 - lib/karafka/pro/encryption.rb
@@ -229,13 +237,15 @@ files:
 - lib/karafka/pro/encryption/messages/middleware.rb
 - lib/karafka/pro/encryption/messages/parser.rb
 - lib/karafka/pro/encryption/setup/config.rb
+- lib/karafka/pro/instrumentation/performance_tracker.rb
 - lib/karafka/pro/iterator.rb
 - lib/karafka/pro/iterator/expander.rb
 - lib/karafka/pro/iterator/tpl_builder.rb
 - lib/karafka/pro/loader.rb
-- lib/karafka/pro/performance_tracker.rb
 - lib/karafka/pro/processing/collapser.rb
 - lib/karafka/pro/processing/coordinator.rb
+- lib/karafka/pro/processing/executor.rb
+- lib/karafka/pro/processing/expansions_selector.rb
 - lib/karafka/pro/processing/filters/base.rb
 - lib/karafka/pro/processing/filters/delayer.rb
 - lib/karafka/pro/processing/filters/expirer.rb
@@ -244,11 +254,17 @@ files:
 - lib/karafka/pro/processing/filters/virtual_limiter.rb
 - lib/karafka/pro/processing/filters_applier.rb
 - lib/karafka/pro/processing/jobs/consume_non_blocking.rb
+- lib/karafka/pro/processing/jobs/periodic.rb
+- lib/karafka/pro/processing/jobs/periodic_non_blocking.rb
 - lib/karafka/pro/processing/jobs/revoked_non_blocking.rb
 - lib/karafka/pro/processing/jobs_builder.rb
 - lib/karafka/pro/processing/jobs_queue.rb
+- lib/karafka/pro/processing/offset_metadata/consumer.rb
+- lib/karafka/pro/processing/offset_metadata/fetcher.rb
+- lib/karafka/pro/processing/offset_metadata/listener.rb
 - lib/karafka/pro/processing/partitioner.rb
-- lib/karafka/pro/processing/scheduler.rb
+- lib/karafka/pro/processing/schedulers/base.rb
+- lib/karafka/pro/processing/schedulers/default.rb
 - lib/karafka/pro/processing/strategies.rb
 - lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb
 - lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb
@@ -326,6 +342,19 @@ files:
 - lib/karafka/pro/routing/features/long_running_job/config.rb
 - lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb
 - lib/karafka/pro/routing/features/long_running_job/topic.rb
+- lib/karafka/pro/routing/features/multiplexing.rb
+- lib/karafka/pro/routing/features/multiplexing/config.rb
+- lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb
+- lib/karafka/pro/routing/features/multiplexing/patches/contracts/consumer_group.rb
+- lib/karafka/pro/routing/features/multiplexing/proxy.rb
+- lib/karafka/pro/routing/features/multiplexing/subscription_group.rb
+- lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb
+- lib/karafka/pro/routing/features/non_blocking_job.rb
+- lib/karafka/pro/routing/features/non_blocking_job/topic.rb
+- lib/karafka/pro/routing/features/offset_metadata.rb
+- lib/karafka/pro/routing/features/offset_metadata/config.rb
+- lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb
+- lib/karafka/pro/routing/features/offset_metadata/topic.rb
 - lib/karafka/pro/routing/features/patterns.rb
 - lib/karafka/pro/routing/features/patterns/builder.rb
 - lib/karafka/pro/routing/features/patterns/config.rb
@@ -341,6 +370,10 @@ files:
 - lib/karafka/pro/routing/features/pausing.rb
 - lib/karafka/pro/routing/features/pausing/contracts/topic.rb
 - lib/karafka/pro/routing/features/pausing/topic.rb
+- lib/karafka/pro/routing/features/periodic_job.rb
+- lib/karafka/pro/routing/features/periodic_job/config.rb
+- lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb
+- lib/karafka/pro/routing/features/periodic_job/topic.rb
 - lib/karafka/pro/routing/features/throttling.rb
 - lib/karafka/pro/routing/features/throttling/config.rb
 - lib/karafka/pro/routing/features/throttling/contracts/topic.rb
@@ -367,7 +400,7 @@ files:
 - lib/karafka/processing/jobs_queue.rb
 - lib/karafka/processing/partitioner.rb
 - lib/karafka/processing/result.rb
-- lib/karafka/processing/scheduler.rb
+- lib/karafka/processing/schedulers/default.rb
 - lib/karafka/processing/strategies/aj_dlq_mom.rb
 - lib/karafka/processing/strategies/aj_mom.rb
 - lib/karafka/processing/strategies/base.rb
@@ -425,6 +458,7 @@ files:
 - lib/karafka/templates/example_consumer.rb.erb
 - lib/karafka/templates/karafka.rb.erb
 - lib/karafka/time_trackers/base.rb
+- lib/karafka/time_trackers/partition_usage.rb
 - lib/karafka/time_trackers/pause.rb
 - lib/karafka/time_trackers/poll.rb
 - lib/karafka/version.rb
@@ -457,7 +491,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.4.19
+rubygems_version: 3.5.3
 signing_key:
 specification_version: 4
 summary: Karafka is Ruby and Rails efficient Kafka processing framework.
metadata.gz.sig CHANGED
Binary file
data/lib/karafka/connection/consumer_group_coordinator.rb DELETED
@@ -1,48 +0,0 @@
-# frozen_string_literal: true
-
-module Karafka
-  module Connection
-    # This object represents a collective status of execution of group of listeners running inside
-    # of one consumer group but in separate subscription groups.
-    #
-    # There are cases when we do not want to close a given client when others from the same
-    # consumer group are running because it can cause instabilities due to early shutdown of some
-    # of the clients out of same consumer group.
-    #
-    # We also want to make sure, we close one consumer at a time while others can continue polling.
-    #
-    # This prevents a scenario, where a rebalance is not acknowledged and we loose assignment
-    # without having a chance to commit changes.
-    class ConsumerGroupCoordinator
-      # @param group_size [Integer] number of separate subscription groups in a consumer group
-      def initialize(group_size)
-        @shutdown_mutex = Mutex.new
-        @group_size = group_size
-        @finished = Set.new
-      end
-
-      # @return [Boolean] true if all the subscription groups from a given consumer group are
-      #   finished
-      def finished?
-        @finished.size == @group_size
-      end
-
-      # @return [Boolean] can we start shutdown on a given listener
-      # @note If true, will also obtain a lock so no-one else will be closing the same time we do
-      def shutdown?
-        finished? && @shutdown_mutex.try_lock
-      end
-
-      # Unlocks the shutdown lock
-      def unlock
-        @shutdown_mutex.unlock if @shutdown_mutex.owned?
-      end
-
-      # Marks given listener as finished
-      # @param listener_id [String]
-      def finish_work(listener_id)
-        @finished << listener_id
-      end
-    end
-  end
-end