karafka 2.4.18 → 2.5.0.beta1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/CODEOWNERS +3 -0
  4. data/.github/workflows/ci.yml +58 -14
  5. data/.github/workflows/verify-action-pins.yml +16 -0
  6. data/.ruby-version +1 -1
  7. data/CHANGELOG.md +53 -0
  8. data/Gemfile +3 -3
  9. data/Gemfile.lock +55 -58
  10. data/LICENSE-COMM +2 -2
  11. data/bin/clean_kafka +43 -0
  12. data/bin/integrations +17 -5
  13. data/bin/rspecs +15 -3
  14. data/bin/verify_kafka_warnings +35 -0
  15. data/bin/verify_topics_naming +27 -0
  16. data/config/locales/errors.yml +3 -0
  17. data/config/locales/pro_errors.yml +13 -2
  18. data/docker-compose.yml +1 -1
  19. data/examples/payloads/json/enrollment_event.json +579 -0
  20. data/examples/payloads/json/ingestion_event.json +30 -0
  21. data/examples/payloads/json/transaction_event.json +17 -0
  22. data/examples/payloads/json/user_event.json +11 -0
  23. data/karafka.gemspec +3 -3
  24. data/lib/karafka/active_job/current_attributes.rb +1 -1
  25. data/lib/karafka/admin/acl.rb +5 -1
  26. data/lib/karafka/admin.rb +51 -19
  27. data/lib/karafka/base_consumer.rb +17 -8
  28. data/lib/karafka/cli/base.rb +8 -2
  29. data/lib/karafka/connection/client.rb +20 -7
  30. data/lib/karafka/connection/listener.rb +24 -12
  31. data/lib/karafka/connection/messages_buffer.rb +1 -1
  32. data/lib/karafka/connection/proxy.rb +3 -0
  33. data/lib/karafka/contracts/config.rb +3 -0
  34. data/lib/karafka/contracts/topic.rb +1 -1
  35. data/lib/karafka/errors.rb +11 -0
  36. data/lib/karafka/helpers/async.rb +3 -1
  37. data/lib/karafka/instrumentation/callbacks/rebalance.rb +5 -1
  38. data/lib/karafka/instrumentation/logger_listener.rb +86 -23
  39. data/lib/karafka/instrumentation/proctitle_listener.rb +5 -1
  40. data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +2 -2
  41. data/lib/karafka/messages/builders/batch_metadata.rb +1 -1
  42. data/lib/karafka/pro/cleaner.rb +8 -0
  43. data/lib/karafka/pro/cli/parallel_segments/base.rb +89 -0
  44. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +164 -0
  45. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +164 -0
  46. data/lib/karafka/pro/cli/parallel_segments.rb +60 -0
  47. data/lib/karafka/pro/connection/manager.rb +5 -8
  48. data/lib/karafka/pro/encryption.rb +8 -0
  49. data/lib/karafka/pro/instrumentation/performance_tracker.rb +1 -1
  50. data/lib/karafka/pro/iterator/expander.rb +5 -3
  51. data/lib/karafka/pro/iterator/tpl_builder.rb +23 -0
  52. data/lib/karafka/pro/loader.rb +10 -0
  53. data/lib/karafka/pro/processing/coordinator.rb +4 -1
  54. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +27 -3
  55. data/lib/karafka/pro/processing/coordinators/filters_applier.rb +11 -0
  56. data/lib/karafka/pro/processing/filters/base.rb +10 -2
  57. data/lib/karafka/pro/processing/filters/expirer.rb +5 -0
  58. data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +2 -2
  59. data/lib/karafka/pro/processing/filters/virtual_limiter.rb +5 -0
  60. data/lib/karafka/pro/processing/parallel_segments/filters/base.rb +73 -0
  61. data/lib/karafka/pro/processing/parallel_segments/filters/default.rb +85 -0
  62. data/lib/karafka/pro/processing/parallel_segments/filters/mom.rb +66 -0
  63. data/lib/karafka/pro/processing/partitioner.rb +1 -13
  64. data/lib/karafka/pro/processing/piping/consumer.rb +13 -13
  65. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +1 -1
  66. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +1 -1
  67. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +1 -1
  68. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +1 -1
  69. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +1 -1
  70. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +1 -1
  71. data/lib/karafka/pro/processing/strategies/default.rb +36 -8
  72. data/lib/karafka/pro/processing/strategies/dlq/default.rb +14 -10
  73. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +1 -1
  74. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +1 -1
  75. data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +3 -1
  76. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +1 -1
  77. data/lib/karafka/pro/processing/strategies/ftr/default.rb +1 -1
  78. data/lib/karafka/pro/processing/strategies/lrj/default.rb +4 -1
  79. data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +1 -1
  80. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +1 -1
  81. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +1 -1
  82. data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +50 -0
  83. data/lib/karafka/pro/processing/virtual_partitions/distributors/base.rb +29 -0
  84. data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +27 -0
  85. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +8 -4
  86. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +3 -3
  87. data/lib/karafka/pro/recurring_tasks/setup/config.rb +7 -2
  88. data/lib/karafka/pro/recurring_tasks.rb +13 -0
  89. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +1 -1
  90. data/lib/karafka/pro/routing/features/multiplexing/config.rb +1 -0
  91. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +17 -0
  92. data/lib/karafka/pro/routing/features/multiplexing/proxy.rb +5 -2
  93. data/lib/karafka/pro/routing/features/multiplexing/subscription_group.rb +8 -1
  94. data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +47 -0
  95. data/lib/karafka/pro/routing/features/parallel_segments/config.rb +27 -0
  96. data/lib/karafka/pro/routing/features/parallel_segments/consumer_group.rb +83 -0
  97. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +49 -0
  98. data/lib/karafka/pro/routing/features/parallel_segments/topic.rb +43 -0
  99. data/lib/karafka/pro/routing/features/parallel_segments.rb +24 -0
  100. data/lib/karafka/pro/routing/features/patterns/pattern.rb +1 -1
  101. data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +2 -2
  102. data/lib/karafka/pro/routing/features/scheduled_messages/builder.rb +10 -6
  103. data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +20 -2
  104. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +1 -0
  105. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +8 -2
  106. data/lib/karafka/pro/scheduled_messages/consumer.rb +14 -15
  107. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +9 -6
  108. data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +7 -1
  109. data/lib/karafka/pro/scheduled_messages/max_epoch.rb +15 -6
  110. data/lib/karafka/pro/scheduled_messages.rb +13 -0
  111. data/lib/karafka/processing/coordinators_buffer.rb +1 -0
  112. data/lib/karafka/processing/strategies/default.rb +4 -4
  113. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -0
  114. data/lib/karafka/routing/subscription_group.rb +1 -1
  115. data/lib/karafka/runner.rb +7 -1
  116. data/lib/karafka/server.rb +5 -0
  117. data/lib/karafka/setup/attributes_map.rb +2 -0
  118. data/lib/karafka/setup/config.rb +22 -1
  119. data/lib/karafka/setup/defaults_injector.rb +26 -1
  120. data/lib/karafka/status.rb +6 -1
  121. data/lib/karafka/swarm/node.rb +31 -0
  122. data/lib/karafka/swarm/supervisor.rb +4 -0
  123. data/lib/karafka/templates/karafka.rb.erb +14 -1
  124. data/lib/karafka/version.rb +1 -1
  125. data/lib/karafka.rb +17 -9
  126. data/renovate.json +14 -2
  127. data.tar.gz.sig +0 -0
  128. metadata +36 -11
  129. metadata.gz.sig +0 -0
@@ -22,6 +22,7 @@ module Karafka
22
22
 
23
23
  # @param topic_name [String] topic name
24
24
  # @param partition [Integer] partition number
25
+ # @return [Karafka::Processing::Coordinator] found or created coordinator
25
26
  def find_or_create(topic_name, partition)
26
27
  @coordinators[topic_name][partition] ||= begin
27
28
  routing_topic = @topics.find(topic_name)
@@ -55,8 +55,8 @@ module Karafka
55
55
  # seek offset can be nil only in case `#seek` was invoked with offset reset request
56
56
  # In case like this we ignore marking
57
57
  return true if seek_offset.nil?
58
- # Ignore earlier offsets than the one we already committed
59
- return true if seek_offset > message.offset
58
+ # Ignore double markings of the same offset
59
+ return true if (seek_offset - 1) == message.offset
60
60
  return false if revoked?
61
61
  return revoked? unless client.mark_as_consumed(message)
62
62
 
@@ -74,8 +74,8 @@ module Karafka
74
74
  # seek offset can be nil only in case `#seek` was invoked with offset reset request
75
75
  # In case like this we ignore marking
76
76
  return true if seek_offset.nil?
77
- # Ignore earlier offsets than the one we already committed
78
- return true if seek_offset > message.offset
77
+ # Ignore double markings of the same offset
78
+ return true if (seek_offset - 1) == message.offset
79
79
  return false if revoked?
80
80
 
81
81
  return revoked? unless client.mark_as_consumed!(message)
@@ -46,6 +46,7 @@ module Karafka
46
46
  # When topic is set to false, it means we just want to skip dispatch on DLQ
47
47
  next if topic == false
48
48
  next if topic.is_a?(String) && topic_regexp.match?(topic)
49
+ next if topic == :strategy
49
50
 
50
51
  [[%i[dead_letter_queue topic], :format]]
51
52
  end
@@ -30,7 +30,7 @@ module Karafka
30
30
  @group_counter ||= 0
31
31
  @group_counter += 1
32
32
 
33
- ::Digest::MD5.hexdigest(
33
+ ::Digest::SHA256.hexdigest(
34
34
  @group_counter.to_s
35
35
  )[0..11]
36
36
  end
@@ -4,6 +4,7 @@ module Karafka
4
4
  # Class used to run the Karafka listeners in separate threads
5
5
  class Runner
6
6
  include Helpers::ConfigImporter.new(
7
+ worker_thread_priority: %i[worker_thread_priority],
7
8
  manager: %i[internal connection manager],
8
9
  conductor: %i[internal connection conductor],
9
10
  jobs_queue_class: %i[internal processing jobs_queue_class]
@@ -26,7 +27,12 @@ module Karafka
26
27
  # Register all the listeners so they can be started and managed
27
28
  manager.register(listeners)
28
29
 
29
- workers.each_with_index { |worker, i| worker.async_call("karafka.worker##{i}") }
30
+ workers.each_with_index do |worker, i|
31
+ worker.async_call(
32
+ "karafka.worker##{i}",
33
+ worker_thread_priority
34
+ )
35
+ end
30
36
 
31
37
  # We aggregate threads here for a supervised shutdown process
32
38
  Karafka::Server.workers = workers
@@ -30,6 +30,9 @@ module Karafka
30
30
  # as not everything is possible when operating in non-standalone mode, etc.
31
31
  attr_accessor :execution_mode
32
32
 
33
+ # id of the server. Useful for logging when we want to reference things issued by the server.
34
+ attr_accessor :id
35
+
33
36
  # Method which runs app
34
37
  def run
35
38
  self.listeners = []
@@ -187,5 +190,7 @@ module Karafka
187
190
  # This is overwritten quickly during boot, but just in case someone would reach it prior to
188
191
  # booting, we want to have the default value.
189
192
  self.execution_mode = :standalone
193
+
194
+ self.id = SecureRandom.hex(6)
190
195
  end
191
196
  end
@@ -73,6 +73,7 @@ module Karafka
73
73
  message.max.bytes
74
74
  metadata.broker.list
75
75
  metadata.max.age.ms
76
+ metadata.recovery.strategy
76
77
  oauthbearer_token_refresh_cb
77
78
  offset.store.method
78
79
  offset.store.path
@@ -207,6 +208,7 @@ module Karafka
207
208
  message.timeout.ms
208
209
  metadata.broker.list
209
210
  metadata.max.age.ms
211
+ metadata.recovery.strategy
210
212
  msg_order_cmp
211
213
  oauthbearer_token_refresh_cb
212
214
  opaque
@@ -73,6 +73,9 @@ module Karafka
73
73
  # Really useful when you want to ensure that all topics in routing are managed via
74
74
  # declaratives.
75
75
  setting :strict_declarative_topics, default: false
76
+ # Defaults to the CPU thread priority slice to -1 (50ms) to ensure that CPU intense
77
+ # processing does not affect other threads and prevents starvation
78
+ setting :worker_thread_priority, default: -1
76
79
 
77
80
  setting :oauth do
78
81
  # option [false, #call] Listener for using oauth bearer. This listener will be able to
@@ -133,6 +136,14 @@ module Karafka
133
136
  # How many times should be try. 1 000 ms x 60 => 60 seconds wait in total and then we give
134
137
  # up on pending operations
135
138
  setting :max_attempts, default: 60
139
+
140
+ # option poll_timeout [Integer] time in ms
141
+ # How long should a poll wait before yielding on no results (rdkafka-ruby setting)
142
+ # Lower value can be especially useful when working with Web UI, because it allows for
143
+ # increased responsiveness. Many admin operations do not take 100ms but they wait on poll
144
+ # until then prior to finishing, blocking the execution. Lowering to 25 ms can
145
+ # improve responsiveness of the Web UI. 50ms is a good trade-off for admin.
146
+ setting :poll_timeout, default: 50
136
147
  end
137
148
 
138
149
  # Namespace for internal settings that should not be modified directly
@@ -211,6 +222,10 @@ module Karafka
211
222
  # How long should we wait before a critical listener recovery
212
223
  # Too short may cause endless rebalance loops
213
224
  setting :reset_backoff, default: 60_000
225
+ # Similar to the `#worker_thread_priority`. Listener threads do not operate for long
226
+ # time and release GVL on polling but we provide this for API consistency and some
227
+ # special edge cases.
228
+ setting :listener_thread_priority, default: 0
214
229
 
215
230
  # Settings that are altered by our client proxy layer
216
231
  setting :proxy do
@@ -282,6 +297,9 @@ module Karafka
282
297
  setting :jobs_builder, default: Processing::JobsBuilder.new
283
298
  # option coordinator [Class] work coordinator we want to user for processing coordination
284
299
  setting :coordinator_class, default: Processing::Coordinator
300
+ # option errors_tracker_class [Class, nil] errors tracker that is used by the coordinator
301
+ # for granular error tracking. `nil` for OSS as it is not in use.
302
+ setting :errors_tracker_class, default: nil
285
303
  # option partitioner_class [Class] partitioner we use against a batch of data
286
304
  setting :partitioner_class, default: Processing::Partitioner
287
305
  # option strategy_selector [Object] processing strategy selector to be used
@@ -367,7 +385,10 @@ module Karafka
367
385
  config.producer ||= ::WaterDrop::Producer.new do |producer_config|
368
386
  # In some cases WaterDrop updates the config and we don't want our consumer config to
369
387
  # be polluted by those updates, that's why we copy
370
- producer_config.kafka = AttributesMap.producer(config.kafka.dup)
388
+ producer_kafka = AttributesMap.producer(config.kafka.dup)
389
+ # We inject some defaults (mostly for dev) unless user defined them
390
+ Setup::DefaultsInjector.producer(producer_kafka)
391
+ producer_config.kafka = producer_kafka
371
392
  # We also propagate same listener to the default producer to make sure, that the
372
393
  # listener for oauth is also automatically used by the producer. That way we don't
373
394
  # have to configure it manually for the default producer
@@ -36,7 +36,17 @@ module Karafka
36
36
  'topic.metadata.refresh.interval.ms': 5_000
37
37
  }.freeze
38
38
 
39
- private_constant :CONSUMER_KAFKA_DEFAULTS, :CONSUMER_KAFKA_DEV_DEFAULTS
39
+ # Contains settings that should not be used in production but make life easier in dev
40
+ # It is applied only to the default producer. If users setup their own producers, then
41
+ # they have to set this by themselves.
42
+ PRODUCER_KAFKA_DEV_DEFAULTS = {
43
+ # For all of those same reasoning as for the consumer
44
+ 'allow.auto.create.topics': 'true',
45
+ 'topic.metadata.refresh.interval.ms': 5_000
46
+ }.freeze
47
+
48
+ private_constant :CONSUMER_KAFKA_DEFAULTS, :CONSUMER_KAFKA_DEV_DEFAULTS,
49
+ :PRODUCER_KAFKA_DEV_DEFAULTS
40
50
 
41
51
  class << self
42
52
  # Propagates the kafka setting defaults unless they are already present for consumer config
@@ -58,6 +68,21 @@ module Karafka
58
68
  kafka_config[key] = value
59
69
  end
60
70
  end
71
+
72
+ # Propagates the kafka settings defaults unless they are already present for producer
73
+ # config. This makes it easier to set some values that users usually don't change but still
74
+ # allows them to overwrite the whole hash.
75
+ #
76
+ # @param kafka_config [Hash] kafka scoped config
77
+ def producer(kafka_config)
78
+ return if Karafka::App.env.production?
79
+
80
+ PRODUCER_KAFKA_DEV_DEFAULTS.each do |key, value|
81
+ next if kafka_config.key?(key)
82
+
83
+ kafka_config[key] = value
84
+ end
85
+ end
61
86
  end
62
87
  end
63
88
  end
@@ -66,7 +66,12 @@ module Karafka
66
66
  return if initializing?
67
67
 
68
68
  conductor.signal
69
- monitor.instrument("app.#{state}", caller: self)
69
+
70
+ monitor.instrument(
71
+ "app.#{state}",
72
+ caller: self,
73
+ server_id: Karafka::Server.id
74
+ )
70
75
  end
71
76
  end
72
77
  RUBY
@@ -27,6 +27,18 @@ module Karafka
27
27
  # @return [Integer] pid of the node
28
28
  attr_reader :pid
29
29
 
30
+ # When re-creating a producer in the fork, those are not attributes we want to inherit
31
+ # from the parent process because they are updated in the fork. If user wants to take those
32
+ # from the parent process, he should redefine them by overwriting the whole producer.
33
+ SKIPPABLE_NEW_PRODUCER_ATTRIBUTES = %i[
34
+ id
35
+ kafka
36
+ logger
37
+ oauth
38
+ ].freeze
39
+
40
+ private_constant :SKIPPABLE_NEW_PRODUCER_ATTRIBUTES
41
+
30
42
  # @param id [Integer] number of the fork. Used for uniqueness setup for group client ids and
31
43
  # other stuff where we need to know a unique reference of the fork in regards to the rest
32
44
  # of them.
@@ -52,15 +64,32 @@ module Karafka
52
64
  # an attempt to close it when finalized, meaning it would be kept in memory.
53
65
  config.producer.close
54
66
 
67
+ old_producer = config.producer
68
+ old_producer_config = old_producer.config
69
+
55
70
  # Supervisor producer is closed, hence we need a new one here
56
71
  config.producer = ::WaterDrop::Producer.new do |p_config|
57
72
  p_config.kafka = Setup::AttributesMap.producer(kafka.dup)
58
73
  p_config.logger = config.logger
74
+
75
+ old_producer_config.to_h.each do |key, value|
76
+ next if SKIPPABLE_NEW_PRODUCER_ATTRIBUTES.include?(key)
77
+
78
+ p_config.public_send("#{key}=", value)
79
+ end
80
+
81
+ # Namespaced attributes need to be migrated directly on their config node
82
+ old_producer_config.oauth.to_h.each do |key, value|
83
+ p_config.oauth.public_send("#{key}=", value)
84
+ end
59
85
  end
60
86
 
61
87
  @pid = ::Process.pid
62
88
  @reader.close
63
89
 
90
+ # Certain features need to be reconfigured / reinitialized after fork in Pro
91
+ Pro::Loader.post_fork(config, old_producer) if Karafka.pro?
92
+
64
93
  # Indicate we are alive right after start
65
94
  healthy
66
95
 
@@ -69,6 +98,8 @@ module Karafka
69
98
  monitor.instrument('swarm.node.after_fork', caller: self)
70
99
 
71
100
  Karafka::Process.tags.add(:execution_mode, 'mode:swarm')
101
+ Karafka::Process.tags.add(:swarm_nodeid, "node:#{@id}")
102
+
72
103
  Server.execution_mode = :swarm
73
104
  Server.run
74
105
 
@@ -50,6 +50,10 @@ module Karafka
50
50
  # producer (should not be initialized but just in case)
51
51
  Karafka.producer.close
52
52
 
53
+ # Ensure rdkafka stuff is loaded into memory pre-fork. This will ensure, that we save
54
+ # few MB on forking as this will be already in memory.
55
+ Rdkafka::Bindings.rd_kafka_global_init
56
+
53
57
  Karafka::App.warmup
54
58
 
55
59
  manager.start
@@ -25,7 +25,20 @@ APP_LOADER.eager_load
25
25
  class KarafkaApp < Karafka::App
26
26
  setup do |config|
27
27
  config.kafka = { 'bootstrap.servers': '127.0.0.1:9092' }
28
- config.client_id = 'example_app'
28
+ config.client_id = 'YOUR_APP_NAME'
29
+
30
+ # IMPORTANT: Customize this group_id with your application name.
31
+ # The group_id should be unique per application to properly track message consumption.
32
+ # Example: config.group_id = 'inventory_service_consumer'
33
+ #
34
+ # Note: Advanced features and custom routing configurations may define their own consumer
35
+ # groups. These should also be uniquely named per application to avoid conflicts.
36
+ # For the advanced features, subscription groups and consumer groups in your routing
37
+ # configuration, follow the same uniqueness principle.
38
+ #
39
+ # For more details on consumer groups and routing configuration, please refer to the
40
+ # Karafka documentation: https://karafka.io/docs
41
+ config.group_id = 'YOUR_APP_NAME_consumer'
29
42
  <% if rails? -%>
30
43
  # Recreate consumers with each batch. This will allow Rails code reload to work in the
31
44
  # development mode. Otherwise Karafka process would not be aware of code changes
@@ -3,5 +3,5 @@
3
3
  # Main module namespace
4
4
  module Karafka
5
5
  # Current Karafka version
6
- VERSION = '2.4.18'
6
+ VERSION = '2.5.0.beta1'
7
7
  end
data/lib/karafka.rb CHANGED
@@ -16,6 +16,7 @@
16
16
  singleton
17
17
  digest
18
18
  zeitwerk
19
+ logger
19
20
  ].each(&method(:require))
20
21
 
21
22
  # Karafka framework main namespace
@@ -65,16 +66,23 @@ module Karafka
65
66
  return @root
66
67
  end
67
68
 
68
- # By default we infer the project root from bundler.
69
- # We cannot use the BUNDLE_GEMFILE env directly because it may be altered by things like
70
- # ruby-lsp. Instead we always fallback to the most outer Gemfile. In most of the cases, it
71
- # won't matter but in case of some automatic setup alterations like ruby-lsp, the location
72
- # from which the project starts may not match the original Gemfile.
73
- @root = Pathname.new(
74
- File.dirname(
75
- Bundler.with_unbundled_env { Bundler.default_gemfile }
69
+ if defined?(::Bundler)
70
+ # By default we infer the project root from bundler.
71
+ # We cannot use the BUNDLE_GEMFILE env directly because it may be altered by things like
72
+ # ruby-lsp. Instead we always fallback to the most outer Gemfile. In most of the cases, it
73
+ # won't matter but in case of some automatic setup alterations like ruby-lsp, the location
74
+ # from which the project starts may not match the original Gemfile.
75
+ @root = Pathname.new(
76
+ File.dirname(
77
+ Bundler.with_unbundled_env { Bundler.default_gemfile }
78
+ )
76
79
  )
77
- )
80
+ else
81
+ # Fallback when Bundler is not available: use current directory
82
+ @root = Pathname.new(Dir.pwd)
83
+ end
84
+
85
+ @root
78
86
  end
79
87
 
80
88
  # @return [Pathname] path to Karafka gem root core
data/renovate.json CHANGED
@@ -1,9 +1,21 @@
1
1
  {
2
2
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
3
3
  "extends": [
4
- "config:base"
4
+ "config:recommended"
5
5
  ],
6
6
  "ignorePaths": [
7
- "spec/integrations"
7
+ "spec/integrations"
8
+ ],
9
+ "github-actions": {
10
+ "enabled": true,
11
+ "pinDigests": true
12
+ },
13
+ "packageRules": [
14
+ {
15
+ "matchManagers": [
16
+ "github-actions"
17
+ ],
18
+ "minimumReleaseAge": "7 days"
19
+ }
8
20
  ]
9
21
  }
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: karafka
3
3
  version: !ruby/object:Gem::Version
4
- version: 2.4.18
4
+ version: 2.5.0.beta1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Maciej Mensfeld
@@ -34,7 +34,7 @@ cert_chain:
34
34
  i9zWxov0mr44TWegTVeypcWGd/0nxu1+QHVNHJrpqlPBRvwQsUm7fwmRInGpcaB8
35
35
  ap8wNYvryYzrzvzUxIVFBVM5PacgkFqRmolCa8I7tdKQN+R1
36
36
  -----END CERTIFICATE-----
37
- date: 2025-04-09 00:00:00.000000000 Z
37
+ date: 1980-01-02 00:00:00.000000000 Z
38
38
  dependencies:
39
39
  - !ruby/object:Gem::Dependency
40
40
  name: base64
@@ -56,41 +56,41 @@ dependencies:
56
56
  requirements:
57
57
  - - ">="
58
58
  - !ruby/object:Gem::Version
59
- version: 2.4.4
59
+ version: 2.5.0
60
60
  - - "<"
61
61
  - !ruby/object:Gem::Version
62
- version: 2.5.0
62
+ version: 2.6.0
63
63
  type: :runtime
64
64
  prerelease: false
65
65
  version_requirements: !ruby/object:Gem::Requirement
66
66
  requirements:
67
67
  - - ">="
68
68
  - !ruby/object:Gem::Version
69
- version: 2.4.4
69
+ version: 2.5.0
70
70
  - - "<"
71
71
  - !ruby/object:Gem::Version
72
- version: 2.5.0
72
+ version: 2.6.0
73
73
  - !ruby/object:Gem::Dependency
74
74
  name: karafka-rdkafka
75
75
  requirement: !ruby/object:Gem::Requirement
76
76
  requirements:
77
77
  - - ">="
78
78
  - !ruby/object:Gem::Version
79
- version: 0.17.2
79
+ version: 0.19.2
80
80
  type: :runtime
81
81
  prerelease: false
82
82
  version_requirements: !ruby/object:Gem::Requirement
83
83
  requirements:
84
84
  - - ">="
85
85
  - !ruby/object:Gem::Version
86
- version: 0.17.2
86
+ version: 0.19.2
87
87
  - !ruby/object:Gem::Dependency
88
88
  name: waterdrop
89
89
  requirement: !ruby/object:Gem::Requirement
90
90
  requirements:
91
91
  - - ">="
92
92
  - !ruby/object:Gem::Version
93
- version: 2.7.3
93
+ version: 2.8.3
94
94
  - - "<"
95
95
  - !ruby/object:Gem::Version
96
96
  version: 3.0.0
@@ -100,7 +100,7 @@ dependencies:
100
100
  requirements:
101
101
  - - ">="
102
102
  - !ruby/object:Gem::Version
103
- version: 2.7.3
103
+ version: 2.8.3
104
104
  - - "<"
105
105
  - !ruby/object:Gem::Version
106
106
  version: 3.0.0
@@ -133,10 +133,12 @@ files:
133
133
  - ".coditsu/ci.yml"
134
134
  - ".console_irbrc"
135
135
  - ".diffend.yml"
136
+ - ".github/CODEOWNERS"
136
137
  - ".github/FUNDING.yml"
137
138
  - ".github/ISSUE_TEMPLATE/bug_report.md"
138
139
  - ".github/ISSUE_TEMPLATE/feature_request.md"
139
140
  - ".github/workflows/ci.yml"
141
+ - ".github/workflows/verify-action-pins.yml"
140
142
  - ".gitignore"
141
143
  - ".rspec"
142
144
  - ".ruby-gemset"
@@ -152,6 +154,7 @@ files:
152
154
  - README.md
153
155
  - SECURITY.md
154
156
  - bin/benchmarks
157
+ - bin/clean_kafka
155
158
  - bin/create_token
156
159
  - bin/integrations
157
160
  - bin/karafka
@@ -160,13 +163,19 @@ files:
160
163
  - bin/scenario
161
164
  - bin/stress_many
162
165
  - bin/stress_one
166
+ - bin/verify_kafka_warnings
163
167
  - bin/verify_license_integrity
168
+ - bin/verify_topics_naming
164
169
  - bin/wait_for_kafka
165
170
  - certs/cert.pem
166
171
  - certs/karafka-pro.pem
167
172
  - config/locales/errors.yml
168
173
  - config/locales/pro_errors.yml
169
174
  - docker-compose.yml
175
+ - examples/payloads/json/enrollment_event.json
176
+ - examples/payloads/json/ingestion_event.json
177
+ - examples/payloads/json/transaction_event.json
178
+ - examples/payloads/json/user_event.json
170
179
  - karafka.gemspec
171
180
  - lib/active_job/karafka.rb
172
181
  - lib/active_job/queue_adapters/karafka_adapter.rb
@@ -275,6 +284,10 @@ files:
275
284
  - lib/karafka/pro/cleaner/messages/message.rb
276
285
  - lib/karafka/pro/cleaner/messages/messages.rb
277
286
  - lib/karafka/pro/cleaner/messages/metadata.rb
287
+ - lib/karafka/pro/cli/parallel_segments.rb
288
+ - lib/karafka/pro/cli/parallel_segments/base.rb
289
+ - lib/karafka/pro/cli/parallel_segments/collapse.rb
290
+ - lib/karafka/pro/cli/parallel_segments/distribute.rb
278
291
  - lib/karafka/pro/connection/manager.rb
279
292
  - lib/karafka/pro/connection/multiplexing/listener.rb
280
293
  - lib/karafka/pro/contracts/base.rb
@@ -316,6 +329,9 @@ files:
316
329
  - lib/karafka/pro/processing/offset_metadata/consumer.rb
317
330
  - lib/karafka/pro/processing/offset_metadata/fetcher.rb
318
331
  - lib/karafka/pro/processing/offset_metadata/listener.rb
332
+ - lib/karafka/pro/processing/parallel_segments/filters/base.rb
333
+ - lib/karafka/pro/processing/parallel_segments/filters/default.rb
334
+ - lib/karafka/pro/processing/parallel_segments/filters/mom.rb
319
335
  - lib/karafka/pro/processing/partitioner.rb
320
336
  - lib/karafka/pro/processing/periodic_job/consumer.rb
321
337
  - lib/karafka/pro/processing/piping/consumer.rb
@@ -373,6 +389,9 @@ files:
373
389
  - lib/karafka/pro/processing/strategies/vp/default.rb
374
390
  - lib/karafka/pro/processing/strategy_selector.rb
375
391
  - lib/karafka/pro/processing/subscription_groups_coordinator.rb
392
+ - lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb
393
+ - lib/karafka/pro/processing/virtual_partitions/distributors/base.rb
394
+ - lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb
376
395
  - lib/karafka/pro/recurring_tasks.rb
377
396
  - lib/karafka/pro/recurring_tasks/consumer.rb
378
397
  - lib/karafka/pro/recurring_tasks/contracts/config.rb
@@ -436,6 +455,12 @@ files:
436
455
  - lib/karafka/pro/routing/features/offset_metadata/config.rb
437
456
  - lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb
438
457
  - lib/karafka/pro/routing/features/offset_metadata/topic.rb
458
+ - lib/karafka/pro/routing/features/parallel_segments.rb
459
+ - lib/karafka/pro/routing/features/parallel_segments/builder.rb
460
+ - lib/karafka/pro/routing/features/parallel_segments/config.rb
461
+ - lib/karafka/pro/routing/features/parallel_segments/consumer_group.rb
462
+ - lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb
463
+ - lib/karafka/pro/routing/features/parallel_segments/topic.rb
439
464
  - lib/karafka/pro/routing/features/patterns.rb
440
465
  - lib/karafka/pro/routing/features/patterns/builder.rb
441
466
  - lib/karafka/pro/routing/features/patterns/config.rb
@@ -619,7 +644,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
619
644
  - !ruby/object:Gem::Version
620
645
  version: '0'
621
646
  requirements: []
622
- rubygems_version: 3.6.2
647
+ rubygems_version: 3.6.9
623
648
  specification_version: 4
624
649
  summary: Karafka is Ruby and Rails efficient Kafka processing framework.
625
650
  test_files: []
metadata.gz.sig CHANGED
Binary file