karafka 2.5.0 → 2.5.1.beta1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121):
  1. checksums.yaml +4 -4
  2. data/.github/workflows/{ci.yml → ci_linux_ubuntu_x86_64_gnu.yml} +54 -30
  3. data/.github/workflows/ci_macos_arm64.yml +148 -0
  4. data/.github/workflows/push.yml +2 -2
  5. data/.github/workflows/trigger-wiki-refresh.yml +30 -0
  6. data/.github/workflows/verify-action-pins.yml +1 -1
  7. data/.ruby-version +1 -1
  8. data/CHANGELOG.md +28 -1
  9. data/Gemfile +2 -1
  10. data/Gemfile.lock +55 -26
  11. data/README.md +2 -2
  12. data/bin/integrations +3 -1
  13. data/bin/verify_kafka_warnings +2 -1
  14. data/config/locales/errors.yml +153 -152
  15. data/config/locales/pro_errors.yml +135 -134
  16. data/karafka.gemspec +3 -3
  17. data/lib/active_job/queue_adapters/karafka_adapter.rb +30 -1
  18. data/lib/karafka/active_job/dispatcher.rb +19 -9
  19. data/lib/karafka/admin/acl.rb +7 -8
  20. data/lib/karafka/admin/configs/config.rb +2 -2
  21. data/lib/karafka/admin/configs/resource.rb +2 -2
  22. data/lib/karafka/admin/configs.rb +3 -7
  23. data/lib/karafka/admin/consumer_groups.rb +351 -0
  24. data/lib/karafka/admin/topics.rb +206 -0
  25. data/lib/karafka/admin.rb +42 -451
  26. data/lib/karafka/base_consumer.rb +22 -0
  27. data/lib/karafka/{pro/contracts/server_cli_options.rb → cli/contracts/server.rb} +4 -12
  28. data/lib/karafka/cli/info.rb +1 -1
  29. data/lib/karafka/cli/install.rb +0 -2
  30. data/lib/karafka/connection/client.rb +8 -0
  31. data/lib/karafka/connection/listener.rb +5 -1
  32. data/lib/karafka/connection/status.rb +12 -9
  33. data/lib/karafka/errors.rb +0 -8
  34. data/lib/karafka/instrumentation/assignments_tracker.rb +16 -0
  35. data/lib/karafka/instrumentation/logger_listener.rb +109 -50
  36. data/lib/karafka/pro/active_job/dispatcher.rb +5 -0
  37. data/lib/karafka/pro/cleaner/messages/messages.rb +18 -8
  38. data/lib/karafka/pro/cli/contracts/server.rb +106 -0
  39. data/lib/karafka/pro/encryption/contracts/config.rb +1 -1
  40. data/lib/karafka/pro/loader.rb +1 -1
  41. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +1 -1
  42. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +1 -1
  43. data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb +9 -0
  44. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
  45. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +9 -0
  46. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +1 -1
  47. data/lib/karafka/pro/routing/features/delaying/topic.rb +9 -0
  48. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +1 -1
  49. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +1 -1
  50. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +9 -0
  51. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +1 -1
  52. data/lib/karafka/pro/routing/features/expiring/topic.rb +9 -0
  53. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +1 -1
  54. data/lib/karafka/pro/routing/features/filtering/topic.rb +9 -0
  55. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +1 -1
  56. data/lib/karafka/pro/routing/features/inline_insights/topic.rb +9 -0
  57. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +1 -1
  58. data/lib/karafka/pro/routing/features/long_running_job/topic.rb +9 -0
  59. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +1 -1
  60. data/lib/karafka/pro/routing/features/multiplexing.rb +1 -1
  61. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +1 -1
  62. data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +9 -0
  63. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +1 -1
  64. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +1 -1
  65. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +1 -1
  66. data/lib/karafka/pro/routing/features/patterns/topic.rb +9 -0
  67. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +1 -1
  68. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +1 -1
  69. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +9 -0
  70. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +1 -1
  71. data/lib/karafka/pro/routing/features/recurring_tasks/topic.rb +9 -0
  72. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +1 -1
  73. data/lib/karafka/pro/routing/features/scheduled_messages/topic.rb +9 -0
  74. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +1 -1
  75. data/lib/karafka/pro/routing/features/swarm/topic.rb +9 -0
  76. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +1 -1
  77. data/lib/karafka/pro/routing/features/throttling/topic.rb +9 -0
  78. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +1 -1
  79. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +9 -0
  80. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +1 -1
  81. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +9 -3
  82. data/lib/karafka/pro/swarm/liveness_listener.rb +17 -2
  83. data/lib/karafka/processing/executor.rb +1 -1
  84. data/lib/karafka/routing/builder.rb +0 -3
  85. data/lib/karafka/routing/consumer_group.rb +1 -4
  86. data/lib/karafka/routing/contracts/consumer_group.rb +84 -0
  87. data/lib/karafka/routing/contracts/routing.rb +61 -0
  88. data/lib/karafka/routing/contracts/topic.rb +83 -0
  89. data/lib/karafka/routing/features/active_job/contracts/topic.rb +1 -1
  90. data/lib/karafka/routing/features/active_job/topic.rb +9 -0
  91. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
  92. data/lib/karafka/routing/features/dead_letter_queue/topic.rb +9 -0
  93. data/lib/karafka/routing/features/declaratives/contracts/topic.rb +1 -1
  94. data/lib/karafka/routing/features/declaratives/topic.rb +9 -0
  95. data/lib/karafka/routing/features/deserializers/contracts/topic.rb +1 -1
  96. data/lib/karafka/routing/features/deserializers/topic.rb +9 -0
  97. data/lib/karafka/routing/features/eofed/contracts/topic.rb +1 -1
  98. data/lib/karafka/routing/features/eofed/topic.rb +9 -0
  99. data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +1 -1
  100. data/lib/karafka/routing/features/inline_insights/topic.rb +9 -0
  101. data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +1 -1
  102. data/lib/karafka/routing/features/manual_offset_management/topic.rb +9 -0
  103. data/lib/karafka/routing/subscription_group.rb +1 -10
  104. data/lib/karafka/routing/topic.rb +9 -1
  105. data/lib/karafka/server.rb +2 -7
  106. data/lib/karafka/setup/attributes_map.rb +36 -0
  107. data/lib/karafka/setup/config.rb +6 -7
  108. data/lib/karafka/setup/contracts/config.rb +217 -0
  109. data/lib/karafka/setup/defaults_injector.rb +3 -1
  110. data/lib/karafka/swarm/node.rb +66 -6
  111. data/lib/karafka/swarm.rb +2 -2
  112. data/lib/karafka/templates/karafka.rb.erb +2 -7
  113. data/lib/karafka/version.rb +1 -1
  114. data/lib/karafka.rb +17 -18
  115. metadata +18 -15
  116. data/lib/karafka/contracts/config.rb +0 -210
  117. data/lib/karafka/contracts/consumer_group.rb +0 -81
  118. data/lib/karafka/contracts/routing.rb +0 -59
  119. data/lib/karafka/contracts/server_cli_options.rb +0 -92
  120. data/lib/karafka/contracts/topic.rb +0 -81
  121. data/lib/karafka/swarm/pidfd.rb +0 -147
@@ -0,0 +1,217 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Karafka
4
+ module Setup
5
+ # Namespace for setup related contracts
6
+ module Contracts
7
+ # Contract with validation rules for Karafka configuration details.
8
+ #
9
+ # @note There are many more configuration options inside of the
10
+ # `Karafka::Setup::Config` model, but we don't validate them here as they are
11
+ # validated per each route (topic + consumer_group) because they can be overwritten,
12
+ # so we validate all of that once all the routes are defined and ready.
13
+ class Config < Karafka::Contracts::Base
14
+ configure do |config|
15
+ config.error_messages = YAML.safe_load(
16
+ File.read(
17
+ File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
18
+ )
19
+ ).fetch('en').fetch('validations').fetch('setup').fetch('config')
20
+ end
21
+
22
+ # Topics regexp constant reference for easier usage
23
+ TOPIC_REGEXP = ::Karafka::Contracts::TOPIC_REGEXP
24
+
25
+ private_constant :TOPIC_REGEXP
26
+
27
+ # License validity happens in the licenser. Here we do only the simple consistency checks
28
+ nested(:license) do
29
+ required(:token) { |val| [true, false].include?(val) || val.is_a?(String) }
30
+ required(:entity) { |val| val.is_a?(String) }
31
+ end
32
+
33
+ required(:client_id) { |val| val.is_a?(String) && TOPIC_REGEXP.match?(val) }
34
+ required(:concurrency) { |val| val.is_a?(Integer) && val.positive? }
35
+ required(:consumer_persistence) { |val| [true, false].include?(val) }
36
+ required(:pause_timeout) { |val| val.is_a?(Integer) && val.positive? }
37
+ required(:pause_max_timeout) { |val| val.is_a?(Integer) && val.positive? }
38
+ required(:pause_with_exponential_backoff) { |val| [true, false].include?(val) }
39
+ required(:strict_topics_namespacing) { |val| [true, false].include?(val) }
40
+ required(:shutdown_timeout) { |val| val.is_a?(Integer) && val.positive? }
41
+ required(:max_wait_time) { |val| val.is_a?(Integer) && val.positive? }
42
+ required(:group_id) { |val| val.is_a?(String) && TOPIC_REGEXP.match?(val) }
43
+ required(:kafka) { |val| val.is_a?(Hash) && !val.empty? }
44
+ required(:strict_declarative_topics) { |val| [true, false].include?(val) }
45
+ required(:worker_thread_priority) { |val| (-3..3).to_a.include?(val) }
46
+
47
+ nested(:swarm) do
48
+ required(:nodes) { |val| val.is_a?(Integer) && val.positive? }
49
+ required(:node) { |val| val == false || val.is_a?(Karafka::Swarm::Node) }
50
+ end
51
+
52
+ nested(:oauth) do
53
+ required(:token_provider_listener) do |val|
54
+ val == false || val.respond_to?(:on_oauthbearer_token_refresh)
55
+ end
56
+ end
57
+
58
+ nested(:admin) do
59
+ # Can be empty because inherits values from the root kafka
60
+ required(:kafka) { |val| val.is_a?(Hash) }
61
+ required(:group_id) { |val| val.is_a?(String) && TOPIC_REGEXP.match?(val) }
62
+ required(:max_wait_time) { |val| val.is_a?(Integer) && val.positive? }
63
+ required(:retry_backoff) { |val| val.is_a?(Integer) && val >= 100 }
64
+ required(:max_retries_duration) { |val| val.is_a?(Integer) && val >= 1_000 }
65
+ end
66
+
67
+ # We validate internals just to be sure, that they are present and working
68
+ nested(:internal) do
69
+ required(:status) { |val| !val.nil? }
70
+ required(:process) { |val| !val.nil? }
71
+ # In theory this could be less than a second, however this would impact the maximum time
72
+ # of a single consumer queue poll, hence we prevent it
73
+ required(:tick_interval) { |val| val.is_a?(Integer) && val >= 1_000 }
74
+ required(:supervision_sleep) { |val| val.is_a?(Numeric) && val.positive? }
75
+ required(:forceful_exit_code) { |val| val.is_a?(Integer) && val >= 0 }
76
+ required(:forceful_shutdown_wait) { |val| val.is_a?(Integer) && val >= 0 }
77
+
78
+ nested(:swarm) do
79
+ required(:manager) { |val| !val.nil? }
80
+ required(:orphaned_exit_code) { |val| val.is_a?(Integer) && val >= 0 }
81
+ required(:supervision_interval) { |val| val.is_a?(Integer) && val >= 1_000 }
82
+ required(:liveness_interval) { |val| val.is_a?(Integer) && val >= 1_000 }
83
+ required(:liveness_listener) { |val| !val.nil? }
84
+ required(:node_report_timeout) { |val| val.is_a?(Integer) && val >= 1_000 }
85
+ required(:node_restart_timeout) { |val| val.is_a?(Integer) && val >= 1_000 }
86
+ end
87
+
88
+ nested(:connection) do
89
+ required(:manager) { |val| !val.nil? }
90
+ required(:conductor) { |val| !val.nil? }
91
+ required(:reset_backoff) { |val| val.is_a?(Integer) && val >= 1_000 }
92
+ required(:listener_thread_priority) { |val| (-3..3).to_a.include?(val) }
93
+
94
+ nested(:proxy) do
95
+ nested(:commit) do
96
+ required(:max_attempts) { |val| val.is_a?(Integer) && val.positive? }
97
+ required(:wait_time) { |val| val.is_a?(Integer) && val.positive? }
98
+ end
99
+
100
+ # All of them have the same requirements
101
+ %i[
102
+ query_watermark_offsets
103
+ offsets_for_times
104
+ committed
105
+ metadata
106
+ ].each do |scope|
107
+ nested(scope) do
108
+ required(:timeout) { |val| val.is_a?(Integer) && val.positive? }
109
+ required(:max_attempts) { |val| val.is_a?(Integer) && val.positive? }
110
+ required(:wait_time) { |val| val.is_a?(Integer) && val.positive? }
111
+ end
112
+ end
113
+ end
114
+ end
115
+
116
+ nested(:routing) do
117
+ required(:builder) { |val| !val.nil? }
118
+ required(:subscription_groups_builder) { |val| !val.nil? }
119
+ end
120
+
121
+ nested(:processing) do
122
+ required(:jobs_builder) { |val| !val.nil? }
123
+ required(:jobs_queue_class) { |val| !val.nil? }
124
+ required(:scheduler_class) { |val| !val.nil? }
125
+ required(:coordinator_class) { |val| !val.nil? }
126
+ required(:errors_tracker_class) { |val| val.nil? || val.is_a?(Class) }
127
+ required(:partitioner_class) { |val| !val.nil? }
128
+ required(:strategy_selector) { |val| !val.nil? }
129
+ required(:expansions_selector) { |val| !val.nil? }
130
+ required(:executor_class) { |val| !val.nil? }
131
+ required(:worker_job_call_wrapper) { |val| val == false || val.respond_to?(:wrap) }
132
+ end
133
+
134
+ nested(:active_job) do
135
+ required(:dispatcher) { |val| !val.nil? }
136
+ required(:job_options_contract) { |val| !val.nil? }
137
+ required(:consumer_class) { |val| !val.nil? }
138
+ end
139
+ end
140
+
141
+ # Ensure all root kafka keys are symbols
142
+ virtual do |data, errors|
143
+ next unless errors.empty?
144
+
145
+ detected_errors = []
146
+
147
+ data.fetch(:kafka).each_key do |key|
148
+ next if key.is_a?(Symbol)
149
+
150
+ detected_errors << [[:kafka, key], :key_must_be_a_symbol]
151
+ end
152
+
153
+ detected_errors
154
+ end
155
+
156
+ # Ensure all admin kafka keys are symbols
157
+ virtual do |data, errors|
158
+ next unless errors.empty?
159
+
160
+ detected_errors = []
161
+
162
+ data.fetch(:admin).fetch(:kafka).each_key do |key|
163
+ next if key.is_a?(Symbol)
164
+
165
+ detected_errors << [[:admin, :kafka, key], :key_must_be_a_symbol]
166
+ end
167
+
168
+ detected_errors
169
+ end
170
+
171
+ virtual do |data, errors|
172
+ next unless errors.empty?
173
+
174
+ pause_timeout = data.fetch(:pause_timeout)
175
+ pause_max_timeout = data.fetch(:pause_max_timeout)
176
+
177
+ next if pause_timeout <= pause_max_timeout
178
+
179
+ [[%i[pause_timeout], :max_timeout_vs_pause_max_timeout]]
180
+ end
181
+
182
+ virtual do |data, errors|
183
+ next unless errors.empty?
184
+
185
+ shutdown_timeout = data.fetch(:shutdown_timeout)
186
+ max_wait_time = data.fetch(:max_wait_time)
187
+
188
+ next if max_wait_time < shutdown_timeout
189
+
190
+ [[%i[shutdown_timeout], :shutdown_timeout_vs_max_wait_time]]
191
+ end
192
+
193
+ # `internal.swarm.node_report_timeout` should not be close to `max_wait_time` otherwise
194
+ # there may be a case where node cannot report often enough because it is clogged by
195
+ # waiting on more data.
196
+ #
197
+ # We handle that at a config level to make sure that this is correctly configured.
198
+ #
199
+ # We do not validate this in the context of swarm usage (validate only if...) because it is
200
+ # often that swarm only runs on prod and we do not want to crash it surprisingly.
201
+ virtual do |data, errors|
202
+ next unless errors.empty?
203
+
204
+ max_wait_time = data.fetch(:max_wait_time)
205
+ node_report_timeout = data.fetch(:internal)[:swarm][:node_report_timeout] || false
206
+
207
+ next unless node_report_timeout
208
+ # max wait time should be at least 20% smaller than the reporting time to have enough
209
+ # time for reporting
210
+ next if max_wait_time < node_report_timeout * 0.8
211
+
212
+ [[%i[max_wait_time], :max_wait_time_vs_swarm_node_report_timeout]]
213
+ end
214
+ end
215
+ end
216
+ end
217
+ end
@@ -16,6 +16,7 @@ module Karafka
16
16
  # some features may use this value for computation and it is better to ensure, we do
17
17
  # always have it
18
18
  'max.poll.interval.ms': 300_000,
19
+ 'socket.nagle.disable': true,
19
20
  'client.software.version': [
20
21
  "v#{Karafka::VERSION}",
21
22
  "rdkafka-ruby-v#{Rdkafka::VERSION}",
@@ -42,7 +43,8 @@ module Karafka
42
43
  PRODUCER_KAFKA_DEV_DEFAULTS = {
43
44
  # For all of those same reasoning as for the consumer
44
45
  'allow.auto.create.topics': 'true',
45
- 'topic.metadata.refresh.interval.ms': 5_000
46
+ 'topic.metadata.refresh.interval.ms': 5_000,
47
+ 'socket.nagle.disable': true
46
48
  }.freeze
47
49
 
48
50
  private_constant :CONSUMER_KAFKA_DEFAULTS, :CONSUMER_KAFKA_DEV_DEFAULTS,
@@ -45,7 +45,9 @@ module Karafka
45
45
  # @param parent_pid [Integer] parent pid for zombie fencing
46
46
  def initialize(id, parent_pid)
47
47
  @id = id
48
- @parent_pidfd = Pidfd.new(parent_pid)
48
+ @parent_pid = parent_pid
49
+ @mutex = Mutex.new
50
+ @alive = nil
49
51
  end
50
52
 
51
53
  # Starts a new fork and:
@@ -56,6 +58,9 @@ module Karafka
56
58
  # @note Parent API
57
59
  def start
58
60
  @reader, @writer = IO.pipe
61
+ # Reset alive status when starting/restarting a node
62
+ # nil means unknown status - will check with waitpid
63
+ @mutex.synchronize { @alive = nil }
59
64
 
60
65
  # :nocov:
61
66
  @pid = ::Process.fork do
@@ -108,7 +113,6 @@ module Karafka
108
113
  # :nocov:
109
114
 
110
115
  @writer.close
111
- @pidfd = Pidfd.new(@pid)
112
116
  end
113
117
 
114
118
  # Indicates that this node is doing well
@@ -147,13 +151,42 @@ module Karafka
147
151
  # @note Parent API
148
152
  # @note Keep in mind that the fact that process is alive does not mean it is healthy
149
153
  def alive?
150
- @pidfd.alive?
154
+ # Don't try to waitpid on ourselves - just check if process exists
155
+ return true if @pid == ::Process.pid
156
+
157
+ @mutex.synchronize do
158
+ # Return cached result if we've already determined the process is dead
159
+ return false if @alive == false
160
+
161
+ begin
162
+ # Try to reap the process without blocking. If it returns the pid,
163
+ # the process has exited (zombie). If it returns nil, still running.
164
+ result = ::Process.waitpid(@pid, ::Process::WNOHANG)
165
+
166
+ if result
167
+ # Process has exited and we've reaped it
168
+ @alive = false
169
+ false
170
+ else
171
+ # Process is still running
172
+ true
173
+ end
174
+ rescue Errno::ECHILD
175
+ # Process doesn't exist or already reaped
176
+ @alive = false
177
+ false
178
+ rescue Errno::ESRCH
179
+ # Process doesn't exist
180
+ @alive = false
181
+ false
182
+ end
183
+ end
151
184
  end
152
185
 
153
186
  # @return [Boolean] true if node is orphaned or false otherwise. Used for orphans detection.
154
187
  # @note Child API
155
188
  def orphaned?
156
- !@parent_pidfd.alive?
189
+ ::Process.ppid != @parent_pid
157
190
  end
158
191
 
159
192
  # Sends sigterm to the node
@@ -176,13 +209,40 @@ module Karafka
176
209
 
177
210
  # Sends provided signal to the node
178
211
  # @param signal [String]
212
+ # @return [Boolean] true if signal was sent, false if process doesn't exist
179
213
  def signal(signal)
180
- @pidfd.signal(signal)
214
+ ::Process.kill(signal, @pid)
215
+ true
216
+ rescue Errno::ESRCH
217
+ # Process doesn't exist
218
+ false
181
219
  end
182
220
 
183
221
  # Removes the dead process from the processes table
222
+ # @return [Boolean] true if process was reaped, false if still running or already reaped
184
223
  def cleanup
185
- @pidfd.cleanup
224
+ @mutex.synchronize do
225
+ # If we've already marked it as dead (reaped in alive?), nothing to do
226
+ return false if @alive == false
227
+
228
+ begin
229
+ # WNOHANG means don't block if process hasn't exited yet
230
+ result = ::Process.waitpid(@pid, ::Process::WNOHANG)
231
+
232
+ if result
233
+ # Process exited and was reaped
234
+ @alive = false
235
+ true
236
+ else
237
+ # Process is still running
238
+ false
239
+ end
240
+ rescue Errno::ECHILD
241
+ # Process already reaped or doesn't exist, which is fine
242
+ @alive = false
243
+ false
244
+ end
245
+ end
186
246
  end
187
247
 
188
248
  private
data/lib/karafka/swarm.rb CHANGED
@@ -18,9 +18,9 @@ module Karafka
18
18
  )
19
19
  end
20
20
 
21
- # @return [Boolean] true if fork API and pidfd OS API are available, otherwise false
21
+ # @return [Boolean] true if fork API is available, otherwise false
22
22
  def supported?
23
- ::Process.respond_to?(:fork) && Swarm::Pidfd.supported?
23
+ ::Process.respond_to?(:fork)
24
24
  end
25
25
  end
26
26
  end
@@ -12,12 +12,7 @@ Bundler.require(:default, ENV['KARAFKA_ENV'])
12
12
  # Karafka framework configuration
13
13
  APP_LOADER = Zeitwerk::Loader.new
14
14
  APP_LOADER.enable_reloading
15
-
16
- %w[
17
- lib
18
- app/consumers
19
- ].each { |dir| APP_LOADER.push_dir(dir) }
20
-
15
+ APP_LOADER.push_dir('app/consumers')
21
16
  APP_LOADER.setup
22
17
  APP_LOADER.eager_load
23
18
  <% end -%>
@@ -25,7 +20,7 @@ APP_LOADER.eager_load
25
20
  class KarafkaApp < Karafka::App
26
21
  setup do |config|
27
22
  config.kafka = { 'bootstrap.servers': '127.0.0.1:9092' }
28
- config.client_id = 'YOUR_APP_NAME'
23
+ config.client_id = "YOUR_APP_NAME-#{Process.pid}-#{Socket.gethostname}"
29
24
 
30
25
  # IMPORTANT: Customize this group_id with your application name.
31
26
  # The group_id should be unique per application to properly track message consumption.
@@ -3,5 +3,5 @@
3
3
  # Main module namespace
4
4
  module Karafka
5
5
  # Current Karafka version
6
- VERSION = '2.5.0'
6
+ VERSION = '2.5.1.beta1'
7
7
  end
data/lib/karafka.rb CHANGED
@@ -1,23 +1,22 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- %w[
4
- karafka-core
5
- delegate
6
- English
7
- rdkafka
8
- waterdrop
9
- json
10
- forwardable
11
- fileutils
12
- openssl
13
- optparse
14
- base64
15
- date
16
- singleton
17
- digest
18
- zeitwerk
19
- logger
20
- ].each(&method(:require))
3
+ require 'karafka-core'
4
+ require 'delegate'
5
+ require 'English'
6
+ require 'rdkafka'
7
+ require 'waterdrop'
8
+ require 'json'
9
+ require 'forwardable'
10
+ require 'fileutils'
11
+ require 'openssl'
12
+ require 'optparse'
13
+ require 'socket'
14
+ require 'base64'
15
+ require 'date'
16
+ require 'singleton'
17
+ require 'digest'
18
+ require 'zeitwerk'
19
+ require 'logger'
21
20
 
22
21
  # Karafka framework main namespace
23
22
  module Karafka
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: karafka
3
3
  version: !ruby/object:Gem::Version
4
- version: 2.5.0
4
+ version: 2.5.1.beta1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Maciej Mensfeld
@@ -29,7 +29,7 @@ dependencies:
29
29
  requirements:
30
30
  - - ">="
31
31
  - !ruby/object:Gem::Version
32
- version: 2.5.2
32
+ version: 2.5.6
33
33
  - - "<"
34
34
  - !ruby/object:Gem::Version
35
35
  version: 2.6.0
@@ -39,7 +39,7 @@ dependencies:
39
39
  requirements:
40
40
  - - ">="
41
41
  - !ruby/object:Gem::Version
42
- version: 2.5.2
42
+ version: 2.5.6
43
43
  - - "<"
44
44
  - !ruby/object:Gem::Version
45
45
  version: 2.6.0
@@ -49,14 +49,14 @@ dependencies:
49
49
  requirements:
50
50
  - - ">="
51
51
  - !ruby/object:Gem::Version
52
- version: 0.19.5
52
+ version: 0.21.0
53
53
  type: :runtime
54
54
  prerelease: false
55
55
  version_requirements: !ruby/object:Gem::Requirement
56
56
  requirements:
57
57
  - - ">="
58
58
  - !ruby/object:Gem::Version
59
- version: 0.19.5
59
+ version: 0.21.0
60
60
  - !ruby/object:Gem::Dependency
61
61
  name: waterdrop
62
62
  requirement: !ruby/object:Gem::Requirement
@@ -110,8 +110,10 @@ files:
110
110
  - ".github/FUNDING.yml"
111
111
  - ".github/ISSUE_TEMPLATE/bug_report.md"
112
112
  - ".github/ISSUE_TEMPLATE/feature_request.md"
113
- - ".github/workflows/ci.yml"
113
+ - ".github/workflows/ci_linux_ubuntu_x86_64_gnu.yml"
114
+ - ".github/workflows/ci_macos_arm64.yml"
114
115
  - ".github/workflows/push.yml"
116
+ - ".github/workflows/trigger-wiki-refresh.yml"
115
117
  - ".github/workflows/verify-action-pins.yml"
116
118
  - ".gitignore"
117
119
  - ".rspec"
@@ -167,11 +169,14 @@ files:
167
169
  - lib/karafka/admin/configs.rb
168
170
  - lib/karafka/admin/configs/config.rb
169
171
  - lib/karafka/admin/configs/resource.rb
172
+ - lib/karafka/admin/consumer_groups.rb
173
+ - lib/karafka/admin/topics.rb
170
174
  - lib/karafka/app.rb
171
175
  - lib/karafka/base_consumer.rb
172
176
  - lib/karafka/cli.rb
173
177
  - lib/karafka/cli/base.rb
174
178
  - lib/karafka/cli/console.rb
179
+ - lib/karafka/cli/contracts/server.rb
175
180
  - lib/karafka/cli/help.rb
176
181
  - lib/karafka/cli/info.rb
177
182
  - lib/karafka/cli/install.rb
@@ -201,11 +206,6 @@ files:
201
206
  - lib/karafka/constraints.rb
202
207
  - lib/karafka/contracts.rb
203
208
  - lib/karafka/contracts/base.rb
204
- - lib/karafka/contracts/config.rb
205
- - lib/karafka/contracts/consumer_group.rb
206
- - lib/karafka/contracts/routing.rb
207
- - lib/karafka/contracts/server_cli_options.rb
208
- - lib/karafka/contracts/topic.rb
209
209
  - lib/karafka/deserializers/headers.rb
210
210
  - lib/karafka/deserializers/key.rb
211
211
  - lib/karafka/deserializers/payload.rb
@@ -260,6 +260,7 @@ files:
260
260
  - lib/karafka/pro/cleaner/messages/message.rb
261
261
  - lib/karafka/pro/cleaner/messages/messages.rb
262
262
  - lib/karafka/pro/cleaner/messages/metadata.rb
263
+ - lib/karafka/pro/cli/contracts/server.rb
263
264
  - lib/karafka/pro/cli/parallel_segments.rb
264
265
  - lib/karafka/pro/cli/parallel_segments/base.rb
265
266
  - lib/karafka/pro/cli/parallel_segments/collapse.rb
@@ -267,7 +268,6 @@ files:
267
268
  - lib/karafka/pro/connection/manager.rb
268
269
  - lib/karafka/pro/connection/multiplexing/listener.rb
269
270
  - lib/karafka/pro/contracts/base.rb
270
- - lib/karafka/pro/contracts/server_cli_options.rb
271
271
  - lib/karafka/pro/encryption.rb
272
272
  - lib/karafka/pro/encryption/cipher.rb
273
273
  - lib/karafka/pro/encryption/contracts/config.rb
@@ -534,6 +534,9 @@ files:
534
534
  - lib/karafka/routing/activity_manager.rb
535
535
  - lib/karafka/routing/builder.rb
536
536
  - lib/karafka/routing/consumer_group.rb
537
+ - lib/karafka/routing/contracts/consumer_group.rb
538
+ - lib/karafka/routing/contracts/routing.rb
539
+ - lib/karafka/routing/contracts/topic.rb
537
540
  - lib/karafka/routing/features/active_job.rb
538
541
  - lib/karafka/routing/features/active_job/builder.rb
539
542
  - lib/karafka/routing/features/active_job/config.rb
@@ -576,6 +579,7 @@ files:
576
579
  - lib/karafka/server.rb
577
580
  - lib/karafka/setup/attributes_map.rb
578
581
  - lib/karafka/setup/config.rb
582
+ - lib/karafka/setup/contracts/config.rb
579
583
  - lib/karafka/setup/defaults_injector.rb
580
584
  - lib/karafka/setup/dsl.rb
581
585
  - lib/karafka/status.rb
@@ -583,7 +587,6 @@ files:
583
587
  - lib/karafka/swarm/liveness_listener.rb
584
588
  - lib/karafka/swarm/manager.rb
585
589
  - lib/karafka/swarm/node.rb
586
- - lib/karafka/swarm/pidfd.rb
587
590
  - lib/karafka/swarm/supervisor.rb
588
591
  - lib/karafka/templates/application_consumer.rb.erb
589
592
  - lib/karafka/templates/example_consumer.rb.erb
@@ -613,14 +616,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
613
616
  requirements:
614
617
  - - ">="
615
618
  - !ruby/object:Gem::Version
616
- version: 3.0.0
619
+ version: 3.1.0
617
620
  required_rubygems_version: !ruby/object:Gem::Requirement
618
621
  requirements:
619
622
  - - ">="
620
623
  - !ruby/object:Gem::Version
621
624
  version: '0'
622
625
  requirements: []
623
- rubygems_version: 3.6.7
626
+ rubygems_version: 3.6.9
624
627
  specification_version: 4
625
628
  summary: Karafka is Ruby and Rails efficient Kafka processing framework.
626
629
  test_files: []