karafka 2.4.18 → 2.5.0.beta1

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (129)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/CODEOWNERS +3 -0
  4. data/.github/workflows/ci.yml +58 -14
  5. data/.github/workflows/verify-action-pins.yml +16 -0
  6. data/.ruby-version +1 -1
  7. data/CHANGELOG.md +53 -0
  8. data/Gemfile +3 -3
  9. data/Gemfile.lock +55 -58
  10. data/LICENSE-COMM +2 -2
  11. data/bin/clean_kafka +43 -0
  12. data/bin/integrations +17 -5
  13. data/bin/rspecs +15 -3
  14. data/bin/verify_kafka_warnings +35 -0
  15. data/bin/verify_topics_naming +27 -0
  16. data/config/locales/errors.yml +3 -0
  17. data/config/locales/pro_errors.yml +13 -2
  18. data/docker-compose.yml +1 -1
  19. data/examples/payloads/json/enrollment_event.json +579 -0
  20. data/examples/payloads/json/ingestion_event.json +30 -0
  21. data/examples/payloads/json/transaction_event.json +17 -0
  22. data/examples/payloads/json/user_event.json +11 -0
  23. data/karafka.gemspec +3 -3
  24. data/lib/karafka/active_job/current_attributes.rb +1 -1
  25. data/lib/karafka/admin/acl.rb +5 -1
  26. data/lib/karafka/admin.rb +51 -19
  27. data/lib/karafka/base_consumer.rb +17 -8
  28. data/lib/karafka/cli/base.rb +8 -2
  29. data/lib/karafka/connection/client.rb +20 -7
  30. data/lib/karafka/connection/listener.rb +24 -12
  31. data/lib/karafka/connection/messages_buffer.rb +1 -1
  32. data/lib/karafka/connection/proxy.rb +3 -0
  33. data/lib/karafka/contracts/config.rb +3 -0
  34. data/lib/karafka/contracts/topic.rb +1 -1
  35. data/lib/karafka/errors.rb +11 -0
  36. data/lib/karafka/helpers/async.rb +3 -1
  37. data/lib/karafka/instrumentation/callbacks/rebalance.rb +5 -1
  38. data/lib/karafka/instrumentation/logger_listener.rb +86 -23
  39. data/lib/karafka/instrumentation/proctitle_listener.rb +5 -1
  40. data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +2 -2
  41. data/lib/karafka/messages/builders/batch_metadata.rb +1 -1
  42. data/lib/karafka/pro/cleaner.rb +8 -0
  43. data/lib/karafka/pro/cli/parallel_segments/base.rb +89 -0
  44. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +164 -0
  45. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +164 -0
  46. data/lib/karafka/pro/cli/parallel_segments.rb +60 -0
  47. data/lib/karafka/pro/connection/manager.rb +5 -8
  48. data/lib/karafka/pro/encryption.rb +8 -0
  49. data/lib/karafka/pro/instrumentation/performance_tracker.rb +1 -1
  50. data/lib/karafka/pro/iterator/expander.rb +5 -3
  51. data/lib/karafka/pro/iterator/tpl_builder.rb +23 -0
  52. data/lib/karafka/pro/loader.rb +10 -0
  53. data/lib/karafka/pro/processing/coordinator.rb +4 -1
  54. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +27 -3
  55. data/lib/karafka/pro/processing/coordinators/filters_applier.rb +11 -0
  56. data/lib/karafka/pro/processing/filters/base.rb +10 -2
  57. data/lib/karafka/pro/processing/filters/expirer.rb +5 -0
  58. data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +2 -2
  59. data/lib/karafka/pro/processing/filters/virtual_limiter.rb +5 -0
  60. data/lib/karafka/pro/processing/parallel_segments/filters/base.rb +73 -0
  61. data/lib/karafka/pro/processing/parallel_segments/filters/default.rb +85 -0
  62. data/lib/karafka/pro/processing/parallel_segments/filters/mom.rb +66 -0
  63. data/lib/karafka/pro/processing/partitioner.rb +1 -13
  64. data/lib/karafka/pro/processing/piping/consumer.rb +13 -13
  65. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +1 -1
  66. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +1 -1
  67. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +1 -1
  68. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +1 -1
  69. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +1 -1
  70. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +1 -1
  71. data/lib/karafka/pro/processing/strategies/default.rb +36 -8
  72. data/lib/karafka/pro/processing/strategies/dlq/default.rb +14 -10
  73. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +1 -1
  74. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +1 -1
  75. data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +3 -1
  76. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +1 -1
  77. data/lib/karafka/pro/processing/strategies/ftr/default.rb +1 -1
  78. data/lib/karafka/pro/processing/strategies/lrj/default.rb +4 -1
  79. data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +1 -1
  80. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +1 -1
  81. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +1 -1
  82. data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +50 -0
  83. data/lib/karafka/pro/processing/virtual_partitions/distributors/base.rb +29 -0
  84. data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +27 -0
  85. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +8 -4
  86. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +3 -3
  87. data/lib/karafka/pro/recurring_tasks/setup/config.rb +7 -2
  88. data/lib/karafka/pro/recurring_tasks.rb +13 -0
  89. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +1 -1
  90. data/lib/karafka/pro/routing/features/multiplexing/config.rb +1 -0
  91. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +17 -0
  92. data/lib/karafka/pro/routing/features/multiplexing/proxy.rb +5 -2
  93. data/lib/karafka/pro/routing/features/multiplexing/subscription_group.rb +8 -1
  94. data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +47 -0
  95. data/lib/karafka/pro/routing/features/parallel_segments/config.rb +27 -0
  96. data/lib/karafka/pro/routing/features/parallel_segments/consumer_group.rb +83 -0
  97. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +49 -0
  98. data/lib/karafka/pro/routing/features/parallel_segments/topic.rb +43 -0
  99. data/lib/karafka/pro/routing/features/parallel_segments.rb +24 -0
  100. data/lib/karafka/pro/routing/features/patterns/pattern.rb +1 -1
  101. data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +2 -2
  102. data/lib/karafka/pro/routing/features/scheduled_messages/builder.rb +10 -6
  103. data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +20 -2
  104. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +1 -0
  105. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +8 -2
  106. data/lib/karafka/pro/scheduled_messages/consumer.rb +14 -15
  107. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +9 -6
  108. data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +7 -1
  109. data/lib/karafka/pro/scheduled_messages/max_epoch.rb +15 -6
  110. data/lib/karafka/pro/scheduled_messages.rb +13 -0
  111. data/lib/karafka/processing/coordinators_buffer.rb +1 -0
  112. data/lib/karafka/processing/strategies/default.rb +4 -4
  113. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -0
  114. data/lib/karafka/routing/subscription_group.rb +1 -1
  115. data/lib/karafka/runner.rb +7 -1
  116. data/lib/karafka/server.rb +5 -0
  117. data/lib/karafka/setup/attributes_map.rb +2 -0
  118. data/lib/karafka/setup/config.rb +22 -1
  119. data/lib/karafka/setup/defaults_injector.rb +26 -1
  120. data/lib/karafka/status.rb +6 -1
  121. data/lib/karafka/swarm/node.rb +31 -0
  122. data/lib/karafka/swarm/supervisor.rb +4 -0
  123. data/lib/karafka/templates/karafka.rb.erb +14 -1
  124. data/lib/karafka/version.rb +1 -1
  125. data/lib/karafka.rb +17 -9
  126. data/renovate.json +14 -2
  127. data.tar.gz.sig +0 -0
  128. metadata +36 -11
  129. metadata.gz.sig +0 -0
data/lib/karafka/instrumentation/logger_listener.rb

@@ -24,14 +24,29 @@ module Karafka
   @log_polling = log_polling
 end

+# Logs the subscription details when a connection listener starts its fetch loop
+#
+# @param event [Karafka::Core::Monitoring::Event] event details including payload
+def on_connection_listener_before_fetch_loop(event)
+  listener_id = event[:caller].id
+  subscription_group = event[:subscription_group]
+  consumer_group_id = subscription_group.consumer_group.id
+  topics = subscription_group.topics.select(&:active?).map(&:name).join(', ')
+  group_details = "#{consumer_group_id}/#{subscription_group.id}"
+
+  info(
+    "[#{listener_id}] Group #{group_details} subscribing to topics: #{topics}"
+  )
+end
+
 # Logs each message-fetching attempt
 #
 # @param event [Karafka::Core::Monitoring::Event] event details including payload
 def on_connection_listener_fetch_loop(event)
   return unless log_polling?

-  listener = event[:caller]
-  debug "[#{listener.id}] Polling messages..."
+  listener_id = event[:caller].id
+  debug "[#{listener_id}] Polling messages..."
 end

 # Logs about messages that we've received from Kafka
@@ -40,11 +55,11 @@ module Karafka
 def on_connection_listener_fetch_loop_received(event)
   return unless log_polling?

-  listener = event[:caller]
+  listener_id = event[:caller].id
   time = event[:time].round(2)
   messages_count = event[:messages_buffer].size

-  message = "[#{listener.id}] Polled #{messages_count} messages in #{time}ms"
+  message = "[#{listener_id}] Polled #{messages_count} messages in #{time}ms"

   # We don't want the "polled 0" in dev as it would spam the log
   # Instead we publish only info when there was anything we could poll and fail over to the
@@ -147,7 +162,8 @@ module Karafka
 #
 # @param event [Karafka::Core::Monitoring::Event] event details including payload
 def on_process_notice_signal(event)
-  info "Received #{event[:signal]} system signal"
+  server_id = Karafka::Server.id
+  info "[#{server_id}] Received #{event[:signal]} system signal"

   # We print backtrace only for ttin
   return unless event[:signal] == :SIGTTIN
@@ -168,38 +184,76 @@ module Karafka

 # Logs info that we're running Karafka app.
 #
-# @param _event [Karafka::Core::Monitoring::Event] event details including payload
-def on_app_running(_event)
-  info "Running in #{RUBY_DESCRIPTION}"
-  info "Running Karafka #{Karafka::VERSION} server"
+# @param event [Karafka::Core::Monitoring::Event] event details including payload
+def on_app_running(event)
+  server_id = event[:server_id]
+
+  info "[#{server_id}] Running in #{RUBY_DESCRIPTION}"
+  info "[#{server_id}] Running Karafka #{Karafka::VERSION} server"

   return if Karafka.pro?

-  info 'See LICENSE and the LGPL-3.0 for licensing details'
+  info "[#{server_id}] See LICENSE and the LGPL-3.0 for licensing details"
 end

-# @param _event [Karafka::Core::Monitoring::Event] event details including payload
-def on_app_quieting(_event)
-  info 'Switching to quiet mode. New messages will not be processed'
+# @param event [Karafka::Core::Monitoring::Event] event details including payload
+def on_app_quieting(event)
+  info "[#{event[:server_id]}] Switching to quiet mode. New messages will not be processed"
 end

-# @param _event [Karafka::Core::Monitoring::Event] event details including payload
-def on_app_quiet(_event)
-  info 'Reached quiet mode. No messages will be processed anymore'
+# @param event [Karafka::Core::Monitoring::Event] event details including payload
+def on_app_quiet(event)
+  info "[#{event[:server_id]}] Reached quiet mode. No messages will be processed anymore"
 end

 # Logs info that we're going to stop the Karafka server.
 #
-# @param _event [Karafka::Core::Monitoring::Event] event details including payload
-def on_app_stopping(_event)
-  info 'Stopping Karafka server'
+# @param event [Karafka::Core::Monitoring::Event] event details including payload
+def on_app_stopping(event)
+  info "[#{event[:server_id]}] Stopping Karafka server"
 end

 # Logs info that we stopped the Karafka server.
 #
-# @param _event [Karafka::Core::Monitoring::Event] event details including payload
-def on_app_stopped(_event)
-  info 'Stopped Karafka server'
+# @param event [Karafka::Core::Monitoring::Event] event details including payload
+def on_app_stopped(event)
+  info "[#{event[:server_id]}] Stopped Karafka server"
+end
+
+# Logs info about partitions we have lost
+#
+# @param event [Karafka::Core::Monitoring::Event] event details with revoked partitions
+def on_rebalance_partitions_revoked(event)
+  revoked_partitions = event[:tpl].to_h.transform_values { |part| part.map(&:partition) }
+  group_id = event[:consumer_group_id]
+  client_id = event[:client_id]
+  group_prefix = "[#{client_id}] Group #{group_id} rebalance"
+
+  if revoked_partitions.empty?
+    info "#{group_prefix}: No partitions revoked"
+  else
+    revoked_partitions.each do |topic, partitions|
+      info "#{group_prefix}: Partition(s) #{partitions.join(', ')} of #{topic} revoked"
+    end
+  end
+end
+
+# Logs info about partitions that we've gained
+#
+# @param event [Karafka::Core::Monitoring::Event] event details with assigned partitions
+def on_rebalance_partitions_assigned(event)
+  assigned_partitions = event[:tpl].to_h.transform_values { |part| part.map(&:partition) }
+  group_id = event[:consumer_group_id]
+  client_id = event[:client_id]
+  group_prefix = "[#{client_id}] Group #{group_id} rebalance"
+
+  if assigned_partitions.empty?
+    info "#{group_prefix}: No partitions assigned"
+  else
+    assigned_partitions.each do |topic, partitions|
+      info "#{group_prefix}: Partition(s) #{partitions.join(', ')} of #{topic} assigned"
+    end
+  end
 end

 # Logs info when we have dispatched a message to the DLQ
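The two rebalance handlers reduce an rdkafka topic-partition list (`event[:tpl]`) to plain partition ids per topic before logging. A minimal sketch of that reduction, using a hypothetical struct in place of rdkafka's partition objects:

# Hypothetical stand-in for the rdkafka partition objects held in the tpl;
# only the #partition reader matters for these log lines.
Partition = Struct.new(:partition)

tpl_hash = { 'orders_events' => [Partition.new(0), Partition.new(1)] }

revoked_partitions = tpl_hash.transform_values { |part| part.map(&:partition) }
# => { 'orders_events' => [0, 1] }

# With a client id of "client-1" and a consumer group "orders_app", the
# listener would then log:
# [client-1] Group orders_app rebalance: Partition(s) 0, 1 of orders_events revoked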
@@ -371,9 +425,18 @@ module Karafka
 when 'connection.client.unsubscribe.error'
   error "Client unsubscribe error occurred: #{error}"
   error details
+when 'parallel_segments.reducer.error'
+  error "Parallel segments reducer error occurred: #{error}"
+  error details
+when 'parallel_segments.partitioner.error'
+  error "Parallel segments partitioner error occurred: #{error}"
+  error details
+when 'virtual_partitions.partitioner.error'
+  error "Virtual partitions partitioner error occurred: #{error}"
+  error details
 # This handles any custom errors coming from places like Web-UI, etc
 else
-  error "#{type} error occurred: #{error}"
+  error "#{type} error occurred: #{error.class} - #{error}"
   error details
 end
 end
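All of the above lands in the stock logger listener, so nothing new needs wiring beyond the usual monitor subscription. A minimal sketch, with the `log_polling:` flag taken from the initializer visible in the first hunk:

require 'karafka'

# Standard subscription of the stock listener. With log_polling: false the
# per-poll debug/info lines are muted, while the new subscription, rebalance
# and error lines (the fallback branch now includes the error class) still
# appear.
Karafka.monitor.subscribe(
  Karafka::Instrumentation::LoggerListener.new(log_polling: false)
)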
data/lib/karafka/instrumentation/proctitle_listener.rb

@@ -4,6 +4,10 @@ module Karafka
 module Instrumentation
   # Listener that sets a proc title with a nice descriptive value
   class ProctitleListener
+    include Helpers::ConfigImporter.new(
+      client_id: %i[client_id]
+    )
+
     Status::STATES.each_key do |state|
       class_eval <<~RUBY, __FILE__, __LINE__ + 1
         # Updates proc title to an appropriate state
@@ -19,7 +23,7 @@ module Karafka
     # @param status [String] any status we want to set
     def setproctitle(status)
       ::Process.setproctitle(
-        "karafka #{Karafka::App.config.client_id} (#{status})"
+        "karafka #{client_id} (#{status})"
       )
     end
   end
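Behaviorally nothing changes here; the listener now reads `client_id` through the shared `ConfigImporter` helper instead of reaching into `Karafka::App.config` on every title update. Standard usage stays the same:

# Stock subscription; process listings then show titles such as
# "karafka my_app_client (running)"
Karafka.monitor.subscribe(Karafka::Instrumentation::ProctitleListener.new)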
data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb

@@ -131,11 +131,11 @@ module Karafka
   tags = consumer_tags(consumer)
   tags.concat(default_tags)

-  count('consumer.messages', messages.count, tags: tags)
+  count('consumer.messages', messages.size, tags: tags)
   count('consumer.batches', 1, tags: tags)
   gauge('consumer.offset', metadata.last_offset, tags: tags)
   histogram('consumer.consumed.time_taken', event[:time], tags: tags)
-  histogram('consumer.batch_size', messages.count, tags: tags)
+  histogram('consumer.batch_size', messages.size, tags: tags)
   histogram('consumer.processing_lag', metadata.processing_lag, tags: tags)
   histogram('consumer.consumption_lag', metadata.consumption_lag, tags: tags)
 end
data/lib/karafka/messages/builders/batch_metadata.rb

@@ -18,7 +18,7 @@ module Karafka
 # picked up for processing.
 def call(messages, topic, partition, scheduled_at)
   Karafka::Messages::BatchMetadata.new(
-    size: messages.count,
+    size: messages.size,
     first_offset: messages.first&.offset || -1001,
     last_offset: messages.last&.offset || -1001,
     deserializers: topic.deserializers,
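This hunk and the Datadog one above swap `#count` for `#size`. Both return the batch length here, but `#size` is the cheaper habit: on an array it is a direct length read, whereas a generic `Enumerable#count` may have to iterate on non-Array enumerables such as wrapped message collections. A trivial illustration:

messages = Array.new(10_000) { |i| "payload-#{i}" }

messages.size  # => 10000, direct length read
messages.count # => 10000, same value here; on non-Array enumerables
               #    #count may fall back to full iteration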
@@ -28,6 +28,14 @@ module Karafka
 def post_setup(_config)
   true
 end
+
+# This feature does not need any changes post-fork
+#
+# @param _config [Karafka::Core::Configurable::Node]
+# @param _pre_fork_producer [WaterDrop::Producer]
+def post_fork(_config, _pre_fork_producer)
+  true
+end
 end
 end
 end
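This hunk belongs to the Pro feature modules; per the file list, `cleaner.rb` and `encryption.rb` both grow by the same eight lines. It extends the feature API with a `post_fork` hook, presumably invoked around swarm forking (`swarm/node.rb` and `pro/loader.rb` also change in this release). A hypothetical feature that did hold fork-sensitive state might implement it as:

# Hypothetical feature module; only the hook signature mirrors the diff
# above, the body is illustrative.
module MyProFeature
  class << self
    # @param _config [Karafka::Core::Configurable::Node]
    # @param _pre_fork_producer [WaterDrop::Producer]
    def post_fork(_config, _pre_fork_producer)
      # Rebuild per-process resources here (connections, producers, etc.)
      # instead of sharing them across forked nodes.
      true
    end
  end
end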
data/lib/karafka/pro/cli/parallel_segments/base.rb (new file)

@@ -0,0 +1,89 @@
+# frozen_string_literal: true
+
+# This code is part of Karafka Pro, a commercial component not licensed under LGPL.
+# See LICENSE for details.
+
+module Karafka
+  module Pro
+    module Cli
+      class ParallelSegments < Karafka::Cli::Base
+        # Base class for all the parallel segments related operations
+        class Base
+          include Helpers::Colorize
+
+          # @param options [Hash] cli flags options
+          def initialize(options)
+            @options = options
+          end
+
+          private
+
+          # @return [Hash]
+          attr_reader :options
+
+          # Returns the consumer groups for parallel segments with which we should be working
+          #
+          # @return [Hash<String, Array<Karafka::Routing::ConsumerGroup>>] hash with all the
+          #   parallel consumer groups as values, keyed by the name of their segment origin
+          #   consumer group
+          def applicable_groups
+            requested_groups = options[:groups].dup || []
+
+            workable_groups = ::Karafka::App
+                              .routes
+                              .select(&:parallel_segments?)
+                              .group_by(&:segment_origin)
+
+            # Use all if none provided
+            return workable_groups if requested_groups.empty?
+
+            applicable_groups = {}
+
+            requested_groups.each do |requested_group|
+              workable_group = workable_groups[requested_group]
+
+              if workable_group
+                requested_groups.delete(requested_group)
+                applicable_groups[requested_group] = workable_group
+              else
+                raise(
+                  ::Karafka::Errors::ConsumerGroupNotFoundError,
+                  "Consumer group #{requested_group} was not found"
+                )
+              end
+            end
+
+            applicable_groups
+          end
+
+          # Collects the offsets for the segment origin consumer group and the parallel
+          # segments consumer groups. We use the segment origin cg offsets as a baseline for
+          # the distribution and use the existing (if any) parallel segments cgs offsets for
+          # validations.
+          #
+          # @param segment_origin [String] name of the origin consumer group
+          # @param segments [Array<Karafka::Routing::ConsumerGroup>]
+          # @return [Hash] fetched offsets for all the cg topics for all the consumer groups
+          def collect_offsets(segment_origin, segments)
+            topics_names = segments.first.topics.map(&:name)
+            consumer_groups = [segment_origin, segments.map(&:name)].flatten
+
+            consumer_groups_with_topics = consumer_groups
+                                          .map { |name| [name, topics_names] }
+                                          .to_h
+
+            lags_with_offsets = Karafka::Admin.read_lags_with_offsets(
+              consumer_groups_with_topics
+            )
+
+            lags_with_offsets.each do |_cg_name, topics|
+              topics.each do |_topic_name, partitions|
+                partitions.transform_values! { |details| details[:offset] }
+              end
+            end
+
+            lags_with_offsets
+          end
+        end
+      end
+    end
+  end
+end
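`collect_offsets` leans on `Karafka::Admin.read_lags_with_offsets` and then strips everything except the offset. A sketch of the shapes involved; the exact nesting is an assumption inferred from the `transform_values!` call above, and all names are hypothetical:

# Assumed input shape: consumer group => topic => partition => details
lags_with_offsets = {
  'orders_app'            => { 'orders_events' => { 0 => { offset: 120, lag: 4 } } },
  'orders_app-parallel-0' => { 'orders_events' => { 0 => { offset: -1, lag: 0 } } }
}

# After the transform_values! pass, only the offsets remain:
# {
#   'orders_app'            => { 'orders_events' => { 0 => 120 } },
#   'orders_app-parallel-0' => { 'orders_events' => { 0 => -1 } }
# }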
data/lib/karafka/pro/cli/parallel_segments/collapse.rb (new file)

@@ -0,0 +1,164 @@
+# frozen_string_literal: true
+
+# This code is part of Karafka Pro, a commercial component not licensed under LGPL.
+# See LICENSE for details.
+
+module Karafka
+  module Pro
+    module Cli
+      class ParallelSegments < Karafka::Cli::Base
+        # Takes the committed offset of each parallel segment for each topic and records
+        # them back onto the segment origin consumer group. Without `--force` it will raise
+        # an error on conflicts. With `--force` it will take the lowest possible offset for
+        # each topic partition as the baseline.
+        #
+        # @note Running this can cause some double processing if the final offsets of the
+        #   parallel segments are not aligned.
+        #
+        # @note This will **not** remove the parallel segments consumer groups. Please use
+        #   the Admin API if you want them to be removed.
+        class Collapse < Base
+          # Runs the collapse operation
+          def call
+            puts 'Starting parallel segments collapse...'
+
+            segments_count = applicable_groups.size
+
+            if segments_count.zero?
+              puts "#{red('No')} consumer groups with parallel segments configuration found"
+
+              return
+            end
+
+            puts(
+              "Found #{green(segments_count)} consumer groups with parallel segments configuration"
+            )
+
+            collapses = []
+
+            applicable_groups.each do |segment_origin, segments|
+              puts
+              puts "Collecting group #{yellow(segment_origin)} details..."
+              offsets = collect_offsets(segment_origin, segments)
+
+              unless options.key?(:force)
+                puts
+                puts "Validating offsets positions for #{yellow(segment_origin)} consumer group..."
+                validate!(offsets, segment_origin)
+              end
+
+              puts
+              puts "Computing collapsed offsets for #{yellow(segment_origin)} consumer group..."
+              collapses << collapse(offsets, segments)
+            end
+
+            collapses.each do |collapse|
+              apply(collapse)
+            end
+
+            puts
+            puts "Collapse completed #{green('successfully')}!"
+          end
+
+          private
+
+          # Computes the lowest possible offset available for each topic partition and sets
+          # it on the segment origin consumer group.
+          #
+          # @param offsets [Hash]
+          # @param segments [Array<Karafka::Routing::ConsumerGroup>]
+          # @note This code does **not** apply the offsets, it only computes their positions
+          def collapse(offsets, segments)
+            collapse = Hash.new { |h, k| h[k] = {} }
+            segments_names = segments.map(&:name)
+
+            offsets.each do |cg_name, topics|
+              next unless segments_names.include?(cg_name)
+
+              topics.each do |topic_name, partitions|
+                partitions.each do |partition_id, offset|
+                  current_lowest_offset = collapse[topic_name][partition_id]
+
+                  next if current_lowest_offset && current_lowest_offset < offset
+
+                  collapse[topic_name][partition_id] = offset
+                end
+              end
+            end
+
+            {
+              collapse: collapse,
+              segment_origin: segments.first.segment_origin
+            }
+          end
+
+          # In order to collapse the offsets of parallel segments back into one group, we
+          # need to know which offsets to collapse to. The issue (that we solve by picking
+          # the lowest when forced) arises when a topic partition has several distinct
+          # offsets across the parallel segments. We should let the user know when this
+          # happens so they do not end up with double processing.
+          #
+          # @param offsets [Hash]
+          # @param segment_origin [String]
+          def validate!(offsets, segment_origin)
+            collapse = Hash.new { |h, k| h[k] = {} }
+
+            offsets.each do |cg_name, topics|
+              next if cg_name == segment_origin
+
+              topics.each do |topic_name, partitions|
+                partitions.each do |partition_id, offset|
+                  collapse[topic_name][partition_id] ||= Set.new
+                  collapse[topic_name][partition_id] << offset
+                end
+              end
+            end
+
+            inconclusive = false
+
+            collapse.each do |topic_name, partitions|
+              partitions.each do |partition_id, parallel_offsets|
+                next if parallel_offsets.size <= 1
+
+                inconclusive = true
+
+                puts(
+                  "  Inconclusive offsets for #{red(topic_name)}##{red(partition_id)}:" \
+                  " #{parallel_offsets.to_a.join(', ')}"
+                )
+              end
+            end
+
+            return unless inconclusive
+
+            raise(
+              ::Karafka::Errors::CommandValidationError,
+              "Parallel segments for #{red(segment_origin)} have #{red('inconclusive')} offsets"
+            )
+          end
+
+          # Applies the collapsed lowest offsets onto the segment origin consumer group
+          #
+          # @param collapse [Hash]
+          def apply(collapse)
+            segment_origin = collapse[:segment_origin]
+            alignments = collapse[:collapse]
+
+            puts
+            puts "Adjusting offsets of segment origin consumer group: #{green(segment_origin)}"
+
+            alignments.each do |topic_name, partitions|
+              puts "  Topic #{green(topic_name)}:"
+
+              partitions.each do |partition_id, offset|
+                puts "    Partition #{green(partition_id)}: starting offset #{green(offset)}"
+              end
+            end
+
+            Karafka::Admin.seek_consumer_group(segment_origin, alignments)
+          end
+        end
+      end
+    end
+  end
+end
1
+ # frozen_string_literal: true
2
+
3
+ # This code is part of Karafka Pro, a commercial component not licensed under LGPL.
4
+ # See LICENSE for details.
5
+
6
+ module Karafka
7
+ module Pro
8
+ module Cli
9
+ class ParallelSegments < Karafka::Cli::Base
10
+ # Command that makes it easier for users to migrate from regular consumer groups to
11
+ # the parallel segments consumers groups by automatically distributing offsets based on
12
+ # the used "normal" consumer group.
13
+ #
14
+ # Takes the segments origin consumer group offsets for a given set of topics and
15
+ # distributes those offsets onto the parallel segments consumer groups, so they can pick
16
+ # up where the origin group left.
17
+ #
18
+ # To make sure users do not accidentally "re-distribute" their offsets from the original
19
+ # consumer group after the parallel consumer groups had offsets assigned and started to
20
+ # work, we check if the parallel groups have any offsets, if so unless forced we halt.
21
+ #
22
+ # @note This command does not remove the original consumer group from Kafka. We keep it
23
+ # just as a backup. User can remove it himself.
24
+ #
25
+ # @note Kafka has no atomic operations this is why we first collect all the data and run
26
+ # needed validations before applying offsets.
27
+ class Distribute < Base
28
+ # Runs the distribution process
29
+ def call
30
+ puts 'Starting parallel segments distribution...'
31
+
32
+ segments_count = applicable_groups.size
33
+
34
+ if segments_count.zero?
35
+ puts "#{red('No')} consumer groups with parallel segments configuration found"
36
+
37
+ return
38
+ end
39
+
40
+ puts(
41
+ "Found #{green(segments_count)} consumer groups with parallel segments configuration"
42
+ )
43
+
44
+ distributions = []
45
+
46
+ applicable_groups.each do |segment_origin, segments|
47
+ puts
48
+ puts "Collecting group #{yellow(segment_origin)} details..."
49
+ offsets = collect_offsets(segment_origin, segments)
50
+
51
+ unless options.key?(:force)
52
+ puts "Validating group #{yellow(segment_origin)} parallel segments..."
53
+ validate!(offsets, segments)
54
+ end
55
+
56
+ puts "Distributing group #{yellow(segment_origin)} offsets..."
57
+ distributions += distribute(offsets, segments)
58
+ end
59
+
60
+ distributions.each do |distribution|
61
+ apply(distribution)
62
+ end
63
+
64
+ puts
65
+ puts "Distribution completed #{green('successfully')}!"
66
+ end
67
+
68
+ private
69
+
70
+ # Validates the current state of topics offsets assignments.
71
+ # We want to make sure, that users do not run distribution twice, especially for a
72
+ # parallel segments consumers group set that was already actively consumed. This is why
73
+ # we check if there was any offsets already present in the parallel segments consumer
74
+ # groups and if so, we raise an error. This can be disabled with `--force`.
75
+ #
76
+ # It prevents users from overwriting the already set segments distribution.
77
+ # Adding new topics to the same parallel segments consumer group does not require us to
78
+ # run this at all and on top of that users can always use `--consumer_groups` flag to
79
+ # limit the cgs that we will be operating here
80
+ #
81
+ # @param offsets [Hash]
82
+ # @param segments [Array<Karafka::Routing::ConsumerGroup>]
83
+ def validate!(offsets, segments)
84
+ segments_names = segments.map(&:name)
85
+
86
+ offsets.each do |cg_name, topics|
87
+ next unless segments_names.include?(cg_name)
88
+
89
+ topics.each do |topic_name, partitions|
90
+ partitions.each do |partition_id, offset|
91
+ next unless offset.to_i.positive?
92
+
93
+ raise(
94
+ ::Karafka::Errors::CommandValidationError,
95
+ "Parallel segment #{red(cg_name)} already has offset #{red(offset)}" \
96
+ " set for #{red("#{topic_name}##{partition_id}")}"
97
+ )
98
+ end
99
+ end
100
+ end
101
+ end
102
+
103
+ # Computes the offsets distribution for all the segments consumer groups so when user
104
+ # migrates from one CG to parallel segments, those segments know where to start consuming
105
+ # the data.
106
+ #
107
+ # @param offsets [Hash]
108
+ # @param segments [Array<Karafka::Routing::ConsumerGroup>]
109
+ # @note This code does **not** apply the offsets, just computes their positions
110
+ def distribute(offsets, segments)
111
+ distributions = []
112
+ segments_names = segments.map(&:name)
113
+
114
+ offsets.each do |cg_name, topics|
115
+ next if segments_names.include?(cg_name)
116
+
117
+ distribution = {}
118
+
119
+ topics.each do |topic_name, partitions|
120
+ partitions.each do |partition_id, offset|
121
+ distribution[topic_name] ||= {}
122
+ distribution[topic_name][partition_id] = offset
123
+ end
124
+ end
125
+
126
+ next if distribution.empty?
127
+
128
+ segments_names.each do |segment_name|
129
+ distributions << {
130
+ segment_name: segment_name,
131
+ distribution: distribution
132
+ }
133
+ end
134
+ end
135
+
136
+ distributions
137
+ end
138
+
139
+ # Takes the details of the distribution of offsets for a given segment and adjust the
140
+ # starting offsets for all the consumer group topics based on the distribution.
141
+ #
142
+ # @param distribution [Hash]
143
+ def apply(distribution)
144
+ segment_name = distribution[:segment_name]
145
+ alignments = distribution[:distribution]
146
+
147
+ puts
148
+ puts "Adjusting offsets of parallel segments consumer group: #{green(segment_name)}"
149
+
150
+ alignments.each do |topic_name, partitions|
151
+ puts " Topic #{green(topic_name)}:"
152
+
153
+ partitions.each do |partition_id, offset|
154
+ puts " Partition #{green(partition_id)}: starting offset #{green(offset)}"
155
+ end
156
+ end
157
+
158
+ Karafka::Admin.seek_consumer_group(segment_name, alignments)
159
+ end
160
+ end
161
+ end
162
+ end
163
+ end
164
+ end
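A worked example of what `distribute` computes before anything is applied, with hypothetical group and topic names:

# Offsets as returned by collect_offsets: the origin group plus two fresh
# parallel segment groups that have not consumed anything yet.
offsets = {
  'orders_app'            => { 'orders_events' => { 0 => 500, 1 => 730 } },
  'orders_app-parallel-0' => { 'orders_events' => { 0 => -1, 1 => -1 } },
  'orders_app-parallel-1' => { 'orders_events' => { 0 => -1, 1 => -1 } }
}

# The -1 offsets pass validate! (nothing positive committed), and only the
# origin group contributes a distribution. The same starting offsets are
# then replicated to every segment group:
# [
#   { segment_name: 'orders_app-parallel-0',
#     distribution: { 'orders_events' => { 0 => 500, 1 => 730 } } },
#   { segment_name: 'orders_app-parallel-1',
#     distribution: { 'orders_events' => { 0 => 500, 1 => 730 } } }
# ]
# Each entry is applied via Karafka::Admin.seek_consumer_group, after which
# the segment groups pick up exactly where the origin group left off.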