karafka 2.5.4.rc1 → 2.5.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -1
- data/LICENSE-COMM +4 -2
- data/lib/karafka/admin/acl.rb +127 -80
- data/lib/karafka/admin/configs.rb +84 -70
- data/lib/karafka/admin/consumer_groups.rb +377 -330
- data/lib/karafka/admin/replication.rb +287 -263
- data/lib/karafka/admin/topics.rb +232 -186
- data/lib/karafka/admin.rb +277 -117
- data/lib/karafka/pro/active_job/consumer.rb +19 -2
- data/lib/karafka/pro/active_job/dispatcher.rb +19 -2
- data/lib/karafka/pro/active_job/job_options_contract.rb +19 -2
- data/lib/karafka/pro/base_consumer.rb +19 -2
- data/lib/karafka/pro/cleaner/errors.rb +19 -2
- data/lib/karafka/pro/cleaner/messages/message.rb +19 -2
- data/lib/karafka/pro/cleaner/messages/messages.rb +19 -2
- data/lib/karafka/pro/cleaner/messages/metadata.rb +19 -2
- data/lib/karafka/pro/cleaner.rb +19 -2
- data/lib/karafka/pro/cli/contracts/server.rb +19 -2
- data/lib/karafka/pro/cli/parallel_segments/base.rb +19 -2
- data/lib/karafka/pro/cli/parallel_segments/collapse.rb +19 -2
- data/lib/karafka/pro/cli/parallel_segments/distribute.rb +19 -2
- data/lib/karafka/pro/cli/parallel_segments.rb +19 -2
- data/lib/karafka/pro/connection/manager.rb +19 -2
- data/lib/karafka/pro/connection/multiplexing/listener.rb +19 -2
- data/lib/karafka/pro/contracts/base.rb +19 -2
- data/lib/karafka/pro/encryption/cipher.rb +19 -2
- data/lib/karafka/pro/encryption/contracts/config.rb +19 -2
- data/lib/karafka/pro/encryption/errors.rb +19 -2
- data/lib/karafka/pro/encryption/messages/middleware.rb +19 -2
- data/lib/karafka/pro/encryption/messages/parser.rb +19 -2
- data/lib/karafka/pro/encryption/setup/config.rb +19 -2
- data/lib/karafka/pro/encryption.rb +19 -2
- data/lib/karafka/pro/instrumentation/performance_tracker.rb +19 -2
- data/lib/karafka/pro/iterator/expander.rb +19 -2
- data/lib/karafka/pro/iterator/tpl_builder.rb +19 -2
- data/lib/karafka/pro/iterator.rb +19 -2
- data/lib/karafka/pro/loader.rb +19 -2
- data/lib/karafka/pro/processing/adaptive_iterator/consumer.rb +19 -2
- data/lib/karafka/pro/processing/adaptive_iterator/tracker.rb +19 -2
- data/lib/karafka/pro/processing/collapser.rb +19 -2
- data/lib/karafka/pro/processing/coordinator.rb +19 -2
- data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +19 -2
- data/lib/karafka/pro/processing/coordinators/filters_applier.rb +19 -2
- data/lib/karafka/pro/processing/coordinators/virtual_offset_manager.rb +19 -2
- data/lib/karafka/pro/processing/executor.rb +19 -2
- data/lib/karafka/pro/processing/expansions_selector.rb +19 -2
- data/lib/karafka/pro/processing/filters/base.rb +19 -2
- data/lib/karafka/pro/processing/filters/delayer.rb +19 -2
- data/lib/karafka/pro/processing/filters/expirer.rb +19 -2
- data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +19 -2
- data/lib/karafka/pro/processing/filters/throttler.rb +19 -2
- data/lib/karafka/pro/processing/filters/virtual_limiter.rb +19 -2
- data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +19 -2
- data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +19 -2
- data/lib/karafka/pro/processing/jobs/periodic.rb +19 -2
- data/lib/karafka/pro/processing/jobs/periodic_non_blocking.rb +19 -2
- data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +19 -2
- data/lib/karafka/pro/processing/jobs_builder.rb +19 -2
- data/lib/karafka/pro/processing/jobs_queue.rb +19 -2
- data/lib/karafka/pro/processing/offset_metadata/consumer.rb +19 -2
- data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +19 -2
- data/lib/karafka/pro/processing/offset_metadata/listener.rb +19 -2
- data/lib/karafka/pro/processing/parallel_segments/filters/base.rb +19 -2
- data/lib/karafka/pro/processing/parallel_segments/filters/default.rb +19 -2
- data/lib/karafka/pro/processing/parallel_segments/filters/mom.rb +19 -2
- data/lib/karafka/pro/processing/partitioner.rb +19 -2
- data/lib/karafka/pro/processing/periodic_job/consumer.rb +19 -2
- data/lib/karafka/pro/processing/piping/consumer.rb +19 -2
- data/lib/karafka/pro/processing/schedulers/base.rb +19 -2
- data/lib/karafka/pro/processing/schedulers/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/ftr_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/ftr_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/base.rb +19 -2
- data/lib/karafka/pro/processing/strategies/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/lrj_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/ftr/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/ftr/vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/ftr_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/ftr_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/mom/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/mom/ftr.rb +19 -2
- data/lib/karafka/pro/processing/strategies/mom/ftr_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/mom/vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/vp/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies.rb +19 -2
- data/lib/karafka/pro/processing/strategy_selector.rb +19 -2
- data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +19 -2
- data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +19 -2
- data/lib/karafka/pro/processing/virtual_partitions/distributors/base.rb +19 -2
- data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/consumer.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/contracts/config.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/contracts/task.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/deserializer.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/dispatcher.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/errors.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/executor.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/listener.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/matcher.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/schedule.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/serializer.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/setup/config.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/task.rb +19 -2
- data/lib/karafka/pro/recurring_tasks.rb +19 -2
- data/lib/karafka/pro/routing/features/active_job/builder.rb +19 -2
- data/lib/karafka/pro/routing/features/active_job.rb +19 -2
- data/lib/karafka/pro/routing/features/adaptive_iterator/config.rb +19 -2
- data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/adaptive_iterator.rb +19 -2
- data/lib/karafka/pro/routing/features/base.rb +19 -2
- data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/dead_letter_queue.rb +19 -2
- data/lib/karafka/pro/routing/features/delaying/config.rb +19 -2
- data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/delaying/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/delaying.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments/config.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments.rb +19 -2
- data/lib/karafka/pro/routing/features/expiring/config.rb +19 -2
- data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/expiring/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/expiring.rb +19 -2
- data/lib/karafka/pro/routing/features/filtering/config.rb +19 -2
- data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/filtering/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/filtering.rb +19 -2
- data/lib/karafka/pro/routing/features/inline_insights/config.rb +19 -2
- data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/inline_insights/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/inline_insights.rb +19 -2
- data/lib/karafka/pro/routing/features/long_running_job/config.rb +19 -2
- data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/long_running_job/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/long_running_job.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/config.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/patches/contracts/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/proxy.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/subscription_group.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing.rb +19 -2
- data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/non_blocking_job.rb +19 -2
- data/lib/karafka/pro/routing/features/offset_metadata/config.rb +19 -2
- data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/offset_metadata.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments/config.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/builder.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/config.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/detector.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/pattern.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/patterns.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/topics.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns.rb +19 -2
- data/lib/karafka/pro/routing/features/pausing/config.rb +19 -2
- data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/pausing/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/pausing.rb +19 -2
- data/lib/karafka/pro/routing/features/periodic_job/config.rb +19 -2
- data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/periodic_job/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/periodic_job.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks/config.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks/proxy.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages/builder.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages/config.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages/proxy.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages.rb +19 -2
- data/lib/karafka/pro/routing/features/swarm/config.rb +19 -2
- data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +19 -2
- data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/swarm/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/swarm.rb +19 -2
- data/lib/karafka/pro/routing/features/throttling/config.rb +19 -2
- data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/throttling/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/throttling.rb +19 -2
- data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +19 -2
- data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/virtual_partitions.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/consumer.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/contracts/config.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/contracts/message.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/day.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/dispatcher.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/errors.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/max_epoch.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/proxy.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/schema_validator.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/serializer.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/setup/config.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/state.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/tracker.rb +19 -2
- data/lib/karafka/pro/scheduled_messages.rb +19 -2
- data/lib/karafka/pro/swarm/liveness_listener.rb +19 -2
- data/lib/karafka/version.rb +1 -1
- metadata +2 -2
data/lib/karafka/admin/consumer_groups.rb
@@ -18,401 +18,448 @@ module Karafka
private_constant :LONG_TIME_AGO, :DAY_IN_SECONDS

class << self
- #
- #
- # @
- # existing offset
- # @param topics_with_partitions_and_offsets [Hash] Hash with list of topics and settings to
- # where to move given consumer. It allows us to move particular partitions or whole
- # topics if we want to reset all partitions to for example a point in time.
- #
- # @return [void]
- #
- # @note This method should **not** be executed on a running consumer group as it creates a
- # "fake" consumer and uses it to move offsets.
- #
- # @example Move a single topic partition nr 1 offset to 100
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 100 } })
- #
- # @example Move offsets on all partitions of a topic to 100
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 100 })
- #
- # @example Move offset to 5 seconds ago on partition 2
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 2 => 5.seconds.ago } })
- #
- # @example Move to the earliest offset on all the partitions of a topic
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 'earliest' })
- #
- # @example Move to the latest (high-watermark) offset on all the partitions of a topic
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 'latest' })
- #
- # @example Move offset of a single partition to earliest
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 'earliest' } })
- #
- # @example Move offset of a single partition to latest
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 'latest' } })
+ # @param consumer_group_id [String] consumer group for which we want to move offsets
+ # @param topics_with_partitions_and_offsets [Hash] hash with topics and settings
+ # @see #seek
def seek(consumer_group_id, topics_with_partitions_and_offsets)
-
+ new.seek(consumer_group_id, topics_with_partitions_and_offsets)
+ end

-
-
-
-
-
+ # @param previous_name [String] old consumer group name
+ # @param new_name [String] new consumer group name
+ # @param topics [Array<String>] topics for which we want to copy offsets
+ # @see #copy
+ def copy(previous_name, new_name, topics)
+ new.copy(previous_name, new_name, topics)
+ end

-
-
-
-
-
-
-
-
- end
+ # @param previous_name [String] old consumer group name
+ # @param new_name [String] new consumer group name
+ # @param topics [Array<String>] topics for which we want to migrate offsets
+ # @param delete_previous [Boolean] should we delete previous consumer group after rename
+ # @see #rename
+ def rename(previous_name, new_name, topics, delete_previous: true)
+ new.rename(previous_name, new_name, topics, delete_previous: delete_previous)
+ end

-
-
-
-
-
- # This remap allows us to transform some special cases in a reference that can be
- # understood by Kafka
- case casted_position
- # Earliest is not always 0. When compacting/deleting it can be much later, that's why
- # we fetch the oldest possible offset
- # false is treated the same as 'earliest'
- when 'earliest', false
- LONG_TIME_AGO
- # Latest will always be the high-watermark offset and we can get it just by getting
- # a future position
- when 'latest'
- Time.now + DAY_IN_SECONDS
- # Regular offset case
- else
- position
- end
- end
- end
+ # @param consumer_group_id [String] consumer group name
+ # @see #delete
+ def delete(consumer_group_id)
+ new.delete(consumer_group_id)
+ end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+ # @param consumer_group_id [String] consumer group id to trigger rebalance for
+ # @see #trigger_rebalance
+ def trigger_rebalance(consumer_group_id)
+ new.trigger_rebalance(consumer_group_id)
+ end
+
+ # @param consumer_groups_with_topics [Hash{String => Array<String>}] hash with consumer
+ # groups names with array of topics
+ # @param active_topics_only [Boolean] if set to false, will select also inactive topics
+ # @see #read_lags_with_offsets
+ def read_lags_with_offsets(consumer_groups_with_topics = {}, active_topics_only: true)
+ new.read_lags_with_offsets(
+ consumer_groups_with_topics,
+ active_topics_only: active_topics_only
+ )
+ end
+ end
+
+ # Moves the offset on a given consumer group and provided topic to the requested location
+ #
+ # @param consumer_group_id [String] id of the consumer group for which we want to move the
+ # existing offset
+ # @param topics_with_partitions_and_offsets [Hash] Hash with list of topics and settings to
+ # where to move given consumer. It allows us to move particular partitions or whole
+ # topics if we want to reset all partitions to for example a point in time.
+ #
+ # @return [void]
+ #
+ # @note This method should **not** be executed on a running consumer group as it creates a
+ # "fake" consumer and uses it to move offsets.
+ #
+ # @example Move a single topic partition nr 1 offset to 100
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 100 } })
+ #
+ # @example Move offsets on all partitions of a topic to 100
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 100 })
+ #
+ # @example Move offset to 5 seconds ago on partition 2
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 2 => 5.seconds.ago } })
+ #
+ # @example Move to the earliest offset on all the partitions of a topic
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 'earliest' })
+ #
+ # @example Move to the latest (high-watermark) offset on all the partitions of a topic
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 'latest' })
+ #
+ # @example Move offset of a single partition to earliest
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 'earliest' } })
+ #
+ # @example Move offset of a single partition to latest
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 'latest' } })
+ def seek(consumer_group_id, topics_with_partitions_and_offsets)
+ tpl_base = {}
+
+ # Normalize the data so we always have all partitions and topics in the same format
+ # That is in a format where we have topics and all partitions with their per partition
+ # assigned offsets
+ topics_with_partitions_and_offsets.each do |topic, partitions_with_offsets|
+ tpl_base[topic] = {}
+
+ if partitions_with_offsets.is_a?(Hash)
+ tpl_base[topic] = partitions_with_offsets
+ else
+ topic_info = Topics.new(kafka: @custom_kafka).info(topic)
+ topic_info[:partition_count].times do |partition|
+ tpl_base[topic][partition] = partitions_with_offsets
end
end
+ end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- offset = result.offset
- end
-
- # Since now we have proper offsets, we can add this to the final tpl for commit
- tpl.to_h[name] ||= []
- tpl.to_h[name] << Rdkafka::Consumer::Partition.new(partition, offset)
- tpl.to_h[name].reverse!
- tpl.to_h[name].uniq!(&:partition)
- tpl.to_h[name].reverse!
- end
- end
+ tpl_base.each_value do |partitions|
+ partitions.transform_values! do |position|
+ # Support both symbol and string based references
+ casted_position = position.is_a?(Symbol) ? position.to_s : position
+
+ # This remap allows us to transform some special cases in a reference that can be
+ # understood by Kafka
+ case casted_position
+ # Earliest is not always 0. When compacting/deleting it can be much later, that's why
+ # we fetch the oldest possible offset
+ # false is treated the same as 'earliest'
+ when 'earliest', false
+ LONG_TIME_AGO
+ # Latest will always be the high-watermark offset and we can get it just by getting
+ # a future position
+ when 'latest'
+ Time.now + DAY_IN_SECONDS
+ # Regular offset case
+ else
+ position
end
+ end
+ end

-
+ tpl = Rdkafka::Consumer::TopicPartitionList.new
+ # In case of time based location, we need to to a pre-resolution, that's why we keep it
+ # separately
+ time_tpl = Rdkafka::Consumer::TopicPartitionList.new
+
+ # Distribute properly the offset type
+ tpl_base.each do |topic, partitions_with_offsets|
+ partitions_with_offsets.each do |partition, offset|
+ target = offset.is_a?(Time) ? time_tpl : tpl
+ # We reverse and uniq to make sure that potentially duplicated references are removed
+ # in such a way that the newest stays
+ target.to_h[topic] ||= []
+ target.to_h[topic] << Rdkafka::Consumer::Partition.new(partition, offset)
+ target.to_h[topic].reverse!
+ target.to_h[topic].uniq!(&:partition)
+ target.to_h[topic].reverse!
end
end

-
- #
- # @param previous_name [String] old consumer group name
- # @param new_name [String] new consumer group name
- # @param topics [Array<String>] topics for which we want to migrate offsets during rename
- #
- # @return [Boolean] true if anything was migrated, otherwise false
- #
- # @note This method should **not** be executed on a running consumer group as it creates a
- # "fake" consumer and uses it to move offsets.
- #
- # @note If new consumer group exists, old offsets will be added to it.
- def copy(previous_name, new_name, topics)
- remap = Hash.new { |h, k| h[k] = {} }
+ settings = { 'group.id': consumer_group_id }

-
+ with_consumer(settings) do |consumer|
+ # If we have any time based stuff to resolve, we need to do it prior to commits
+ unless time_tpl.empty?
+ real_offsets = consumer.offsets_for_times(time_tpl)

-
-
+ real_offsets.to_h.each do |name, results|
+ results.each do |result|
+ raise(Errors::InvalidTimeBasedOffsetError) unless result

-
- .fetch(previous_name)
- .each do |topic, partitions|
- partitions.each do |partition_id, details|
- offset = details[:offset]
+ partition = result.partition

- #
-
+ # Negative offset means we're beyond last message and we need to query for the
+ # high watermark offset to get the most recent offset and move there
+ if result.offset.negative?
+ _, offset = consumer.query_watermark_offsets(name, result.partition)
+ else
+ # If we get an offset, it means there existed a message close to this time
+ # location
+ offset = result.offset
+ end

-
+ # Since now we have proper offsets, we can add this to the final tpl for commit
+ tpl.to_h[name] ||= []
+ tpl.to_h[name] << Rdkafka::Consumer::Partition.new(partition, offset)
+ tpl.to_h[name].reverse!
+ tpl.to_h[name].uniq!(&:partition)
+ tpl.to_h[name].reverse!
end
end
+ end

-
-
- true
+ consumer.commit_offsets(tpl, async: false)
end
+ end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+ # Takes consumer group and its topics and copies all the offsets to a new named group
+ #
+ # @param previous_name [String] old consumer group name
+ # @param new_name [String] new consumer group name
+ # @param topics [Array<String>] topics for which we want to migrate offsets during rename
+ #
+ # @return [Boolean] true if anything was migrated, otherwise false
+ #
+ # @note This method should **not** be executed on a running consumer group as it creates a
+ # "fake" consumer and uses it to move offsets.
+ #
+ # @note If new consumer group exists, old offsets will be added to it.
+ def copy(previous_name, new_name, topics)
+ remap = Hash.new { |h, k| h[k] = {} }
+
+ old_lags = read_lags_with_offsets({ previous_name => topics })
+
+ return false if old_lags.empty?
+ return false if old_lags.values.all? { |topic_data| topic_data.values.all?(&:empty?) }
+
+ read_lags_with_offsets({ previous_name => topics })
+ .fetch(previous_name)
+ .each do |topic, partitions|
+ partitions.each do |partition_id, details|
+ offset = details[:offset]
+
+ # No offset on this partition
+ next if offset.negative?
+
+ remap[topic][partition_id] = offset
+ end
+ end

-
- return copy_result unless delete_previous
+ seek(new_name, remap)

-
+ true
+ end

-
-
+ # Takes consumer group and its topics and migrates all the offsets to a new named group
+ #
+ # @param previous_name [String] old consumer group name
+ # @param new_name [String] new consumer group name
+ # @param topics [Array<String>] topics for which we want to migrate offsets during rename
+ # @param delete_previous [Boolean] should we delete previous consumer group after rename.
+ # Defaults to true.
+ #
+ # @return [Boolean] true if rename (and optionally removal) was ok or false if there was
+ # nothing really to rename
+ #
+ # @note This method should **not** be executed on a running consumer group as it creates a
+ # "fake" consumer and uses it to move offsets.
+ #
+ # @note After migration unless `delete_previous` is set to `false`, old group will be
+ # removed.
+ #
+ # @note If new consumer group exists, old offsets will be added to it.
+ def rename(previous_name, new_name, topics, delete_previous: true)
+ copy_result = copy(previous_name, new_name, topics)
+
+ return false unless copy_result
+ return copy_result unless delete_previous
+
+ delete(previous_name)
+
+ true
+ end

-
-
-
-
-
-
-
-
-
-
-
-
- end
+ # Removes given consumer group (if exists)
+ #
+ # @param consumer_group_id [String] consumer group name
+ #
+ # @return [void]
+ #
+ # @note This method should not be used on a running consumer group as it will not yield any
+ # results.
+ def delete(consumer_group_id)
+ with_admin do |admin|
+ handler = admin.delete_group(consumer_group_id)
+ handler.wait(max_wait_timeout: max_wait_time_seconds)
end
+ end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+ # Triggers a rebalance for the specified consumer group by briefly joining and leaving
+ #
+ # @param consumer_group_id [String] consumer group id to trigger rebalance for
+ #
+ # @return [void]
+ #
+ # @raise [Karafka::Errors::InvalidConfigurationError] when consumer group is not found in
+ # routing or has no topics
+ #
+ # @note This method creates a temporary "fake" consumer that joins the consumer group,
+ # triggering a rebalance when it joins and another when it leaves. This should only be
+ # used for operational/testing purposes as it causes two rebalances.
+ #
+ # @note The consumer group does not need to be running for this to work, but if it is,
+ # it will experience rebalances.
+ #
+ # @note The behavior follows the configured rebalance protocol. For cooperative sticky
+ # rebalancing or KIP-848 based protocols, there may be no immediate reaction to the
+ # rebalance trigger as these protocols allow incremental partition reassignments without
+ # stopping all consumers.
+ #
+ # @note Topics are always detected from the routing configuration. The consumer settings
+ # (kafka config) are taken from the first topic in the consumer group to ensure
+ # consistency with the actual consumer configuration.
+ #
+ # @example Trigger rebalance for a consumer group
+ # Karafka::Admin::ConsumerGroups.trigger_rebalance('my-group')
+ def trigger_rebalance(consumer_group_id)
+ consumer_group = Karafka::App.routes.find { |cg| cg.id == consumer_group_id }
+
+ unless consumer_group
+ raise(
+ Errors::InvalidConfigurationError,
+ "Consumer group '#{consumer_group_id}' not found in routing"
+ )
+ end

-
+ topics = consumer_group.topics.map(&:name)

-
-
-
-
-
-
+ if topics.empty?
+ raise(
+ Errors::InvalidConfigurationError,
+ "Consumer group '#{consumer_group_id}' has no topics"
+ )
+ end

-
-
+ # Get the first topic to extract kafka settings
+ first_topic = consumer_group.topics.first

-
-
-
-
-
-
-
+ # Build consumer settings using the consumer group's kafka config from first topic
+ # This ensures we use the same settings as the actual consumers
+ # Following the same pattern as in Karafka::Connection::Client#build_kafka
+ consumer_settings = Setup::AttributesMap.consumer(first_topic.kafka.dup)
+ consumer_settings[:'group.id'] = consumer_group.id
+ consumer_settings[:'enable.auto.offset.store'] = false
+ consumer_settings[:'auto.offset.reset'] ||= first_topic.initial_offset

-
-
-
+ with_consumer(consumer_settings) do |consumer|
+ # Subscribe to the topics - this triggers the first rebalance
+ consumer.subscribe(*topics)

-
-
-
+ # Wait briefly (100ms) to allow the rebalance to initiate
+ # The actual rebalance happens asynchronously, so we just need to give it a moment
+ sleep(0.1)

-
-
- end
+ # Unsubscribe - this will trigger the second rebalance when the consumer closes
+ # The ensure block in with_consumer will handle the unsubscribe and close
end
+ end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- end
-
- [cg.id, cg_topics.map(&:name)]
+ # Reads lags and offsets for given topics in the context of consumer groups defined in the
+ # routing
+ #
+ # @param consumer_groups_with_topics [Hash{String => Array<String>}] hash with consumer
+ # groups names with array of topics to query per consumer group inside
+ # @param active_topics_only [Boolean] if set to false, when we use routing topics, will
+ # select also topics that are marked as inactive in routing
+ #
+ # @return [Hash{String => Hash{Integer => Hash{Integer => Object}}}] hash where the top
+ # level keys are the consumer groups and values are hashes with topics and inside
+ # partitions with lags and offsets
+ #
+ # @note For topics that do not exist, topic details will be set to an empty hash
+ #
+ # @note For topics that exist but were never consumed by a given CG we set `-1` as lag and
+ # the offset on each of the partitions that were not consumed.
+ #
+ # @note This lag reporting is for committed lags and is "Kafka-centric", meaning that this
+ # represents lags from Kafka perspective and not the consumer. They may differ.
+ def read_lags_with_offsets(consumer_groups_with_topics = {}, active_topics_only: true)
+ # We first fetch all the topics with partitions count that exist in the cluster so we
+ # do not query for topics that do not exist and so we can get partitions count for all
+ # the topics we may need. The non-existent and not consumed will be filled at the end
+ existing_topics = cluster_info.topics.to_h do |topic|
+ [topic[:topic_name], topic[:partition_count]]
+ end.freeze
+
+ # If no expected CGs, we use all from routing that have active topics
+ if consumer_groups_with_topics.empty?
+ consumer_groups_with_topics = Karafka::App.routes.to_h do |cg|
+ cg_topics = cg.topics.select do |cg_topic|
+ active_topics_only ? cg_topic.active? : true
end
+
+ [cg.id, cg_topics.map(&:name)]
end
+ end

-
-
-
-
+ # We make a copy because we will remove once with non-existing topics
+ # We keep original requested consumer groups with topics for later backfilling
+ cgs_with_topics = consumer_groups_with_topics.dup
+ cgs_with_topics.transform_values!(&:dup)

-
-
-
-
-
+ # We can query only topics that do exist, this is why we are cleaning those that do not
+ # exist
+ cgs_with_topics.each_value do |requested_topics|
+ requested_topics.delete_if { |topic| !existing_topics.include?(topic) }
+ end

-
-
+ groups_lags = Hash.new { |h, k| h[k] = {} }
+ groups_offs = Hash.new { |h, k| h[k] = {} }

-
-
-
+ cgs_with_topics.each do |cg, topics|
+ # Do not add to tpl topics that do not exist
+ next if topics.empty?

-
+ tpl = Rdkafka::Consumer::TopicPartitionList.new

-
-
+ with_consumer('group.id': cg) do |consumer|
+ topics.each { |topic| tpl.add_topic(topic, existing_topics[topic]) }

-
+ commit_offsets = consumer.committed(tpl)

-
-
+ commit_offsets.to_h.each do |topic, partitions|
+ groups_offs[cg][topic] = {}

-
-
-
- end
+ partitions.each do |partition|
+ # -1 when no offset is stored
+ groups_offs[cg][topic][partition.partition] = partition.offset || -1
end
+ end

-
-
- end
+ consumer.lag(commit_offsets).each do |topic, partitions_lags|
+ groups_lags[cg][topic] = partitions_lags
end
end
+ end

-
-
+ consumer_groups_with_topics.each do |cg, topics|
+ groups_lags[cg]

-
-
+ topics.each do |topic|
+ groups_lags[cg][topic] ||= {}

-
+ next unless existing_topics.key?(topic)

-
-
-
-
- end
+ # We backfill because there is a case where our consumer group would consume for
+ # example only one partition out of 20, rest needs to get -1
+ existing_topics[topic].times do |partition_id|
+ groups_lags[cg][topic][partition_id] ||= -1
end
end
+ end

-
+ merged = Hash.new { |h, k| h[k] = {} }

-
-
-
+ groups_lags.each do |cg, topics|
+ topics.each do |topic, partitions|
+ merged[cg][topic] = {}

-
-
-
-
-
- end
+ partitions.each do |partition, lag|
+ merged[cg][topic][partition] = {
+ offset: groups_offs.fetch(cg).fetch(topic).fetch(partition),
+ lag: lag
+ }
end
end
-
- merged
end
+
+ merged
end
end
end
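The hunk above shows the main shape of this release for the admin layer: the former `class << self` implementations in `Karafka::Admin::ConsumerGroups` become instance methods, while the class-level methods are kept as thin `new.*` delegators. A minimal usage sketch based on the signatures and `@example` tags visible in the diff; the documented `'group-id'`, `'topic'` and `'my-group'` names come from the diff itself, while `'old-group'` and `'new-group'` are placeholder names for illustration:

  # Move partition 1 of 'topic' to offset 100 for 'group-id'
  Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 100 } })

  # Copy committed offsets from one consumer group to another for selected topics
  Karafka::Admin::ConsumerGroups.copy('old-group', 'new-group', %w[topic])

  # Rename a group: copies offsets and, by default, deletes the old group afterwards
  Karafka::Admin::ConsumerGroups.rename('old-group', 'new-group', %w[topic], delete_previous: true)

  # Remove a consumer group and read committed lags/offsets for groups defined in routing
  Karafka::Admin::ConsumerGroups.delete('old-group')
  Karafka::Admin::ConsumerGroups.read_lags_with_offsets

  # Force a rebalance by briefly joining and leaving the group
  Karafka::Admin::ConsumerGroups.trigger_rebalance('my-group')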