karafka 2.2.13 → 2.3.0.alpha1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/workflows/ci.yml +38 -12
- data/.ruby-version +1 -1
- data/CHANGELOG.md +161 -125
- data/Gemfile.lock +12 -12
- data/README.md +0 -2
- data/SECURITY.md +23 -0
- data/config/locales/errors.yml +7 -1
- data/config/locales/pro_errors.yml +22 -0
- data/docker-compose.yml +3 -1
- data/karafka.gemspec +2 -2
- data/lib/karafka/admin/acl.rb +287 -0
- data/lib/karafka/admin.rb +118 -16
- data/lib/karafka/app.rb +12 -3
- data/lib/karafka/base_consumer.rb +32 -31
- data/lib/karafka/cli/base.rb +1 -1
- data/lib/karafka/connection/client.rb +94 -84
- data/lib/karafka/connection/conductor.rb +28 -0
- data/lib/karafka/connection/listener.rb +165 -46
- data/lib/karafka/connection/listeners_batch.rb +5 -11
- data/lib/karafka/connection/manager.rb +72 -0
- data/lib/karafka/connection/messages_buffer.rb +12 -0
- data/lib/karafka/connection/proxy.rb +17 -0
- data/lib/karafka/connection/status.rb +75 -0
- data/lib/karafka/contracts/config.rb +14 -10
- data/lib/karafka/contracts/consumer_group.rb +9 -1
- data/lib/karafka/contracts/topic.rb +3 -1
- data/lib/karafka/errors.rb +13 -0
- data/lib/karafka/instrumentation/assignments_tracker.rb +96 -0
- data/lib/karafka/instrumentation/callbacks/rebalance.rb +10 -7
- data/lib/karafka/instrumentation/logger_listener.rb +3 -9
- data/lib/karafka/instrumentation/notifications.rb +19 -9
- data/lib/karafka/instrumentation/vendors/appsignal/metrics_listener.rb +31 -28
- data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +22 -3
- data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +15 -12
- data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +39 -36
- data/lib/karafka/pro/base_consumer.rb +47 -0
- data/lib/karafka/pro/connection/manager.rb +300 -0
- data/lib/karafka/pro/connection/multiplexing/listener.rb +40 -0
- data/lib/karafka/pro/instrumentation/performance_tracker.rb +85 -0
- data/lib/karafka/pro/iterator/tpl_builder.rb +1 -1
- data/lib/karafka/pro/iterator.rb +1 -6
- data/lib/karafka/pro/loader.rb +16 -2
- data/lib/karafka/pro/processing/coordinator.rb +2 -1
- data/lib/karafka/pro/processing/executor.rb +37 -0
- data/lib/karafka/pro/processing/expansions_selector.rb +32 -0
- data/lib/karafka/pro/processing/jobs/periodic.rb +41 -0
- data/lib/karafka/pro/processing/jobs/periodic_non_blocking.rb +32 -0
- data/lib/karafka/pro/processing/jobs_builder.rb +14 -3
- data/lib/karafka/pro/processing/offset_metadata/consumer.rb +44 -0
- data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +131 -0
- data/lib/karafka/pro/processing/offset_metadata/listener.rb +46 -0
- data/lib/karafka/pro/processing/schedulers/base.rb +143 -0
- data/lib/karafka/pro/processing/schedulers/default.rb +107 -0
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +1 -1
- data/lib/karafka/pro/processing/strategies/default.rb +136 -3
- data/lib/karafka/pro/processing/strategies/dlq/default.rb +35 -0
- data/lib/karafka/pro/processing/strategies/lrj/default.rb +1 -1
- data/lib/karafka/pro/processing/strategies/lrj/mom.rb +1 -1
- data/lib/karafka/pro/processing/strategies/vp/default.rb +60 -26
- data/lib/karafka/pro/processing/virtual_offset_manager.rb +41 -11
- data/lib/karafka/pro/routing/features/long_running_job/topic.rb +2 -0
- data/lib/karafka/pro/routing/features/multiplexing/config.rb +38 -0
- data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +114 -0
- data/lib/karafka/pro/routing/features/multiplexing/patches/contracts/consumer_group.rb +42 -0
- data/lib/karafka/pro/routing/features/multiplexing/proxy.rb +38 -0
- data/lib/karafka/pro/routing/features/multiplexing/subscription_group.rb +42 -0
- data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +40 -0
- data/lib/karafka/pro/routing/features/multiplexing.rb +59 -0
- data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +32 -0
- data/lib/karafka/pro/routing/features/non_blocking_job.rb +37 -0
- data/lib/karafka/pro/routing/features/offset_metadata/config.rb +33 -0
- data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +42 -0
- data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +65 -0
- data/lib/karafka/pro/routing/features/offset_metadata.rb +40 -0
- data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +4 -0
- data/lib/karafka/pro/routing/features/patterns/detector.rb +18 -10
- data/lib/karafka/pro/routing/features/periodic_job/config.rb +37 -0
- data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +44 -0
- data/lib/karafka/pro/routing/features/periodic_job/topic.rb +94 -0
- data/lib/karafka/pro/routing/features/periodic_job.rb +27 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +1 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +1 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +7 -2
- data/lib/karafka/process.rb +5 -3
- data/lib/karafka/processing/coordinator.rb +5 -1
- data/lib/karafka/processing/executor.rb +43 -13
- data/lib/karafka/processing/executors_buffer.rb +22 -7
- data/lib/karafka/processing/jobs/base.rb +19 -2
- data/lib/karafka/processing/jobs/consume.rb +3 -3
- data/lib/karafka/processing/jobs/idle.rb +5 -0
- data/lib/karafka/processing/jobs/revoked.rb +5 -0
- data/lib/karafka/processing/jobs/shutdown.rb +5 -0
- data/lib/karafka/processing/jobs_queue.rb +19 -8
- data/lib/karafka/processing/schedulers/default.rb +42 -0
- data/lib/karafka/processing/strategies/base.rb +13 -4
- data/lib/karafka/processing/strategies/default.rb +23 -7
- data/lib/karafka/processing/strategies/dlq.rb +36 -0
- data/lib/karafka/processing/worker.rb +4 -1
- data/lib/karafka/routing/builder.rb +12 -2
- data/lib/karafka/routing/consumer_group.rb +5 -5
- data/lib/karafka/routing/features/base.rb +44 -8
- data/lib/karafka/routing/features/dead_letter_queue/config.rb +6 -1
- data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -0
- data/lib/karafka/routing/features/dead_letter_queue/topic.rb +9 -2
- data/lib/karafka/routing/proxy.rb +4 -3
- data/lib/karafka/routing/subscription_group.rb +2 -2
- data/lib/karafka/routing/subscription_groups_builder.rb +11 -2
- data/lib/karafka/routing/topic.rb +8 -10
- data/lib/karafka/routing/topics.rb +1 -1
- data/lib/karafka/runner.rb +13 -3
- data/lib/karafka/server.rb +5 -9
- data/lib/karafka/setup/config.rb +21 -1
- data/lib/karafka/status.rb +23 -14
- data/lib/karafka/templates/karafka.rb.erb +7 -0
- data/lib/karafka/time_trackers/partition_usage.rb +56 -0
- data/lib/karafka/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +47 -13
- metadata.gz.sig +0 -0
- data/lib/karafka/connection/consumer_group_coordinator.rb +0 -48
- data/lib/karafka/pro/performance_tracker.rb +0 -84
- data/lib/karafka/pro/processing/scheduler.rb +0 -74
- data/lib/karafka/processing/scheduler.rb +0 -38
data/Gemfile.lock
CHANGED

@@ -1,9 +1,9 @@
 PATH
   remote: .
   specs:
-    karafka (2.2.13)
-      karafka-core (>= 2.
-      waterdrop (>= 2.6.
+    karafka (2.3.0.alpha1)
+      karafka-core (>= 2.3.0.alpha1, < 2.4.0)
+      waterdrop (>= 2.6.12, < 3.0.0)
       zeitwerk (~> 2.3)

 GEM
@@ -23,7 +23,7 @@ GEM
       mutex_m
       tzinfo (~> 2.0)
     base64 (0.2.0)
-    bigdecimal (3.1.
+    bigdecimal (3.1.5)
     byebug (11.1.3)
     concurrent-ruby (1.2.2)
     connection_pool (2.4.1)
@@ -32,17 +32,16 @@ GEM
     drb (2.2.0)
       ruby2_keywords
     erubi (1.12.0)
-    factory_bot (6.
+    factory_bot (6.4.5)
       activesupport (>= 5.0.0)
     ffi (1.16.3)
     globalid (1.2.1)
       activesupport (>= 6.1)
     i18n (1.14.1)
       concurrent-ruby (~> 1.0)
-    karafka-core (2.
-
-
-    karafka-rdkafka (0.14.0)
+    karafka-core (2.3.0.alpha1)
+      karafka-rdkafka (>= 0.14.7, < 0.15.0)
+    karafka-rdkafka (0.14.7)
       ffi (~> 1.15)
       mini_portile2 (~> 2.6)
       rake (> 12)
@@ -57,7 +56,7 @@ GEM
     mutex_m (0.2.0)
     rack (3.0.8)
     rake (13.1.0)
-    roda (3.
+    roda (3.75.0)
       rack
     rspec (3.12.0)
       rspec-core (~> 3.12.0)
@@ -82,12 +81,13 @@ GEM
     tilt (2.3.0)
     tzinfo (2.0.6)
       concurrent-ruby (~> 1.0)
-    waterdrop (2.6.
+    waterdrop (2.6.12)
       karafka-core (>= 2.2.3, < 3.0.0)
       zeitwerk (~> 2.3)
     zeitwerk (2.6.12)

 PLATFORMS
+  ruby
   x86_64-linux

 DEPENDENCIES
@@ -100,4 +100,4 @@ DEPENDENCIES
   simplecov

 BUNDLED WITH
-   2.
+   2.5.3
data/README.md
CHANGED

@@ -4,8 +4,6 @@
 [](http://badge.fury.io/rb/karafka)
 [](https://slack.karafka.io)

-**Note**: Upgrade instructions for migration from Karafka `1.4` to Karafka `2.0` can be found [here](https://karafka.io/docs/Upgrades-2.0/).
-
 ## About Karafka

 Karafka is a Ruby and Rails multi-threaded efficient Kafka processing framework that:
data/SECURITY.md
ADDED

@@ -0,0 +1,23 @@
+# Security Policy
+
+## Supported Versions
+
+Please refer to the Karafka [EOL documentation](https://karafka.io/docs/Versions-Lifecycle-and-EOL/) page for detailed information on which versions are actively supported with security updates.
+
+## Reporting a Vulnerability
+
+If you have identified a potential security vulnerability in our projects, we encourage you to report it immediately. We take all reports of security issues seriously and will work diligently to address them.
+
+To report a vulnerability, please send an email directly to contact@karafka.io.
+
+We understand the importance of addressing security vulnerabilities promptly. You can expect a reply from us within 2 working days of your report. This initial response will confirm receipt of your report.
+
+After acknowledging your report, we will:
+
+- Evaluate the reported vulnerability in the context of our project.
+- Provide you with regular updates on our progress.
+- Upon completing our assessment, we will inform you of the outcome. This includes whether the vulnerability will be accepted or declined for further action.
+
+Your report will be kept confidential and not disclosed to third parties without your consent, except as required by law.
+
+We appreciate your assistance in keeping our projects and their users safe by responsibly reporting vulnerabilities. Together, we can maintain a high standard of security for our community.
data/config/locales/errors.yml
CHANGED

@@ -30,12 +30,17 @@ en:
       internal.tick_interval_format: needs to be an integer bigger or equal to 1000
       internal.routing.builder_format: needs to be present
       internal.routing.subscription_groups_builder_format: needs to be present
+      internal.connection.manager_format: needs to be present
+      internal.connection.conductor_format: needs to be present
       internal.connection.proxy.query_watermark_offsets.timeout_format: needs to be an integer bigger than 0
       internal.connection.proxy.query_watermark_offsets.max_attempts_format: needs to be an integer bigger than 0
       internal.connection.proxy.query_watermark_offsets.wait_time_format: needs to be an integer bigger than 0
       internal.connection.proxy.offsets_for_times.timeout_format: needs to be an integer bigger than 0
       internal.connection.proxy.offsets_for_times.max_attempts_format: needs to be an integer bigger than 0
       internal.connection.proxy.offsets_for_times.wait_time_format: needs to be an integer bigger than 0
+      internal.connection.proxy.committed.timeout_format: needs to be an integer bigger than 0
+      internal.connection.proxy.committed.max_attempts_format: needs to be an integer bigger than 0
+      internal.connection.proxy.committed.wait_time_format: needs to be an integer bigger than 0
       key_must_be_a_symbol: All keys under the kafka settings scope need to be symbols
       max_timeout_vs_pause_max_timeout: pause_timeout must be less or equal to pause_max_timeout
       shutdown_timeout_vs_max_wait_time: shutdown_timeout must be more than max_wait_time
@@ -61,7 +66,7 @@ en:
       consumer_format: needs to be present
       id_format: 'needs to be a string with a Kafka accepted format'
       initial_offset_format: needs to be either earliest or latest
-
+      subscription_group_details.name_format: must be a non-empty string
       manual_offset_management.active_format: needs to be either true or false
       manual_offset_management_must_be_enabled: cannot be disabled for ActiveJob topics
       inline_insights.active_format: needs to be either true or false
@@ -69,6 +74,7 @@ en:
       dead_letter_queue.max_retries_format: needs to be equal or bigger than 0
       dead_letter_queue.topic_format: 'needs to be a string with a Kafka accepted format'
       dead_letter_queue.active_format: needs to be either true or false
+      dead_letter_queue.independent_format: needs to be either true or false
       active_format: needs to be either true or false
       declaratives.partitions_format: needs to be more or equal to 1
       declaratives.active_format: needs to be true
data/config/locales/pro_errors.yml
CHANGED

@@ -3,6 +3,7 @@
     topic:
       virtual_partitions.partitioner_respond_to_call: needs to be defined and needs to respond to `#call`
       virtual_partitions.max_partitions_format: needs to be equal or more than 1
+      virtual_partitions.offset_metadata_strategy_format: needs to be either :exact or :current

       long_running_job.active_format: needs to be either true or false

@@ -31,9 +32,30 @@
       patterns.active_format: 'needs to be boolean'
       patterns.type_format: 'needs to be :matcher, :discovered or :regular'

+      periodic_job.active_missing: needs to be present
+      periodic_job.active_format: 'needs to be boolean'
+      periodic_job.interval_missing: 'needs to be present'
+      periodic_job.interval_format: 'needs to be an integer equal or more than 100'
+      periodic_job.during_pause_format: 'needs to be boolean'
+      periodic_job.during_retry_format: 'needs to be boolean'
+      periodic_job.materialized_format: 'needs to be boolean'
+      periodic_job.materialized_missing: 'needs to be present'
+
       inline_insights.active_format: 'needs to be boolean'
       inline_insights.required_format: 'needs to be boolean'

+      offset_metadata.active_format: 'needs to be boolean'
+      offset_metadata.cache_format: 'needs to be boolean'
+      offset_metadata.deserializer_missing: needs to be present
+      offset_metadata.deserializer_format: 'needs to respond to #call'
+
+      subscription_group_details.multiplexing_min_format: 'needs to be an integer equal or more than 1'
+      subscription_group_details.multiplexing_max_format: 'needs to be an integer equal or more than 1'
+      subscription_group_details_multiplexing_min_max_mismatch: 'min needs to be equal or less than max'
+      subscription_group_details_multiplexing_boot_mismatch: 'boot needs to be between min and max'
+      subscription_group_details.multiplexing_boot_format: 'needs to be an integer equal or more than 1'
+      subscription_group_details.multiplexing_boot_not_dynamic: 'needs to be equal to max when not in dynamic mode'
+
     consumer_group:
       patterns_format: must be an array with hashes
       patterns_missing: needs to be present
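The `subscription_group_details.multiplexing_*` keys above validate the new Pro multiplexing settings: a minimum, maximum and boot-time connection count per subscription group. A rough routing sketch of what these contracts guard follows; the `multiplexing` DSL call and the topic/consumer names here are assumptions inferred from the validation keys and the `multiplexing/proxy.rb` file listed above, not taken verbatim from this diff:

# Hedged sketch (not from this diff): a multiplexed subscription group.
class KarafkaApp < Karafka::App
  routes.draw do
    subscription_group 'payments' do
      # Between 1 and 5 connections for this group, 2 opened on boot;
      # min != max implies the dynamic mode referenced by the
      # subscription_group_details.multiplexing_boot_not_dynamic message above.
      multiplexing(min: 1, max: 5, boot: 2)

      topic :payments_events do
        consumer PaymentsEventsConsumer
      end
    end
  end
end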
data/docker-compose.yml
CHANGED

@@ -3,7 +3,7 @@ version: '2'
 services:
   kafka:
     container_name: kafka
-    image: confluentinc/cp-kafka:7.5.
+    image: confluentinc/cp-kafka:7.5.3

     ports:
       - 9092:9092
@@ -23,3 +23,5 @@ services:
       KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
       KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
       KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
+      KAFKA_AUTHORIZER_CLASS_NAME: org.apache.kafka.metadata.authorizer.StandardAuthorizer
data/karafka.gemspec
CHANGED

@@ -21,8 +21,8 @@ Gem::Specification.new do |spec|
     without having to focus on things that are not your business domain.
   DESC

-  spec.add_dependency 'karafka-core', '>= 2.
-  spec.add_dependency 'waterdrop', '>= 2.6.
+  spec.add_dependency 'karafka-core', '>= 2.3.0.alpha1', '< 2.4.0'
+  spec.add_dependency 'waterdrop', '>= 2.6.12', '< 3.0.0'
   spec.add_dependency 'zeitwerk', '~> 2.3'

   if $PROGRAM_NAME.end_with?('gem')
data/lib/karafka/admin/acl.rb
ADDED

@@ -0,0 +1,287 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Admin
+    # Struct and set of operations for ACLs management that simplifies their usage.
+    # It allows to use Ruby symbol based definitions instead of usage of librdkafka types
+    # (it allows to use rdkafka numerical types as well out of the box)
+    #
+    # We map the numerical values because they are less descriptive and harder to follow.
+    #
+    # This API works based on ability to create a `Karafka:Admin::Acl` object that can be then used
+    # using `#create`, `#delete` and `#describe` class API.
+    class Acl
+      # Types of resources for which we can assign permissions.
+      #
+      # Resource refers to any entity within the Kafka ecosystem for which access control can be
+      # managed using ACLs (Access Control Lists).
+      # These resources represent different components of Kafka, such as topics, consumer groups,
+      # and the Kafka cluster itself. ACLs can be applied to these resources to control and
+      # restrict reading, writing, and administrative operations, ensuring secure and authorized
+      # access to Kafka's functionalities.
+      RESOURCE_TYPES_MAP = {
+        # `:any` is only used for lookups and cannot be used for permission assignments
+        any: Rdkafka::Bindings::RD_KAFKA_RESOURCE_ANY,
+        # use when you want to assign acl to a given topic
+        topic: Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC,
+        # use when you want to assign acl to a given consumer group
+        consumer_group: Rdkafka::Bindings::RD_KAFKA_RESOURCE_GROUP,
+        # use when you want to assign acl to a given broker
+        broker: Rdkafka::Bindings::RD_KAFKA_RESOURCE_BROKER
+      }.freeze
+
+      # Resource pattern types define how ACLs (Access Control Lists) are applied to resources,
+      # specifying the scope and applicability of access rules.
+      # They determine whether an ACL should apply to a specific named resource, a prefixed group
+      # of resources, or all resources of a particular type.
+      RESOURCE_PATTERNS_TYPE_MAP = {
+        # `:any` is only used for lookups and cannot be used for permission assignments
+        any: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY,
+        # Targets resources with a pattern matching for broader control with a single rule.
+        match: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_MATCH,
+        # Targets a specific named resource, applying ACLs directly to that resource.
+        literal: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+        # Applies ACLs to all resources with a common name prefix, enabling broader control with a
+        # single rule.
+        prefixed: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_PREFIXED
+      }.freeze
+
+      # ACL operations define the actions that can be performed on Kafka resources. Each operation
+      # represents a specific type of access or action that can be allowed or denied.
+      OPERATIONS_MAP = {
+        # `:any` is only used for lookups
+        any: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_ANY,
+        # Grants complete access to a resource, encompassing all possible operations,
+        # typically used for unrestricted control.
+        all: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_ALL,
+        # Grants the ability to read data from a topic or a consumer group.
+        read: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ,
+        # Allows for writing data on a topic.
+        write: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_WRITE,
+        # Permits the creation of topics or consumer groups.
+        create: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_CREATE,
+        # Enables the deletion of topics.
+        delete: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_DELETE,
+        # Allows modification of topics or consumer groups.
+        alter: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_ALTER,
+        # Grants the ability to view metadata and configurations of topics or consumer groups.
+        describe: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_DESCRIBE,
+        # Permits actions that apply to the Kafka cluster, like broker management.
+        cluster_action: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION,
+        # Allows viewing configurations for resources like topics and brokers.
+        describe_configs: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS,
+        # Enables modification of configurations for resources.
+        alter_configs: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS,
+        # Grants the ability to perform idempotent writes, ensuring exactly-once semantics in
+        # message production.
+        idempotent_write: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE
+      }.freeze
+
+      # ACL permission types specify the nature of the access control applied to Kafka resources.
+      # These types are used to either grant or deny specified operations.
+      PERMISSION_TYPES_MAP = {
+        # Used for lookups, indicating no specific permission type.
+        any: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
+        # Grants the specified operations, enabling the associated actions on the resource.
+        allow: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+        # Blocks the specified operations, preventing the associated actions on the resource.
+        deny: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_DENY
+      }.freeze
+
+      # Array with all maps used for the Acls support
+      ALL_MAPS = [
+        RESOURCE_TYPES_MAP,
+        RESOURCE_PATTERNS_TYPE_MAP,
+        OPERATIONS_MAP,
+        PERMISSION_TYPES_MAP
+      ].freeze
+
+      private_constant :RESOURCE_TYPES_MAP, :RESOURCE_PATTERNS_TYPE_MAP, :OPERATIONS_MAP,
+                       :PERMISSION_TYPES_MAP, :ALL_MAPS
+
+      # Class level APIs that operate on Acl instances and/or return Acl instances.
+      # @note For the sake of consistency all methods from this API return array of Acls
+      class << self
+        # Creates (unless already present) a given ACL rule in Kafka
+        # @param acl [Acl]
+        # @return [Array<Acl>] created acls
+        def create(acl)
+          with_admin_wait do |admin|
+            admin.create_acl(**acl.to_native_hash)
+          end
+
+          [acl]
+        end
+
+        # Removes acls matching provide acl pattern.
+        # @param acl [Acl]
+        # @return [Array<Acl>] deleted acls
+        # @note More than one Acl may be removed if rules match that way
+        def delete(acl)
+          result = with_admin_wait do |admin|
+            admin.delete_acl(**acl.to_native_hash)
+          end
+
+          result.deleted_acls.map do |result_acl|
+            from_rdkafka(result_acl)
+          end
+        end
+
+        # Takes an Acl definition and describes all existing Acls matching its criteria
+        # @param acl [Acl]
+        # @return [Array<Acl>] described acls
+        def describe(acl)
+          result = with_admin_wait do |admin|
+            admin.describe_acl(**acl.to_native_hash)
+          end
+
+          result.acls.map do |result_acl|
+            from_rdkafka(result_acl)
+          end
+        end
+
+        # Returns all acls on a cluster level
+        # @return [Array<Acl>] all acls
+        def all
+          describe(
+            new(
+              resource_type: :any,
+              resource_name: nil,
+              resource_pattern_type: :any,
+              principal: nil,
+              operation: :any,
+              permission_type: :any,
+              host: '*'
+            )
+          )
+        end
+
+        private
+
+        # Yields admin instance, allows to run Acl operations and awaits on the final result
+        # Makes sure that admin is closed afterwards.
+        def with_admin_wait
+          Admin.with_admin do |admin|
+            yield(admin).wait(max_wait_timeout: Karafka::App.config.admin.max_wait_time)
+          end
+        end
+
+        # Takes a rdkafka Acl result and converts it into our local Acl representation. Since the
+        # rdkafka Acl object is an integer based on on types, etc we remap it into our "more" Ruby
+        # form.
+        #
+        # @param rdkafka_acl [Rdkafka::Admin::AclBindingResult]
+        # return [Acl] mapped acl
+        def from_rdkafka(rdkafka_acl)
+          new(
+            resource_type: rdkafka_acl.matching_acl_resource_type,
+            resource_name: rdkafka_acl.matching_acl_resource_name,
+            resource_pattern_type: rdkafka_acl.matching_acl_pattern_type,
+            principal: rdkafka_acl.matching_acl_principal,
+            host: rdkafka_acl.matching_acl_host,
+            operation: rdkafka_acl.matching_acl_operation,
+            permission_type: rdkafka_acl.matching_acl_permission_type
+          )
+        end
+      end
+
+      attr_reader :resource_type, :resource_name, :resource_pattern_type, :principal, :host,
+                  :operation, :permission_type
+
+      # Initializes a new Acl instance with specified attributes.
+      #
+      # @param resource_type [Symbol, Integer] Specifies the type of Kafka resource
+      #   (like :topic, :consumer_group).
+      #   Accepts either a symbol from RESOURCE_TYPES_MAP or a direct rdkafka numerical type.
+      # @param resource_name [String, nil] The name of the Kafka resource
+      #   (like a specific topic name). Can be nil for certain types of resource pattern types.
+      # @param resource_pattern_type [Symbol, Integer] Determines how the ACL is applied to the
+      #   resource. Uses a symbol from RESOURCE_PATTERNS_TYPE_MAP or a direct rdkafka numerical
+      #   type.
+      # @param principal [String, nil] Specifies the principal (user or client) for which the ACL
+      #   is being defined. Can be nil if not applicable.
+      # @param host [String] (default: '*') Defines the host from which the principal can access
+      #   the resource. Defaults to '*' for all hosts.
+      # @param operation [Symbol, Integer] Indicates the operation type allowed or denied by the
+      #   ACL. Uses a symbol from OPERATIONS_MAP or a direct rdkafka numerical type.
+      # @param permission_type [Symbol, Integer] Specifies whether to allow or deny the specified
+      #   operation. Uses a symbol from PERMISSION_TYPES_MAP or a direct rdkafka numerical type.
+      #
+      # Each parameter is mapped to its corresponding value in the respective *_MAP constant,
+      # allowing usage of more descriptive Ruby symbols instead of numerical types.
+      def initialize(
+        resource_type:,
+        resource_name:,
+        resource_pattern_type:,
+        principal:,
+        host: '*',
+        operation:,
+        permission_type:
+      )
+        @resource_type = map(resource_type, RESOURCE_TYPES_MAP)
+        @resource_name = resource_name
+        @resource_pattern_type = map(resource_pattern_type, RESOURCE_PATTERNS_TYPE_MAP)
+        @principal = principal
+        @host = host
+        @operation = map(operation, OPERATIONS_MAP)
+        @permission_type = map(permission_type, PERMISSION_TYPES_MAP)
+        freeze
+      end
+
+      # Converts the Acl into a hash with native rdkafka types
+      # @return [Hash] hash with attributes matching rdkafka numerical types
+      def to_native_hash
+        {
+          resource_type: remap(resource_type, RESOURCE_TYPES_MAP),
+          resource_name: resource_name,
+          resource_pattern_type: remap(resource_pattern_type, RESOURCE_PATTERNS_TYPE_MAP),
+          principal: principal,
+          host: host,
+          operation: remap(operation, OPERATIONS_MAP),
+          permission_type: remap(permission_type, PERMISSION_TYPES_MAP)
+        }.freeze
+      end
+
+      private
+
+      # Maps the provided attribute based on the mapping hash and if not found returns the
+      # attribute itself. Useful when converting from Acl symbol based representation to the
+      # rdkafka one.
+      #
+      # @param value [Symbol, Integer] The value to be mapped.
+      # @param mappings [Hash] The hash containing the mapping data.
+      # @return [Integer, Symbol] The mapped value or the original value if not found in mappings.
+      def map(value, mappings)
+        validate_attribute!(value)
+
+        mappings.invert.fetch(value, value)
+      end
+
+      # Remaps the provided attribute based on the mapping hash and if not found returns the
+      # attribute itself. Useful when converting from Acl symbol based representation to the
+      # rdkafka one.
+      #
+      # @param value [Symbol, Integer] The value to be mapped.
+      # @param mappings [Hash] The hash containing the mapping data.
+      # @return [Integer, Symbol] The mapped value or the original value if not found in mappings.
+      def remap(value, mappings)
+        validate_attribute!(value)
+
+        mappings.fetch(value, value)
+      end
+
+      # Validates that the attribute exists in any of the ACL mappings.
+      # Raises an error if the attribute is not supported.
+      # @param attribute [Symbol, Integer] The attribute to be validated.
+      # @raise [Karafka::Errors::UnsupportedCaseError] raised if attribute not found
+      def validate_attribute!(attribute)
+        ALL_MAPS.each do |mappings|
+          return if mappings.keys.any?(attribute)
+          return if mappings.values.any?(attribute)
+        end
+
+        raise Karafka::Errors::UnsupportedCaseError, attribute
+      end
+    end
+  end
+end
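Taken together, the class API above (`create`, `delete`, `describe`, `all`) and the keyword constructor allow symbol-based ACL management from Ruby, with symbols remapped to librdkafka numeric constants only at the boundary. A minimal usage sketch; the topic name and principal below are illustrative placeholders, not values from this diff:

# Allow a hypothetical 'User:checkout' principal to read the 'orders_states' topic
acl = Karafka::Admin::Acl.new(
  resource_type: :topic,
  resource_name: 'orders_states',
  resource_pattern_type: :literal,
  principal: 'User:checkout',
  operation: :read,
  permission_type: :allow
)

Karafka::Admin::Acl.create(acl)   # registers the rule (host defaults to '*')
Karafka::Admin::Acl.describe(acl) # => array of matching Acl instances
Karafka::Admin::Acl.all           # => every ACL defined on the cluster
Karafka::Admin::Acl.delete(acl)   # => array of removed Acl instances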
data/lib/karafka/admin.rb
CHANGED

@@ -1,7 +1,7 @@
 # frozen_string_literal: true

 module Karafka
-  #
+  # Admin actions that we can perform via Karafka on our Kafka cluster
   #
   # @note It always initializes a new admin instance as we want to ensure it is always closed
   #   Since admin actions are not performed that often, that should be ok.
@@ -137,6 +137,109 @@
         end
       end

+      # Moves the offset on a given consumer group and provided topic to the requested location
+      #
+      # @param consumer_group_id [String] id of the consumer group for which we want to move the
+      #   existing offset
+      # @param topics_with_partitions_and_offsets [Hash] Hash with list of topics and settings to
+      #   where to move given consumer. It allows us to move particular partitions or whole topics
+      #   if we want to reset all partitions to for example a point in time.
+      #
+      # @note This method should **not** be executed on a running consumer group as it creates a
+      #   "fake" consumer and uses it to move offsets.
+      #
+      # @example Move a single topic partition nr 1 offset to 100
+      #   Karafka::Admin.seek_consumer_group('group-id', { 'topic' => { 1 => 100 } })
+      #
+      # @example Move offsets on all partitions of a topic to 100
+      #   Karafka::Admin.seek_consumer_group('group-id', { 'topic' => 100 })
+      #
+      # @example Move offset to 5 seconds ago on partition 2
+      #   Karafka::Admin.seek_consumer_group('group-id', { 'topic' => { 2 => 5.seconds.ago } })
+      def seek_consumer_group(consumer_group_id, topics_with_partitions_and_offsets)
+        tpl_base = {}
+
+        # Normalize the data so we always have all partitions and topics in the same format
+        # That is in a format where we have topics and all partitions with their per partition
+        # assigned offsets
+        topics_with_partitions_and_offsets.each do |topic, partitions_with_offsets|
+          tpl_base[topic] = {}
+
+          if partitions_with_offsets.is_a?(Hash)
+            tpl_base[topic] = partitions_with_offsets
+          else
+            topic(topic)[:partition_count].times do |partition|
+              tpl_base[topic][partition] = partitions_with_offsets
+            end
+          end
+        end
+
+        tpl = Rdkafka::Consumer::TopicPartitionList.new
+        # In case of time based location, we need to to a pre-resolution, that's why we keep it
+        # separately
+        time_tpl = Rdkafka::Consumer::TopicPartitionList.new
+
+        # Distribute properly the offset type
+        tpl_base.each do |topic, partitions_with_offsets|
+          partitions_with_offsets.each do |partition, offset|
+            target = offset.is_a?(Time) ? time_tpl : tpl
+            target.add_topic_and_partitions_with_offsets(topic, [[partition, offset]])
+          end
+        end
+
+        # We set this that way so we can impersonate this consumer group and seek where we want
+        mapped_consumer_group_id = app_config.consumer_mapper.call(consumer_group_id)
+        settings = { 'group.id': mapped_consumer_group_id }
+
+        with_consumer(settings) do |consumer|
+          # If we have any time based stuff to resolve, we need to do it prior to commits
+          unless time_tpl.empty?
+            real_offsets = consumer.offsets_for_times(time_tpl)
+
+            real_offsets.to_h.each do |name, results|
+              results.each do |result|
+                raise(Errors::InvalidTimeBasedOffsetError) unless result
+
+                partition = result.partition
+
+                # Negative offset means we're beyond last message and we need to query for the
+                # high watermark offset to get the most recent offset and move there
+                if result.offset.negative?
+                  _, offset = consumer.query_watermark_offsets(name, result.partition)
+                else
+                  # If we get an offset, it means there existed a message close to this time
+                  # location
+                  offset = result.offset
+                end
+
+                # Since now we have proper offsets, we can add this to the final tpl for commit
+                tpl.add_topic_and_partitions_with_offsets(name, [[partition, offset]])
+              end
+            end
+          end
+
+          consumer.commit(tpl, false)
+        end
+      end
+
+      # Removes given consumer group (if exists)
+      #
+      # @param consumer_group_id [String] consumer group name without the mapper name (if any used)
+      #
+      # @note Please note, Karafka will apply the consumer group mapper on the provided consumer
+      #   group.
+      #
+      # @note This method should not be used on a running consumer group as it will not yield any
+      #   results.
+      def delete_consumer_group(consumer_group_id)
+        mapped_consumer_group_id = app_config.consumer_mapper.call(consumer_group_id)
+
+        with_admin do |admin|
+          handler = admin.delete_group(mapped_consumer_group_id)
+          handler.wait(max_wait_timeout: app_config.admin.max_wait_time)
+        end
+      end
+
       # Fetches the watermark offsets for a given topic partition
       #
       # @param name [String, Symbol] topic name
@@ -155,11 +258,7 @@

       # @return [Rdkafka::Metadata] cluster metadata info
       def cluster_info
-        with_admin
-          admin.instance_variable_get('@native_kafka').with_inner do |inner|
-            Rdkafka::Metadata.new(inner)
-          end
-        end
+        with_admin(&:metadata)
       end

       # Creates consumer instance and yields it. After usage it closes the consumer instance
@@ -187,6 +286,14 @@
         consumer&.close
       end

+      # Creates admin instance and yields it. After usage it closes the admin instance
+      def with_admin
+        admin = config(:producer, {}).admin
+        yield(admin)
+      ensure
+        admin&.close
+      end
+
       private

       # @return [Array<String>] topics names
@@ -201,14 +308,6 @@
         cluster_info.topics.find { |topic| topic[:topic_name] == name }
       end

-      # Creates admin instance and yields it. After usage it closes the admin instance
-      def with_admin
-        admin = config(:producer, {}).admin
-        yield(admin)
-      ensure
-        admin&.close
-      end
-
       # There are some cases where rdkafka admin operations finish successfully but without the
       # callback being triggered to materialize the post-promise object. Until this is fixed we
       # can figure out, that operation we wanted to do finished successfully by checking that the
@@ -264,7 +363,7 @@
       # @param settings [Hash] extra settings for config (if needed)
       # @return [::Rdkafka::Config] rdkafka config
       def config(type, settings)
-
+        mapped_admin_group_id = app_config.consumer_mapper.call(
           app_config.admin.group_id
         )

@@ -272,8 +371,11 @@
           .kafka
           .then(&:dup)
           .merge(app_config.admin.kafka)
+          .tap { |config| config[:'group.id'] = mapped_admin_group_id }
+          # We merge after setting the group id so it can be altered if needed
+          # In general in admin we only should alter it when we need to impersonate a given
+          # consumer group or do something similar
           .merge!(settings)
-          .tap { |config| config[:'group.id'] = group_id }
           .then { |config| Karafka::Setup::AttributesMap.public_send(type, config) }
           .then { |config| ::Rdkafka::Config.new(config) }
       end