karafka 2.2.14 → 2.3.0.alpha1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/workflows/ci.yml +38 -12
- data/.ruby-version +1 -1
- data/CHANGELOG.md +23 -0
- data/Gemfile.lock +12 -12
- data/README.md +0 -2
- data/SECURITY.md +23 -0
- data/config/locales/errors.yml +7 -1
- data/config/locales/pro_errors.yml +22 -0
- data/docker-compose.yml +1 -1
- data/karafka.gemspec +2 -2
- data/lib/karafka/admin/acl.rb +287 -0
- data/lib/karafka/admin.rb +9 -13
- data/lib/karafka/app.rb +5 -3
- data/lib/karafka/base_consumer.rb +9 -1
- data/lib/karafka/cli/base.rb +1 -1
- data/lib/karafka/connection/client.rb +83 -76
- data/lib/karafka/connection/conductor.rb +28 -0
- data/lib/karafka/connection/listener.rb +159 -42
- data/lib/karafka/connection/listeners_batch.rb +5 -11
- data/lib/karafka/connection/manager.rb +72 -0
- data/lib/karafka/connection/messages_buffer.rb +12 -0
- data/lib/karafka/connection/proxy.rb +17 -0
- data/lib/karafka/connection/status.rb +75 -0
- data/lib/karafka/contracts/config.rb +14 -10
- data/lib/karafka/contracts/consumer_group.rb +9 -1
- data/lib/karafka/contracts/topic.rb +3 -1
- data/lib/karafka/errors.rb +13 -0
- data/lib/karafka/instrumentation/logger_listener.rb +3 -0
- data/lib/karafka/instrumentation/notifications.rb +13 -5
- data/lib/karafka/instrumentation/vendors/appsignal/metrics_listener.rb +31 -28
- data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +20 -1
- data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +15 -12
- data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +39 -36
- data/lib/karafka/pro/base_consumer.rb +47 -0
- data/lib/karafka/pro/connection/manager.rb +300 -0
- data/lib/karafka/pro/connection/multiplexing/listener.rb +40 -0
- data/lib/karafka/pro/iterator/tpl_builder.rb +1 -1
- data/lib/karafka/pro/iterator.rb +1 -6
- data/lib/karafka/pro/loader.rb +14 -0
- data/lib/karafka/pro/processing/coordinator.rb +2 -1
- data/lib/karafka/pro/processing/executor.rb +37 -0
- data/lib/karafka/pro/processing/expansions_selector.rb +32 -0
- data/lib/karafka/pro/processing/jobs/periodic.rb +41 -0
- data/lib/karafka/pro/processing/jobs/periodic_non_blocking.rb +32 -0
- data/lib/karafka/pro/processing/jobs_builder.rb +14 -3
- data/lib/karafka/pro/processing/offset_metadata/consumer.rb +44 -0
- data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +131 -0
- data/lib/karafka/pro/processing/offset_metadata/listener.rb +46 -0
- data/lib/karafka/pro/processing/schedulers/base.rb +39 -23
- data/lib/karafka/pro/processing/schedulers/default.rb +12 -14
- data/lib/karafka/pro/processing/strategies/default.rb +134 -1
- data/lib/karafka/pro/processing/strategies/dlq/default.rb +35 -0
- data/lib/karafka/pro/processing/strategies/vp/default.rb +59 -25
- data/lib/karafka/pro/processing/virtual_offset_manager.rb +41 -11
- data/lib/karafka/pro/routing/features/long_running_job/topic.rb +2 -0
- data/lib/karafka/pro/routing/features/multiplexing/config.rb +38 -0
- data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +114 -0
- data/lib/karafka/pro/routing/features/multiplexing/patches/contracts/consumer_group.rb +42 -0
- data/lib/karafka/pro/routing/features/multiplexing/proxy.rb +38 -0
- data/lib/karafka/pro/routing/features/multiplexing/subscription_group.rb +42 -0
- data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +40 -0
- data/lib/karafka/pro/routing/features/multiplexing.rb +59 -0
- data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +32 -0
- data/lib/karafka/pro/routing/features/non_blocking_job.rb +37 -0
- data/lib/karafka/pro/routing/features/offset_metadata/config.rb +33 -0
- data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +42 -0
- data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +65 -0
- data/lib/karafka/pro/routing/features/offset_metadata.rb +40 -0
- data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +4 -0
- data/lib/karafka/pro/routing/features/patterns/detector.rb +18 -10
- data/lib/karafka/pro/routing/features/periodic_job/config.rb +37 -0
- data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +44 -0
- data/lib/karafka/pro/routing/features/periodic_job/topic.rb +94 -0
- data/lib/karafka/pro/routing/features/periodic_job.rb +27 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +1 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +1 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +7 -2
- data/lib/karafka/process.rb +5 -3
- data/lib/karafka/processing/coordinator.rb +5 -1
- data/lib/karafka/processing/executor.rb +16 -10
- data/lib/karafka/processing/executors_buffer.rb +19 -4
- data/lib/karafka/processing/schedulers/default.rb +3 -2
- data/lib/karafka/processing/strategies/default.rb +6 -0
- data/lib/karafka/processing/strategies/dlq.rb +36 -0
- data/lib/karafka/routing/builder.rb +12 -2
- data/lib/karafka/routing/consumer_group.rb +5 -5
- data/lib/karafka/routing/features/base.rb +44 -8
- data/lib/karafka/routing/features/dead_letter_queue/config.rb +6 -1
- data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -0
- data/lib/karafka/routing/features/dead_letter_queue/topic.rb +9 -2
- data/lib/karafka/routing/subscription_group.rb +2 -2
- data/lib/karafka/routing/subscription_groups_builder.rb +11 -2
- data/lib/karafka/routing/topic.rb +8 -10
- data/lib/karafka/runner.rb +13 -3
- data/lib/karafka/server.rb +5 -9
- data/lib/karafka/setup/config.rb +17 -0
- data/lib/karafka/status.rb +23 -14
- data/lib/karafka/templates/karafka.rb.erb +7 -0
- data/lib/karafka/time_trackers/partition_usage.rb +56 -0
- data/lib/karafka/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +42 -10
- metadata.gz.sig +0 -0
- data/lib/karafka/connection/consumer_group_coordinator.rb +0 -48
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: de7ea23762cefa19d5f3620e92a39f0030cd8ff78f318c92f30c494d79b78163
|
4
|
+
data.tar.gz: 775cfbd40d181036004dcf72dbcb84394dc8367bed0f6d69812f2324dc179d6f
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: d68a4122a35afad517e4280b94f6f3d7cb3cab94fb37c11729e5e5c7a7aca082a7a272a52ff09a86e2f55ad0e078e234c88be79ce3b730527a2f6e7629ef259c
|
7
|
+
data.tar.gz: aa2ddb108cc39caa8ad5c95a86d07006b5be374647e703414a7761ffd5c333010d7e53b9fd2c42216780e35f00639d8cf80126c291a59d0585424077840cc6b5
|
checksums.yaml.gz.sig
CHANGED
Binary file
|
data/.github/workflows/ci.yml
CHANGED
@@ -27,7 +27,7 @@ jobs:
|
|
27
27
|
- name: Set up Ruby
|
28
28
|
uses: ruby/setup-ruby@v1
|
29
29
|
with:
|
30
|
-
ruby-version: 3.
|
30
|
+
ruby-version: 3.3
|
31
31
|
bundler-cache: true
|
32
32
|
|
33
33
|
- name: Install Diffend plugin
|
@@ -73,7 +73,7 @@ jobs:
|
|
73
73
|
fail-fast: false
|
74
74
|
matrix:
|
75
75
|
ruby:
|
76
|
-
- '3.3
|
76
|
+
- '3.3'
|
77
77
|
- '3.2'
|
78
78
|
# We run it against the oldest and the newest of a given major to make sure, that there
|
79
79
|
# are no syntax-sugars that we would use that were introduced down the road
|
@@ -82,9 +82,8 @@ jobs:
|
|
82
82
|
- '3.0'
|
83
83
|
- '3.0.0'
|
84
84
|
- '2.7'
|
85
|
-
- '2.7.0'
|
86
85
|
include:
|
87
|
-
- ruby: '3.
|
86
|
+
- ruby: '3.3'
|
88
87
|
coverage: 'true'
|
89
88
|
steps:
|
90
89
|
- uses: actions/checkout@v4
|
@@ -100,6 +99,7 @@ jobs:
|
|
100
99
|
with:
|
101
100
|
ruby-version: ${{matrix.ruby}}
|
102
101
|
bundler-cache: true
|
102
|
+
bundler: 'latest'
|
103
103
|
|
104
104
|
- name: Wait for Kafka
|
105
105
|
run: |
|
@@ -118,7 +118,7 @@ jobs:
|
|
118
118
|
fail-fast: false
|
119
119
|
matrix:
|
120
120
|
ruby:
|
121
|
-
- '3.3
|
121
|
+
- '3.3'
|
122
122
|
- '3.2'
|
123
123
|
- '3.1'
|
124
124
|
- '3.0'
|
@@ -143,17 +143,30 @@ jobs:
|
|
143
143
|
#
|
144
144
|
# We also want to check that librdkafka is compiling as expected on all versions of Ruby
|
145
145
|
ruby-version: ${{matrix.ruby}}
|
146
|
+
bundler: 'latest'
|
146
147
|
|
147
148
|
- name: Install latest Bundler
|
148
149
|
run: |
|
149
|
-
|
150
|
-
|
150
|
+
if [[ "$(ruby -v | awk '{print $2}')" == 2.7.8* ]]; then
|
151
|
+
gem install bundler -v 2.4.22 --no-document
|
152
|
+
bundle config set version 2.4.22
|
153
|
+
gem update --system 3.4.22 --no-document
|
154
|
+
else
|
155
|
+
gem install bundler --no-document
|
156
|
+
gem update --system --no-document
|
157
|
+
fi
|
158
|
+
|
151
159
|
bundle config set without 'tools benchmarks docs'
|
152
160
|
|
153
161
|
- name: Bundle install
|
154
162
|
run: |
|
155
163
|
bundle config set without development
|
156
|
-
|
164
|
+
|
165
|
+
if [[ "$(ruby -v | awk '{print $2}')" == 2.7.8* ]]; then
|
166
|
+
BUNDLER_VERSION=2.4.22 bundle install --jobs 4 --retry 3
|
167
|
+
else
|
168
|
+
bundle install --jobs 4 --retry 3
|
169
|
+
fi
|
157
170
|
|
158
171
|
- name: Wait for Kafka
|
159
172
|
run: |
|
@@ -170,7 +183,7 @@ jobs:
|
|
170
183
|
fail-fast: false
|
171
184
|
matrix:
|
172
185
|
ruby:
|
173
|
-
- '3.3
|
186
|
+
- '3.3'
|
174
187
|
- '3.2'
|
175
188
|
- '3.1'
|
176
189
|
- '3.0'
|
@@ -188,17 +201,30 @@ jobs:
|
|
188
201
|
uses: ruby/setup-ruby@v1
|
189
202
|
with:
|
190
203
|
ruby-version: ${{matrix.ruby}}
|
204
|
+
bundler: 'latest'
|
191
205
|
|
192
206
|
- name: Install latest Bundler
|
193
207
|
run: |
|
194
|
-
|
195
|
-
|
208
|
+
if [[ "$(ruby -v | awk '{print $2}')" == 2.7.8* ]]; then
|
209
|
+
gem install bundler -v 2.4.22 --no-document
|
210
|
+
bundle config set version 2.4.22
|
211
|
+
gem update --system 3.4.22 --no-document
|
212
|
+
else
|
213
|
+
gem install bundler --no-document
|
214
|
+
gem update --system --no-document
|
215
|
+
fi
|
216
|
+
|
196
217
|
bundle config set without 'tools benchmarks docs'
|
197
218
|
|
198
219
|
- name: Bundle install
|
199
220
|
run: |
|
200
221
|
bundle config set without development
|
201
|
-
|
222
|
+
|
223
|
+
if [[ "$(ruby -v | awk '{print $2}')" == 2.7.8* ]]; then
|
224
|
+
BUNDLER_VERSION=2.4.22 bundle install --jobs 4 --retry 3
|
225
|
+
else
|
226
|
+
bundle install --jobs 4 --retry 3
|
227
|
+
fi
|
202
228
|
|
203
229
|
- name: Wait for Kafka
|
204
230
|
run: |
|
data/.ruby-version
CHANGED
@@ -1 +1 @@
|
|
1
|
-
3.
|
1
|
+
3.3.0
|
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,28 @@
|
|
1
1
|
# Karafka framework changelog
|
2
2
|
|
3
|
+
## 2.3.0 (Unreleased)
|
4
|
+
- **[Feature]** Provide ability to multiplex subscription groups (Pro)
|
5
|
+
- **[Feature]** Provide `Karafka::Admin::Acl` for Kafka ACL management via the Admin APIs.
|
6
|
+
- **[Feature]** Periodic Jobs (Pro)
|
7
|
+
- **[Feature]** Offset Metadata storage (Pro)
|
8
|
+
- **[Feature]** Provide low-level listeners management API for dynamic resources scaling (Pro)
|
9
|
+
- [Enhancement] Improve shutdown process by allowing for parallel connections shutdown.
|
10
|
+
- [Enhancement] Introduce `non_blocking` routing API that aliases LRJ to indicate a different use-case for LRJ flow approach.
|
11
|
+
- [Enhancement] Allow to reset offset when seeking backwards by using the `reset_offset` keyword attribute set to `true`.
|
12
|
+
- [Enhancement] Alias producer operations in consumer to skip `#producer` reference.
|
13
|
+
- [Enhancement] Provide an `:independent` configuration to DLQ allowing to reset pause count track on each marking as consumed when retrying.
|
14
|
+
- [Enhancement] Remove no longer needed shutdown patches for `librdkafka` improving multi-sg shutdown times for `cooperative-sticky`.
|
15
|
+
- [Enhancement] Allow for parallel closing of connections from independent consumer groups.
|
16
|
+
- [Change] Make `Kubernetes::LivenessListener` not start until Karafka app starts running.
|
17
|
+
- [Change] Remove the legacy "inside of topics" way of defining subscription groups names
|
18
|
+
- [Change] Update supported instrumentation to report on `#tick`.
|
19
|
+
- [Refactor] Replace `define_method` with `class_eval` in some locations.
|
20
|
+
- [Fix] Fix a case where internal Idle job scheduling would go via the consumption flow.
|
21
|
+
- [Fix] Make the Iterator `#stop_partition` work with karafka-rdkafka `0.14.6`.
|
22
|
+
- [Fix] Ensure Pro components are not loaded during OSS specs execution (not affecting usage).
|
23
|
+
- [Fix] Fix invalid action label for consumers in DataDog logger instrumentation.
|
24
|
+
- [Ignore] option --include-consumer-groups not working as intended after removal of "thor"
|
25
|
+
|
3
26
|
## 2.2.14 (2023-12-07)
|
4
27
|
- **[Feature]** Provide `Karafka::Admin#delete_consumer_group` and `Karafka::Admin#seek_consumer_group`.
|
5
28
|
- **[Feature]** Provide `Karafka::App.assignments` that will return real-time assignments tracking.
|
data/Gemfile.lock
CHANGED
@@ -1,9 +1,9 @@
|
|
1
1
|
PATH
|
2
2
|
remote: .
|
3
3
|
specs:
|
4
|
-
karafka (2.
|
5
|
-
karafka-core (>= 2.
|
6
|
-
waterdrop (>= 2.6.
|
4
|
+
karafka (2.3.0.alpha1)
|
5
|
+
karafka-core (>= 2.3.0.alpha1, < 2.4.0)
|
6
|
+
waterdrop (>= 2.6.12, < 3.0.0)
|
7
7
|
zeitwerk (~> 2.3)
|
8
8
|
|
9
9
|
GEM
|
@@ -23,7 +23,7 @@ GEM
|
|
23
23
|
mutex_m
|
24
24
|
tzinfo (~> 2.0)
|
25
25
|
base64 (0.2.0)
|
26
|
-
bigdecimal (3.1.
|
26
|
+
bigdecimal (3.1.5)
|
27
27
|
byebug (11.1.3)
|
28
28
|
concurrent-ruby (1.2.2)
|
29
29
|
connection_pool (2.4.1)
|
@@ -32,17 +32,16 @@ GEM
|
|
32
32
|
drb (2.2.0)
|
33
33
|
ruby2_keywords
|
34
34
|
erubi (1.12.0)
|
35
|
-
factory_bot (6.4.
|
35
|
+
factory_bot (6.4.5)
|
36
36
|
activesupport (>= 5.0.0)
|
37
37
|
ffi (1.16.3)
|
38
38
|
globalid (1.2.1)
|
39
39
|
activesupport (>= 6.1)
|
40
40
|
i18n (1.14.1)
|
41
41
|
concurrent-ruby (~> 1.0)
|
42
|
-
karafka-core (2.
|
43
|
-
|
44
|
-
|
45
|
-
karafka-rdkafka (0.14.1)
|
42
|
+
karafka-core (2.3.0.alpha1)
|
43
|
+
karafka-rdkafka (>= 0.14.7, < 0.15.0)
|
44
|
+
karafka-rdkafka (0.14.7)
|
46
45
|
ffi (~> 1.15)
|
47
46
|
mini_portile2 (~> 2.6)
|
48
47
|
rake (> 12)
|
@@ -57,7 +56,7 @@ GEM
|
|
57
56
|
mutex_m (0.2.0)
|
58
57
|
rack (3.0.8)
|
59
58
|
rake (13.1.0)
|
60
|
-
roda (3.
|
59
|
+
roda (3.75.0)
|
61
60
|
rack
|
62
61
|
rspec (3.12.0)
|
63
62
|
rspec-core (~> 3.12.0)
|
@@ -82,12 +81,13 @@ GEM
|
|
82
81
|
tilt (2.3.0)
|
83
82
|
tzinfo (2.0.6)
|
84
83
|
concurrent-ruby (~> 1.0)
|
85
|
-
waterdrop (2.6.
|
84
|
+
waterdrop (2.6.12)
|
86
85
|
karafka-core (>= 2.2.3, < 3.0.0)
|
87
86
|
zeitwerk (~> 2.3)
|
88
87
|
zeitwerk (2.6.12)
|
89
88
|
|
90
89
|
PLATFORMS
|
90
|
+
ruby
|
91
91
|
x86_64-linux
|
92
92
|
|
93
93
|
DEPENDENCIES
|
@@ -100,4 +100,4 @@ DEPENDENCIES
|
|
100
100
|
simplecov
|
101
101
|
|
102
102
|
BUNDLED WITH
|
103
|
-
2.
|
103
|
+
2.5.3
|
data/README.md
CHANGED
@@ -4,8 +4,6 @@
|
|
4
4
|
[![Gem Version](https://badge.fury.io/rb/karafka.svg)](http://badge.fury.io/rb/karafka)
|
5
5
|
[![Join the chat at https://slack.karafka.io](https://raw.githubusercontent.com/karafka/misc/master/slack.svg)](https://slack.karafka.io)
|
6
6
|
|
7
|
-
**Note**: Upgrade instructions for migration from Karafka `1.4` to Karafka `2.0` can be found [here](https://karafka.io/docs/Upgrades-2.0/).
|
8
|
-
|
9
7
|
## About Karafka
|
10
8
|
|
11
9
|
Karafka is a Ruby and Rails multi-threaded efficient Kafka processing framework that:
|
data/SECURITY.md
ADDED
@@ -0,0 +1,23 @@
|
|
1
|
+
# Security Policy
|
2
|
+
|
3
|
+
## Supported Versions
|
4
|
+
|
5
|
+
Please refer to the Karafka [EOL documentation](https://karafka.io/docs/Versions-Lifecycle-and-EOL/) page for detailed information on which versions are actively supported with security updates.
|
6
|
+
|
7
|
+
## Reporting a Vulnerability
|
8
|
+
|
9
|
+
If you have identified a potential security vulnerability in our projects, we encourage you to report it immediately. We take all reports of security issues seriously and will work diligently to address them.
|
10
|
+
|
11
|
+
To report a vulnerability, please send an email directly to contact@karafka.io.
|
12
|
+
|
13
|
+
We understand the importance of addressing security vulnerabilities promptly. You can expect a reply from us within 2 working days of your report. This initial response will confirm receipt of your report.
|
14
|
+
|
15
|
+
After acknowledging your report, we will:
|
16
|
+
|
17
|
+
- Evaluate the reported vulnerability in the context of our project.
|
18
|
+
- Provide you with regular updates on our progress.
|
19
|
+
- Upon completing our assessment, we will inform you of the outcome. This includes whether the vulnerability will be accepted or declined for further action.
|
20
|
+
|
21
|
+
Your report will be kept confidential and not disclosed to third parties without your consent, except as required by law.
|
22
|
+
|
23
|
+
We appreciate your assistance in keeping our projects and their users safe by responsibly reporting vulnerabilities. Together, we can maintain a high standard of security for our community.
|
data/config/locales/errors.yml
CHANGED
@@ -30,12 +30,17 @@ en:
|
|
30
30
|
internal.tick_interval_format: needs to be an integer bigger or equal to 1000
|
31
31
|
internal.routing.builder_format: needs to be present
|
32
32
|
internal.routing.subscription_groups_builder_format: needs to be present
|
33
|
+
internal.connection.manager_format: needs to be present
|
34
|
+
internal.connection.conductor_format: needs to be present
|
33
35
|
internal.connection.proxy.query_watermark_offsets.timeout_format: needs to be an integer bigger than 0
|
34
36
|
internal.connection.proxy.query_watermark_offsets.max_attempts_format: needs to be an integer bigger than 0
|
35
37
|
internal.connection.proxy.query_watermark_offsets.wait_time_format: needs to be an integer bigger than 0
|
36
38
|
internal.connection.proxy.offsets_for_times.timeout_format: needs to be an integer bigger than 0
|
37
39
|
internal.connection.proxy.offsets_for_times.max_attempts_format: needs to be an integer bigger than 0
|
38
40
|
internal.connection.proxy.offsets_for_times.wait_time_format: needs to be an integer bigger than 0
|
41
|
+
internal.connection.proxy.committed.timeout_format: needs to be an integer bigger than 0
|
42
|
+
internal.connection.proxy.committed.max_attempts_format: needs to be an integer bigger than 0
|
43
|
+
internal.connection.proxy.committed.wait_time_format: needs to be an integer bigger than 0
|
39
44
|
key_must_be_a_symbol: All keys under the kafka settings scope need to be symbols
|
40
45
|
max_timeout_vs_pause_max_timeout: pause_timeout must be less or equal to pause_max_timeout
|
41
46
|
shutdown_timeout_vs_max_wait_time: shutdown_timeout must be more than max_wait_time
|
@@ -61,7 +66,7 @@ en:
|
|
61
66
|
consumer_format: needs to be present
|
62
67
|
id_format: 'needs to be a string with a Kafka accepted format'
|
63
68
|
initial_offset_format: needs to be either earliest or latest
|
64
|
-
|
69
|
+
subscription_group_details.name_format: must be a non-empty string
|
65
70
|
manual_offset_management.active_format: needs to be either true or false
|
66
71
|
manual_offset_management_must_be_enabled: cannot be disabled for ActiveJob topics
|
67
72
|
inline_insights.active_format: needs to be either true or false
|
@@ -69,6 +74,7 @@ en:
|
|
69
74
|
dead_letter_queue.max_retries_format: needs to be equal or bigger than 0
|
70
75
|
dead_letter_queue.topic_format: 'needs to be a string with a Kafka accepted format'
|
71
76
|
dead_letter_queue.active_format: needs to be either true or false
|
77
|
+
dead_letter_queue.independent_format: needs to be either true or false
|
72
78
|
active_format: needs to be either true or false
|
73
79
|
declaratives.partitions_format: needs to be more or equal to 1
|
74
80
|
declaratives.active_format: needs to be true
|
@@ -3,6 +3,7 @@ en:
|
|
3
3
|
topic:
|
4
4
|
virtual_partitions.partitioner_respond_to_call: needs to be defined and needs to respond to `#call`
|
5
5
|
virtual_partitions.max_partitions_format: needs to be equal or more than 1
|
6
|
+
virtual_partitions.offset_metadata_strategy_format: needs to be either :exact or :current
|
6
7
|
|
7
8
|
long_running_job.active_format: needs to be either true or false
|
8
9
|
|
@@ -31,9 +32,30 @@ en:
|
|
31
32
|
patterns.active_format: 'needs to be boolean'
|
32
33
|
patterns.type_format: 'needs to be :matcher, :discovered or :regular'
|
33
34
|
|
35
|
+
periodic_job.active_missing: needs to be present
|
36
|
+
periodic_job.active_format: 'needs to be boolean'
|
37
|
+
periodic_job.interval_missing: 'needs to be present'
|
38
|
+
periodic_job.interval_format: 'needs to be an integer equal or more than 100'
|
39
|
+
periodic_job.during_pause_format: 'needs to be boolean'
|
40
|
+
periodic_job.during_retry_format: 'needs to be boolean'
|
41
|
+
periodic_job.materialized_format: 'needs to be boolean'
|
42
|
+
periodic_job.materialized_missing: 'needs to be present'
|
43
|
+
|
34
44
|
inline_insights.active_format: 'needs to be boolean'
|
35
45
|
inline_insights.required_format: 'needs to be boolean'
|
36
46
|
|
47
|
+
offset_metadata.active_format: 'needs to be boolean'
|
48
|
+
offset_metadata.cache_format: 'needs to be boolean'
|
49
|
+
offset_metadata.deserializer_missing: needs to be present
|
50
|
+
offset_metadata.deserializer_format: 'needs to respond to #call'
|
51
|
+
|
52
|
+
subscription_group_details.multiplexing_min_format: 'needs to be an integer equal or more than 1'
|
53
|
+
subscription_group_details.multiplexing_max_format: 'needs to be an integer equal or more than 1'
|
54
|
+
subscription_group_details_multiplexing_min_max_mismatch: 'min needs to be equal or less than max'
|
55
|
+
subscription_group_details_multiplexing_boot_mismatch: 'boot needs to be between min and max'
|
56
|
+
subscription_group_details.multiplexing_boot_format: 'needs to be an integer equal or more than 1'
|
57
|
+
subscription_group_details.multiplexing_boot_not_dynamic: 'needs to be equal to max when not in dynamic mode'
|
58
|
+
|
37
59
|
consumer_group:
|
38
60
|
patterns_format: must be an array with hashes
|
39
61
|
patterns_missing: needs to be present
|
data/docker-compose.yml
CHANGED
data/karafka.gemspec
CHANGED
@@ -21,8 +21,8 @@ Gem::Specification.new do |spec|
|
|
21
21
|
without having to focus on things that are not your business domain.
|
22
22
|
DESC
|
23
23
|
|
24
|
-
spec.add_dependency 'karafka-core', '>= 2.
|
25
|
-
spec.add_dependency 'waterdrop', '>= 2.6.
|
24
|
+
spec.add_dependency 'karafka-core', '>= 2.3.0.alpha1', '< 2.4.0'
|
25
|
+
spec.add_dependency 'waterdrop', '>= 2.6.12', '< 3.0.0'
|
26
26
|
spec.add_dependency 'zeitwerk', '~> 2.3'
|
27
27
|
|
28
28
|
if $PROGRAM_NAME.end_with?('gem')
|
@@ -0,0 +1,287 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module Karafka
|
4
|
+
module Admin
|
5
|
+
# Struct and set of operations for ACLs management that simplifies their usage.
|
6
|
+
# It allows to use Ruby symbol based definitions instead of usage of librdkafka types
|
7
|
+
# (it allows to use rdkafka numerical types as well out of the box)
|
8
|
+
#
|
9
|
+
# We map the numerical values because they are less descriptive and harder to follow.
|
10
|
+
#
|
11
|
+
# This API works based on ability to create a `Karafka::Admin::Acl` object that can be then used
|
12
|
+
# using `#create`, `#delete` and `#describe` class API.
|
13
|
+
class Acl
|
14
|
+
# Types of resources for which we can assign permissions.
|
15
|
+
#
|
16
|
+
# Resource refers to any entity within the Kafka ecosystem for which access control can be
|
17
|
+
# managed using ACLs (Access Control Lists).
|
18
|
+
# These resources represent different components of Kafka, such as topics, consumer groups,
|
19
|
+
# and the Kafka cluster itself. ACLs can be applied to these resources to control and
|
20
|
+
# restrict reading, writing, and administrative operations, ensuring secure and authorized
|
21
|
+
# access to Kafka's functionalities.
|
22
|
+
RESOURCE_TYPES_MAP = {
|
23
|
+
# `:any` is only used for lookups and cannot be used for permission assignments
|
24
|
+
any: Rdkafka::Bindings::RD_KAFKA_RESOURCE_ANY,
|
25
|
+
# use when you want to assign acl to a given topic
|
26
|
+
topic: Rdkafka::Bindings::RD_KAFKA_RESOURCE_TOPIC,
|
27
|
+
# use when you want to assign acl to a given consumer group
|
28
|
+
consumer_group: Rdkafka::Bindings::RD_KAFKA_RESOURCE_GROUP,
|
29
|
+
# use when you want to assign acl to a given broker
|
30
|
+
broker: Rdkafka::Bindings::RD_KAFKA_RESOURCE_BROKER
|
31
|
+
}.freeze
|
32
|
+
|
33
|
+
# Resource pattern types define how ACLs (Access Control Lists) are applied to resources,
|
34
|
+
# specifying the scope and applicability of access rules.
|
35
|
+
# They determine whether an ACL should apply to a specific named resource, a prefixed group
|
36
|
+
# of resources, or all resources of a particular type.
|
37
|
+
RESOURCE_PATTERNS_TYPE_MAP = {
|
38
|
+
# `:any` is only used for lookups and cannot be used for permission assignments
|
39
|
+
any: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_ANY,
|
40
|
+
# Targets resources with a pattern matching for broader control with a single rule.
|
41
|
+
match: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_MATCH,
|
42
|
+
# Targets a specific named resource, applying ACLs directly to that resource.
|
43
|
+
literal: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL,
|
44
|
+
# Applies ACLs to all resources with a common name prefix, enabling broader control with a
|
45
|
+
# single rule.
|
46
|
+
prefixed: Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_PREFIXED
|
47
|
+
}.freeze
|
48
|
+
|
49
|
+
# ACL operations define the actions that can be performed on Kafka resources. Each operation
|
50
|
+
# represents a specific type of access or action that can be allowed or denied.
|
51
|
+
OPERATIONS_MAP = {
|
52
|
+
# `:any` is only used for lookups
|
53
|
+
any: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_ANY,
|
54
|
+
# Grants complete access to a resource, encompassing all possible operations,
|
55
|
+
# typically used for unrestricted control.
|
56
|
+
all: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_ALL,
|
57
|
+
# Grants the ability to read data from a topic or a consumer group.
|
58
|
+
read: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_READ,
|
59
|
+
# Allows for writing data on a topic.
|
60
|
+
write: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_WRITE,
|
61
|
+
# Permits the creation of topics or consumer groups.
|
62
|
+
create: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_CREATE,
|
63
|
+
# Enables the deletion of topics.
|
64
|
+
delete: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_DELETE,
|
65
|
+
# Allows modification of topics or consumer groups.
|
66
|
+
alter: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_ALTER,
|
67
|
+
# Grants the ability to view metadata and configurations of topics or consumer groups.
|
68
|
+
describe: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_DESCRIBE,
|
69
|
+
# Permits actions that apply to the Kafka cluster, like broker management.
|
70
|
+
cluster_action: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION,
|
71
|
+
# Allows viewing configurations for resources like topics and brokers.
|
72
|
+
describe_configs: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS,
|
73
|
+
# Enables modification of configurations for resources.
|
74
|
+
alter_configs: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS,
|
75
|
+
# Grants the ability to perform idempotent writes, ensuring exactly-once semantics in
|
76
|
+
# message production.
|
77
|
+
idempotent_write: Rdkafka::Bindings::RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE
|
78
|
+
}.freeze
|
79
|
+
|
80
|
+
# ACL permission types specify the nature of the access control applied to Kafka resources.
|
81
|
+
# These types are used to either grant or deny specified operations.
|
82
|
+
PERMISSION_TYPES_MAP = {
|
83
|
+
# Used for lookups, indicating no specific permission type.
|
84
|
+
any: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
|
85
|
+
# Grants the specified operations, enabling the associated actions on the resource.
|
86
|
+
allow: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
|
87
|
+
# Blocks the specified operations, preventing the associated actions on the resource.
|
88
|
+
deny: Rdkafka::Bindings::RD_KAFKA_ACL_PERMISSION_TYPE_DENY
|
89
|
+
}.freeze
|
90
|
+
|
91
|
+
# Array with all maps used for the Acls support
|
92
|
+
ALL_MAPS = [
|
93
|
+
RESOURCE_TYPES_MAP,
|
94
|
+
RESOURCE_PATTERNS_TYPE_MAP,
|
95
|
+
OPERATIONS_MAP,
|
96
|
+
PERMISSION_TYPES_MAP
|
97
|
+
].freeze
|
98
|
+
|
99
|
+
private_constant :RESOURCE_TYPES_MAP, :RESOURCE_PATTERNS_TYPE_MAP, :OPERATIONS_MAP,
|
100
|
+
:PERMISSION_TYPES_MAP, :ALL_MAPS
|
101
|
+
|
102
|
+
# Class level APIs that operate on Acl instances and/or return Acl instances.
|
103
|
+
# @note For the sake of consistency all methods from this API return array of Acls
|
104
|
+
class << self
|
105
|
+
# Creates (unless already present) a given ACL rule in Kafka
|
106
|
+
# @param acl [Acl]
|
107
|
+
# @return [Array<Acl>] created acls
|
108
|
+
def create(acl)
|
109
|
+
with_admin_wait do |admin|
|
110
|
+
admin.create_acl(**acl.to_native_hash)
|
111
|
+
end
|
112
|
+
|
113
|
+
[acl]
|
114
|
+
end
|
115
|
+
|
116
|
+
# Removes acls matching the provided acl pattern.
|
117
|
+
# @param acl [Acl]
|
118
|
+
# @return [Array<Acl>] deleted acls
|
119
|
+
# @note More than one Acl may be removed if rules match that way
|
120
|
+
def delete(acl)
|
121
|
+
result = with_admin_wait do |admin|
|
122
|
+
admin.delete_acl(**acl.to_native_hash)
|
123
|
+
end
|
124
|
+
|
125
|
+
result.deleted_acls.map do |result_acl|
|
126
|
+
from_rdkafka(result_acl)
|
127
|
+
end
|
128
|
+
end
|
129
|
+
|
130
|
+
# Takes an Acl definition and describes all existing Acls matching its criteria
|
131
|
+
# @param acl [Acl]
|
132
|
+
# @return [Array<Acl>] described acls
|
133
|
+
def describe(acl)
|
134
|
+
result = with_admin_wait do |admin|
|
135
|
+
admin.describe_acl(**acl.to_native_hash)
|
136
|
+
end
|
137
|
+
|
138
|
+
result.acls.map do |result_acl|
|
139
|
+
from_rdkafka(result_acl)
|
140
|
+
end
|
141
|
+
end
|
142
|
+
|
143
|
+
# Returns all acls on a cluster level
|
144
|
+
# @return [Array<Acl>] all acls
|
145
|
+
def all
|
146
|
+
describe(
|
147
|
+
new(
|
148
|
+
resource_type: :any,
|
149
|
+
resource_name: nil,
|
150
|
+
resource_pattern_type: :any,
|
151
|
+
principal: nil,
|
152
|
+
operation: :any,
|
153
|
+
permission_type: :any,
|
154
|
+
host: '*'
|
155
|
+
)
|
156
|
+
)
|
157
|
+
end
|
158
|
+
|
159
|
+
private
|
160
|
+
|
161
|
+
# Yields admin instance, allows to run Acl operations and awaits on the final result
|
162
|
+
# Makes sure that admin is closed afterwards.
|
163
|
+
def with_admin_wait
|
164
|
+
Admin.with_admin do |admin|
|
165
|
+
yield(admin).wait(max_wait_timeout: Karafka::App.config.admin.max_wait_time)
|
166
|
+
end
|
167
|
+
end
|
168
|
+
|
169
|
+
# Takes a rdkafka Acl result and converts it into our local Acl representation. Since the
|
170
|
+
# rdkafka Acl object is an integer based on types, etc we remap it into our "more" Ruby
|
171
|
+
# form.
|
172
|
+
#
|
173
|
+
# @param rdkafka_acl [Rdkafka::Admin::AclBindingResult]
|
174
|
+
# @return [Acl] mapped acl
|
175
|
+
def from_rdkafka(rdkafka_acl)
|
176
|
+
new(
|
177
|
+
resource_type: rdkafka_acl.matching_acl_resource_type,
|
178
|
+
resource_name: rdkafka_acl.matching_acl_resource_name,
|
179
|
+
resource_pattern_type: rdkafka_acl.matching_acl_pattern_type,
|
180
|
+
principal: rdkafka_acl.matching_acl_principal,
|
181
|
+
host: rdkafka_acl.matching_acl_host,
|
182
|
+
operation: rdkafka_acl.matching_acl_operation,
|
183
|
+
permission_type: rdkafka_acl.matching_acl_permission_type
|
184
|
+
)
|
185
|
+
end
|
186
|
+
end
|
187
|
+
|
188
|
+
# Read-only access to all the Acl attributes. Where a *_MAP mapping exists, the stored
# value is the mapped (symbol) form produced in the constructor.
attr_reader :resource_type, :resource_name, :resource_pattern_type, :principal, :host,
            :operation, :permission_type
|
190
|
+
|
191
|
+
# Initializes a new, frozen Acl instance with the specified attributes.
#
# @param resource_type [Symbol, Integer] Specifies the type of Kafka resource
#   (like :topic, :consumer_group).
#   Accepts either a symbol from RESOURCE_TYPES_MAP or a direct rdkafka numerical type.
# @param resource_name [String, nil] The name of the Kafka resource
#   (like a specific topic name). Can be nil for certain types of resource pattern types.
# @param resource_pattern_type [Symbol, Integer] Determines how the ACL is applied to the
#   resource. Uses a symbol from RESOURCE_PATTERNS_TYPE_MAP or a direct rdkafka numerical
#   type.
# @param principal [String, nil] Specifies the principal (user or client) for which the ACL
#   is being defined. Can be nil if not applicable.
# @param host [String] (default: '*') Defines the host from which the principal can access
#   the resource. Defaults to '*' for all hosts.
# @param operation [Symbol, Integer] Indicates the operation type allowed or denied by the
#   ACL. Uses a symbol from OPERATIONS_MAP or a direct rdkafka numerical type.
# @param permission_type [Symbol, Integer] Specifies whether to allow or deny the specified
#   operation. Uses a symbol from PERMISSION_TYPES_MAP or a direct rdkafka numerical type.
#
# Each parameter is mapped to its corresponding value in the respective *_MAP constant,
# allowing usage of more descriptive Ruby symbols instead of numerical types.
def initialize(
  resource_type:,
  resource_name:,
  resource_pattern_type:,
  principal:,
  host: '*',
  operation:,
  permission_type:
)
  # Plain attributes are stored as-is
  @resource_name = resource_name
  @principal = principal
  @host = host
  # Mapped attributes are validated and translated via the *_MAP constants. Their order
  # matches the parameter order so a validation failure raises on the same attribute as
  # before.
  @resource_type = map(resource_type, RESOURCE_TYPES_MAP)
  @resource_pattern_type = map(resource_pattern_type, RESOURCE_PATTERNS_TYPE_MAP)
  @operation = map(operation, OPERATIONS_MAP)
  @permission_type = map(permission_type, PERMISSION_TYPES_MAP)
  freeze
end
|
230
|
+
|
231
|
+
# Converts the Acl into a hash with native rdkafka numerical types.
#
# @return [Hash] frozen hash with attributes remapped to rdkafka numerical types
def to_native_hash
  native = {
    resource_type: remap(resource_type, RESOURCE_TYPES_MAP),
    resource_name: resource_name,
    resource_pattern_type: remap(resource_pattern_type, RESOURCE_PATTERNS_TYPE_MAP),
    principal: principal,
    host: host,
    operation: remap(operation, OPERATIONS_MAP),
    permission_type: remap(permission_type, PERMISSION_TYPES_MAP)
  }

  native.freeze
end
|
244
|
+
|
245
|
+
private
|
246
|
+
|
247
|
+
# Maps the provided attribute based on the mapping hash and if not found returns the
# attribute itself. Since the lookup runs against the inverted mappings, this is useful
# when converting from the rdkafka numerical representation to the Acl symbol based one
# (the opposite direction of #remap).
#
# @param value [Symbol, Integer] The value to be mapped.
# @param mappings [Hash] The hash containing the mapping data.
# @return [Integer, Symbol] The mapped value or the original value if not found in mappings.
def map(value, mappings)
  validate_attribute!(value)

  # invert flips symbol => numeric into numeric => symbol, falling back to the raw value
  mappings.invert.fetch(value, value)
end
|
259
|
+
|
260
|
+
# Remaps the provided attribute based on the mapping hash and if not found returns the
# attribute itself. Useful when converting from the Acl symbol based representation to
# the rdkafka one.
#
# @param value [Symbol, Integer] The value to be mapped.
# @param mappings [Hash] The hash containing the mapping data.
# @return [Integer, Symbol] The mapped value or the original value if not found in mappings.
def remap(value, mappings)
  validate_attribute!(value)

  return mappings[value] if mappings.key?(value)

  value
end
|
272
|
+
|
273
|
+
# Validates that the attribute exists in any of the ACL mappings, either as a symbol key
# or as its rdkafka numerical value. Raises an error if the attribute is not supported.
#
# @param attribute [Symbol, Integer] The attribute to be validated.
# @raise [Karafka::Errors::UnsupportedCaseError] raised if attribute not found
def validate_attribute!(attribute)
  known = ALL_MAPS.any? do |mappings|
    mappings.key?(attribute) || mappings.value?(attribute)
  end

  return if known

  raise Karafka::Errors::UnsupportedCaseError, attribute
end
|
285
|
+
end
|
286
|
+
end
|
287
|
+
end
|