karafka 2.5.0.beta1 → 2.5.0.rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci.yml +5 -5
  3. data/.github/workflows/push.yml +35 -0
  4. data/CHANGELOG.md +17 -2
  5. data/Gemfile +3 -3
  6. data/Gemfile.lock +37 -15
  7. data/README.md +1 -1
  8. data/Rakefile +4 -0
  9. data/bin/integrations +2 -1
  10. data/examples/payloads/avro/.gitkeep +0 -0
  11. data/karafka.gemspec +1 -6
  12. data/lib/karafka/admin/configs.rb +5 -1
  13. data/lib/karafka/admin.rb +18 -15
  14. data/lib/karafka/cli/topics/align.rb +7 -4
  15. data/lib/karafka/cli/topics/base.rb +17 -0
  16. data/lib/karafka/cli/topics/create.rb +9 -7
  17. data/lib/karafka/cli/topics/delete.rb +4 -2
  18. data/lib/karafka/cli/topics/help.rb +39 -0
  19. data/lib/karafka/cli/topics/repartition.rb +4 -2
  20. data/lib/karafka/cli/topics.rb +10 -3
  21. data/lib/karafka/cli.rb +2 -0
  22. data/lib/karafka/connection/client.rb +19 -2
  23. data/lib/karafka/connection/proxy.rb +1 -1
  24. data/lib/karafka/constraints.rb +3 -3
  25. data/lib/karafka/errors.rb +35 -2
  26. data/lib/karafka/helpers/interval_runner.rb +8 -0
  27. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +5 -0
  28. data/lib/karafka/pro/processing/strategies/dlq/default.rb +4 -3
  29. data/lib/karafka/pro/scheduled_messages/consumer.rb +50 -14
  30. data/lib/karafka/pro/scheduled_messages/dispatcher.rb +2 -1
  31. data/lib/karafka/pro/scheduled_messages/serializer.rb +2 -4
  32. data/lib/karafka/pro/scheduled_messages/state.rb +20 -23
  33. data/lib/karafka/pro/scheduled_messages/tracker.rb +34 -8
  34. data/lib/karafka/server.rb +14 -19
  35. data/lib/karafka/version.rb +1 -1
  36. metadata +13 -37
  37. checksums.yaml.gz.sig +0 -0
  38. data/certs/cert.pem +0 -26
  39. data.tar.gz.sig +0 -1
  40. metadata.gz.sig +0 -0
  41. /data/examples/payloads/json/{enrollment_event.json → sample_set_01/enrollment_event.json} +0 -0
  42. /data/examples/payloads/json/{ingestion_event.json → sample_set_01/ingestion_event.json} +0 -0
  43. /data/examples/payloads/json/{transaction_event.json → sample_set_01/transaction_event.json} +0 -0
  44. /data/examples/payloads/json/{user_event.json → sample_set_01/user_event.json} +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 4bcac2dd9a093cd85ab76342d9c227d552e9927e681e79f55963a9d13e64f79d
- data.tar.gz: 8b5e32c1c1099b8e654599c6df25c33816c3a4a315ffe7e0de06c49a2b4e720e
+ metadata.gz: 2a66089d998c0dabb1070e4e8f1895a068e8f2aa8e752fb38ef9da1633b9704d
+ data.tar.gz: 188ea36894e0a32168303654510ef2e072d19f8ad39e5f0155547b6c96dbfdb2
  SHA512:
- metadata.gz: d2ffa6709d42103eea487e44b6756d9b449bd762b162e8ce35283b2513b26c2fde8ef559b900c7932d1fe4e3e8c771d9874d1ed54a572e3e94f6df067eb354d8
- data.tar.gz: 9bfafb2b2c8ec8975e55fe1bddeaaedfb4d7fd1cedae36abdfed45819e8136eb614922d4f52c0b011d97602ad728dc7a860a3025e9ab1694720b0f35af00429d
+ metadata.gz: d3ee8f86dd3b26dea69f9e03972ac3aced8b76d8156a9c359cb2b50114f3156306281a7f147935fa4d566e2d398f0a343205255a96f9560528b9dc2d21ca166c
+ data.tar.gz: 843e553470b78b107080df06ae7f7bd716d5d08363fb0d381709522aed397bb5da6d46c0d1aee06a72c6ba063b112118a669ff3f079fef4c53f672cf08ed1ee0
data/.github/workflows/ci.yml CHANGED
@@ -1,4 +1,4 @@
- name: ci
+ name: CI

  concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -31,7 +31,7 @@ jobs:
  fetch-depth: 0

  - name: Set up Ruby
- uses: ruby/setup-ruby@e34163cd15f4bb403dcd72d98e295997e6a55798 # v1.238.0
+ uses: ruby/setup-ruby@13e7a03dc3ac6c3798f4570bfead2aed4d96abfb # v1.244.0
  with:
  ruby-version: 3.4
  bundler-cache: true
@@ -118,7 +118,7 @@ jobs:
  run: rm -f Gemfile.lock

  - name: Set up Ruby
- uses: ruby/setup-ruby@e34163cd15f4bb403dcd72d98e295997e6a55798 # v1.238.0
+ uses: ruby/setup-ruby@13e7a03dc3ac6c3798f4570bfead2aed4d96abfb # v1.244.0
  with:
  ruby-version: ${{matrix.ruby}}
  bundler-cache: true
@@ -164,7 +164,7 @@ jobs:
  docker compose up -d || (sleep 5 && docker compose up -d)

  - name: Set up Ruby
- uses: ruby/setup-ruby@e34163cd15f4bb403dcd72d98e295997e6a55798 # v1.238.0
+ uses: ruby/setup-ruby@13e7a03dc3ac6c3798f4570bfead2aed4d96abfb # v1.244.0
  with:
  # Do not use cache here as we run bundle install also later in some of the integration
  # tests and we need to be able to run it without cache
@@ -228,7 +228,7 @@ jobs:
  docker compose up -d || (sleep 5 && docker compose up -d)

  - name: Set up Ruby
- uses: ruby/setup-ruby@e34163cd15f4bb403dcd72d98e295997e6a55798 # v1.238.0
+ uses: ruby/setup-ruby@13e7a03dc3ac6c3798f4570bfead2aed4d96abfb # v1.244.0
  with:
  ruby-version: ${{matrix.ruby}}
  bundler: 'latest'
data/.github/workflows/push.yml ADDED
@@ -0,0 +1,35 @@
+ name: Push Gem
+
+ on:
+ push:
+ tags:
+ - v*
+
+ permissions:
+ contents: read
+
+ jobs:
+ push:
+ if: github.repository_owner == 'karafka'
+ runs-on: ubuntu-latest
+ environment: deployment
+
+ permissions:
+ contents: write
+ id-token: write
+
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ fetch-depth: 0
+
+ - name: Set up Ruby
+ uses: ruby/setup-ruby@13e7a03dc3ac6c3798f4570bfead2aed4d96abfb # v1.244.0
+ with:
+ bundler-cache: false
+
+ - name: Bundle install
+ run: |
+ bundle install --jobs 4 --retry 3
+
+ - uses: rubygems/release-gem@a25424ba2ba8b387abc8ef40807c2c85b96cbe32 # v1.1.1
data/CHANGELOG.md CHANGED
@@ -1,6 +1,8 @@
  # Karafka Framework Changelog

- ## 2.4.19 (Unreleased)
+ ## 2.5.0 (Unreleased)
+ - **[Breaking]** Change how consistency of DLQ dispatches works in Pro (`partition_key` vs. direct partition id mapping).
+ - **[Breaking]** Remove the headers `source_key` from the Pro DLQ dispatched messages as the original key is now fully preserved.
  - **[Breaking]** Use DLQ and Piping prefix `source_` instead of `original_` to align with naming convention of Kafka Streams and Apache Flink for future usage.
  - **[Breaking]** Rename scheduled jobs topics names in their config (Pro).
  - **[Feature]** Parallel Segments for concurrent processing of the same partition with more than partition count of processes (Pro).
@@ -30,12 +32,22 @@
  - [Enhancement] Set `topic.metadata.refresh.interval.ms` for default producer in dev to 5s to align with consumer setup.
  - [Enhancement] Alias `-2` and `-1` with `latest` and `earliest` for seeking.
  - [Enhancement] Allow for usage of `latest` and `earliest` in the `Karafka::Pro::Iterator`.
+ - [Enhancement] Failures during `topics migrate` (and other subcommands) don't show what topic failed, and why it's invalid.
+ - [Enhancement] Apply changes to topics configuration in atomic independent requests when using Declarative Topics.
+ - [Enhancement] Execute the help CLI command when no command provided (similar to Rails) to improve DX.
+ - [Enhancement] Remove backtrace from the CLI error for incorrect commands (similar to Rails) to improve DX.
+ - [Enhancement] Provide `karafka topics help` sub-help due to nesting of Declarative Topics actions.
+ - [Enhancement] Use independent keys for different states of reporting in scheduled messages.
+ - [Enhancement] Enrich scheduled messages state reporter with debug data.
+ - [Enhancement] Introduce a new state called `stopped` to the scheduled messages.
+ - [Enhancement] Do not overwrite the `key` in the Pro DLQ dispatched messages for routing reasons.
+ - [Enhancement] Introduce `errors_tracker.trace_id` for distributed error details correlation with the Web UI.
  - [Refactor] Introduce a `bin/verify_kafka_warnings` script to clean Kafka from temporary test-suite topics.
  - [Refactor] Introduce a `bin/verify_topics_naming` script to ensure proper test topics naming convention.
  - [Refactor] Make sure all temporary topics have a `it-` prefix in their name.
  - [Refactor] Improve CI specs parallelization.
  - [Maintenance] Lower the `Karafka::Admin` `poll_timeout` to 50 ms to improve responsiveness of admin operations.
- - [Maintenance] Require `karafka-rdkafka` `>=` `0.19.2` due to usage of `#rd_kafka_global_init`, KIP-82 and the new producer caching engine.
+ - [Maintenance] Require `karafka-rdkafka` `>=` `0.19.5` due to usage of `#rd_kafka_global_init`, KIP-82, new producer caching engine and improvements to the `partition_key` assignments.
  - [Maintenance] Add Deimos routing patch into integration suite not to break it in the future.
  - [Maintenance] Remove Rails `7.0` specs due to upcoming EOL.
  - [Fix] Fix Recurring Tasks and Scheduled Messages not working with Swarm (using closed producer).
@@ -52,6 +64,9 @@
  - [Fix] optparse double parse loses ARGV.
  - [Fix] `karafka` cannot be required without Bundler.
  - [Fix] Scheduled Messages re-seek moves to `latest` on inheritance of initial offset when `0` offset is compacted.
+ - [Fix] Seek to `:latest` without `topic_partition_position` (-1) will not seek at all.
+ - [Fix] Extremely high turn over of scheduled messages can cause them not to reach EOF/Loaded state.
+ - [Change] Move to trusted-publishers and remove signing since no longer needed.

  ## 2.4.18 (2025-04-09)
  - [Fix] Make sure `Bundler.with_unbundled_env` is not called multiple times.
data/Gemfile CHANGED
@@ -16,9 +16,9 @@ group :integrations, :test do
  end

  group :integrations do
- # gem 'activejob', require: false
- # gem 'karafka-testing', '>= 2.4.6', require: false
- # gem 'karafka-web', '>= 0.10.4', require: false
+ gem 'activejob', require: false
+ gem 'karafka-testing', '>= 2.5.0', require: false
+ gem 'karafka-web', '>= 0.11.0.beta1', require: false
  end

  group :test do
data/Gemfile.lock CHANGED
@@ -1,16 +1,19 @@
  PATH
  remote: .
  specs:
- karafka (2.5.0.beta1)
+ karafka (2.5.0.rc1)
  base64 (~> 0.2)
  karafka-core (>= 2.5.0, < 2.6.0)
- karafka-rdkafka (>= 0.19.2)
+ karafka-rdkafka (>= 0.19.5)
  waterdrop (>= 2.8.3, < 3.0.0)
  zeitwerk (~> 2.3)

  GEM
  remote: https://rubygems.org/
  specs:
+ activejob (8.0.2)
+ activesupport (= 8.0.2)
+ globalid (>= 0.3.6)
  activesupport (8.0.2)
  base64
  benchmark (>= 0.3)
@@ -25,7 +28,7 @@ GEM
  tzinfo (~> 2.0, >= 2.0.5)
  uri (>= 0.13.1)
  base64 (0.2.0)
- benchmark (0.4.0)
+ benchmark (0.4.1)
  bigdecimal (3.1.9)
  byebug (12.0.0)
  concurrent-ruby (1.3.5)
@@ -33,9 +36,10 @@ GEM
  diff-lcs (1.6.2)
  docile (1.4.1)
  drb (2.2.3)
+ erubi (1.13.1)
  et-orbi (1.2.11)
  tzinfo
- factory_bot (6.5.1)
+ factory_bot (6.5.2)
  activesupport (>= 6.1.0)
  ffi (1.17.2)
  ffi (1.17.2-aarch64-linux-gnu)
@@ -51,34 +55,48 @@ GEM
  fugit (1.11.1)
  et-orbi (~> 1, >= 1.2.11)
  raabro (~> 1.4)
+ globalid (1.2.1)
+ activesupport (>= 6.1)
  i18n (1.14.7)
  concurrent-ruby (~> 1.0)
- karafka-core (2.5.0)
+ karafka-core (2.5.1)
  karafka-rdkafka (>= 0.19.2, < 0.21.0)
  logger (>= 1.6.0)
- karafka-rdkafka (0.19.2)
+ karafka-rdkafka (0.19.5)
  ffi (~> 1.15)
  mini_portile2 (~> 2.6)
  rake (> 12)
+ karafka-testing (2.5.1)
+ karafka (>= 2.5.0.beta1, < 2.6.0)
+ waterdrop (>= 2.8.0)
+ karafka-web (0.11.0.beta3)
+ erubi (~> 1.4)
+ karafka (>= 2.5.0.beta1, < 2.6.0)
+ karafka-core (>= 2.5.0, < 2.6.0)
+ roda (~> 3.68, >= 3.69)
+ tilt (~> 2.0)
  logger (1.7.0)
  mini_portile2 (2.8.9)
  minitest (5.25.5)
  ostruct (0.6.1)
  raabro (1.4.0)
- rake (13.2.1)
- rspec (3.13.0)
+ rack (3.1.15)
+ rake (13.3.0)
+ roda (3.92.0)
+ rack
+ rspec (3.13.1)
  rspec-core (~> 3.13.0)
  rspec-expectations (~> 3.13.0)
  rspec-mocks (~> 3.13.0)
- rspec-core (3.13.3)
+ rspec-core (3.13.4)
  rspec-support (~> 3.13.0)
- rspec-expectations (3.13.4)
+ rspec-expectations (3.13.5)
  diff-lcs (>= 1.2.0, < 2.0)
  rspec-support (~> 3.13.0)
- rspec-mocks (3.13.4)
+ rspec-mocks (3.13.5)
  diff-lcs (>= 1.2.0, < 2.0)
  rspec-support (~> 3.13.0)
- rspec-support (3.13.3)
+ rspec-support (3.13.4)
  securerandom (0.4.1)
  simplecov (0.22.0)
  docile (~> 1.1)
@@ -87,14 +105,15 @@ GEM
  simplecov-html (0.13.1)
  simplecov_json_formatter (0.1.4)
  stringio (3.1.7)
+ tilt (2.6.0)
  tzinfo (2.0.6)
  concurrent-ruby (~> 1.0)
  uri (1.0.3)
- waterdrop (2.8.3)
+ waterdrop (2.8.4)
  karafka-core (>= 2.4.9, < 3.0.0)
- karafka-rdkafka (>= 0.19.1)
+ karafka-rdkafka (>= 0.19.2)
  zeitwerk (~> 2.3)
- zeitwerk (2.7.3)
+ zeitwerk (2.6.18)

  PLATFORMS
  aarch64-linux-gnu
@@ -110,10 +129,13 @@ PLATFORMS
  x86_64-linux-musl

  DEPENDENCIES
+ activejob
  byebug
  factory_bot
  fugit
  karafka!
+ karafka-testing (>= 2.5.0)
+ karafka-web (>= 0.11.0.beta1)
  ostruct
  rspec
  simplecov
data/README.md CHANGED
@@ -84,7 +84,7 @@ bundle exec karafka server

  I also sell Karafka Pro subscriptions. It includes a commercial-friendly license, priority support, architecture consultations, enhanced Web UI and high throughput data processing-related features (virtual partitions, long-running jobs, and more).

- **10%** of the income will be distributed back to other OSS projects that Karafka uses under the hood.
+ Part of the income is [distributed back](https://github.com/orgs/karafka/sponsoring) to other OSS projects that Karafka uses under the hood.

  Help me provide high-quality open-source software. Please see the Karafka [homepage](https://karafka.io/#become-pro) for more details.

data/Rakefile ADDED
@@ -0,0 +1,4 @@
+ # frozen_string_literal: true
+
+ require 'bundler/setup'
+ require 'bundler/gem_tasks'
data/bin/integrations CHANGED
@@ -48,7 +48,8 @@ class Scenario
  'instrumentation/post_errors_instrumentation_error_spec.rb' => [1].freeze,
  'cli/declaratives/delete/existing_with_exit_code_spec.rb' => [2].freeze,
  'cli/declaratives/create/new_with_exit_code_spec.rb' => [2].freeze,
- 'cli/declaratives/plan/when_changes_with_detailed_exit_code_spec.rb' => [2].freeze
+ 'cli/declaratives/plan/when_changes_with_detailed_exit_code_spec.rb' => [2].freeze,
+ 'cli/declaratives/align/incorrectly_spec.rb' => [1].freeze
  }.freeze

  private_constant :MAX_RUN_TIME, :EXIT_CODES
data/examples/payloads/avro/.gitkeep ADDED
File without changes
data/karafka.gemspec CHANGED
@@ -23,17 +23,12 @@ Gem::Specification.new do |spec|

  spec.add_dependency 'base64', '~> 0.2'
  spec.add_dependency 'karafka-core', '>= 2.5.0', '< 2.6.0'
- spec.add_dependency 'karafka-rdkafka', '>= 0.19.2'
+ spec.add_dependency 'karafka-rdkafka', '>= 0.19.5'
  spec.add_dependency 'waterdrop', '>= 2.8.3', '< 3.0.0'
  spec.add_dependency 'zeitwerk', '~> 2.3'

  spec.required_ruby_version = '>= 3.0.0'

- if $PROGRAM_NAME.end_with?('gem')
- spec.signing_key = File.expand_path('~/.ssh/gem-private_key.pem')
- end
-
- spec.cert_chain = %w[certs/cert.pem]
  spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(spec)/}) }
  spec.executables = %w[karafka]
  spec.require_paths = %w[lib]
data/lib/karafka/admin/configs.rb CHANGED
@@ -10,6 +10,10 @@ module Karafka
  #
  # Altering is done in the incremental way.
  module Configs
+ extend Helpers::ConfigImporter.new(
+ max_wait_time: %i[admin max_wait_time]
+ )
+
  class << self
  # Fetches given resources configurations from Kafka
  #
@@ -94,7 +98,7 @@ module Karafka
  # Makes sure that admin is closed afterwards.
  def with_admin_wait
  Admin.with_admin do |admin|
- yield(admin).wait(max_wait_timeout: Karafka::App.config.admin.max_wait_time)
+ yield(admin).wait(max_wait_timeout: max_wait_time)
  end
  end
  end
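
Both hunks above, and the larger Admin refactor below, swap ad-hoc Karafka::App.config chains for readers generated by Helpers::ConfigImporter. A minimal sketch of that pattern, assuming the importer simply digs along the given config path (Karafka's real implementation may differ):

# Hypothetical illustration of the config-importer idea: .new builds an
# anonymous module whose reader methods walk the application config along
# the given path, e.g. %i[admin max_wait_time].
module ConfigImporterSketch
  def self.new(map)
    Module.new do
      map.each do |method_name, path|
        define_method(method_name) do
          # %i[admin max_wait_time] => Karafka::App.config.admin.max_wait_time
          path.reduce(Karafka::App.config) { |node, key| node.public_send(key) }
        end
      end
    end
  end
end

Extending a module with the returned anonymous module is what lets the bare max_wait_time, poll_timeout, and similar calls in these diffs resolve to the corresponding config values.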
data/lib/karafka/admin.rb CHANGED
@@ -10,6 +10,15 @@ module Karafka
  # Cluster on which operations are performed can be changed via `admin.kafka` config, however
  # there is no multi-cluster runtime support.
  module Admin
+ extend Helpers::ConfigImporter.new(
+ max_wait_time: %i[admin max_wait_time],
+ poll_timeout: %i[admin poll_timeout],
+ max_attempts: %i[admin max_attempts],
+ group_id: %i[admin group_id],
+ app_kafka: %i[kafka],
+ admin_kafka: %i[admin kafka]
+ )
+
  # 2010-01-01 00:00:00 - way before Kafka was released so no messages should exist prior to
  # this date
  # We do not use the explicit -2 librdkafka value here because we resolve this offset without
@@ -113,7 +122,7 @@
  handler = admin.create_topic(name, partitions, replication_factor, topic_config)

  with_re_wait(
- -> { handler.wait(max_wait_timeout: app_config.admin.max_wait_time) },
+ -> { handler.wait(max_wait_timeout: max_wait_time) },
  -> { topics_names.include?(name) }
  )
  end
@@ -127,7 +136,7 @@
  handler = admin.delete_topic(name)

  with_re_wait(
- -> { handler.wait(max_wait_timeout: app_config.admin.max_wait_time) },
+ -> { handler.wait(max_wait_timeout: max_wait_time) },
  -> { !topics_names.include?(name) }
  )
  end
@@ -142,7 +151,7 @@
  handler = admin.create_partitions(name, partitions)

  with_re_wait(
- -> { handler.wait(max_wait_timeout: app_config.admin.max_wait_time) },
+ -> { handler.wait(max_wait_timeout: max_wait_time) },
  -> { topic_info(name).fetch(:partition_count) >= partitions }
  )
  end
@@ -353,7 +362,7 @@
  def delete_consumer_group(consumer_group_id)
  with_admin do |admin|
  handler = admin.delete_group(consumer_group_id)
- handler.wait(max_wait_timeout: app_config.admin.max_wait_time)
+ handler.wait(max_wait_timeout: max_wait_time)
  end
  end

@@ -539,7 +548,7 @@

  admin = config(:producer, {}).admin(
  native_kafka_auto_start: false,
- native_kafka_poll_timeout_ms: app_config.admin.poll_timeout
+ native_kafka_poll_timeout_ms: poll_timeout
  )

  bind_oauth(bind_id, admin)
@@ -604,7 +613,7 @@
  rescue Rdkafka::AbstractHandle::WaitTimeoutError, Errors::ResultNotVisibleError
  return if breaker.call

- retry if attempt <= app_config.admin.max_attempts
+ retry if attempt <= max_attempts

  raise
  end
@@ -613,11 +622,10 @@
  # @param settings [Hash] extra settings for config (if needed)
  # @return [::Rdkafka::Config] rdkafka config
  def config(type, settings)
- app_config
- .kafka
+ app_kafka
  .then(&:dup)
- .merge(app_config.admin.kafka)
- .tap { |config| config[:'group.id'] = app_config.admin.group_id }
+ .merge(admin_kafka)
+ .tap { |config| config[:'group.id'] = group_id }
  # We merge after setting the group id so it can be altered if needed
  # In general in admin we only should alter it when we need to impersonate a given
  # consumer group or do something similar
@@ -651,11 +659,6 @@
  offset
  end
  end
-
- # @return [Karafka::Core::Configurable::Node] root node config
- def app_config
- ::Karafka::App.config
- end
  end
  end
  end
data/lib/karafka/cli/topics/align.rb CHANGED
@@ -30,10 +30,13 @@
  return false
  end

- names = resources_to_migrate.map(&:name).join(', ')
- puts "Updating configuration of the following topics: #{names}"
- Karafka::Admin::Configs.alter(resources_to_migrate)
- puts "#{green('Updated')} all requested topics configuration."
+ resources_to_migrate.each do |resource|
+ supervised("Updating topic: #{resource.name} configuration") do
+ Karafka::Admin::Configs.alter(resource)
+ end
+
+ puts "#{green('Updated')} topic #{resource.name} configuration."
+ end

  true
  end
data/lib/karafka/cli/topics/base.rb CHANGED
@@ -12,6 +12,23 @@

  private

+ # Used to run Karafka Admin commands that talk with Kafka and that can fail due to broker
+ # errors and other issues. We catch errors and provide nicer printed output prior to
+ # re-raising the mapped error for proper exit code status handling
+ #
+ # @param operation_message [String] message that we use to print that it is going to run
+ # and if case if failed with a failure indication.
+ def supervised(operation_message)
+ puts "#{operation_message}..."
+
+ yield
+ rescue Rdkafka::RdkafkaError => e
+ puts "#{operation_message} #{red('failed')}:"
+ puts e
+
+ raise Errors::CommandValidationError, cause: e
+ end
+
  # @return [Array<Karafka::Routing::Topic>] all available topics that can be managed
  # @note If topic is defined in multiple consumer groups, first config will be used. This
  # means, that this CLI will not work for simultaneous management of multiple clusters
data/lib/karafka/cli/topics/create.rb CHANGED
@@ -15,13 +15,15 @@
  if existing_topics_names.include?(name)
  puts "#{yellow('Skipping')} because topic #{name} already exists."
  else
- puts "Creating topic #{name}..."
- Admin.create_topic(
- name,
- topic.declaratives.partitions,
- topic.declaratives.replication_factor,
- topic.declaratives.details
- )
+ supervised("Creating topic #{name}") do
+ Admin.create_topic(
+ name,
+ topic.declaratives.partitions,
+ topic.declaratives.replication_factor,
+ topic.declaratives.details
+ )
+ end
+
  puts "#{green('Created')} topic #{name}."
  any_created = true
  end
data/lib/karafka/cli/topics/delete.rb CHANGED
@@ -13,8 +13,10 @@
  name = topic.name

  if existing_topics_names.include?(name)
- puts "Deleting topic #{name}..."
- Admin.delete_topic(name)
+ supervised("Deleting topic #{name}") do
+ Admin.delete_topic(name)
+ end
+
  puts "#{green('Deleted')} topic #{name}."
  any_deleted = true
  else
data/lib/karafka/cli/topics/help.rb ADDED
@@ -0,0 +1,39 @@
+ # frozen_string_literal: true
+
+ module Karafka
+ class Cli
+ class Topics < Cli::Base
+ # Declarative topics CLI sub-help
+ class Help < Base
+ # Displays help information for all available topics management commands
+ def call
+ puts <<~HELP
+ Karafka topics commands:
+ align # Aligns configuration of all declarative topics based on definitions
+ create # Creates topics with appropriate settings
+ delete # Deletes all topics defined in the routes
+ help # Describes available topics management commands
+ migrate # Creates missing topics, repartitions existing and aligns configuration
+ plan # Plans migration process and prints changes to be applied
+ repartition # Adds additional partitions to topics with fewer partitions than expected
+ reset # Deletes and re-creates all topics
+
+ Options:
+ --detailed-exitcode # Provides detailed exit codes (0=no changes, 1=error, 2=changes applied)
+
+ Examples:
+ karafka topics create
+ karafka topics plan --detailed-exitcode
+ karafka topics migrate
+ karafka topics align
+
+ Note: All admin operations run on the default cluster only.
+ HELP
+
+ # We return false to indicate with exit code 0 that no changes were applied
+ false
+ end
+ end
+ end
+ end
+ end
data/lib/karafka/cli/topics/repartition.rb CHANGED
@@ -21,8 +21,10 @@
  existing_count = existing_partitions.fetch(name, false)

  if existing_count && existing_count < desired_count
- puts "Increasing number of partitions to #{desired_count} on topic #{name}..."
- Admin.create_partitions(name, desired_count)
+ supervised("Increasing number of partitions to #{desired_count} on topic #{name}") do
+ Admin.create_partitions(name, desired_count)
+ end
+
  change = desired_count - existing_count
  puts "#{green('Created')} #{change} additional partitions on topic #{name}."
  any_repartitioned = true
data/lib/karafka/cli/topics.rb CHANGED
@@ -27,10 +27,13 @@
  # crashes
  CHANGES_EXIT_CODE = 2

- private_constant :NO_CHANGES_EXIT_CODE, :CHANGES_EXIT_CODE
+ # Used when there was an error during execution.
+ ERROR_EXIT_CODE = 1
+
+ private_constant :NO_CHANGES_EXIT_CODE, :CHANGES_EXIT_CODE, :ERROR_EXIT_CODE

  # @param action [String] action we want to take
- def call(action = 'missing')
+ def call(action = 'help')
  detailed_exit_code = options.fetch(:detailed_exitcode, false)

  command = case action
@@ -48,8 +51,10 @@
  Topics::Align
  when 'plan'
  Topics::Plan
+ when 'help'
+ Topics::Help
  else
- raise ::ArgumentError, "Invalid topics action: #{action}"
+ raise Errors::UnrecognizedCommandError, "Unrecognized topics action: #{action}"
  end

  changes = command.new.call
@@ -57,6 +62,8 @@
  return unless detailed_exit_code

  changes ? exit(CHANGES_EXIT_CODE) : exit(NO_CHANGES_EXIT_CODE)
+ rescue Errors::CommandValidationError
+ exit(ERROR_EXIT_CODE)
  end
  end
  end
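
Together with the supervised wrapper added in base.rb, this gives karafka topics a three-way exit contract under --detailed-exitcode: 0 for no changes, 2 for applied changes, and 1 once a broker failure is re-raised as Errors::CommandValidationError. A hypothetical CI gate built on that contract (the script and messages are illustrative, not part of the gem):

#!/usr/bin/env ruby
# frozen_string_literal: true

# Illustrative only: drives `karafka topics plan` from CI and branches on the
# documented exit codes (0 = no changes, 1 = error, 2 = changes pending).
require 'open3'

_stdout, _stderr, status = Open3.capture3(
  'bundle', 'exec', 'karafka', 'topics', 'plan', '--detailed-exitcode'
)

case status.exitstatus
when 0 then puts 'Cluster already matches the declarative topics definitions'
when 2 then puts 'Pending changes detected; run `karafka topics migrate`'
else abort 'Topics planning failed; see the printed Kafka error above'
end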
data/lib/karafka/cli.rb CHANGED
@@ -21,6 +21,8 @@
  args = action ? [action] : []

  command.new.call(*args)
+ elsif command_name.nil?
+ Help.new.call
  else
  raise(
  Karafka::Errors::UnrecognizedCommandError,
data/lib/karafka/connection/client.rb CHANGED
@@ -427,6 +427,15 @@
  @wrapped_kafka.committed(tpl)
  end

+ # Reads watermark offsets for given topic
+ #
+ # @param topic [String] topic name
+ # @param partition [Integer] partition number
+ # @return [Array<Integer, Integer>] watermark offsets (low, high)
+ def query_watermark_offsets(topic, partition)
+ @wrapped_kafka.query_watermark_offsets(topic, partition)
+ end
+
  private

  # When we cannot store an offset, it means we no longer own the partition
@@ -491,9 +500,17 @@
  #
  # This code adds around 0.01 ms per seek but saves from many user unexpected behaviours in
  # seeking and pausing
- return if message.offset == topic_partition_position(message.topic, message.partition)
+ position = topic_partition_position(message.topic, message.partition)
+
+ # Always seek if current position cannot be fetched or is negative. Offset seek can also
+ # be negative (-1 or -2) and we should not compare it with the position because they are
+ # special (earliest or latest)
+ return kafka.seek(message) if position.negative?
+ # If offset is the same as the next position, we don't have to seek to get there, hence
+ # only in such case we can do nothing.
+ return kafka.seek(message) if message.offset != position

- kafka.seek(message)
+ nil
  end

  # Commits the stored offsets in a sync way and closes the consumer.
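
The reworked guard above matters because positions and requested offsets can both be negative: librdkafka uses -1/-2 for latest/earliest targets and reports not-yet-known positions with negative sentinels, so a bare equality check could wrongly skip a needed seek. A standalone distillation of the decision (the constant and helper are hypothetical; the real check lives in Connection::Client#seek):

# Hypothetical distillation of the guard: seek whenever the current position
# is unknown/negative, skip only when already exactly at the target.
RD_KAFKA_OFFSET_INVALID = -1001 # librdkafka sentinel for "position unknown"

def seek_needed?(target_offset, current_position)
  # Negative positions (including -1001) can never equal a real target, and
  # negative targets (-1 latest, -2 earliest) must be resolved by Kafka itself
  return true if current_position.negative?

  target_offset != current_position
end

seek_needed?(100, RD_KAFKA_OFFSET_INVALID) # => true  (always seek)
seek_needed?(100, 100)                     # => false (no-op)
seek_needed?(-1, 100)                      # => true  (latest is special)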