karafka 2.5.2 → 2.5.4.rc1
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +16 -0
- data/config/locales/errors.yml +14 -0
- data/karafka.gemspec +15 -4
- data/lib/active_job/queue_adapters/karafka_adapter.rb +2 -2
- data/lib/karafka/active_job/consumer.rb +2 -2
- data/lib/karafka/active_job/current_attributes.rb +2 -2
- data/lib/karafka/active_job/deserializer.rb +1 -1
- data/lib/karafka/active_job/dispatcher.rb +2 -2
- data/lib/karafka/admin/configs/resource.rb +7 -1
- data/lib/karafka/admin/consumer_groups.rb +6 -8
- data/lib/karafka/admin/contracts/replication.rb +149 -0
- data/lib/karafka/admin/replication.rb +462 -0
- data/lib/karafka/admin/topics.rb +5 -4
- data/lib/karafka/admin.rb +57 -12
- data/lib/karafka/app.rb +3 -3
- data/lib/karafka/base_consumer.rb +1 -1
- data/lib/karafka/cli/base.rb +1 -1
- data/lib/karafka/cli/console.rb +1 -1
- data/lib/karafka/cli/contracts/server.rb +1 -1
- data/lib/karafka/cli/help.rb +1 -1
- data/lib/karafka/cli/install.rb +2 -1
- data/lib/karafka/cli/server.rb +1 -1
- data/lib/karafka/cli/swarm.rb +1 -1
- data/lib/karafka/connection/client.rb +19 -18
- data/lib/karafka/connection/manager.rb +1 -0
- data/lib/karafka/connection/proxy.rb +1 -1
- data/lib/karafka/connection/rebalance_manager.rb +1 -1
- data/lib/karafka/connection/status.rb +1 -0
- data/lib/karafka/constraints.rb +1 -1
- data/lib/karafka/contracts/base.rb +1 -1
- data/lib/karafka/deserializers/payload.rb +1 -1
- data/lib/karafka/helpers/async.rb +1 -1
- data/lib/karafka/helpers/config_importer.rb +3 -3
- data/lib/karafka/helpers/multi_delegator.rb +3 -0
- data/lib/karafka/instrumentation/assignments_tracker.rb +2 -1
- data/lib/karafka/instrumentation/callbacks/error.rb +2 -2
- data/lib/karafka/instrumentation/callbacks/statistics.rb +3 -3
- data/lib/karafka/instrumentation/logger.rb +6 -6
- data/lib/karafka/instrumentation/logger_listener.rb +0 -2
- data/lib/karafka/instrumentation/monitor.rb +2 -2
- data/lib/karafka/instrumentation/vendors/appsignal/base.rb +1 -1
- data/lib/karafka/instrumentation/vendors/appsignal/metrics_listener.rb +4 -0
- data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +32 -16
- data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +2 -2
- data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb +1 -1
- data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +3 -15
- data/lib/karafka/licenser.rb +1 -1
- data/lib/karafka/messages/builders/batch_metadata.rb +1 -1
- data/lib/karafka/messages/messages.rb +32 -0
- data/lib/karafka/pro/active_job/consumer.rb +2 -2
- data/lib/karafka/pro/active_job/dispatcher.rb +3 -3
- data/lib/karafka/pro/cleaner/messages/messages.rb +1 -1
- data/lib/karafka/pro/cleaner.rb +3 -3
- data/lib/karafka/pro/cli/contracts/server.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments/base.rb +4 -3
- data/lib/karafka/pro/cli/parallel_segments/collapse.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments/distribute.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments.rb +1 -1
- data/lib/karafka/pro/connection/manager.rb +1 -2
- data/lib/karafka/pro/connection/multiplexing/listener.rb +1 -0
- data/lib/karafka/pro/contracts/base.rb +1 -1
- data/lib/karafka/pro/encryption/cipher.rb +3 -2
- data/lib/karafka/pro/encryption/contracts/config.rb +1 -1
- data/lib/karafka/pro/encryption/messages/parser.rb +1 -1
- data/lib/karafka/pro/encryption/setup/config.rb +1 -1
- data/lib/karafka/pro/iterator/tpl_builder.rb +1 -1
- data/lib/karafka/pro/iterator.rb +1 -1
- data/lib/karafka/pro/loader.rb +1 -1
- data/lib/karafka/pro/processing/coordinator.rb +1 -1
- data/lib/karafka/pro/processing/filters/base.rb +1 -0
- data/lib/karafka/pro/processing/filters/delayer.rb +1 -1
- data/lib/karafka/pro/processing/filters/expirer.rb +1 -1
- data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +1 -1
- data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs/periodic.rb +1 -1
- data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs_builder.rb +1 -1
- data/lib/karafka/pro/processing/jobs_queue.rb +0 -2
- data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +1 -0
- data/lib/karafka/pro/processing/partitioner.rb +1 -1
- data/lib/karafka/pro/processing/strategies/base.rb +1 -1
- data/lib/karafka/pro/processing/strategies/default.rb +2 -2
- data/lib/karafka/pro/processing/strategies/dlq/default.rb +1 -1
- data/lib/karafka/pro/processing/strategies/vp/default.rb +1 -1
- data/lib/karafka/pro/processing/strategy_selector.rb +1 -0
- data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +4 -2
- data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +4 -2
- data/lib/karafka/pro/recurring_tasks/consumer.rb +3 -2
- data/lib/karafka/pro/recurring_tasks/contracts/config.rb +2 -2
- data/lib/karafka/pro/recurring_tasks/contracts/task.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/deserializer.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/dispatcher.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/executor.rb +2 -1
- data/lib/karafka/pro/recurring_tasks/schedule.rb +5 -2
- data/lib/karafka/pro/recurring_tasks/serializer.rb +6 -5
- data/lib/karafka/pro/recurring_tasks/setup/config.rb +2 -2
- data/lib/karafka/pro/recurring_tasks/task.rb +1 -1
- data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +3 -0
- data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +1 -1
- data/lib/karafka/pro/routing/features/multiplexing.rb +5 -5
- data/lib/karafka/pro/routing/features/offset_metadata.rb +4 -4
- data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +1 -1
- data/lib/karafka/pro/routing/features/patterns/patterns.rb +1 -1
- data/lib/karafka/pro/routing/features/periodic_job/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +1 -1
- data/lib/karafka/pro/routing/features/swarm.rb +1 -1
- data/lib/karafka/pro/routing/features/throttling/topic.rb +3 -1
- data/lib/karafka/pro/scheduled_messages/consumer.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/contracts/config.rb +2 -2
- data/lib/karafka/pro/scheduled_messages/contracts/message.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +3 -2
- data/lib/karafka/pro/scheduled_messages/day.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/max_epoch.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/proxy.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/serializer.rb +3 -3
- data/lib/karafka/pro/scheduled_messages/setup/config.rb +2 -2
- data/lib/karafka/pro/scheduled_messages/state.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/tracker.rb +1 -0
- data/lib/karafka/process.rb +4 -4
- data/lib/karafka/processing/executor.rb +1 -1
- data/lib/karafka/processing/inline_insights/tracker.rb +1 -0
- data/lib/karafka/processing/jobs_queue.rb +1 -1
- data/lib/karafka/processing/result.rb +1 -0
- data/lib/karafka/processing/strategies/dlq.rb +1 -1
- data/lib/karafka/processing/strategy_selector.rb +1 -0
- data/lib/karafka/routing/activity_manager.rb +1 -0
- data/lib/karafka/routing/builder.rb +3 -1
- data/lib/karafka/routing/consumer_group.rb +19 -1
- data/lib/karafka/routing/contracts/consumer_group.rb +3 -2
- data/lib/karafka/routing/contracts/topic.rb +5 -2
- data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
- data/lib/karafka/routing/features/declaratives/topic.rb +5 -2
- data/lib/karafka/routing/features/deserializers/topic.rb +3 -3
- data/lib/karafka/routing/features/inline_insights.rb +5 -5
- data/lib/karafka/routing/router.rb +1 -1
- data/lib/karafka/routing/subscription_group.rb +2 -2
- data/lib/karafka/routing/subscription_groups_builder.rb +18 -2
- data/lib/karafka/routing/topic.rb +3 -3
- data/lib/karafka/server.rb +1 -1
- data/lib/karafka/setup/attributes_map.rb +4 -2
- data/lib/karafka/setup/config.rb +21 -10
- data/lib/karafka/setup/config_proxy.rb +209 -0
- data/lib/karafka/setup/contracts/config.rb +1 -1
- data/lib/karafka/swarm/liveness_listener.rb +1 -0
- data/lib/karafka/swarm/manager.rb +7 -6
- data/lib/karafka/swarm/node.rb +1 -1
- data/lib/karafka/swarm/supervisor.rb +1 -0
- data/lib/karafka/time_trackers/base.rb +1 -1
- data/lib/karafka/version.rb +1 -1
- data/lib/karafka.rb +2 -3
- metadata +8 -65
- data/.coditsu/ci.yml +0 -3
- data/.console_irbrc +0 -11
- data/.github/CODEOWNERS +0 -3
- data/.github/FUNDING.yml +0 -1
- data/.github/ISSUE_TEMPLATE/bug_report.md +0 -43
- data/.github/ISSUE_TEMPLATE/feature_request.md +0 -20
- data/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml +0 -278
- data/.github/workflows/ci_macos_arm64.yml +0 -151
- data/.github/workflows/push.yml +0 -35
- data/.github/workflows/trigger-wiki-refresh.yml +0 -30
- data/.github/workflows/verify-action-pins.yml +0 -16
- data/.gitignore +0 -69
- data/.rspec +0 -7
- data/.ruby-gemset +0 -1
- data/.ruby-version +0 -1
- data/CODE_OF_CONDUCT.md +0 -46
- data/CONTRIBUTING.md +0 -32
- data/Gemfile +0 -28
- data/Gemfile.lock +0 -173
- data/Rakefile +0 -4
- data/SECURITY.md +0 -23
- data/bin/benchmarks +0 -99
- data/bin/clean_kafka +0 -43
- data/bin/create_token +0 -22
- data/bin/integrations +0 -341
- data/bin/record_rss +0 -50
- data/bin/rspecs +0 -26
- data/bin/scenario +0 -29
- data/bin/stress_many +0 -13
- data/bin/stress_one +0 -13
- data/bin/verify_kafka_warnings +0 -36
- data/bin/verify_license_integrity +0 -37
- data/bin/verify_topics_naming +0 -27
- data/bin/wait_for_kafka +0 -24
- data/docker-compose.yml +0 -25
- data/examples/payloads/avro/.gitkeep +0 -0
- data/examples/payloads/json/sample_set_01/enrollment_event.json +0 -579
- data/examples/payloads/json/sample_set_01/ingestion_event.json +0 -30
- data/examples/payloads/json/sample_set_01/transaction_event.json +0 -17
- data/examples/payloads/json/sample_set_01/user_event.json +0 -11
- data/examples/payloads/json/sample_set_02/download.json +0 -191
- data/examples/payloads/json/sample_set_03/event_type_1.json +0 -18
- data/examples/payloads/json/sample_set_03/event_type_2.json +0 -263
- data/examples/payloads/json/sample_set_03/event_type_3.json +0 -41
- data/log/.gitkeep +0 -0
- data/renovate.json +0 -21
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: …
-  data.tar.gz: …
+  metadata.gz: 974c54eac8c3f6045b0eb6f21cde51b48cd8a23af18efd640784870045c01d35
+  data.tar.gz: d22621e0199bf8e1adbfe67f59247d2f8e5ae61a52fc389ea1baa2d4f8401418
 SHA512:
-  metadata.gz: …
-  data.tar.gz: …
+  metadata.gz: f81840332313f5914afde17caeaa794085442ac702e62ea4b6a51429cc191ac2cd7562181a55b76bc314fcc0ca6411c27308cc7b0b88637cdece4a8b1e5c75e4
+  data.tar.gz: acbc167ab3314dc884274992cd17d40256c967ab1a2ad141376565b1164b29a11669a9ebabe79125ac7129103d9fb09447441bc992b8540115576fa906b033a5
data/CHANGELOG.md
CHANGED

@@ -1,9 +1,25 @@
 # Karafka Framework Changelog
 
+## 2.5.4 (Unreleased)
+- [Enhancement] Align Datadog logger listener error handling with the main logger listener by adding missing error types and replacing `UnsupportedCaseError` with generic error logging to support dynamic errors reporting.
+- [Enhancement] Align Appsignal metrics listener `USER_CONSUMER_ERROR_TYPES` with all consumer error types for complete error tracking.
+- [Enhancement] Reduce gem package size by excluding development files (spec, examples, CI configs, dev scripts) from the gem build.
+- [Fix] Multiple route draws don't seem to work.
+- [Fix] Fix internal code compatibility with external libraries that prepend modules to `Messages#each` method (e.g., DataDog tracing) by introducing `Messages#raw` for internal iteration that bypasses patched enumerable methods.
+- [Maintenance] Removed `base64` dependency.
+- [Maintenance] Add `ErrorTypesChecker` spec support module to programmatically verify listener error type coverage against source code definitions.
+
+## 2.5.3 (2025-11-14)
+- [Enhancement] Dynamically support `librdkafka` fatal errors with correct reported details.
+- [Enhancement] Add `producer` block API to setup for simplified WaterDrop producer configuration without manual producer instance creation, using a transparent ConfigProxy during setup to avoid polluting the permanent config API.
+- [Change] Require `waterdrop` `>=` `2.8.14` to support new features.
+- [Change] Require `karafka-rdkafka` `>=` `0.23.1` to support new rebalance protocol.
+
 ## 2.5.2 (2025-10-31)
 - **[EOL]** Remove Rails 7.1 support according to EOL while not blocking Rails 7.1 usage.
 - [Enhancement] Retry on the KIP-848 `stale_member_epoch` error.
 - [Enhancement] Provide `Karafka::Admin.trigger_rebalance` API to programmatically trigger consumer group rebalances for operational purposes.
+- [Enhancement] Provide `Karafka::Admin.plan_topic_replication` API to generate partition reassignment plans for increasing topic replication factors with automatic broker distribution or manual placement, compatible with `kafka-reassign-partitions.sh` tool.
 - [Enhancement] Nest pause configuration under `config.pause.*` namespace (`config.pause.timeout`, `config.pause.max_timeout`, `config.pause.with_exponential_backoff`) while maintaining backwards compatibility with the old flat API (`config.pause_timeout`, etc.) via delegation methods that will be removed in Karafka 2.6.
 - [Enhancement] Detect and track involuntary assignment loss during long-running processing that exceeds `max.poll.interval.ms` via `client.events_poll` event and automatically update `Karafka::App.assignments` to reflect reality.
 - [Enhancement] Extend `Karafka::Admin.read_watermark_offsets` to accept either a single topic with partition or a hash of multiple topics with partitions, using a single consumer instance for improved efficiency when querying multiple partitions.
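The 2.5.3 `producer` block entry is easier to parse with an example. A minimal sketch, assuming the ConfigProxy yields a WaterDrop-style producer configuration (the exact yielded object and its available keys come from WaterDrop and are not shown in this diff):

class KarafkaApp < Karafka::App
  setup do |config|
    config.kafka = { 'bootstrap.servers': '127.0.0.1:9092' }

    # Instead of building a WaterDrop::Producer manually and assigning it,
    # the block is forwarded to the producer configuration via the proxy
    config.producer do |producer_config|
      producer_config.kafka = { 'bootstrap.servers': '127.0.0.1:9092' }
    end
  end
end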
data/config/locales/errors.yml
CHANGED

@@ -100,6 +100,20 @@ en:
     topics_inclusion: Unknown topic name
     topics_missing: No topics to subscribe to
 
+    admin:
+      replication:
+        missing: needs to be present
+        topic_format: needs to be a non-empty string
+        to_format: needs to be an integer greater than 0
+        brokers_format: needs to be a hash when provided
+        must_be_higher_than_current: target replication factor must be higher than current
+        exceeds_broker_count: target replication factor cannot exceed available broker count
+        missing_partitions: manual assignment missing partitions
+        invalid_partition: partition does not exist in topic
+        wrong_broker_count_for_partition: partition broker count does not match target replication factor
+        duplicate_brokers_for_partition: partition has duplicate brokers
+        invalid_brokers_for_partition: partition references non-existent brokers
+
     routing:
       without_declarative_definition: lacks explicit declarative topics definition
data/karafka.gemspec
CHANGED

@@ -21,15 +21,26 @@ Gem::Specification.new do |spec|
     without having to focus on things that are not your business domain.
   DESC
 
-  spec.add_dependency 'base64', '~> 0.2'
   spec.add_dependency 'karafka-core', '>= 2.5.6', '< 2.6.0'
-  spec.add_dependency 'karafka-rdkafka', '>= 0.…
-  spec.add_dependency 'waterdrop', '>= 2.8.…
+  spec.add_dependency 'karafka-rdkafka', '>= 0.23.1'
+  spec.add_dependency 'waterdrop', '>= 2.8.14', '< 3.0.0'
   spec.add_dependency 'zeitwerk', '~> 2.3'
 
   spec.required_ruby_version = '>= 3.2.0'
 
-  …
+  gem_files = %w[
+    lib/**/*
+    bin/karafka
+    certs/*
+    config/**/*
+    LICENSE*
+    CHANGELOG.md
+    README.md
+    karafka.gemspec
+  ]
+
+  spec.files = Dir.glob(gem_files) & `git ls-files -z`.split("\x0")
+
   spec.executables = %w[karafka]
   spec.require_paths = %w[lib]
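The new `spec.files` line is a plain set intersection; a short illustration of the idiom (allowlist shortened, only meaningful inside a git checkout):

patterns = %w[lib/**/* CHANGELOG.md]       # subset of the allowlist above
tracked  = `git ls-files -z`.split("\x0")  # NUL-separated tracked paths
files    = Dir.glob(patterns) & tracked    # only tracked files matching a pattern

Anything tracked but not allowlisted (spec/, examples/, CI configs, dev scripts) drops out, which is what shrinks the published gem.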
data/lib/active_job/queue_adapters/karafka_adapter.rb
CHANGED

@@ -21,7 +21,7 @@ module ActiveJob
   # - No Rails: Inherit from Object (standalone ActiveJob usage)
   #
   # @see https://github.com/sidekiq/sidekiq/issues/6746 Similar issue in Sidekiq
-  base = if defined?(Rails)
+  base = if defined?(Rails::VERSION)
     (Rails::VERSION::MAJOR == 7 && Rails::VERSION::MINOR < 2 ? Object : AbstractAdapter)
   else
     # Fallback when Rails is not loaded
@@ -31,7 +31,7 @@ module ActiveJob
   # Karafka adapter for enqueuing jobs
   # This is here for ease of integration with ActiveJob.
   class KarafkaAdapter < base
-    include Karafka::Helpers::ConfigImporter.new(
+    include ::Karafka::Helpers::ConfigImporter.new(
      dispatcher: %i[internal active_job dispatcher]
    )
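The move to `defined?(Rails::VERSION)` likely hardens the framework detection: any library can define a `Rails` constant, while the nested `VERSION` constant is only present when the framework itself is loaded. An illustration of the difference:

module Rails; end          # stray Rails constant, framework not loaded

defined?(Rails)            # => "constant" (false positive)
defined?(Rails::VERSION)   # => nil (correctly treated as no Rails)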
data/lib/karafka/active_job/consumer.rb
CHANGED

@@ -5,7 +5,7 @@ module Karafka
   module ActiveJob
     # This is the consumer for ActiveJob that eats the messages enqueued with it one after another.
     # It marks the offset after each message, so we make sure, none of the jobs is executed twice
-    class Consumer < …
+    class Consumer < Karafka::BaseConsumer
       include Helpers::ConfigImporter.new(
         deserializer: %i[internal active_job deserializer]
       )
@@ -14,7 +14,7 @@ module Karafka
     # @note ActiveJob does not support batches, so we just run one message after another
     def consume
       messages.each do |message|
-        break if Karafka::App.stopping?
+        break if ::Karafka::App.stopping?
 
         consume_job(message)
data/lib/karafka/active_job/current_attributes.rb
CHANGED

@@ -23,8 +23,8 @@ module Karafka
       .each { |expandable| expandable.class_attribute :_cattr_klasses, default: {} }
 
     # Do not double inject in case of running persist multiple times
-    Dispatcher.prepend(Persistence) unless Dispatcher…
-    Consumer.prepend(Loading) unless Consumer…
+    Dispatcher.prepend(Persistence) unless Dispatcher <= Persistence
+    Consumer.prepend(Loading) unless Consumer <= Loading
 
     klasses.map(&:to_s).each do |stringified_klass|
       # Prevent registering same klass multiple times
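The `<=` guard works because Ruby's `Module#<=` reports ancestry: once a module has been prepended, it shows up in the ancestors and `<=` becomes truthy, so a second prepend is skipped. For example:

module Persistence; end
class Dispatcher; end

Dispatcher <= Persistence   # => nil (unrelated, so the prepend runs)
Dispatcher.prepend(Persistence)
Dispatcher <= Persistence   # => true (guard now blocks a re-prepend)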
data/lib/karafka/active_job/deserializer.rb
CHANGED

@@ -43,7 +43,7 @@ module Karafka
   #
   # @param job [ActiveJob::Base, #serialize] job to serialize. The job must respond to
   #   #serialize which returns a Hash of job attributes. When CurrentAttributes are used,
-  #   this may be a JobWrapper instance instead of the original ActiveJob::Base.
+  #   this may be a JobWrapper instance instead of the original ::ActiveJob::Base.
   # @return [String] serialized job payload
   def serialize(job)
     ::ActiveSupport::JSON.encode(job.serialize)
data/lib/karafka/active_job/dispatcher.rb
CHANGED

@@ -19,7 +19,7 @@ module Karafka
 
     # @param job [ActiveJob::Base] job
     def dispatch(job)
-      …
+      Karafka.producer.public_send(
         fetch_option(job, :dispatch_method, DEFAULTS),
         topic: job.queue_name,
         payload: serialize_job(job)
@@ -43,7 +43,7 @@ module Karafka
       end
 
       dispatches.each do |type, messages|
-        …
+        Karafka.producer.public_send(
           type,
           messages
         )
data/lib/karafka/admin/configs/resource.rb
CHANGED

@@ -40,10 +40,16 @@ module Karafka
       OPERATIONS_TYPES_MAP.each do |op_name, op_value|
         # Adds an outgoing operation to a given resource of a given type
         # Useful since we alter in batches and not one at a time
+        #
+        # For example, when op_name is :set and op_value is 0:
+        #   def set(name, value)
+        #     @operations[0] << Config.new(name: name, value: value.to_s)
+        #   end
+        default_value = op_name == :delete ? ' = nil' : ''
         class_eval <<~RUBY, __FILE__, __LINE__ + 1
           # @param name [String] name of the config to alter
           # @param value [String] value of the config
-          def #{op_name}(name, value)
+          def #{op_name}(name, value#{default_value})
             @operations[#{op_value}] << Config.new(name: name, value: value.to_s)
           end
         RUBY
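Since `nil.to_s` is an empty string, the `default_value` tweak makes the value optional for `:delete` operations. Assuming `OPERATIONS_TYPES_MAP` maps `:delete` to slot 1 (illustrative; the real value lives in the map, outside this hunk), the template now expands to:

def delete(name, value = nil)
  @operations[1] << Config.new(name: name, value: value.to_s) # '' when value is nil
end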
data/lib/karafka/admin/consumer_groups.rb
CHANGED

@@ -80,15 +80,13 @@ module Karafka
         case casted_position
         # Earliest is not always 0. When compacting/deleting it can be much later, that's why
         # we fetch the oldest possible offset
-        when 'earliest'
+        # false is treated the same as 'earliest'
+        when 'earliest', false
           LONG_TIME_AGO
         # Latest will always be the high-watermark offset and we can get it just by getting
         # a future position
         when 'latest'
           Time.now + DAY_IN_SECONDS
-        # Same as `'earliest'`
-        when false
-          LONG_TIME_AGO
         # Regular offset case
         else
           position
@@ -307,14 +305,14 @@ module Karafka
       # Reads lags and offsets for given topics in the context of consumer groups defined in the
       # routing
       #
-      # @param consumer_groups_with_topics [Hash…
+      # @param consumer_groups_with_topics [Hash{String => Array<String>}] hash with consumer
       #   groups names with array of topics to query per consumer group inside
       # @param active_topics_only [Boolean] if set to false, when we use routing topics, will
       #   select also topics that are marked as inactive in routing
       #
-      # @return [Hash…
-      #   the consumer groups and values are hashes with topics and inside
-      #   and offsets
+      # @return [Hash{String => Hash{Integer => Hash{Integer => Object}}}] hash where the top
+      #   level keys are the consumer groups and values are hashes with topics and inside
+      #   partitions with lags and offsets
       #
       # @note For topics that do not exist, topic details will be set to an empty hash
       #
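The rewritten `@return` tag pins down the nesting of the result. Assuming this documents `Karafka::Admin.read_lags_with_offsets`, which this file backs, a call and an illustrative result shape:

Karafka::Admin.read_lags_with_offsets(
  { 'example_group' => %w[example_topic] }
)
# => { 'example_group' => { 'example_topic' => { 0 => { lag: 5, offset: 100 } } } }
# groups => topics => partitions => lag/offset details (values illustrative)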
data/lib/karafka/admin/contracts/replication.rb
ADDED

@@ -0,0 +1,149 @@
+# frozen_string_literal: true
+
+module Karafka
+  class Admin
+    # Contracts namespace for Admin validation logic
+    module Contracts
+      # Contract for replication plan parameters
+      class Replication < Karafka::Contracts::Base
+        configure do |config|
+          config.error_messages = YAML.safe_load_file(
+            File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
+          ).fetch('en').fetch('validations').fetch('admin').fetch('replication')
+        end
+
+        required(:topic) { |val| val.is_a?(String) && !val.empty? }
+        required(:to) { |val| val.is_a?(Integer) && val >= 1 }
+        optional(:brokers) { |val| val.nil? || val.is_a?(Hash) }
+
+        # Validate that target replication factor is higher than current
+        virtual do |data, errors|
+          next unless errors.empty?
+          next unless data.key?(:current_rf)
+
+          target_rf = data.fetch(:to)
+          current_rf = data.fetch(:current_rf)
+
+          next if target_rf > current_rf
+
+          [[%w[to], :must_be_higher_than_current]]
+        end
+
+        # Validate that target replication factor doesn't exceed available brokers
+        virtual do |data, errors|
+          next unless errors.empty?
+          next unless data.key?(:broker_count)
+
+          target_rf = data.fetch(:to)
+          broker_count = data.fetch(:broker_count)
+
+          next if target_rf <= broker_count
+
+          [[%w[to], :exceeds_broker_count]]
+        end
+
+        # Validate all partitions are specified in manual broker assignment
+        virtual do |data, errors|
+          next unless errors.empty?
+
+          brokers = data[:brokers]
+          next if brokers.nil?
+          next unless data.key?(:topic_info)
+
+          topic_info = data.fetch(:topic_info)
+          partition_ids = topic_info.fetch(:partitions, []).map { |p| p[:partition_id] }
+
+          missing_partitions = partition_ids - brokers.keys
+          next unless missing_partitions.any?
+
+          [[%w[brokers], :missing_partitions]]
+        end
+
+        # Validate that manual broker assignments reference valid partitions
+        virtual do |data, errors|
+          next unless errors.empty?
+
+          brokers = data[:brokers]
+          next if brokers.nil?
+          next unless data.key?(:topic_info)
+
+          topic_info = data.fetch(:topic_info)
+          partition_ids = topic_info.fetch(:partitions, []).map { |p| p[:partition_id] }
+
+          error_found = nil
+          brokers.each_key do |partition_id|
+            unless partition_ids.include?(partition_id)
+              error_found = [[%w[brokers], :invalid_partition]]
+              break
+            end
+          end
+
+          error_found
+        end
+
+        # Validate broker count matches target replication factor for each partition
+        virtual do |data, errors|
+          next unless errors.empty?
+
+          brokers = data[:brokers]
+          next if brokers.nil?
+          next unless data.key?(:topic_info)
+
+          target_rf = data.fetch(:to)
+
+          error_found = nil
+          brokers.each_value do |broker_ids|
+            if broker_ids.size != target_rf
+              error_found = [[%w[brokers], :wrong_broker_count_for_partition]]
+              break
+            end
+          end
+
+          error_found
+        end
+
+        # Validate no duplicate brokers assigned to the same partition
+        virtual do |data, errors|
+          next unless errors.empty?
+
+          brokers = data[:brokers]
+          next if brokers.nil?
+          next unless data.key?(:topic_info)
+
+          error_found = nil
+          brokers.each_value do |broker_ids|
+            if broker_ids.size != broker_ids.uniq.size
+              error_found = [[%w[brokers], :duplicate_brokers_for_partition]]
+              break
+            end
+          end
+
+          error_found
+        end
+
+        # Validate all referenced brokers exist in the cluster
+        virtual do |data, errors|
+          next unless errors.empty?
+
+          brokers = data[:brokers]
+          next if brokers.nil?
+          next unless data.key?(:topic_info)
+
+          cluster_info = data.fetch(:cluster_info, {})
+          all_broker_ids = cluster_info.fetch(:brokers, []).map { |b| b[:node_id] }
+
+          error_found = nil
+          brokers.each_value do |broker_ids|
+            invalid_brokers = broker_ids - all_broker_ids
+            if invalid_brokers.any?
+              error_found = [[%w[brokers], :invalid_brokers_for_partition]]
+              break
+            end
+          end
+
+          error_found
+        end
+      end
+    end
+  end
+end
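Together with the locale errors above, this contract fixes the input shape for the new `Karafka::Admin.plan_topic_replication` API. Hypothetical calls matching the contract keys (keyword-style arguments assumed; the planner itself lives in data/lib/karafka/admin/replication.rb, not included in this excerpt):

# Automatic placement: the planner distributes brokers itself
Karafka::Admin.plan_topic_replication(topic: 'events', to: 3)

# Manual placement: every partition listed, each with exactly `to` unique,
# existing broker ids; otherwise the virtual validations above reject the plan
Karafka::Admin.plan_topic_replication(
  topic: 'events',
  to: 3,
  brokers: { 0 => [1, 2, 3], 1 => [2, 3, 1] }
)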