karafka 2.5.4.rc1 → 2.5.5
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -1
- data/LICENSE-COMM +4 -2
- data/lib/karafka/admin/acl.rb +127 -80
- data/lib/karafka/admin/configs.rb +84 -70
- data/lib/karafka/admin/consumer_groups.rb +377 -330
- data/lib/karafka/admin/replication.rb +287 -263
- data/lib/karafka/admin/topics.rb +232 -186
- data/lib/karafka/admin.rb +277 -117
- data/lib/karafka/pro/active_job/consumer.rb +19 -2
- data/lib/karafka/pro/active_job/dispatcher.rb +19 -2
- data/lib/karafka/pro/active_job/job_options_contract.rb +19 -2
- data/lib/karafka/pro/base_consumer.rb +19 -2
- data/lib/karafka/pro/cleaner/errors.rb +19 -2
- data/lib/karafka/pro/cleaner/messages/message.rb +19 -2
- data/lib/karafka/pro/cleaner/messages/messages.rb +19 -2
- data/lib/karafka/pro/cleaner/messages/metadata.rb +19 -2
- data/lib/karafka/pro/cleaner.rb +19 -2
- data/lib/karafka/pro/cli/contracts/server.rb +19 -2
- data/lib/karafka/pro/cli/parallel_segments/base.rb +19 -2
- data/lib/karafka/pro/cli/parallel_segments/collapse.rb +19 -2
- data/lib/karafka/pro/cli/parallel_segments/distribute.rb +19 -2
- data/lib/karafka/pro/cli/parallel_segments.rb +19 -2
- data/lib/karafka/pro/connection/manager.rb +19 -2
- data/lib/karafka/pro/connection/multiplexing/listener.rb +19 -2
- data/lib/karafka/pro/contracts/base.rb +19 -2
- data/lib/karafka/pro/encryption/cipher.rb +19 -2
- data/lib/karafka/pro/encryption/contracts/config.rb +19 -2
- data/lib/karafka/pro/encryption/errors.rb +19 -2
- data/lib/karafka/pro/encryption/messages/middleware.rb +19 -2
- data/lib/karafka/pro/encryption/messages/parser.rb +19 -2
- data/lib/karafka/pro/encryption/setup/config.rb +19 -2
- data/lib/karafka/pro/encryption.rb +19 -2
- data/lib/karafka/pro/instrumentation/performance_tracker.rb +19 -2
- data/lib/karafka/pro/iterator/expander.rb +19 -2
- data/lib/karafka/pro/iterator/tpl_builder.rb +19 -2
- data/lib/karafka/pro/iterator.rb +19 -2
- data/lib/karafka/pro/loader.rb +19 -2
- data/lib/karafka/pro/processing/adaptive_iterator/consumer.rb +19 -2
- data/lib/karafka/pro/processing/adaptive_iterator/tracker.rb +19 -2
- data/lib/karafka/pro/processing/collapser.rb +19 -2
- data/lib/karafka/pro/processing/coordinator.rb +19 -2
- data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +19 -2
- data/lib/karafka/pro/processing/coordinators/filters_applier.rb +19 -2
- data/lib/karafka/pro/processing/coordinators/virtual_offset_manager.rb +19 -2
- data/lib/karafka/pro/processing/executor.rb +19 -2
- data/lib/karafka/pro/processing/expansions_selector.rb +19 -2
- data/lib/karafka/pro/processing/filters/base.rb +19 -2
- data/lib/karafka/pro/processing/filters/delayer.rb +19 -2
- data/lib/karafka/pro/processing/filters/expirer.rb +19 -2
- data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +19 -2
- data/lib/karafka/pro/processing/filters/throttler.rb +19 -2
- data/lib/karafka/pro/processing/filters/virtual_limiter.rb +19 -2
- data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +19 -2
- data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +19 -2
- data/lib/karafka/pro/processing/jobs/periodic.rb +19 -2
- data/lib/karafka/pro/processing/jobs/periodic_non_blocking.rb +19 -2
- data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +19 -2
- data/lib/karafka/pro/processing/jobs_builder.rb +19 -2
- data/lib/karafka/pro/processing/jobs_queue.rb +19 -2
- data/lib/karafka/pro/processing/offset_metadata/consumer.rb +19 -2
- data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +19 -2
- data/lib/karafka/pro/processing/offset_metadata/listener.rb +19 -2
- data/lib/karafka/pro/processing/parallel_segments/filters/base.rb +19 -2
- data/lib/karafka/pro/processing/parallel_segments/filters/default.rb +19 -2
- data/lib/karafka/pro/processing/parallel_segments/filters/mom.rb +19 -2
- data/lib/karafka/pro/processing/partitioner.rb +19 -2
- data/lib/karafka/pro/processing/periodic_job/consumer.rb +19 -2
- data/lib/karafka/pro/processing/piping/consumer.rb +19 -2
- data/lib/karafka/pro/processing/schedulers/base.rb +19 -2
- data/lib/karafka/pro/processing/schedulers/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/ftr_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/ftr_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/aj/mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/base.rb +19 -2
- data/lib/karafka/pro/processing/strategies/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/lrj_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/lrj_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/dlq/vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/ftr/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/ftr/vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/ftr_mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/ftr_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/mom.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/mom_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/lrj/vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/mom/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies/mom/ftr.rb +19 -2
- data/lib/karafka/pro/processing/strategies/mom/ftr_vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/mom/vp.rb +19 -2
- data/lib/karafka/pro/processing/strategies/vp/default.rb +19 -2
- data/lib/karafka/pro/processing/strategies.rb +19 -2
- data/lib/karafka/pro/processing/strategy_selector.rb +19 -2
- data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +19 -2
- data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +19 -2
- data/lib/karafka/pro/processing/virtual_partitions/distributors/base.rb +19 -2
- data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/consumer.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/contracts/config.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/contracts/task.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/deserializer.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/dispatcher.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/errors.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/executor.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/listener.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/matcher.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/schedule.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/serializer.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/setup/config.rb +19 -2
- data/lib/karafka/pro/recurring_tasks/task.rb +19 -2
- data/lib/karafka/pro/recurring_tasks.rb +19 -2
- data/lib/karafka/pro/routing/features/active_job/builder.rb +19 -2
- data/lib/karafka/pro/routing/features/active_job.rb +19 -2
- data/lib/karafka/pro/routing/features/adaptive_iterator/config.rb +19 -2
- data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/adaptive_iterator.rb +19 -2
- data/lib/karafka/pro/routing/features/base.rb +19 -2
- data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/dead_letter_queue.rb +19 -2
- data/lib/karafka/pro/routing/features/delaying/config.rb +19 -2
- data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/delaying/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/delaying.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments/config.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/direct_assignments.rb +19 -2
- data/lib/karafka/pro/routing/features/expiring/config.rb +19 -2
- data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/expiring/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/expiring.rb +19 -2
- data/lib/karafka/pro/routing/features/filtering/config.rb +19 -2
- data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/filtering/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/filtering.rb +19 -2
- data/lib/karafka/pro/routing/features/inline_insights/config.rb +19 -2
- data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/inline_insights/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/inline_insights.rb +19 -2
- data/lib/karafka/pro/routing/features/long_running_job/config.rb +19 -2
- data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/long_running_job/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/long_running_job.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/config.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/patches/contracts/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/proxy.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/subscription_group.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +19 -2
- data/lib/karafka/pro/routing/features/multiplexing.rb +19 -2
- data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/non_blocking_job.rb +19 -2
- data/lib/karafka/pro/routing/features/offset_metadata/config.rb +19 -2
- data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/offset_metadata.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments/config.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/parallel_segments.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/builder.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/config.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/detector.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/pattern.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/patterns.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns/topics.rb +19 -2
- data/lib/karafka/pro/routing/features/patterns.rb +19 -2
- data/lib/karafka/pro/routing/features/pausing/config.rb +19 -2
- data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/pausing/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/pausing.rb +19 -2
- data/lib/karafka/pro/routing/features/periodic_job/config.rb +19 -2
- data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/periodic_job/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/periodic_job.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks/config.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks/proxy.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/recurring_tasks.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages/builder.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages/config.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages/proxy.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/scheduled_messages.rb +19 -2
- data/lib/karafka/pro/routing/features/swarm/config.rb +19 -2
- data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +19 -2
- data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/swarm/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/swarm.rb +19 -2
- data/lib/karafka/pro/routing/features/throttling/config.rb +19 -2
- data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/throttling/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/throttling.rb +19 -2
- data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +19 -2
- data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +19 -2
- data/lib/karafka/pro/routing/features/virtual_partitions.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/consumer.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/contracts/config.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/contracts/message.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/day.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/dispatcher.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/errors.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/max_epoch.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/proxy.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/schema_validator.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/serializer.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/setup/config.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/state.rb +19 -2
- data/lib/karafka/pro/scheduled_messages/tracker.rb +19 -2
- data/lib/karafka/pro/scheduled_messages.rb +19 -2
- data/lib/karafka/pro/swarm/liveness_listener.rb +19 -2
- data/lib/karafka/version.rb +1 -1
- metadata +2 -2
data/lib/karafka/admin.rb
CHANGED
@@ -6,9 +6,9 @@ module Karafka
   # @note It always initializes a new admin instance as we want to ensure it is always closed
   #   Since admin actions are not performed that often, that should be ok.
   #
-  # @note
-  #
-  #
+  # @note By default it uses the primary defined cluster. For multi-cluster operations, create
+  #   an Admin instance with custom kafka configuration:
+  #   `Karafka::Admin.new(kafka: { 'bootstrap.servers': 'other:9092' })`
   class Admin
     extend Core::Helpers::Time

@@ -22,6 +22,22 @@ module Karafka
       admin_kafka: %i[admin kafka]
     )

+    # Custom kafka configuration for this admin instance
+    # @return [Hash] custom kafka settings to merge with defaults
+    attr_reader :custom_kafka
+
+    # Creates a new Admin instance
+    #
+    # @param kafka [Hash] custom kafka configuration to merge with app defaults.
+    #   Useful for multi-cluster operations where you want to target a different cluster.
+    #
+    # @example Create admin for a different cluster
+    #   admin = Karafka::Admin.new(kafka: { 'bootstrap.servers': 'other-cluster:9092' })
+    #   admin.cluster_info
+    def initialize(kafka: {})
+      @custom_kafka = kafka
+    end
+
     class << self
       # Delegate topic-related operations to Topics class

@@ -32,7 +48,7 @@ module Karafka
       # @param settings [Hash] kafka extra settings
       # @see Topics.read
       def read_topic(name, partition, count, start_offset = -1, settings = {})
-
+        new.read_topic(name, partition, count, start_offset, settings)
       end

       # @param name [String] topic name
@@ -41,33 +57,33 @@ module Karafka
       # @param topic_config [Hash] topic config details
       # @see Topics.create
       def create_topic(name, partitions, replication_factor, topic_config = {})
-
+        new.create_topic(name, partitions, replication_factor, topic_config)
       end

       # @param name [String] topic name
       # @see Topics.delete
       def delete_topic(name)
-
+        new.delete_topic(name)
       end

       # @param name [String] topic name
       # @param partitions [Integer] total number of partitions we expect to end up with
       # @see Topics.create_partitions
       def create_partitions(name, partitions)
-
+        new.create_partitions(name, partitions)
       end

       # @param name_or_hash [String, Symbol, Hash] topic name or hash with topics and partitions
       # @param partition [Integer, nil] partition (nil when using hash format)
       # @see Topics.read_watermark_offsets
       def read_watermark_offsets(name_or_hash, partition = nil)
-
+        new.read_watermark_offsets(name_or_hash, partition)
       end

       # @param topic_name [String] name of the topic we're interested in
       # @see Topics.info
       def topic_info(topic_name)
-
+        new.topic_info(topic_name)
       end

       # @param consumer_group_id [String] id of the consumer group for which we want to move the
@@ -75,7 +91,7 @@ module Karafka
       # @param topics_with_partitions_and_offsets [Hash] Hash with list of topics and settings
       # @see ConsumerGroups.seek
       def seek_consumer_group(consumer_group_id, topics_with_partitions_and_offsets)
-
+        new.seek_consumer_group(consumer_group_id, topics_with_partitions_and_offsets)
       end

       # Takes consumer group and its topics and copies all the offsets to a new named group
@@ -86,7 +102,7 @@ module Karafka
       # @return [Boolean] true if anything was migrated, otherwise false
       # @see ConsumerGroups.copy
       def copy_consumer_group(previous_name, new_name, topics)
-
+        new.copy_consumer_group(previous_name, new_name, topics)
       end

       # Takes consumer group and its topics and migrates all the offsets to a new named group
@@ -100,7 +116,12 @@ module Karafka
       #   nothing really to rename
       # @see ConsumerGroups.rename
       def rename_consumer_group(previous_name, new_name, topics, delete_previous: true)
-
+        new.rename_consumer_group(
+          previous_name,
+          new_name,
+          topics,
+          delete_previous: delete_previous
+        )
       end

       # Removes given consumer group (if exists)
@@ -108,7 +129,7 @@ module Karafka
       # @param consumer_group_id [String] consumer group name
       # @see ConsumerGroups.delete
       def delete_consumer_group(consumer_group_id)
-
+        new.delete_consumer_group(consumer_group_id)
       end

       # Triggers a rebalance for the specified consumer group
@@ -117,7 +138,7 @@ module Karafka
       # @see ConsumerGroups.trigger_rebalance
       # @note This API should be used only for development.
       def trigger_rebalance(consumer_group_id)
-
+        new.trigger_rebalance(consumer_group_id)
       end

       # Reads lags and offsets for given topics in the context of consumer groups defined in the
@@ -131,7 +152,7 @@ module Karafka
       #   partitions with lags and offsets
       # @see ConsumerGroups.read_lags_with_offsets
       def read_lags_with_offsets(consumer_groups_with_topics = {}, active_topics_only: true)
-
+        new.read_lags_with_offsets(
           consumer_groups_with_topics,
           active_topics_only: active_topics_only
         )
@@ -177,16 +198,16 @@ module Karafka
       #
       # @see Replication.plan for more details
       def plan_topic_replication(topic:, replication_factor:, brokers: nil)
-
+        new.plan_topic_replication(
           topic: topic,
-
+          replication_factor: replication_factor,
           brokers: brokers
         )
       end

       # @return [Rdkafka::Metadata] cluster metadata info
       def cluster_info
-
+        new.cluster_info
       end

       # Creates consumer instance and yields it. After usage it closes the consumer instance
@@ -196,126 +217,265 @@ module Karafka
       #
       # @note We always ship and yield a proxied consumer because admin API performance is not
       #   that relevant. That is, there are no high frequency calls that would have to be delegated
-      def with_consumer(settings = {})
-
-
-        consumer = config(:consumer, settings).consumer(native_kafka_auto_start: false)
-        bind_oauth(bind_id, consumer)
-
-        consumer.start
-        proxy = Karafka::Connection::Proxy.new(consumer)
-        yield(proxy)
-      ensure
-        # Always unsubscribe consumer just to be sure, that no metadata requests are running
-        # when we close the consumer. This in theory should prevent from some race-conditions
-        # that originate from librdkafka
-        begin
-          consumer&.unsubscribe
-        # Ignore any errors and continue to close consumer despite them
-        rescue Rdkafka::RdkafkaError
-          nil
-        end
-
-        consumer&.close
-
-        unbind_oauth(bind_id)
+      def with_consumer(settings = {}, &)
+        new.with_consumer(settings, &)
       end

       # Creates admin instance and yields it. After usage it closes the admin instance
-      def with_admin
-
+      def with_admin(&)
+        new.with_admin(&)
+      end
+    end

-
-        native_kafka_auto_start: false,
-        native_kafka_poll_timeout_ms: poll_timeout
-        )
+    # Instance methods - these use the custom kafka configuration

-
+    # @param name [String, Symbol] topic name
+    # @param partition [Integer] partition
+    # @param count [Integer] how many messages we want to get at most
+    # @param start_offset [Integer, Time] offset from which we should start
+    # @param settings [Hash] kafka extra settings (optional)
+    # @see Topics#read
+    def read_topic(name, partition, count, start_offset = -1, settings = {})
+      Topics.new(kafka: @custom_kafka).read(name, partition, count, start_offset, settings)
+    end

-
-
-
-
-
+    # @param name [String] topic name
+    # @param partitions [Integer] number of partitions for this topic
+    # @param replication_factor [Integer] number of replicas
+    # @param topic_config [Hash] topic config details
+    # @see Topics#create
+    def create_topic(name, partitions, replication_factor, topic_config = {})
+      Topics.new(kafka: @custom_kafka).create(name, partitions, replication_factor, topic_config)
+    end

-
-
+    # @param name [String] topic name
+    # @see Topics#delete
+    def delete_topic(name)
+      Topics.new(kafka: @custom_kafka).delete(name)
+    end
+
+    # @param name [String] topic name
+    # @param partitions [Integer] total number of partitions we expect to end up with
+    # @see Topics#create_partitions
+    def create_partitions(name, partitions)
+      Topics.new(kafka: @custom_kafka).create_partitions(name, partitions)
+    end
+
+    # @param name_or_hash [String, Symbol, Hash] topic name or hash with topics and partitions
+    # @param partition [Integer, nil] partition (nil when using hash format)
+    # @see Topics#read_watermark_offsets
+    def read_watermark_offsets(name_or_hash, partition = nil)
+      Topics.new(kafka: @custom_kafka).read_watermark_offsets(name_or_hash, partition)
+    end
+
+    # @param topic_name [String] name of the topic we're interested in
+    # @see Topics#info
+    def topic_info(topic_name)
+      Topics.new(kafka: @custom_kafka).info(topic_name)
+    end
+
+    # @param consumer_group_id [String] consumer group for which we want to move offsets
+    # @param topics_with_partitions_and_offsets [Hash] hash with topics and settings
+    # @see ConsumerGroups#seek
+    def seek_consumer_group(consumer_group_id, topics_with_partitions_and_offsets)
+      ConsumerGroups.new(kafka: @custom_kafka).seek(
+        consumer_group_id,
+        topics_with_partitions_and_offsets
+      )
+    end
+
+    # @param previous_name [String] old consumer group name
+    # @param new_name [String] new consumer group name
+    # @param topics [Array<String>] topics for which we want to copy offsets
+    # @see ConsumerGroups#copy
+    def copy_consumer_group(previous_name, new_name, topics)
+      ConsumerGroups.new(kafka: @custom_kafka).copy(previous_name, new_name, topics)
+    end
+
+    # @param previous_name [String] old consumer group name
+    # @param new_name [String] new consumer group name
+    # @param topics [Array<String>] topics for which we want to migrate offsets
+    # @param delete_previous [Boolean] should we delete previous consumer group after rename
+    # @see ConsumerGroups#rename
+    def rename_consumer_group(previous_name, new_name, topics, delete_previous: true)
+      ConsumerGroups.new(kafka: @custom_kafka).rename(
+        previous_name,
+        new_name,
+        topics,
+        delete_previous: delete_previous
+      )
+    end
+
+    # @param consumer_group_id [String] consumer group name
+    # @see ConsumerGroups#delete
+    def delete_consumer_group(consumer_group_id)
+      ConsumerGroups.new(kafka: @custom_kafka).delete(consumer_group_id)
+    end
+
+    # @param consumer_group_id [String] consumer group id to trigger rebalance for
+    # @see ConsumerGroups#trigger_rebalance
+    def trigger_rebalance(consumer_group_id)
+      ConsumerGroups.new(kafka: @custom_kafka).trigger_rebalance(consumer_group_id)
+    end

-
+    # @param consumer_groups_with_topics [Hash{String => Array<String>}] hash with consumer
+    #   groups names with array of topics
+    # @param active_topics_only [Boolean] if set to false, will select also inactive topics
+    # @see ConsumerGroups#read_lags_with_offsets
+    def read_lags_with_offsets(consumer_groups_with_topics = {}, active_topics_only: true)
+      ConsumerGroups.new(kafka: @custom_kafka).read_lags_with_offsets(
+        consumer_groups_with_topics,
+        active_topics_only: active_topics_only
+      )
+    end
+
+    # @param topic [String] topic name to plan replication for
+    # @param replication_factor [Integer] target replication factor
+    # @param brokers [Hash, nil] optional manual broker assignments per partition
+    # @see Replication#plan
+    def plan_topic_replication(topic:, replication_factor:, brokers: nil)
+      Replication.new(kafka: @custom_kafka).plan(
+        topic: topic,
+        to: replication_factor,
+        brokers: brokers
+      )
+    end

-
-
-
-
+    # @return [Rdkafka::Metadata] cluster metadata info
+    def cluster_info
+      with_admin(&:metadata)
+    end
+
+    # Creates consumer instance and yields it. After usage it closes the consumer instance
+    # This API can be used in other pieces of code and allows for low-level consumer usage
+    #
+    # @param settings [Hash] extra settings to customize consumer
+    #
+    # @note We always ship and yield a proxied consumer because admin API performance is not
+    #   that relevant. That is, there are no high frequency calls that would have to be delegated
+    def with_consumer(settings = {})
+      bind_id = SecureRandom.uuid
+
+      consumer = config(:consumer, settings).consumer(native_kafka_auto_start: false)
+      bind_oauth(bind_id, consumer)
+
+      consumer.start
+      proxy = Karafka::Connection::Proxy.new(consumer)
+      yield(proxy)
+    ensure
+      # Always unsubscribe consumer just to be sure, that no metadata requests are running
+      # when we close the consumer. This in theory should prevent from some race-conditions
+      # that originate from librdkafka
+      begin
+        consumer&.unsubscribe
+      # Ignore any errors and continue to close consumer despite them
+      rescue Rdkafka::RdkafkaError
+        nil
       end

-
-
-
-
-
-
-
-
-
-
-
-
+      consumer&.close
+
+      unbind_oauth(bind_id)
+    end
+
+    # Creates admin instance and yields it. After usage it closes the admin instance
+    def with_admin
+      bind_id = SecureRandom.uuid
+
+      admin = config(:producer, {}).admin(
+        native_kafka_auto_start: false,
+        native_kafka_poll_timeout_ms: self.class.poll_timeout
+      )
+
+      bind_oauth(bind_id, admin)
+
+      admin.start
+      proxy = Karafka::Connection::Proxy.new(admin)
+      yield(proxy)
+    ensure
+      admin&.close
+
+      unbind_oauth(bind_id)
+    end
+
+    private
+
+    # @return [Integer] number of seconds to wait. `rdkafka` requires this value
+    #   (`max_wait_time`) to be provided in seconds while we define it in ms hence the conversion
+    def max_wait_time_seconds
+      self.class.max_wait_time / 1_000.0
+    end
+
+    # Adds a new callback for given rdkafka instance for oauth token refresh (if needed)
+    #
+    # @param id [String, Symbol] unique (for the lifetime of instance) id that we use for
+    #   callback referencing
+    # @param instance [Rdkafka::Consumer, Rdkafka::Admin] rdkafka instance to be used to set
+    #   appropriate oauth token when needed
+    def bind_oauth(id, instance)
+      Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.add(
+        id,
+        Instrumentation::Callbacks::OauthbearerTokenRefresh.new(
+          instance
        )
-
+      )
+    end

-
-
-
-
-
-
-
+    # Removes the callback from no longer used instance
+    #
+    # @param id [String, Symbol] unique (for the lifetime of instance) id that we use for
+    #   callback referencing
+    def unbind_oauth(id)
+      Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.delete(id)
+    end

-
-
-
-
-
-
-
-
-
-
-
-
-
+    # There are some cases where rdkafka admin operations finish successfully but without the
+    # callback being triggered to materialize the post-promise object. Until this is fixed we
+    # can figure out, that operation we wanted to do finished successfully by checking that the
+    # effect of the command (new topic, more partitions, etc) is handled. Exactly for that we
+    # use the breaker. It we get a timeout, we can check that what we wanted to achieve has
+    # happened via the breaker check, hence we do not need to wait any longer.
+    #
+    # @param handler [Proc] the wait handler operation
+    # @param breaker [Proc] extra condition upon timeout that indicates things were finished ok
+    def with_re_wait(handler, breaker)
+      start_time = self.class.monotonic_now
+      # Convert milliseconds to seconds for sleep
+      sleep_time = self.class.retry_backoff / 1000.0

-
-
+      loop do
+        handler.call

-
+        sleep(sleep_time)

-
-
-
+        return if breaker.call
+      rescue Rdkafka::AbstractHandle::WaitTimeoutError
+        return if breaker.call

-
+        next if self.class.monotonic_now - start_time < self.class.max_retries_duration

-
-        end
+        raise(Errors::ResultNotVisibleError)
       end
+    end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # @param type [Symbol] type of config we want
+    # @param settings [Hash] extra settings for config (if needed)
+    # @return [::Rdkafka::Config] rdkafka config
+    def config(type, settings)
+      kafka_config = self.class.app_kafka.dup
+      kafka_config.merge!(self.class.admin_kafka)
+      kafka_config[:'group.id'] = self.class.group_id
+      # We merge after setting the group id so it can be altered if needed
+      # In general in admin we only should alter it when we need to impersonate a given
+      # consumer group or do something similar
+      kafka_config.merge!(settings)
+      # Custom kafka config is merged last so it can override all other settings
+      # This enables multi-cluster support where custom_kafka specifies a different cluster
+      kafka_config.merge!(@custom_kafka)
+
+      mapped_config = Karafka::Setup::AttributesMap.public_send(type, kafka_config)
+
+      Rdkafka::Config.new(mapped_config)
     end
   end
 end
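Net effect of the admin.rb change: the class-level Karafka::Admin API keeps its existing call sites and targets the primary cluster (each class method now delegates to a fresh instance), while the new instance API accepts per-instance kafka settings that are merged last and can therefore point at a different cluster. A minimal usage sketch based only on methods visible in this diff; the broker address, topic, and group names are placeholders:

  # Class-level API - unchanged call sites, targets the default cluster
  Karafka::Admin.create_topic('events', 6, 2)

  # Instance API - custom kafka config overrides the app defaults, which
  # allows administering a second (here hypothetical) cluster
  admin = Karafka::Admin.new(kafka: { 'bootstrap.servers': 'other-cluster:9092' })
  admin.create_topic('events', 6, 2)
  admin.read_lags_with_offsets({ 'example_group' => ['events'] })
  admin.cluster_info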
data/lib/karafka/pro/** files
CHANGED
Every Pro file listed above receives the same +19 -2 change: its two header comment lines are replaced with the new source-available commercial license header. The identical hunk:

@@ -1,7 +1,24 @@
 # frozen_string_literal: true

-#
-#
+# Karafka Pro - Source Available Commercial Software
+# Copyright (c) 2017-present Maciej Mensfeld. All rights reserved.
+#
+# This software is NOT open source. It is source-available commercial software
+# requiring a paid license for use. It is NOT covered by LGPL.
+#
+# PROHIBITED:
+# - Use without a valid commercial license
+# - Redistribution, modification, or derivative works without authorization
+# - Use as training data for AI/ML models or inclusion in datasets
+# - Scraping, crawling, or automated collection for any purpose
+#
+# PERMITTED:
+# - Reading, referencing, and linking for personal or commercial use
+# - Runtime retrieval by AI assistants, coding agents, and RAG systems
+#   for the purpose of providing contextual help to Karafka users
+#
+# License: https://karafka.io/docs/Pro-License-Comm/
+# Contact: contact@karafka.io

 module Karafka
   module Pro