karafka 2.5.2 → 2.5.4.rc1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +16 -0
- data/config/locales/errors.yml +14 -0
- data/karafka.gemspec +15 -4
- data/lib/active_job/queue_adapters/karafka_adapter.rb +2 -2
- data/lib/karafka/active_job/consumer.rb +2 -2
- data/lib/karafka/active_job/current_attributes.rb +2 -2
- data/lib/karafka/active_job/deserializer.rb +1 -1
- data/lib/karafka/active_job/dispatcher.rb +2 -2
- data/lib/karafka/admin/configs/resource.rb +7 -1
- data/lib/karafka/admin/consumer_groups.rb +6 -8
- data/lib/karafka/admin/contracts/replication.rb +149 -0
- data/lib/karafka/admin/replication.rb +462 -0
- data/lib/karafka/admin/topics.rb +5 -4
- data/lib/karafka/admin.rb +57 -12
- data/lib/karafka/app.rb +3 -3
- data/lib/karafka/base_consumer.rb +1 -1
- data/lib/karafka/cli/base.rb +1 -1
- data/lib/karafka/cli/console.rb +1 -1
- data/lib/karafka/cli/contracts/server.rb +1 -1
- data/lib/karafka/cli/help.rb +1 -1
- data/lib/karafka/cli/install.rb +2 -1
- data/lib/karafka/cli/server.rb +1 -1
- data/lib/karafka/cli/swarm.rb +1 -1
- data/lib/karafka/connection/client.rb +19 -18
- data/lib/karafka/connection/manager.rb +1 -0
- data/lib/karafka/connection/proxy.rb +1 -1
- data/lib/karafka/connection/rebalance_manager.rb +1 -1
- data/lib/karafka/connection/status.rb +1 -0
- data/lib/karafka/constraints.rb +1 -1
- data/lib/karafka/contracts/base.rb +1 -1
- data/lib/karafka/deserializers/payload.rb +1 -1
- data/lib/karafka/helpers/async.rb +1 -1
- data/lib/karafka/helpers/config_importer.rb +3 -3
- data/lib/karafka/helpers/multi_delegator.rb +3 -0
- data/lib/karafka/instrumentation/assignments_tracker.rb +2 -1
- data/lib/karafka/instrumentation/callbacks/error.rb +2 -2
- data/lib/karafka/instrumentation/callbacks/statistics.rb +3 -3
- data/lib/karafka/instrumentation/logger.rb +6 -6
- data/lib/karafka/instrumentation/logger_listener.rb +0 -2
- data/lib/karafka/instrumentation/monitor.rb +2 -2
- data/lib/karafka/instrumentation/vendors/appsignal/base.rb +1 -1
- data/lib/karafka/instrumentation/vendors/appsignal/metrics_listener.rb +4 -0
- data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +32 -16
- data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +2 -2
- data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb +1 -1
- data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +3 -15
- data/lib/karafka/licenser.rb +1 -1
- data/lib/karafka/messages/builders/batch_metadata.rb +1 -1
- data/lib/karafka/messages/messages.rb +32 -0
- data/lib/karafka/pro/active_job/consumer.rb +2 -2
- data/lib/karafka/pro/active_job/dispatcher.rb +3 -3
- data/lib/karafka/pro/cleaner/messages/messages.rb +1 -1
- data/lib/karafka/pro/cleaner.rb +3 -3
- data/lib/karafka/pro/cli/contracts/server.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments/base.rb +4 -3
- data/lib/karafka/pro/cli/parallel_segments/collapse.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments/distribute.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments.rb +1 -1
- data/lib/karafka/pro/connection/manager.rb +1 -2
- data/lib/karafka/pro/connection/multiplexing/listener.rb +1 -0
- data/lib/karafka/pro/contracts/base.rb +1 -1
- data/lib/karafka/pro/encryption/cipher.rb +3 -2
- data/lib/karafka/pro/encryption/contracts/config.rb +1 -1
- data/lib/karafka/pro/encryption/messages/parser.rb +1 -1
- data/lib/karafka/pro/encryption/setup/config.rb +1 -1
- data/lib/karafka/pro/iterator/tpl_builder.rb +1 -1
- data/lib/karafka/pro/iterator.rb +1 -1
- data/lib/karafka/pro/loader.rb +1 -1
- data/lib/karafka/pro/processing/coordinator.rb +1 -1
- data/lib/karafka/pro/processing/filters/base.rb +1 -0
- data/lib/karafka/pro/processing/filters/delayer.rb +1 -1
- data/lib/karafka/pro/processing/filters/expirer.rb +1 -1
- data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +1 -1
- data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs/periodic.rb +1 -1
- data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs_builder.rb +1 -1
- data/lib/karafka/pro/processing/jobs_queue.rb +0 -2
- data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +1 -0
- data/lib/karafka/pro/processing/partitioner.rb +1 -1
- data/lib/karafka/pro/processing/strategies/base.rb +1 -1
- data/lib/karafka/pro/processing/strategies/default.rb +2 -2
- data/lib/karafka/pro/processing/strategies/dlq/default.rb +1 -1
- data/lib/karafka/pro/processing/strategies/vp/default.rb +1 -1
- data/lib/karafka/pro/processing/strategy_selector.rb +1 -0
- data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +4 -2
- data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +4 -2
- data/lib/karafka/pro/recurring_tasks/consumer.rb +3 -2
- data/lib/karafka/pro/recurring_tasks/contracts/config.rb +2 -2
- data/lib/karafka/pro/recurring_tasks/contracts/task.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/deserializer.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/dispatcher.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/executor.rb +2 -1
- data/lib/karafka/pro/recurring_tasks/schedule.rb +5 -2
- data/lib/karafka/pro/recurring_tasks/serializer.rb +6 -5
- data/lib/karafka/pro/recurring_tasks/setup/config.rb +2 -2
- data/lib/karafka/pro/recurring_tasks/task.rb +1 -1
- data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +3 -0
- data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +1 -1
- data/lib/karafka/pro/routing/features/multiplexing.rb +5 -5
- data/lib/karafka/pro/routing/features/offset_metadata.rb +4 -4
- data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +1 -1
- data/lib/karafka/pro/routing/features/patterns/patterns.rb +1 -1
- data/lib/karafka/pro/routing/features/periodic_job/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +1 -1
- data/lib/karafka/pro/routing/features/swarm.rb +1 -1
- data/lib/karafka/pro/routing/features/throttling/topic.rb +3 -1
- data/lib/karafka/pro/scheduled_messages/consumer.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/contracts/config.rb +2 -2
- data/lib/karafka/pro/scheduled_messages/contracts/message.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +3 -2
- data/lib/karafka/pro/scheduled_messages/day.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/max_epoch.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/proxy.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/serializer.rb +3 -3
- data/lib/karafka/pro/scheduled_messages/setup/config.rb +2 -2
- data/lib/karafka/pro/scheduled_messages/state.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/tracker.rb +1 -0
- data/lib/karafka/process.rb +4 -4
- data/lib/karafka/processing/executor.rb +1 -1
- data/lib/karafka/processing/inline_insights/tracker.rb +1 -0
- data/lib/karafka/processing/jobs_queue.rb +1 -1
- data/lib/karafka/processing/result.rb +1 -0
- data/lib/karafka/processing/strategies/dlq.rb +1 -1
- data/lib/karafka/processing/strategy_selector.rb +1 -0
- data/lib/karafka/routing/activity_manager.rb +1 -0
- data/lib/karafka/routing/builder.rb +3 -1
- data/lib/karafka/routing/consumer_group.rb +19 -1
- data/lib/karafka/routing/contracts/consumer_group.rb +3 -2
- data/lib/karafka/routing/contracts/topic.rb +5 -2
- data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
- data/lib/karafka/routing/features/declaratives/topic.rb +5 -2
- data/lib/karafka/routing/features/deserializers/topic.rb +3 -3
- data/lib/karafka/routing/features/inline_insights.rb +5 -5
- data/lib/karafka/routing/router.rb +1 -1
- data/lib/karafka/routing/subscription_group.rb +2 -2
- data/lib/karafka/routing/subscription_groups_builder.rb +18 -2
- data/lib/karafka/routing/topic.rb +3 -3
- data/lib/karafka/server.rb +1 -1
- data/lib/karafka/setup/attributes_map.rb +4 -2
- data/lib/karafka/setup/config.rb +21 -10
- data/lib/karafka/setup/config_proxy.rb +209 -0
- data/lib/karafka/setup/contracts/config.rb +1 -1
- data/lib/karafka/swarm/liveness_listener.rb +1 -0
- data/lib/karafka/swarm/manager.rb +7 -6
- data/lib/karafka/swarm/node.rb +1 -1
- data/lib/karafka/swarm/supervisor.rb +1 -0
- data/lib/karafka/time_trackers/base.rb +1 -1
- data/lib/karafka/version.rb +1 -1
- data/lib/karafka.rb +2 -3
- metadata +8 -65
- data/.coditsu/ci.yml +0 -3
- data/.console_irbrc +0 -11
- data/.github/CODEOWNERS +0 -3
- data/.github/FUNDING.yml +0 -1
- data/.github/ISSUE_TEMPLATE/bug_report.md +0 -43
- data/.github/ISSUE_TEMPLATE/feature_request.md +0 -20
- data/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml +0 -278
- data/.github/workflows/ci_macos_arm64.yml +0 -151
- data/.github/workflows/push.yml +0 -35
- data/.github/workflows/trigger-wiki-refresh.yml +0 -30
- data/.github/workflows/verify-action-pins.yml +0 -16
- data/.gitignore +0 -69
- data/.rspec +0 -7
- data/.ruby-gemset +0 -1
- data/.ruby-version +0 -1
- data/CODE_OF_CONDUCT.md +0 -46
- data/CONTRIBUTING.md +0 -32
- data/Gemfile +0 -28
- data/Gemfile.lock +0 -173
- data/Rakefile +0 -4
- data/SECURITY.md +0 -23
- data/bin/benchmarks +0 -99
- data/bin/clean_kafka +0 -43
- data/bin/create_token +0 -22
- data/bin/integrations +0 -341
- data/bin/record_rss +0 -50
- data/bin/rspecs +0 -26
- data/bin/scenario +0 -29
- data/bin/stress_many +0 -13
- data/bin/stress_one +0 -13
- data/bin/verify_kafka_warnings +0 -36
- data/bin/verify_license_integrity +0 -37
- data/bin/verify_topics_naming +0 -27
- data/bin/wait_for_kafka +0 -24
- data/docker-compose.yml +0 -25
- data/examples/payloads/avro/.gitkeep +0 -0
- data/examples/payloads/json/sample_set_01/enrollment_event.json +0 -579
- data/examples/payloads/json/sample_set_01/ingestion_event.json +0 -30
- data/examples/payloads/json/sample_set_01/transaction_event.json +0 -17
- data/examples/payloads/json/sample_set_01/user_event.json +0 -11
- data/examples/payloads/json/sample_set_02/download.json +0 -191
- data/examples/payloads/json/sample_set_03/event_type_1.json +0 -18
- data/examples/payloads/json/sample_set_03/event_type_2.json +0 -263
- data/examples/payloads/json/sample_set_03/event_type_3.json +0 -41
- data/log/.gitkeep +0 -0
- data/renovate.json +0 -21
data/lib/karafka/cli/help.rb
CHANGED
@@ -10,7 +10,7 @@ module Karafka
       # Print available commands
       def call
         # Find the longest command for alignment purposes
-        max_command_length = self.class.commands.map
+        max_command_length = self.class.commands.map { |command| command.name.size }.max

         puts 'Karafka commands:'

data/lib/karafka/cli/install.rb
CHANGED
@@ -24,6 +24,7 @@ module Karafka
         'example_consumer.rb.erb' => 'app/consumers/example_consumer.rb'
       }.freeze

+      # Initializes the install command
       def initialize
         super

@@ -52,7 +53,7 @@ module Karafka
         FileUtils.mkdir_p File.dirname(pathed_target)

         template = File.read(Karafka.core_root.join("templates/#{source}"))
-        render =
+        render = ERB.new(template, trim_mode: '-').result(binding)

         File.write(pathed_target, render)

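The `trim_mode: '-'` argument now visible in the install command is a stdlib `ERB` option: it lets templates suppress the newlines around `<%- ... -%>` tags so conditional sections leave no blank lines in generated files. A minimal stdlib sketch (the template text is illustrative, not from the gem):

```ruby
require 'erb'

template = <<~TPL
  class ApplicationConsumer < Karafka::BaseConsumer
  <%- if false -%>
    # skipped branch; trim_mode '-' also swallows the tag-only lines
  <%- end -%>
  end
TPL

# Prints only the class definition and the closing "end"
puts ERB.new(template, trim_mode: '-').result(binding)
```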
data/lib/karafka/cli/server.rb
CHANGED
@@ -10,7 +10,7 @@ module Karafka
       )

       # Types of things we can include / exclude from the routing via the CLI options
-      SUPPORTED_TYPES =
+      SUPPORTED_TYPES = Karafka::Routing::ActivityManager::SUPPORTED_TYPES

       private_constant :SUPPORTED_TYPES

data/lib/karafka/connection/client.rb
CHANGED
@@ -8,7 +8,7 @@ module Karafka
     # It is threadsafe and provides some security measures so we won't end up operating on a
     # closed consumer instance as it causes Ruby VM process to crash.
     class Client
-      include
+      include Karafka::Core::Helpers::Time
       include Helpers::ConfigImporter.new(
         logger: %i[logger],
         tick_interval: %i[internal tick_interval],
@@ -37,7 +37,7 @@ module Karafka
       COOP_UNSUBSCRIBE_FACTOR = 0.5

       # Errors upon which we early report that something is off without retrying prior to the
-      # report
+      # report. Aside from those we ALWAYS early report on any fatal error.
       EARLY_REPORT_ERRORS = [
         :inconsistent_group_protocol, # 23
         :max_poll_exceeded, # -147
@@ -48,10 +48,7 @@ module Karafka
         :cluster_authorization_failed, # 31
         :illegal_generation,
         # this will not recover as fencing is permanent
-        :fenced, # -144
         :auto_offset_reset, # -140
-        # This can happen for many reasons, including issues with static membership being fenced
-        :fatal, # -150,
         # This can happen with new rebalance protocol and same group.instance.id in use
         :unreleased_instance_id # 111
       ].freeze
@@ -489,7 +486,7 @@ module Karafka
        # If the seek message offset is in a time format, we need to find the closest "real"
        # offset matching before we seek
        if message.offset.is_a?(Time)
-          tpl =
+          tpl = Rdkafka::Consumer::TopicPartitionList.new
          tpl.add_topic_and_partitions_with_offsets(
            message.topic,
            message.partition => message.offset
@@ -546,9 +543,9 @@ module Karafka
        sg_id = @subscription_group.id

        # Remove callbacks runners that were registered
-
-
-
+        Karafka::Core::Instrumentation.statistics_callbacks.delete(sg_id)
+        Karafka::Core::Instrumentation.error_callbacks.delete(sg_id)
+        Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.delete(sg_id)

        kafka.close
        @kafka = nil
@@ -564,7 +561,7 @@ module Karafka
      # ignored. We do however want to instrument on it
      def unsubscribe
        kafka.unsubscribe
-      rescue
+      rescue Rdkafka::RdkafkaError => e
        Karafka.monitor.instrument(
          'error.occurred',
          caller: self,
@@ -593,8 +590,8 @@ module Karafka
      # established. It may be `-1` in case we lost the assignment or we did not yet fetch data
      # for this topic partition
      def topic_partition_position(topic, partition)
-        rd_partition =
-        tpl =
+        rd_partition = Rdkafka::Consumer::Partition.new(partition, nil, 0)
+        tpl = Rdkafka::Consumer::TopicPartitionList.new(topic => [rd_partition])

        kafka.position(tpl).to_h.fetch(topic).first.offset || -1
      end
@@ -645,7 +642,7 @@ module Karafka
        # If we did not exceed total time allocated, it means that we finished because of the
        # tick interval time limitations and not because time run out without any data
        time_poll.exceeded? ? nil : :tick_time
-      rescue
+      rescue Rdkafka::RdkafkaError => e
        early_report = false

        retryable = time_poll.attempts <= MAX_POLL_RETRIES && time_poll.retryable?
@@ -655,6 +652,7 @@ module Karafka
        # Those are mainly network issues and exceeding the max poll interval
        # We want to report early on max poll interval exceeding because it may mean that the
        # underlying processing is taking too much time and it is not LRJ
+
        case e.code
        when *EARLY_REPORT_ERRORS
          early_report = true
@@ -678,6 +676,9 @@ module Karafka
          return e.details
        end

+        # Any fatal error should always cause early report
+        early_report = true if e.fatal?
+
        if early_report || !retryable
          Karafka.monitor.instrument(
            'error.occurred',
@@ -704,7 +705,7 @@ module Karafka
      # Builds a new rdkafka consumer instance based on the subscription group configuration
      # @return [Rdkafka::Consumer]
      def build_consumer
-
+        Rdkafka::Config.logger = logger

        # We need to refresh the setup of this subscription group in case we started running in a
        # swarm. The initial configuration for validation comes from the parent node, but it needs
@@ -712,7 +713,7 @@ module Karafka
        # group instance id.
        @subscription_group.refresh

-        config =
+        config = Rdkafka::Config.new(@subscription_group.kafka)
        config.consumer_rebalance_listener = @rebalance_callback
        # We want to manage the events queue independently from the messages queue. Thanks to that
        # we can ensure, that we get statistics and errors often enough even when not polling
@@ -724,7 +725,7 @@ module Karafka
        @name = consumer.name

        # Register statistics runner for this particular type of callbacks
-
+        Karafka::Core::Instrumentation.statistics_callbacks.add(
          @subscription_group.id,
          Instrumentation::Callbacks::Statistics.new(
            @subscription_group.id,
@@ -734,7 +735,7 @@ module Karafka
          )

        # Register error tracking callback
-
+        Karafka::Core::Instrumentation.error_callbacks.add(
          @subscription_group.id,
          Instrumentation::Callbacks::Error.new(
            @subscription_group.id,
@@ -743,7 +744,7 @@ module Karafka
          )
        )

-
+        Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.add(
          @subscription_group.id,
          Instrumentation::Callbacks::OauthbearerTokenRefresh.new(
            consumer
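Taken together, the `EARLY_REPORT_ERRORS` and rescue changes above mean `:fenced` and `:fatal` no longer need to be enumerated: any error that librdkafka flags as fatal is now reported immediately. A condensed sketch of the resulting decision (simplified from the client code above, not a verbatim excerpt):

```ruby
# Report at once for the enumerated codes or anything librdkafka marks as
# fatal; everything else keeps retrying with back-off first
def early_report?(error)
  EARLY_REPORT_ERRORS.include?(error.code) || error.fatal?
end
```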
data/lib/karafka/connection/manager.rb
CHANGED
@@ -8,6 +8,7 @@ module Karafka
     # In the OSS version it starts listeners as they are without any connection management or
     # resources utilization supervision and shuts them down or quiets when time has come
     class Manager
+      # Initializes the connection manager
       def initialize
         @once_executions = Set.new
       end
data/lib/karafka/connection/proxy.rb
CHANGED
@@ -155,7 +155,7 @@ module Karafka

       # @param tpl [Rdkafka::Consumer::TopicPartitionList] list of topics and partitions for which
       #   we want to get the lag on the defined CG
-      # @return [Hash
+      # @return [Hash{String => Hash}] hash with topics and their partitions lags
       def lag(tpl)
         l_config = proxy_config.committed

data/lib/karafka/contracts/base.rb
CHANGED
@@ -3,7 +3,7 @@
 module Karafka
   module Contracts
     # Base contract for all Karafka contracts
-    class Base <
+    class Base < Karafka::Core::Contractable::Contract
       # @param data [Hash] data for validation
       # @param scope [Array<String>] nested scope if in use
       # @return [Boolean] true if all good
data/lib/karafka/deserializers/payload.rb
CHANGED
@@ -9,7 +9,7 @@ module Karafka
       # @return [Hash] hash with deserialized JSON data
       def call(message)
         # nil payload can be present for example for tombstone messages
-        message.raw_payload.nil? ? nil :
+        message.raw_payload.nil? ? nil : JSON.parse(message.raw_payload)
       end
     end
   end
data/lib/karafka/helpers/config_importer.rb
CHANGED
@@ -5,7 +5,7 @@ module Karafka
     # Module allowing for configuration injections. By default injects whole app config
     # Allows for granular config injection
     class ConfigImporter < Module
-      # @param attributes [Hash
+      # @param attributes [Hash{Symbol => Array<Symbol>}] map defining what we want to inject.
      #   The key is the name under which attribute will be visible and the value is the full
      #   path to the attribute
      def initialize(attributes = { config: %i[itself] })
@@ -20,7 +20,7 @@ module Karafka
        @attributes.each do |name, path|
          model.class_eval <<~RUBY, __FILE__, __LINE__ + 1
            def #{name}
-              @#{name} ||=
+              @#{name} ||= Karafka::App.config.#{path.join('.')}
            end
          RUBY
        end
@@ -33,7 +33,7 @@ module Karafka
        @attributes.each do |name, path|
          model.class_eval <<~RUBY, __FILE__, __LINE__ + 1
            def self.#{name}
-              @#{name} ||=
+              @#{name} ||= Karafka::App.config.#{path.join('.')}
            end
          RUBY
        end
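The generated readers memoize a lookup into `Karafka::App.config`, as the filled-in `class_eval` bodies show. A minimal usage sketch (the class name is hypothetical; the include mirrors the one visible in the client diff above):

```ruby
class TickReporter
  # Each key becomes a memoized reader; the value is the path within the config
  include Karafka::Helpers::ConfigImporter.new(
    logger: %i[logger],
    tick_interval: %i[internal tick_interval]
  )

  def call
    logger.info("ticking every #{tick_interval} ms")
  end
end
```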
data/lib/karafka/instrumentation/assignments_tracker.rb
CHANGED
@@ -14,6 +14,7 @@ module Karafka
     class AssignmentsTracker
       include Singleton

+      # Initializes the assignments tracker with empty assignments
       def initialize
         @mutex = Mutex.new
         @assignments = Hash.new { |hash, key| hash[key] = [] }
@@ -21,7 +22,7 @@ module Karafka

       # Returns all the active/current assignments of this given process
       #
-      # @return [Hash
+      # @return [Hash{Karafka::Routing::Topic => Array<Integer>}]
       #
       # @note Keep in mind, that those assignments can change any time, especially when working
       #   with multiple consumer groups or subscription groups.
data/lib/karafka/instrumentation/callbacks/error.rb
CHANGED
@@ -10,8 +10,8 @@ module Karafka
         monitor: %i[monitor]
       )

-      # @param subscription_group_id [String]
-      # @param consumer_group_id [String]
+      # @param subscription_group_id [String]
+      # @param consumer_group_id [String]
       # @param client_name [String] rdkafka client name
       def initialize(subscription_group_id, consumer_group_id, client_name)
         @subscription_group_id = subscription_group_id
data/lib/karafka/instrumentation/callbacks/statistics.rb
CHANGED
@@ -11,14 +11,14 @@ module Karafka
         monitor: %i[monitor]
       )

-      # @param subscription_group_id [String]
-      # @param consumer_group_id [String]
+      # @param subscription_group_id [String]
+      # @param consumer_group_id [String]
       # @param client_name [String] rdkafka client name
       def initialize(subscription_group_id, consumer_group_id, client_name)
         @subscription_group_id = subscription_group_id
         @consumer_group_id = consumer_group_id
         @client_name = client_name
-        @statistics_decorator =
+        @statistics_decorator = Karafka::Core::Monitoring::StatisticsDecorator.new
       end

       # Emits decorated statistics to the monitor
data/lib/karafka/instrumentation/logger.rb
CHANGED
@@ -3,15 +3,15 @@
 module Karafka
   module Instrumentation
     # Default logger for Event Delegator
-    # @note It uses
+    # @note It uses Logger features - providing basic logging
     class Logger < ::Logger
       # Map containing information about log level for given environment
       ENV_MAP = {
-        'production' =>
-        'test' =>
-        'development' =>
-        'debug' =>
-        'default' =>
+        'production' => Logger::ERROR,
+        'test' => Logger::ERROR,
+        'development' => Logger::INFO,
+        'debug' => Logger::DEBUG,
+        'default' => Logger::INFO
       }.freeze

       private_constant :ENV_MAP
data/lib/karafka/instrumentation/monitor.rb
CHANGED
@@ -5,14 +5,14 @@ module Karafka
     # Karafka instrumentation monitor that we use to publish events
     # By default uses our internal notifications bus but can be used with
     # `ActiveSupport::Notifications` as well
-    class Monitor <
+    class Monitor < Karafka::Core::Monitoring::Monitor
       attr_reader :notifications_bus

       # @param notifications_bus [Object] either our internal notifications bus or
       #   `ActiveSupport::Notifications`
       # @param namespace [String, nil] namespace for events or nil if no namespace
       def initialize(
-        notifications_bus =
+        notifications_bus = Karafka::Instrumentation::Notifications.new,
         namespace = nil
       )
         super
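Since `Monitor` still wraps the notifications bus, event subscription is unchanged. For context, a standard subscription (the logging body is illustrative):

```ruby
Karafka.monitor.subscribe('error.occurred') do |event|
  # event[:type] carries the error scope, e.g. 'consumer.consume.error'
  Karafka.logger.error("#{event[:type]}: #{event[:error]}")
end
```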
data/lib/karafka/instrumentation/vendors/appsignal/metrics_listener.rb
CHANGED
@@ -44,11 +44,15 @@ module Karafka

       # Types of errors originating from user code in the consumer flow
       USER_CONSUMER_ERROR_TYPES = %w[
+        consumer.initialized.error
+        consumer.wrap.error
         consumer.consume.error
         consumer.revoked.error
+        consumer.idle.error
         consumer.shutdown.error
         consumer.tick.error
         consumer.eofed.error
+        consumer.after_consume.error
       ].freeze

       private_constant :USER_CONSUMER_ERROR_TYPES
data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb
CHANGED
@@ -9,7 +9,7 @@ module Karafka
       # A karafka's logger listener for Datadog
       # It depends on the 'ddtrace' gem
       class LoggerListener
-        include
+        include Karafka::Core::Configurable
         extend Forwardable

         def_delegators :config, :client, :service_name
@@ -90,42 +90,58 @@ module Karafka
          client.active_span&.set_error(error)

          case event[:type]
+          when 'consumer.initialized.error'
+            error "Consumer initialized error: #{error}"
+          when 'consumer.wrap.error'
+            error "Consumer wrap failed due to an error: #{error}"
          when 'consumer.consume.error'
            error "Consumer consuming error: #{error}"
          when 'consumer.revoked.error'
            error "Consumer on revoked failed due to an error: #{error}"
-          when 'consumer.
-            error "Consumer
-          when 'consumer.before_consume.error'
-            error "Consumer before consume failed due to an error: #{error}"
-          when 'consumer.after_consume.error'
-            error "Consumer after consume failed due to an error: #{error}"
+          when 'consumer.idle.error'
+            error "Consumer idle failed due to an error: #{error}"
          when 'consumer.shutdown.error'
            error "Consumer on shutdown failed due to an error: #{error}"
          when 'consumer.tick.error'
-            error "Consumer tick failed due to an error: #{error}"
+            error "Consumer on tick failed due to an error: #{error}"
          when 'consumer.eofed.error'
-            error "Consumer eofed failed due to an error: #{error}"
+            error "Consumer on eofed failed due to an error: #{error}"
+          when 'consumer.after_consume.error'
+            error "Consumer on after_consume failed due to an error: #{error}"
          when 'worker.process.error'
            fatal "Worker processing failed due to an error: #{error}"
          when 'connection.listener.fetch_loop.error'
            error "Listener fetch loop error: #{error}"
+          when 'swarm.supervisor.error'
+            fatal "Swarm supervisor crashed due to an error: #{error}"
          when 'runner.call.error'
            fatal "Runner crashed due to an error: #{error}"
          when 'app.stopping.error'
            error 'Forceful Karafka server stop'
-          when '
-
+          when 'app.forceful_stopping.error'
+            error "Forceful shutdown error occurred: #{error}"
          when 'librdkafka.error'
            error "librdkafka internal error occurred: #{error}"
-
-
+          when 'callbacks.statistics.error'
+            error "callbacks.statistics processing failed due to an error: #{error}"
+          when 'callbacks.error.error'
+            error "callbacks.error processing failed due to an error: #{error}"
+          # Those will only occur when retries in the client fail and when they did not stop
+          # after back-offs
          when 'connection.client.poll.error'
            error "Data polling error occurred: #{error}"
+          when 'connection.client.rebalance_callback.error'
+            error "Rebalance callback error occurred: #{error}"
+          when 'connection.client.unsubscribe.error'
+            error "Client unsubscribe error occurred: #{error}"
+          when 'parallel_segments.reducer.error'
+            error "Parallel segments reducer error occurred: #{error}"
+          when 'parallel_segments.partitioner.error'
+            error "Parallel segments partitioner error occurred: #{error}"
+          when 'virtual_partitions.partitioner.error'
+            error "Virtual partitions partitioner error occurred: #{error}"
          else
-
-            # This should never happen. Please contact the maintainers
-            raise Errors::UnsupportedCaseError, event
+            error "#{event[:type]} error occurred: #{error}"
          end

          pop_tags
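For reference, wiring up this listener follows the pattern from the Karafka Datadog integration docs; the `config.client` assignment relies on the `Karafka::Core::Configurable` include added above and assumes `ddtrace` is loaded:

```ruby
listener = Karafka::Instrumentation::Vendors::Datadog::LoggerListener.new do |config|
  config.client = Datadog::Tracing
end

Karafka.monitor.subscribe(listener)
```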
data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb
CHANGED
@@ -11,7 +11,7 @@ module Karafka
       #
       # @note You need to setup the `dogstatsd-ruby` client and assign it
       class MetricsListener
-        include
+        include Karafka::Core::Configurable
         extend Forwardable

         def_delegators(
@@ -212,7 +212,7 @@ module Karafka
           )
         else
           raise(
-
+            ArgumentError,
             'distribution_mode setting value must be either :histogram or :distribution'
           )
         end
data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb
CHANGED
@@ -10,7 +10,7 @@ module Karafka
       # Base Kubernetes Listener providing basic HTTP server capabilities to respond with health
       # statuses
       class BaseListener
-        include
+        include Karafka::Core::Helpers::Time

         # All good with Karafka
         OK_CODE = '200 OK'
data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb
CHANGED
@@ -26,19 +26,6 @@ module Karafka
       #
       # @note Please use `Kubernetes::SwarmLivenessListener` when operating in the swarm mode
       class LivenessListener < BaseListener
-        # When any of those occurs, it means something went wrong in a way that cannot be
-        # recovered. In such cases we should report that the consumer process is not healthy.
-        # - `fenced` - This instance has been fenced by a newer instance and will not do any
-        #   processing at all never. Fencing most of the time means the instance.group.id has
-        #   been reused without properly terminating the previous consumer process first
-        # - `fatal` - any fatal error that halts the processing forever
-        UNRECOVERABLE_RDKAFKA_ERRORS = [
-          :fenced, # -144
-          :fatal # -150
-        ].freeze
-
-        private_constant :UNRECOVERABLE_RDKAFKA_ERRORS
-
         # @param hostname [String, nil] hostname or nil to bind on all
         # @param port [Integer] TCP port on which we want to run our HTTP status server
         # @param consuming_ttl [Integer] time in ms after which we consider consumption hanging.
@@ -113,8 +100,9 @@ module Karafka

           # We are only interested in the rdkafka errors
           return unless error.is_a?(Rdkafka::RdkafkaError)
-          #
-
+          # When any of those occurs, it means something went wrong in a way that cannot be
+          # recovered. In such cases we should report that the consumer process is not healthy.
+          return unless error.fatal?

           @unrecoverable = error.code
         end
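With the constant removed, health reporting now keys off librdkafka's own fatal flag. Subscribing the listener is unchanged (hostname and port below are illustrative):

```ruby
listener = Karafka::Instrumentation::Vendors::Kubernetes::LivenessListener.new(
  hostname: '0.0.0.0',
  port: 3000
)

Karafka.monitor.subscribe(listener)
```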
data/lib/karafka/licenser.rb
CHANGED
@@ -46,7 +46,7 @@ module Karafka

       # We gsub and strip in case someone copy-pasted it as a multi line string
       formatted_token = license_config.token.strip.delete("\n").delete(' ')
-      decoded_token =
+      decoded_token = formatted_token.unpack1('m') # decode from base64

       begin
         data = public_key.public_decrypt(decoded_token)
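`String#unpack1('m')` is the stdlib way to Base64-decode without the `base64` gem, which likely motivates this change, since that gem is no longer a default gem in recent Rubies:

```ruby
['karafka'].pack('m0')        # => "a2FyYWZrYQ=="  (encode, no trailing newline)
'a2FyYWZrYQ=='.unpack1('m')   # => "karafka"       (decode)
```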
data/lib/karafka/messages/builders/batch_metadata.rb
CHANGED
@@ -46,7 +46,7 @@ module Karafka
       # @note Message can be from the future in case consumer machine and Kafka cluster drift
       #   apart and the machine is behind the cluster.
       def local_created_at(last_message)
-        now =
+        now = Time.now

         return now unless last_message

data/lib/karafka/messages/messages.rb
CHANGED
@@ -64,6 +64,38 @@ module Karafka
         @messages_array.dup
       end

+      # Returns the underlying messages array directly without duplication.
+      #
+      # This method exists to provide Karafka internals with direct access to the messages array,
+      # bypassing any monkey patches that external libraries may apply to enumerable methods.
+      #
+      # ## Why this method exists
+      #
+      # External instrumentation libraries like DataDog's `dd-trace-rb` patch the `#each` method
+      # on this class to create tracing spans around message iteration. While this is desirable
+      # for user code (to trace message processing), it causes problems when Karafka's internal
+      # infrastructure iterates over messages for housekeeping tasks (offset tracking,
+      # deserialization, etc.) - creating empty/unwanted spans.
+      #
+      # By using `raw.map` or `raw.each` instead of `map` or `each` directly, internal code
+      # bypasses the patched `#each` method since it operates on the raw Array, not this class.
+      #
+      # ## Usage
+      #
+      # This method should ONLY be used by Karafka internals. User-facing code (consumers,
+      # ActiveJob processors, etc.) should use regular `#each`/`#map` so that instrumentation
+      # libraries can properly trace message processing.
+      #
+      # @return [Array<Karafka::Messages::Message>] the underlying messages array (not a copy)
+      #
+      # @note This returns the actual internal array, not a copy. Do not modify it.
+      # @see https://github.com/karafka/karafka/issues/2939
+      #
+      # @private
+      def raw
+        @messages_array
+      end
+
       alias count size
     end
   end
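A minimal sketch of the internal pattern this enables, where `messages` stands for a `Karafka::Messages::Messages` instance inside Karafka's own plumbing:

```ruby
# Housekeeping iterates the plain Array, so a patched Messages#each
# (e.g. from dd-trace-rb) never fires and no empty spans are created
offsets = messages.raw.map(&:offset)
```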