karafka 2.5.2 → 2.5.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml +18 -0
- data/.yard-lint.yml +174 -0
- data/CHANGELOG.md +6 -0
- data/Gemfile +1 -0
- data/Gemfile.lock +24 -19
- data/examples/payloads/json/sample_set_03/event_type_1.json +1 -1
- data/examples/payloads/json/sample_set_03/event_type_2.json +1 -1
- data/examples/payloads/json/sample_set_03/event_type_3.json +1 -1
- data/karafka.gemspec +2 -2
- data/lib/active_job/queue_adapters/karafka_adapter.rb +2 -2
- data/lib/karafka/active_job/consumer.rb +2 -2
- data/lib/karafka/active_job/current_attributes.rb +2 -2
- data/lib/karafka/active_job/deserializer.rb +1 -1
- data/lib/karafka/active_job/dispatcher.rb +2 -2
- data/lib/karafka/admin/configs/resource.rb +7 -1
- data/lib/karafka/admin/consumer_groups.rb +6 -8
- data/lib/karafka/admin/topics.rb +5 -4
- data/lib/karafka/admin.rb +10 -10
- data/lib/karafka/app.rb +3 -3
- data/lib/karafka/base_consumer.rb +1 -1
- data/lib/karafka/cli/base.rb +1 -1
- data/lib/karafka/cli/console.rb +1 -1
- data/lib/karafka/cli/contracts/server.rb +1 -1
- data/lib/karafka/cli/help.rb +1 -1
- data/lib/karafka/cli/install.rb +2 -1
- data/lib/karafka/cli/server.rb +1 -1
- data/lib/karafka/cli/swarm.rb +1 -1
- data/lib/karafka/connection/client.rb +19 -18
- data/lib/karafka/connection/manager.rb +1 -0
- data/lib/karafka/connection/proxy.rb +1 -1
- data/lib/karafka/connection/rebalance_manager.rb +1 -1
- data/lib/karafka/connection/status.rb +1 -0
- data/lib/karafka/constraints.rb +1 -1
- data/lib/karafka/contracts/base.rb +1 -1
- data/lib/karafka/deserializers/payload.rb +1 -1
- data/lib/karafka/helpers/async.rb +1 -1
- data/lib/karafka/helpers/config_importer.rb +3 -3
- data/lib/karafka/helpers/multi_delegator.rb +3 -0
- data/lib/karafka/instrumentation/assignments_tracker.rb +2 -1
- data/lib/karafka/instrumentation/callbacks/error.rb +2 -2
- data/lib/karafka/instrumentation/callbacks/statistics.rb +3 -3
- data/lib/karafka/instrumentation/logger.rb +6 -6
- data/lib/karafka/instrumentation/monitor.rb +2 -2
- data/lib/karafka/instrumentation/vendors/appsignal/base.rb +1 -1
- data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +1 -1
- data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +2 -2
- data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb +1 -1
- data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +3 -15
- data/lib/karafka/messages/builders/batch_metadata.rb +1 -1
- data/lib/karafka/pro/active_job/consumer.rb +2 -2
- data/lib/karafka/pro/active_job/dispatcher.rb +3 -3
- data/lib/karafka/pro/cleaner.rb +3 -3
- data/lib/karafka/pro/cli/contracts/server.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments/base.rb +4 -3
- data/lib/karafka/pro/cli/parallel_segments/collapse.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments/distribute.rb +1 -1
- data/lib/karafka/pro/cli/parallel_segments.rb +1 -1
- data/lib/karafka/pro/connection/manager.rb +1 -2
- data/lib/karafka/pro/connection/multiplexing/listener.rb +1 -0
- data/lib/karafka/pro/contracts/base.rb +1 -1
- data/lib/karafka/pro/encryption/cipher.rb +3 -2
- data/lib/karafka/pro/encryption/contracts/config.rb +1 -1
- data/lib/karafka/pro/encryption/messages/parser.rb +1 -1
- data/lib/karafka/pro/encryption/setup/config.rb +1 -1
- data/lib/karafka/pro/iterator/tpl_builder.rb +1 -1
- data/lib/karafka/pro/iterator.rb +1 -1
- data/lib/karafka/pro/loader.rb +1 -1
- data/lib/karafka/pro/processing/coordinator.rb +1 -1
- data/lib/karafka/pro/processing/filters/base.rb +1 -0
- data/lib/karafka/pro/processing/filters/delayer.rb +1 -1
- data/lib/karafka/pro/processing/filters/expirer.rb +1 -1
- data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +1 -1
- data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs/periodic.rb +1 -1
- data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +1 -1
- data/lib/karafka/pro/processing/jobs_builder.rb +1 -1
- data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +1 -0
- data/lib/karafka/pro/processing/partitioner.rb +1 -1
- data/lib/karafka/pro/processing/strategies/base.rb +1 -1
- data/lib/karafka/pro/processing/strategies/default.rb +2 -2
- data/lib/karafka/pro/processing/strategy_selector.rb +1 -0
- data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +4 -2
- data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +4 -2
- data/lib/karafka/pro/recurring_tasks/consumer.rb +3 -2
- data/lib/karafka/pro/recurring_tasks/contracts/config.rb +2 -2
- data/lib/karafka/pro/recurring_tasks/contracts/task.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/deserializer.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/dispatcher.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/executor.rb +2 -1
- data/lib/karafka/pro/recurring_tasks/schedule.rb +5 -2
- data/lib/karafka/pro/recurring_tasks/serializer.rb +6 -5
- data/lib/karafka/pro/recurring_tasks/setup/config.rb +2 -2
- data/lib/karafka/pro/recurring_tasks/task.rb +1 -1
- data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +3 -0
- data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +1 -1
- data/lib/karafka/pro/routing/features/multiplexing.rb +5 -5
- data/lib/karafka/pro/routing/features/offset_metadata.rb +4 -4
- data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +1 -1
- data/lib/karafka/pro/routing/features/patterns/patterns.rb +1 -1
- data/lib/karafka/pro/routing/features/periodic_job/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +1 -1
- data/lib/karafka/pro/routing/features/swarm.rb +1 -1
- data/lib/karafka/pro/routing/features/throttling/topic.rb +3 -1
- data/lib/karafka/pro/scheduled_messages/consumer.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/contracts/config.rb +2 -2
- data/lib/karafka/pro/scheduled_messages/contracts/message.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +3 -2
- data/lib/karafka/pro/scheduled_messages/day.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/max_epoch.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/proxy.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/serializer.rb +3 -3
- data/lib/karafka/pro/scheduled_messages/setup/config.rb +2 -2
- data/lib/karafka/pro/scheduled_messages/state.rb +1 -0
- data/lib/karafka/pro/scheduled_messages/tracker.rb +1 -0
- data/lib/karafka/process.rb +4 -4
- data/lib/karafka/processing/executor.rb +1 -1
- data/lib/karafka/processing/inline_insights/tracker.rb +1 -0
- data/lib/karafka/processing/jobs_queue.rb +1 -1
- data/lib/karafka/processing/result.rb +1 -0
- data/lib/karafka/processing/strategy_selector.rb +1 -0
- data/lib/karafka/routing/activity_manager.rb +1 -0
- data/lib/karafka/routing/builder.rb +3 -1
- data/lib/karafka/routing/contracts/consumer_group.rb +3 -2
- data/lib/karafka/routing/contracts/topic.rb +5 -2
- data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
- data/lib/karafka/routing/features/declaratives/topic.rb +5 -2
- data/lib/karafka/routing/features/deserializers/topic.rb +3 -3
- data/lib/karafka/routing/features/inline_insights.rb +5 -5
- data/lib/karafka/routing/router.rb +1 -1
- data/lib/karafka/routing/subscription_group.rb +1 -1
- data/lib/karafka/routing/subscription_groups_builder.rb +1 -0
- data/lib/karafka/routing/topic.rb +3 -3
- data/lib/karafka/server.rb +1 -1
- data/lib/karafka/setup/attributes_map.rb +4 -2
- data/lib/karafka/setup/config.rb +21 -10
- data/lib/karafka/setup/config_proxy.rb +209 -0
- data/lib/karafka/setup/contracts/config.rb +1 -1
- data/lib/karafka/swarm/liveness_listener.rb +1 -0
- data/lib/karafka/swarm/manager.rb +7 -6
- data/lib/karafka/swarm/node.rb +1 -1
- data/lib/karafka/swarm/supervisor.rb +1 -0
- data/lib/karafka/time_trackers/base.rb +1 -1
- data/lib/karafka/version.rb +1 -1
- data/lib/karafka.rb +2 -2
- metadata +7 -5
data/lib/karafka/cli/contracts/server.rb
CHANGED

@@ -5,7 +5,7 @@ module Karafka
     # CLI related contracts
     module Contracts
       # Contract for validating correctness of the server cli command options.
-      class Server <
+      class Server < Karafka::Contracts::Base
        configure do |config|
          config.error_messages = YAML.safe_load_file(
            File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
data/lib/karafka/cli/help.rb
CHANGED

@@ -10,7 +10,7 @@ module Karafka
       # Print available commands
       def call
         # Find the longest command for alignment purposes
-        max_command_length = self.class.commands.map
+        max_command_length = self.class.commands.map { |command| command.name.size }.max

         puts 'Karafka commands:'

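The computed maximum is typically used to pad each command name so the descriptions line up. A tiny standalone sketch of that alignment idea (the command list here is illustrative, not the gem's real registry):

    commands = {
      'server'  => 'Start the Karafka server',
      'topics'  => 'Manage Kafka topics',
      'install' => 'Install the example app files'
    }

    max_command_length = commands.keys.map(&:size).max

    commands.each do |name, description|
      # ljust pads each name to the longest one so descriptions align
      puts "  karafka #{name.ljust(max_command_length)} # #{description}"
    end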
data/lib/karafka/cli/install.rb
CHANGED

@@ -24,6 +24,7 @@ module Karafka
         'example_consumer.rb.erb' => 'app/consumers/example_consumer.rb'
       }.freeze

+      # Initializes the install command
       def initialize
         super

@@ -52,7 +53,7 @@ module Karafka
         FileUtils.mkdir_p File.dirname(pathed_target)

         template = File.read(Karafka.core_root.join("templates/#{source}"))
-        render =
+        render = ERB.new(template, trim_mode: '-').result(binding)

         File.write(pathed_target, render)

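The `trim_mode: '-'` flag is what lets templates use `<%-` and `-%>` markers to suppress surrounding newlines, so generated files do not end up with blank lines around every ERB control tag. A minimal standalone sketch (the template string is illustrative):

    require 'erb'

    template = <<~TPL
      topics:
      <%- %w[orders payments].each do |name| -%>
        - <%= name %>
      <%- end -%>
    TPL

    # With trim_mode: '-' the loop emits no extra blank lines around each tag
    puts ERB.new(template, trim_mode: '-').result(binding)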
data/lib/karafka/cli/server.rb
CHANGED

@@ -10,7 +10,7 @@ module Karafka
       )

       # Types of things we can include / exclude from the routing via the CLI options
-      SUPPORTED_TYPES =
+      SUPPORTED_TYPES = Karafka::Routing::ActivityManager::SUPPORTED_TYPES

       private_constant :SUPPORTED_TYPES

data/lib/karafka/cli/swarm.rb
CHANGED

data/lib/karafka/connection/client.rb
CHANGED

@@ -8,7 +8,7 @@ module Karafka
     # It is threadsafe and provides some security measures so we won't end up operating on a
     # closed consumer instance as it causes Ruby VM process to crash.
     class Client
-      include
+      include Karafka::Core::Helpers::Time
       include Helpers::ConfigImporter.new(
         logger: %i[logger],
         tick_interval: %i[internal tick_interval],

@@ -37,7 +37,7 @@ module Karafka
       COOP_UNSUBSCRIBE_FACTOR = 0.5

       # Errors upon which we early report that something is off without retrying prior to the
-      # report
+      # report. Aside from those we ALWAYS early report on any fatal error.
       EARLY_REPORT_ERRORS = [
         :inconsistent_group_protocol, # 23
         :max_poll_exceeded, # -147

@@ -48,10 +48,7 @@ module Karafka
         :cluster_authorization_failed, # 31
         :illegal_generation,
         # this will not recover as fencing is permanent
-        :fenced, # -144
         :auto_offset_reset, # -140
-        # This can happen for many reasons, including issues with static membership being fenced
-        :fatal, # -150,
         # This can happen with new rebalance protocol and same group.instance.id in use
         :unreleased_instance_id # 111
       ].freeze
@@ -489,7 +486,7 @@ module Karafka
         # If the seek message offset is in a time format, we need to find the closest "real"
         # offset matching before we seek
         if message.offset.is_a?(Time)
-          tpl =
+          tpl = Rdkafka::Consumer::TopicPartitionList.new
           tpl.add_topic_and_partitions_with_offsets(
             message.topic,
             message.partition => message.offset
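Seeking by `Time` relies on rdkafka resolving each timestamp to the first offset at or after it. A hedged sketch of the underlying rdkafka-ruby calls (topic, partition, and the `consumer` variable are illustrative; verify the API against the rdkafka version you run):

    # Map partition 0 of 'events' to a point in time rather than an offset
    tpl = Rdkafka::Consumer::TopicPartitionList.new
    tpl.add_topic_and_partitions_with_offsets('events', 0 => Time.now - 3600)

    # Resolve each Time to the earliest offset with a timestamp >= that Time;
    # the resolved list can then be used for the actual seek
    resolved = consumer.offsets_for_times(tpl)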
@@ -546,9 +543,9 @@ module Karafka
         sg_id = @subscription_group.id

         # Remove callbacks runners that were registered
-
-
-
+        Karafka::Core::Instrumentation.statistics_callbacks.delete(sg_id)
+        Karafka::Core::Instrumentation.error_callbacks.delete(sg_id)
+        Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.delete(sg_id)

         kafka.close
         @kafka = nil
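These `delete` calls mirror the `add` calls made in `build_consumer` further down: every runner is keyed by the subscription group id, so closing a client unregisters exactly what it registered. A sketch of that pairing, assuming the karafka-core callback managers behave as used in this diff (the lambda is illustrative):

    sg_id = 'subscription_group_1'

    # Register a statistics runner under the subscription group id
    Karafka::Core::Instrumentation.statistics_callbacks.add(
      sg_id,
      ->(statistics) { puts "rdkafka stats for: #{statistics['name']}" }
    )

    # On shutdown, remove it under the same key so a rebuilt client
    # can register a fresh runner without leaking the old one
    Karafka::Core::Instrumentation.statistics_callbacks.delete(sg_id)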
@@ -564,7 +561,7 @@ module Karafka
       # ignored. We do however want to instrument on it
       def unsubscribe
         kafka.unsubscribe
-      rescue
+      rescue Rdkafka::RdkafkaError => e
         Karafka.monitor.instrument(
           'error.occurred',
           caller: self,
@@ -593,8 +590,8 @@ module Karafka
       # established. It may be `-1` in case we lost the assignment or we did not yet fetch data
       # for this topic partition
       def topic_partition_position(topic, partition)
-        rd_partition =
-        tpl =
+        rd_partition = Rdkafka::Consumer::Partition.new(partition, nil, 0)
+        tpl = Rdkafka::Consumer::TopicPartitionList.new(topic => [rd_partition])

         kafka.position(tpl).to_h.fetch(topic).first.offset || -1
       end
@@ -645,7 +642,7 @@ module Karafka
         # If we did not exceed total time allocated, it means that we finished because of the
         # tick interval time limitations and not because time run out without any data
         time_poll.exceeded? ? nil : :tick_time
-      rescue
+      rescue Rdkafka::RdkafkaError => e
         early_report = false

         retryable = time_poll.attempts <= MAX_POLL_RETRIES && time_poll.retryable?
@@ -655,6 +652,7 @@ module Karafka
         # Those are mainly network issues and exceeding the max poll interval
         # We want to report early on max poll interval exceeding because it may mean that the
         # underlying processing is taking too much time and it is not LRJ
+
         case e.code
         when *EARLY_REPORT_ERRORS
           early_report = true
@@ -678,6 +676,9 @@ module Karafka
           return e.details
         end

+        # Any fatal error should always cause early report
+        early_report = true if e.fatal?
+
         if early_report || !retryable
           Karafka.monitor.instrument(
             'error.occurred',
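This `e.fatal?` check is the heart of the change: instead of enumerating fatal-class codes such as `:fenced` and `:fatal` in `EARLY_REPORT_ERRORS` (removed above), the client now asks librdkafka's own fatal flag. A condensed sketch of the resulting decision logic (the method name and shape are mine, not the gem's):

    def early_report?(error, retryable)
      # Known-bad codes short-circuit the retry backoff...
      return true if EARLY_REPORT_ERRORS.include?(error.code)

      # ...and anything librdkafka flags as fatal (fenced instance,
      # permanent client failure, etc.) is reported immediately as well
      return true if error.fatal?

      !retryable
    end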
@@ -704,7 +705,7 @@ module Karafka
       # Builds a new rdkafka consumer instance based on the subscription group configuration
       # @return [Rdkafka::Consumer]
       def build_consumer
-
+        Rdkafka::Config.logger = logger

         # We need to refresh the setup of this subscription group in case we started running in a
         # swarm. The initial configuration for validation comes from the parent node, but it needs

@@ -712,7 +713,7 @@ module Karafka
       #   group instance id.
         @subscription_group.refresh

-        config =
+        config = Rdkafka::Config.new(@subscription_group.kafka)
         config.consumer_rebalance_listener = @rebalance_callback
         # We want to manage the events queue independently from the messages queue. Thanks to that
         # we can ensure, that we get statistics and errors often enough even when not polling

@@ -724,7 +725,7 @@ module Karafka
         @name = consumer.name

         # Register statistics runner for this particular type of callbacks
-
+        Karafka::Core::Instrumentation.statistics_callbacks.add(
           @subscription_group.id,
           Instrumentation::Callbacks::Statistics.new(
             @subscription_group.id,

@@ -734,7 +735,7 @@ module Karafka
         )

         # Register error tracking callback
-
+        Karafka::Core::Instrumentation.error_callbacks.add(
           @subscription_group.id,
           Instrumentation::Callbacks::Error.new(
             @subscription_group.id,

@@ -743,7 +744,7 @@ module Karafka
           )
         )

-
+        Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.add(
           @subscription_group.id,
           Instrumentation::Callbacks::OauthbearerTokenRefresh.new(
             consumer
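Stripped of Karafka's callback wiring, `build_consumer` follows standard rdkafka-ruby usage. A bare-bones sketch with illustrative settings:

    require 'rdkafka'

    config = Rdkafka::Config.new(
      'bootstrap.servers' => 'localhost:9092', # illustrative settings
      'group.id' => 'example-group'
    )

    # Karafka additionally assigns consumer_rebalance_listener and registers
    # statistics/error/oauthbearer callbacks at this point
    consumer = config.consumer
    consumer.subscribe('events')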
data/lib/karafka/connection/manager.rb
CHANGED

@@ -8,6 +8,7 @@ module Karafka
     # In the OSS version it starts listeners as they are without any connection management or
     # resources utilization supervision and shuts them down or quiets when time has come
     class Manager
+      # Initializes the connection manager
       def initialize
         @once_executions = Set.new
       end
data/lib/karafka/connection/proxy.rb
CHANGED

@@ -155,7 +155,7 @@ module Karafka

       # @param tpl [Rdkafka::Consumer::TopicPartitionList] list of topics and partitions for which
       #   we want to get the lag on the defined CG
-      # @return [Hash
+      # @return [Hash{String => Hash}] hash with topics and their partitions lags
       def lag(tpl)
         l_config = proxy_config.committed

data/lib/karafka/constraints.rb
CHANGED

data/lib/karafka/contracts/base.rb
CHANGED

@@ -3,7 +3,7 @@
 module Karafka
   module Contracts
     # Base contract for all Karafka contracts
-    class Base <
+    class Base < Karafka::Core::Contractable::Contract
       # @param data [Hash] data for validation
       # @param scope [Array<String>] nested scope if in use
       # @return [Boolean] true if all good
data/lib/karafka/deserializers/payload.rb
CHANGED

@@ -9,7 +9,7 @@ module Karafka
       # @return [Hash] hash with deserialized JSON data
       def call(message)
         # nil payload can be present for example for tombstone messages
-        message.raw_payload.nil? ? nil :
+        message.raw_payload.nil? ? nil : JSON.parse(message.raw_payload)
       end
     end
   end
 end
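The `nil` guard matters for tombstones: log-compaction delete markers carry a `nil` payload that would crash `JSON.parse`. A self-contained sketch (the Struct stands in for Karafka's message object):

    require 'json'

    StubMessage = Struct.new(:raw_payload) # stand-in for Karafka::Messages::Message

    def deserialize(message)
      # nil payload can be present for example for tombstone messages
      message.raw_payload.nil? ? nil : JSON.parse(message.raw_payload)
    end

    deserialize(StubMessage.new('{"id":1}')) # => {"id"=>1}
    deserialize(StubMessage.new(nil))        # => nil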
data/lib/karafka/helpers/config_importer.rb
CHANGED

@@ -5,7 +5,7 @@ module Karafka
     # Module allowing for configuration injections. By default injects whole app config
     # Allows for granular config injection
     class ConfigImporter < Module
-      # @param attributes [Hash
+      # @param attributes [Hash{Symbol => Array<Symbol>}] map defining what we want to inject.
       #   The key is the name under which attribute will be visible and the value is the full
       #   path to the attribute
       def initialize(attributes = { config: %i[itself] })

@@ -20,7 +20,7 @@ module Karafka
       @attributes.each do |name, path|
         model.class_eval <<~RUBY, __FILE__, __LINE__ + 1
           def #{name}
-            @#{name} ||=
+            @#{name} ||= Karafka::App.config.#{path.join('.')}
           end
         RUBY
       end

@@ -33,7 +33,7 @@ module Karafka
       @attributes.each do |name, path|
         model.class_eval <<~RUBY, __FILE__, __LINE__ + 1
           def self.#{name}
-            @#{name} ||=
+            @#{name} ||= Karafka::App.config.#{path.join('.')}
           end
         RUBY
       end
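Because `ConfigImporter` subclasses `Module`, `ConfigImporter.new(...)` returns an anonymous module whose hooks define memoized readers that walk the given path through `Karafka::App.config`. The connection client earlier in this diff uses it exactly like this (the listener class itself is illustrative):

    class ExampleListener
      # Defines #logger and #tick_interval readers backed by
      # Karafka::App.config.logger and Karafka::App.config.internal.tick_interval
      include Karafka::Helpers::ConfigImporter.new(
        logger: %i[logger],
        tick_interval: %i[internal tick_interval]
      )

      def report
        logger.info("ticking every #{tick_interval} ms")
      end
    end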
data/lib/karafka/instrumentation/assignments_tracker.rb
CHANGED

@@ -14,6 +14,7 @@ module Karafka
     class AssignmentsTracker
       include Singleton

+      # Initializes the assignments tracker with empty assignments
       def initialize
         @mutex = Mutex.new
         @assignments = Hash.new { |hash, key| hash[key] = [] }

@@ -21,7 +22,7 @@ module Karafka

       # Returns all the active/current assignments of this given process
       #
-      # @return [Hash
+      # @return [Hash{Karafka::Routing::Topic => Array<Integer>}]
       #
       # @note Keep in mind, that those assignments can change any time, especially when working
       #   with multiple consumer groups or subscription groups.
data/lib/karafka/instrumentation/callbacks/error.rb
CHANGED

@@ -10,8 +10,8 @@ module Karafka
         monitor: %i[monitor]
       )

-      # @param subscription_group_id [String]
-      # @param consumer_group_id [String]
+      # @param subscription_group_id [String]
+      # @param consumer_group_id [String]
       # @param client_name [String] rdkafka client name
       def initialize(subscription_group_id, consumer_group_id, client_name)
         @subscription_group_id = subscription_group_id
data/lib/karafka/instrumentation/callbacks/statistics.rb
CHANGED

@@ -11,14 +11,14 @@ module Karafka
         monitor: %i[monitor]
       )

-      # @param subscription_group_id [String]
-      # @param consumer_group_id [String]
+      # @param subscription_group_id [String]
+      # @param consumer_group_id [String]
       # @param client_name [String] rdkafka client name
       def initialize(subscription_group_id, consumer_group_id, client_name)
         @subscription_group_id = subscription_group_id
         @consumer_group_id = consumer_group_id
         @client_name = client_name
-        @statistics_decorator =
+        @statistics_decorator = Karafka::Core::Monitoring::StatisticsDecorator.new
       end

       # Emits decorated statistics to the monitor
data/lib/karafka/instrumentation/logger.rb
CHANGED

@@ -3,15 +3,15 @@
 module Karafka
   module Instrumentation
     # Default logger for Event Delegator
-    # @note It uses
+    # @note It uses Logger features - providing basic logging
     class Logger < ::Logger
       # Map containing information about log level for given environment
       ENV_MAP = {
-        'production' =>
-        'test' =>
-        'development' =>
-        'debug' =>
-        'default' =>
+        'production' => Logger::ERROR,
+        'test' => Logger::ERROR,
+        'development' => Logger::INFO,
+        'debug' => Logger::DEBUG,
+        'default' => Logger::INFO
       }.freeze

       private_constant :ENV_MAP
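The map feeds level selection: the current environment name is looked up with a fallback to the `'default'` entry, so unknown environments get `INFO`. A standalone sketch of that lookup (the `resolve_level` helper is mine, not the gem's):

    require 'logger'

    ENV_MAP = {
      'production'  => Logger::ERROR,
      'test'        => Logger::ERROR,
      'development' => Logger::INFO,
      'debug'       => Logger::DEBUG,
      'default'     => Logger::INFO
    }.freeze

    # Unknown environment names fall back to the 'default' entry
    def resolve_level(env)
      ENV_MAP.fetch(env, ENV_MAP.fetch('default'))
    end

    resolve_level('production') # => 3 (Logger::ERROR)
    resolve_level('staging')    # => 1 (Logger::INFO)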
data/lib/karafka/instrumentation/monitor.rb
CHANGED

@@ -5,14 +5,14 @@ module Karafka
     # Karafka instrumentation monitor that we use to publish events
     # By default uses our internal notifications bus but can be used with
     # `ActiveSupport::Notifications` as well
-    class Monitor <
+    class Monitor < Karafka::Core::Monitoring::Monitor
       attr_reader :notifications_bus

       # @param notifications_bus [Object] either our internal notifications bus or
       #   `ActiveSupport::Notifications`
       # @param namespace [String, nil] namespace for events or nil if no namespace
       def initialize(
-        notifications_bus =
+        notifications_bus = Karafka::Instrumentation::Notifications.new,
         namespace = nil
       )
         super
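This monitor is what every `Karafka.monitor.instrument('error.occurred', ...)` call in this diff publishes through, and subscribing to those events is the standard integration point for logging or metrics:

    # Hook into the same event the connection client publishes above
    Karafka.monitor.subscribe('error.occurred') do |event|
      error = event[:error]
      puts "[#{event[:type]}] #{error.class}: #{error.message}"
    end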
data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb
CHANGED

@@ -9,7 +9,7 @@ module Karafka
       # A karafka's logger listener for Datadog
       # It depends on the 'ddtrace' gem
       class LoggerListener
-        include
+        include Karafka::Core::Configurable
         extend Forwardable

         def_delegators :config, :client, :service_name
data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb
CHANGED

@@ -11,7 +11,7 @@ module Karafka
       #
       # @note You need to setup the `dogstatsd-ruby` client and assign it
       class MetricsListener
-        include
+        include Karafka::Core::Configurable
         extend Forwardable

         def_delegators(

@@ -212,7 +212,7 @@ module Karafka
           )
         else
           raise(
-
+            ArgumentError,
             'distribution_mode setting value must be either :histogram or :distribution'
           )
         end
data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb
CHANGED

@@ -10,7 +10,7 @@ module Karafka
       # Base Kubernetes Listener providing basic HTTP server capabilities to respond with health
       # statuses
       class BaseListener
-        include
+        include Karafka::Core::Helpers::Time

         # All good with Karafka
         OK_CODE = '200 OK'
data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb
CHANGED

@@ -26,19 +26,6 @@ module Karafka
       #
       # @note Please use `Kubernetes::SwarmLivenessListener` when operating in the swarm mode
       class LivenessListener < BaseListener
-        # When any of those occurs, it means something went wrong in a way that cannot be
-        # recovered. In such cases we should report that the consumer process is not healthy.
-        # - `fenced` - This instance has been fenced by a newer instance and will not do any
-        #   processing at all never. Fencing most of the time means the instance.group.id has
-        #   been reused without properly terminating the previous consumer process first
-        # - `fatal` - any fatal error that halts the processing forever
-        UNRECOVERABLE_RDKAFKA_ERRORS = [
-          :fenced, # -144
-          :fatal # -150
-        ].freeze
-
-        private_constant :UNRECOVERABLE_RDKAFKA_ERRORS
-
         # @param hostname [String, nil] hostname or nil to bind on all
         # @param port [Integer] TCP port on which we want to run our HTTP status server
         # @param consuming_ttl [Integer] time in ms after which we consider consumption hanging.

@@ -113,8 +100,9 @@ module Karafka

         # We are only interested in the rdkafka errors
         return unless error.is_a?(Rdkafka::RdkafkaError)
-        #
-
+        # When any of those occurs, it means something went wrong in a way that cannot be
+        # recovered. In such cases we should report that the consumer process is not healthy.
+        return unless error.fatal?

         @unrecoverable = error.code
       end
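Same simplification as in the connection client: the hardcoded `UNRECOVERABLE_RDKAFKA_ERRORS` list is replaced by librdkafka's own fatal flag. A condensed sketch of the resulting guard (the standalone shape is mine):

    def track_error(error)
      # Only rdkafka errors are relevant to liveness
      return unless error.is_a?(Rdkafka::RdkafkaError)
      # Only errors librdkafka marks as unrecoverable flip the health state
      return unless error.fatal?

      # Subsequent HTTP probes will now report the process as unhealthy
      @unrecoverable = error.code
    end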
data/lib/karafka/messages/builders/batch_metadata.rb
CHANGED

@@ -46,7 +46,7 @@ module Karafka
       # @note Message can be from the future in case consumer machine and Kafka cluster drift
       #   apart and the machine is behind the cluster.
       def local_created_at(last_message)
-        now =
+        now = Time.now

         return now unless last_message

data/lib/karafka/pro/active_job/consumer.rb
CHANGED

@@ -14,7 +14,7 @@ module Karafka
       #
       # It contains slightly better revocation warranties than the regular blocking consumer as
       # it can stop processing batch of jobs in the middle after the revocation.
-      class Consumer <
+      class Consumer < Karafka::ActiveJob::Consumer
         # Runs ActiveJob jobs processing and handles lrj if needed
         def consume
           messages.each(clean: true) do |message|

@@ -25,7 +25,7 @@ module Karafka
             # We cannot early stop when running virtual partitions because the intermediate state
             # would force us not to commit the offsets. This would cause extensive
             # double-processing
-            break if Karafka::App.stopping? && !topic.virtual_partitions?
+            break if ::Karafka::App.stopping? && !topic.virtual_partitions?

             consume_job(message)

data/lib/karafka/pro/active_job/dispatcher.rb
CHANGED

@@ -10,7 +10,7 @@ module Karafka
       # Pro dispatcher that sends the ActiveJob job to a proper topic based on the queue name
       # and that allows to inject additional options into the producer, effectively allowing for a
       # much better and more granular control over the dispatch and consumption process.
-      class Dispatcher <
+      class Dispatcher < Karafka::ActiveJob::Dispatcher
         include Helpers::ConfigImporter.new(
           deserializer: %i[internal active_job deserializer]
         )

@@ -31,7 +31,7 @@ module Karafka
         # Allows for setting a callable producer since at the moment of defining the class,
         # variants may not be available
         #
-        # We do not initialize it with `-> {
+        # We do not initialize it with `-> { Karafka.producer }` so we do not have to call it
         #   each time for the defaults to preserve CPU cycles.
         #
         # We also do **not** cache the execution of this producer lambda because we want to

@@ -122,7 +122,7 @@ module Karafka
         def producer(job)
           dynamic_producer = fetch_option(job, :producer, DEFAULTS)

-          dynamic_producer ? dynamic_producer.call(job) :
+          dynamic_producer ? dynamic_producer.call(job) : Karafka.producer
         end

         # @param job [ActiveJob::Base] job instance
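The `producer` resolution above powers per-job producer selection: if a job class configures a callable under the `:producer` option, it receives the job and returns the producer to use; otherwise dispatch falls back to `Karafka.producer`. A hedged usage sketch (`DEDICATED_PRODUCER` is an assumed constant in your app; `karafka_options` is the Pro ActiveJob options API):

    class CriticalJob < ActiveJob::Base
      queue_as :critical

      # Dispatch this job through a dedicated producer; jobs without
      # this option keep using Karafka.producer
      karafka_options(
        producer: ->(_job) { DEDICATED_PRODUCER }
      )

      def perform(payload)
        # ...
      end
    end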
data/lib/karafka/pro/cleaner.rb
CHANGED

@@ -19,9 +19,9 @@ module Karafka
       class << self
         # @param _config [Karafka::Core::Configurable::Node] root node config
         def pre_setup(_config)
-
-
-
+          Karafka::Messages::Message.prepend(Messages::Message)
+          Karafka::Messages::Metadata.prepend(Messages::Metadata)
+          Karafka::Messages::Messages.prepend(Messages::Messages)
         end

         # @param _config [Karafka::Core::Configurable::Node] root node config
data/lib/karafka/pro/cli/contracts/server.rb
CHANGED

@@ -10,7 +10,7 @@ module Karafka
       module Contracts
         # Contract for validating correctness of the server cli command options.
         # It differs slightly from the OSS one because it is aware of the routing patterns
-        class Server <
+        class Server < Karafka::Cli::Contracts::Server
           configure do |config|
             config.error_messages = YAML.safe_load_file(
               File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
data/lib/karafka/pro/cli/parallel_segments/base.rb
CHANGED

@@ -12,6 +12,7 @@ module Karafka
         include Helpers::Colorize

         # @param options [Hash] cli flags options
+        # @option options [Array<String>] :groups consumer group names to work with
         def initialize(options)
           @options = options
         end

@@ -23,12 +24,12 @@ module Karafka

         # Returns consumer groups for parallel segments with which we should be working
         #
-        # @return [Hash
+        # @return [Hash{String => Array<Karafka::Routing::ConsumerGroup>}] hash with all parallel
         #   consumer groups as values and names of segments origin consumer group as the key.
         def applicable_groups
           requested_groups = options[:groups].dup || []

-          workable_groups =
+          workable_groups = Karafka::App
             .routes
             .select(&:parallel_segments?)
             .group_by(&:segment_origin)

@@ -46,7 +47,7 @@ module Karafka
             applicable_groups[requested_group] = workable_group
           else
             raise(
-
+              Karafka::Errors::ConsumerGroupNotFoundError,
               "Consumer group #{requested_group} was not found"
             )
           end
data/lib/karafka/pro/cli/parallel_segments/collapse.rb
CHANGED

@@ -132,7 +132,7 @@ module Karafka
           return unless inconclusive

           raise(
-
+            Karafka::Errors::CommandValidationError,
             "Parallel segments for #{red(segment_origin)} have #{red('inconclusive')} offsets"
           )
         end
data/lib/karafka/pro/cli/parallel_segments/distribute.rb
CHANGED

@@ -91,7 +91,7 @@ module Karafka
           next unless offset.to_i.positive?

           raise(
-
+            Karafka::Errors::CommandValidationError,
             "Parallel segment #{red(cg_name)} already has offset #{red(offset)} " \
             "set for #{red("#{topic_name}##{partition_id}")}"
           )