karafka 1.0.0 → 1.2.0
- checksums.yaml +5 -5
- data/.ruby-version +1 -1
- data/.travis.yml +3 -1
- data/CHANGELOG.md +90 -3
- data/CONTRIBUTING.md +5 -6
- data/Gemfile +1 -1
- data/Gemfile.lock +59 -64
- data/README.md +28 -57
- data/bin/karafka +13 -1
- data/config/errors.yml +6 -0
- data/karafka.gemspec +10 -9
- data/lib/karafka.rb +19 -10
- data/lib/karafka/app.rb +8 -15
- data/lib/karafka/attributes_map.rb +4 -4
- data/lib/karafka/backends/inline.rb +2 -3
- data/lib/karafka/base_consumer.rb +68 -0
- data/lib/karafka/base_responder.rb +41 -17
- data/lib/karafka/callbacks.rb +30 -0
- data/lib/karafka/callbacks/config.rb +22 -0
- data/lib/karafka/callbacks/dsl.rb +16 -0
- data/lib/karafka/cli/base.rb +2 -0
- data/lib/karafka/cli/flow.rb +1 -1
- data/lib/karafka/cli/info.rb +1 -2
- data/lib/karafka/cli/install.rb +2 -3
- data/lib/karafka/cli/server.rb +9 -12
- data/lib/karafka/connection/client.rb +117 -0
- data/lib/karafka/connection/config_adapter.rb +30 -14
- data/lib/karafka/connection/delegator.rb +46 -0
- data/lib/karafka/connection/listener.rb +22 -20
- data/lib/karafka/consumers/callbacks.rb +54 -0
- data/lib/karafka/consumers/includer.rb +51 -0
- data/lib/karafka/consumers/responders.rb +24 -0
- data/lib/karafka/{controllers → consumers}/single_params.rb +3 -3
- data/lib/karafka/errors.rb +19 -2
- data/lib/karafka/fetcher.rb +30 -28
- data/lib/karafka/helpers/class_matcher.rb +8 -8
- data/lib/karafka/helpers/config_retriever.rb +2 -2
- data/lib/karafka/instrumentation/listener.rb +112 -0
- data/lib/karafka/instrumentation/logger.rb +55 -0
- data/lib/karafka/instrumentation/monitor.rb +64 -0
- data/lib/karafka/loader.rb +0 -1
- data/lib/karafka/params/dsl.rb +156 -0
- data/lib/karafka/params/params_batch.rb +7 -2
- data/lib/karafka/patches/dry_configurable.rb +7 -7
- data/lib/karafka/patches/ruby_kafka.rb +34 -0
- data/lib/karafka/persistence/client.rb +25 -0
- data/lib/karafka/persistence/consumer.rb +38 -0
- data/lib/karafka/persistence/topic.rb +29 -0
- data/lib/karafka/process.rb +6 -5
- data/lib/karafka/responders/builder.rb +15 -14
- data/lib/karafka/responders/topic.rb +8 -1
- data/lib/karafka/routing/builder.rb +2 -2
- data/lib/karafka/routing/consumer_group.rb +1 -1
- data/lib/karafka/routing/consumer_mapper.rb +34 -0
- data/lib/karafka/routing/router.rb +1 -1
- data/lib/karafka/routing/topic.rb +5 -11
- data/lib/karafka/routing/{mapper.rb → topic_mapper.rb} +2 -2
- data/lib/karafka/schemas/config.rb +4 -5
- data/lib/karafka/schemas/consumer_group.rb +45 -24
- data/lib/karafka/schemas/consumer_group_topic.rb +18 -0
- data/lib/karafka/schemas/responder_usage.rb +1 -0
- data/lib/karafka/server.rb +39 -20
- data/lib/karafka/setup/config.rb +74 -51
- data/lib/karafka/setup/configurators/base.rb +6 -12
- data/lib/karafka/setup/configurators/params.rb +25 -0
- data/lib/karafka/setup/configurators/water_drop.rb +15 -14
- data/lib/karafka/setup/dsl.rb +22 -0
- data/lib/karafka/templates/{application_controller.rb.example → application_consumer.rb.example} +2 -3
- data/lib/karafka/templates/karafka.rb.example +18 -5
- data/lib/karafka/version.rb +1 -1
- metadata +87 -63
- data/.github/ISSUE_TEMPLATE.md +0 -2
- data/Rakefile +0 -7
- data/lib/karafka/base_controller.rb +0 -118
- data/lib/karafka/connection/messages_consumer.rb +0 -106
- data/lib/karafka/connection/messages_processor.rb +0 -59
- data/lib/karafka/controllers/includer.rb +0 -51
- data/lib/karafka/controllers/responders.rb +0 -19
- data/lib/karafka/logger.rb +0 -53
- data/lib/karafka/monitor.rb +0 -98
- data/lib/karafka/params/params.rb +0 -101
- data/lib/karafka/persistence.rb +0 -18
- data/lib/karafka/setup/configurators/celluloid.rb +0 -22
data/lib/karafka/callbacks/dsl.rb
ADDED
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Callbacks
+    # App level dsl to define callbacks
+    module Dsl
+      Callbacks::TYPES.each do |callback_type|
+        # Allows us to define a block, that will be executed for a given moment
+        # @param [Block] block that should be executed after the initialization process
+        define_method callback_type do |&block|
+          config.callbacks.send(callback_type).push block
+        end
+      end
+    end
+  end
+end
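For orientation, this DSL is what lets a karafka.rb boot file register app-level callbacks: each callback type becomes a class-level method that pushes a block onto config.callbacks. A minimal sketch, assuming the app-level types in Karafka::Callbacks::TYPES include after_init (the before_fetch_loop type is exercised by the listener changes later in this diff):

    # karafka.rb - illustrative only
    class App < Karafka::App
      setup do |config|
        config.client_id = 'example_app' # hypothetical app configuration
      end

      # Block is pushed onto config.callbacks.after_init and executed once the
      # framework finishes its initialization
      after_init do |config|
        Karafka.logger.info "Booted #{config.client_id}"
      end
    end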
data/lib/karafka/cli/base.rb
CHANGED
data/lib/karafka/cli/flow.rb
CHANGED
@@ -15,7 +15,7 @@ module Karafka
           if any_topics
             puts "#{topic.name} =>"
 
-            topic.responder.topics.
+            topic.responder.topics.each_value do |responder_topic|
               features = []
               features << (responder_topic.required? ? 'always' : 'conditionally')
               features << (responder_topic.multiple_usage? ? 'one or more' : 'exactly once')
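The replaced (truncated) call iterated the responder topics hash, presumably over name/topic pairs; each_value yields only the topic objects, which is all this code needs. In plain Ruby:

    topics = { 'visits' => :visits_topic, 'audits' => :audits_topic }

    topics.each       { |name, topic| } # yields ['visits', :visits_topic], ...
    topics.each_value { |topic| }       # yields :visits_topic, :audits_topic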
data/lib/karafka/cli/info.rb
CHANGED
@@ -15,9 +15,8 @@ module Karafka
           "Karafka framework version: #{Karafka::VERSION}",
           "Application client id: #{config.client_id}",
           "Backend: #{config.backend}",
+          "Batch fetching: #{config.batch_fetching}",
           "Batch consuming: #{config.batch_consuming}",
-          "Batch processing: #{config.batch_processing}",
-          "Number of threads: #{config.concurrency}",
           "Boot file: #{Karafka.boot_file}",
           "Environment: #{Karafka.env}",
           "Kafka seed brokers: #{config.kafka.seed_brokers}"
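With the 1.2 naming, the setting that controls fetching in batches is now batch_fetching, the old batch_processing becomes batch_consuming, and the thread-count line disappears along with Celluloid. A hypothetical `bundle exec karafka info` output under these changes (all values illustrative):

    Karafka framework version: 1.2.0
    Application client id: example_app
    Backend: inline
    Batch fetching: true
    Batch consuming: false
    Boot file: /app/karafka.rb
    Environment: development
    Kafka seed brokers: ["kafka://localhost:9092"]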
data/lib/karafka/cli/install.rb
CHANGED
@@ -9,8 +9,7 @@ module Karafka
 
       # Directories created by default
       INSTALL_DIRS = %w[
-        app/
-        app/controllers
+        app/consumers
         app/responders
         config
         log
@@ -20,7 +19,7 @@ module Karafka
       # Where should we map proper files from templates
       INSTALL_FILES_MAP = {
         'karafka.rb.example' => Karafka.boot_file.basename,
-        'application_controller.rb.example' => 'app/controllers/application_controller.rb',
+        'application_consumer.rb.example' => 'app/consumers/application_consumer.rb',
         'application_responder.rb.example' => 'app/responders/application_responder.rb'
       }.freeze
 
data/lib/karafka/cli/server.rb
CHANGED
@@ -20,24 +20,21 @@ module Karafka
 
         if cli.options[:daemon]
           FileUtils.mkdir_p File.dirname(cli.options[:pid])
-          # For some reason Celluloid spins threads that break forking
-          # Threads are not shutdown immediately so deamonization will stale until
-          # those threads are killed by Celluloid manager (via timeout)
-          # There's nothing initialized here yet, so instead we shutdown celluloid
-          # and run it again when we need (after fork)
-          Celluloid.shutdown
           daemonize
-          Celluloid.boot
         end
 
-        # Remove pidfile on shutdown
-        ObjectSpace.define_finalizer(String.new, proc { send(:clean) })
-
         # We assign active topics on a server level, as only server is expected to listen on
         # part of the topics
         Karafka::Server.consumer_groups = cli.options[:consumer_groups]
 
-        #
+        # Remove pidfile on stop, just before the server instance is going to be GCed
+        # We want to delay the moment in which the pidfile is removed as much as we can,
+        # so instead of removing it after the server stops running, we rely on the gc moment
+        # when this object gets removed (it is a bit later), so it is closer to the actual
+        # system process end. We do that, so monitoring and deployment tools that rely on pids
+        # won't alarm or start new system process up until the current one is finished
+        ObjectSpace.define_finalizer(self, proc { send(:clean) })
+
         Karafka::Server.run
       end
 
@@ -62,7 +59,7 @@ module Karafka
 
       # Removes a pidfile (if exist)
       def clean
-        FileUtils.rm_f(cli.options[:pid])
+        FileUtils.rm_f(cli.options[:pid]) if cli.options[:pid]
      end
    end
  end
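Dropping Celluloid removes the shutdown/boot dance around daemonize, and the pidfile finalizer is now attached to the command object itself rather than a throwaway String. A self-contained sketch of the ObjectSpace.define_finalizer mechanism this relies on (paths illustrative):

    require 'fileutils'

    pid_path = '/tmp/example_server.pid' # hypothetical pidfile location
    FileUtils.touch(pid_path)

    owner = Object.new
    # The proc must not close over `owner` itself, or the object can never be
    # collected; the finalizer fires when `owner` is GCed or, at the latest,
    # when the process exits
    ObjectSpace.define_finalizer(owner, proc { FileUtils.rm_f(pid_path) })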
data/lib/karafka/connection/client.rb
ADDED
@@ -0,0 +1,117 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Connection
+    # Class used as a wrapper around Ruby-Kafka client to simplify additional
+    # features that we provide/might provide in future and to hide the internal implementation
+    class Client
+      extend Forwardable
+
+      def_delegator :kafka_consumer, :seek
+
+      # Creates a queue consumer client that will pull the data from Kafka
+      # @param consumer_group [Karafka::Routing::ConsumerGroup] consumer group for which
+      #   we create a client
+      # @return [Karafka::Connection::Client] group consumer that can subscribe to
+      #   multiple topics
+      def initialize(consumer_group)
+        @consumer_group = consumer_group
+        Persistence::Client.write(self)
+      end
+
+      # Opens connection, gets messages and calls a block for each of the incoming messages
+      # @yieldparam [Array<Kafka::FetchedMessage>] kafka fetched messages
+      # @note This will yield with raw messages - no preprocessing or reformatting.
+      def fetch_loop
+        settings = ConfigAdapter.consuming(consumer_group)
+
+        if consumer_group.batch_fetching
+          kafka_consumer.each_batch(*settings) { |batch| yield(batch.messages) }
+        else
+          # always yield an array of messages, so we have consistent API (always a batch)
+          kafka_consumer.each_message(*settings) { |message| yield([message]) }
+        end
+      rescue Kafka::ProcessingError => error
+        # If there was an error during consumption, we have to log it, pause current partition
+        # and process other things
+        Karafka.monitor.instrument(
+          'connection.client.fetch_loop.error',
+          caller: self,
+          error: error.cause
+        )
+        pause(error.topic, error.partition)
+        retry
+        # This is on purpose - see the notes for this method
+        # rubocop:disable RescueException
+      rescue Exception => error
+        # rubocop:enable RescueException
+        Karafka.monitor.instrument(
+          'connection.client.fetch_loop.error',
+          caller: self,
+          error: error
+        )
+        retry
+      end
+
+      # Gracefuly stops topic consumption
+      # @note Stopping running consumers without a really important reason is not recommended
+      #   as until all the consumers are stopped, the server will keep running serving only
+      #   part of the messages
+      def stop
+        @kafka_consumer&.stop
+        @kafka_consumer = nil
+      end
+
+      # Pauses fetching and consumption of a given topic partition
+      # @param topic [String] topic that we want to pause
+      # @param partition [Integer] number partition that we want to pause
+      def pause(topic, partition)
+        settings = ConfigAdapter.pausing(consumer_group)
+        timeout = settings[:timeout]
+        raise(Errors::InvalidPauseTimeout, timeout) unless timeout.positive?
+        kafka_consumer.pause(topic, partition, settings)
+      end
+
+      # Marks a given message as consumed and commit the offsets
+      # @note In opposite to ruby-kafka, we commit the offset for each manual marking to be sure
+      #   that offset commit happen asap in case of a crash
+      # @param [Karafka::Params::Params] params message that we want to mark as processed
+      def mark_as_consumed(params)
+        kafka_consumer.mark_message_as_processed(params)
+        # Trigger an immediate, blocking offset commit in order to minimize the risk of crashing
+        # before the automatic triggers have kicked in.
+        kafka_consumer.commit_offsets
+      end
+
+      private
+
+      attr_reader :consumer_group
+
+      # @return [Kafka::Consumer] returns a ready to consume Kafka consumer
+      #   that is set up to consume from topics of a given consumer group
+      def kafka_consumer
+        @kafka_consumer ||= kafka.consumer(
+          *ConfigAdapter.consumer(consumer_group)
+        ).tap do |consumer|
+          consumer_group.topics.each do |topic|
+            consumer.subscribe(*ConfigAdapter.subscription(topic))
+          end
+        end
+      rescue Kafka::ConnectionError
+        # If we would not wait it would totally spam log file with failed
+        # attempts if Kafka is down
+        sleep(consumer_group.reconnect_timeout)
+        # We don't log and just reraise - this will be logged
+        # down the road
+        raise
+      end
+
+      # @return [Kafka] returns a Kafka
+      # @note We don't cache it internally because we cache kafka_consumer that uses kafka
+      #   object instance
+      def kafka
+        Kafka.new(*ConfigAdapter.client(consumer_group))
+      end
+    end
+  end
+end
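The new Client always yields message arrays, so callers get one code path whether batch fetching is on (ruby-kafka each_batch) or off (each_message wrapped in a one-element array). A hedged usage sketch, assuming consumer_group is a configured Karafka::Routing::ConsumerGroup:

    client = Karafka::Connection::Client.new(consumer_group)

    # The block receives Array<Kafka::FetchedMessage> in both fetching modes
    client.fetch_loop do |kafka_messages|
      kafka_messages.each { |message| puts "#{message.topic}/#{message.partition}" }
    end

    # From a signal handler or another thread; fetch_loop itself blocks and retries
    client.stop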
data/lib/karafka/connection/config_adapter.rb
CHANGED
@@ -14,7 +14,10 @@ module Karafka
       class << self
         # Builds all the configuration settings for Kafka.new method
         # @param _consumer_group [Karafka::Routing::ConsumerGroup] consumer group details
-        # @return [Hash]
+        # @return [Array<Hash>] Array with all the client arguments including hash with all
+        #   the settings required by Kafka.new method
+        # @note We return array, so we can inject any arguments we want, in case of changes in the
+        #   raw driver
         def client(_consumer_group)
           # This one is a default that takes all the settings except special
           # cases defined in the map
@@ -33,33 +36,40 @@ module Karafka
             settings[setting_name] = setting_value
           end
 
-          sanitize(settings)
+          settings_hash = sanitize(settings)
+
+          # Normalization for the way Kafka::Client accepts arguments from 0.5.3
+          [settings_hash.delete(:seed_brokers), settings_hash]
         end
 
         # Builds all the configuration settings for kafka#consumer method
         # @param consumer_group [Karafka::Routing::ConsumerGroup] consumer group details
-        # @return [Hash]
+        # @return [Array<Hash>] array with all the consumer arguments including hash with all
+        #   the settings required by Kafka#consumer
         def consumer(consumer_group)
           settings = { group_id: consumer_group.id }
-          settings = fetch_for(:consumer, settings)
-          sanitize(settings)
+          settings = fetch_for(:consumer, consumer_group, settings)
+          [sanitize(settings)]
         end
 
         # Builds all the configuration settings for kafka consumer consume_each_batch and
         # consume_each_message methods
-        # @param
-        # @return [Hash]
+        # @param consumer_group [Karafka::Routing::ConsumerGroup] consumer group details
+        # @return [Array<Hash>] Array with all the arguments required by consuming method
+        #   including hash with all the settings required by
         #   Kafka::Consumer#consume_each_message and Kafka::Consumer#consume_each_batch method
-        def consuming(
-
+        def consuming(consumer_group)
+          settings = {
+            automatically_mark_as_processed: consumer_group.automatically_mark_as_consumed
+          }
+          [sanitize(fetch_for(:consuming, consumer_group, settings))]
         end
 
         # Builds all the configuration settings for kafka consumer#subscribe method
         # @param topic [Karafka::Routing::Topic] topic that holds details for a given subscription
         # @return [Hash] hash with all the settings required by kafka consumer#subscribe method
         def subscription(topic)
-          settings =
-          settings = fetch_for(:subscription, settings)
+          settings = fetch_for(:subscription, topic)
           [Karafka::App.config.topic_mapper.outgoing(topic.name), sanitize(settings)]
         end
 
@@ -74,13 +84,19 @@ module Karafka
 
         # Fetches proper settings for a given map namespace
         # @param namespace_key [Symbol] namespace from attributes map config adapter hash
+        # @param route_layer [Object] route topic or consumer group
         # @param preexisting_settings [Hash] hash with some preexisting settings that might have
         #   been loaded in a different way
-        def fetch_for(namespace_key, preexisting_settings = {})
-          kafka_configs.
+        def fetch_for(namespace_key, route_layer, preexisting_settings = {})
+          kafka_configs.each_key do |setting_name|
+            # Ignore settings that are not related to our namespace
             next unless AttributesMap.config_adapter[namespace_key].include?(setting_name)
+            # Ignore settings that are already initialized
+            # In case they are in preexisting settings fetched differently
             next if preexisting_settings.keys.include?(setting_name)
-
+            # Fetch all the settings from a given layer object. Objects can handle the fallback
+            #   to the kafka settings, so
+            preexisting_settings[setting_name] = route_layer.send(setting_name)
          end
 
          preexisting_settings
data/lib/karafka/connection/delegator.rb
ADDED
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Connection
+    # Class that delegates processing of messages for which we listen to a proper processor
+    module Delegator
+      class << self
+        # Delegates messages (does something with them)
+        # It will either schedule or run a proper processor action for messages
+        # @note This should be looped to obtain a constant delegating of new messages
+        # @note We catch all the errors here, to make sure that none failures
+        #   for a given consumption will affect other consumed messages
+        #   If we wouldn't catch it, it would propagate up until killing the thread
+        # @note It is a one huge method, because of performance reasons. It is much faster then
+        #   using send or invoking additional methods
+        # @param group_id [String] group_id of a group from which a given message came
+        # @param kafka_messages [Array<Kafka::FetchedMessage>] raw messages fetched from kafka
+        def call(group_id, kafka_messages)
+          # @note We always get messages by topic and partition so we can take topic from the
+          #   first one and it will be valid for all the messages
+          topic = Persistence::Topic.fetch(group_id, kafka_messages[0].topic)
+          consumer = Persistence::Consumer.fetch(topic, kafka_messages[0].partition)
+
+          Karafka.monitor.instrument(
+            'connection.delegator.call',
+            caller: self,
+            consumer: consumer,
+            kafka_messages: kafka_messages
+          ) do
+            # Depending on a case (persisted or not) we might use new consumer instance per
+            # each batch, or use the same one for all of them (for implementing buffering, etc.)
+            if topic.batch_consuming
+              consumer.params_batch = kafka_messages
+              consumer.call
+            else
+              kafka_messages.each do |kafka_message|
+                consumer.params_batch = [kafka_message]
+                consumer.call
+              end
+            end
+          end
+        end
+      end
+    end
+  end
+end
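From a consumer's point of view the delegation mode only changes how params_batch is filled: with batch_consuming enabled on the topic, the whole fetched batch arrives in one call; otherwise the consumer runs once per message with a one-element batch. A hypothetical consumer illustrating this:

    class EventsConsumer < ApplicationConsumer
      def consume
        # batch_consuming true:  params_batch wraps every message of the fetched batch
        # batch_consuming false: this runs once per message, params_batch has one element
        params_batch.each { |params| Karafka.logger.info "Received: #{params}" }
      end
    end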
data/lib/karafka/connection/listener.rb
CHANGED
@@ -7,12 +7,6 @@ module Karafka
     # @note Listener itself does nothing with the message - it will return to the block
     #   a raw Kafka::FetchedMessage
     class Listener
-      include Celluloid
-
-      execute_block_on_receiver :fetch_loop
-
-      attr_reader :consumer_group
-
       # @param consumer_group [Karafka::Routing::ConsumerGroup] consumer group that holds details
       #   on what topics and with what settings should we listen
       # @return [Karafka::Connection::Listener] listener instance
@@ -20,6 +14,17 @@ module Karafka
         @consumer_group = consumer_group
       end
 
+      # Runs prefetch callbacks and executes the main listener fetch loop
+      def call
+        Karafka::Callbacks.before_fetch_loop(
+          @consumer_group,
+          client
+        )
+        fetch_loop
+      end
+
+      private
+
       # Opens connection, gets messages and calls a block for each of the incoming messages
       # @yieldparam [String] consumer group id
       # @yieldparam [Array<Kafka::FetchedMessage>] kafka fetched messages
@@ -30,28 +35,25 @@ module Karafka
       # won't crash the whole cluster. Here we mostly focus on catchin the exceptions related to
       # Kafka connections / Internet connection issues / Etc. Business logic problems should not
       # propagate this far
-      def fetch_loop
-
-
+      def fetch_loop
+        client.fetch_loop do |raw_messages|
+          # @note What happens here is a delegation of processing to a proper processor based
+          #   on the incoming messages characteristics
+          Karafka::Connection::Delegator.call(@consumer_group.id, raw_messages)
         end
       # This is on purpose - see the notes for this method
       # rubocop:disable RescueException
       rescue Exception => e
+        Karafka.monitor.instrument('connection.listener.fetch_loop.error', caller: self, error: e)
       # rubocop:enable RescueException
-
-        @
-        retry if @messages_consumer
+        @client&.stop
+        retry if @client
       end
 
-
-
-      # @return [Karafka::Connection::MessagesConsumer] wrapped kafka consumer for a given topic
+      # @return [Karafka::Connection::Client] wrapped kafka consuming client for a given topic
       #   consumption
-
-
-        @messages_consumer ||= MessagesConsumer.new(consumer_group).tap do |consumer|
-          Karafka::Server.consumers << consumer if Karafka::Server.consumers
-        end
+      def client
+        @client ||= Client.new(@consumer_group)
      end
    end
  end
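The listener's new public call wraps fetch_loop with the app-level before_fetch_loop callback, passing the consumer group and the raw client (useful e.g. for seeking before consumption starts). A sketch of registering it from the app class, assuming the callbacks DSL shown earlier in this diff; the topic, partition and offset below are hypothetical:

    class App < Karafka::App
      # Runs once per listener, just before its fetch loop starts
      before_fetch_loop do |consumer_group, client|
        # client responds to #seek (delegated to the ruby-kafka consumer),
        # so stored offsets could be restored here
        client.seek('example_topic', 0, 42) if consumer_group.id.include?('example')
      end
    end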
data/lib/karafka/consumers/callbacks.rb
ADDED
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Consumers
+    # Additional callbacks that can be used to trigger some actions on certain moments like
+    # manual offset management, committing or anything else outside of a standard messages flow
+    # They are not included by default, as we don't want to provide functionalities that are
+    # not required by users by default
+    # Please refer to the wiki callbacks page for more details on how to use them
+    module Callbacks
+      # Types of events on which we run callbacks
+      TYPES = %i[
+        after_fetch
+        after_poll
+        before_poll
+        before_stop
+      ].freeze
+
+      # Class methods needed to make callbacks run
+      module ClassMethods
+        TYPES.each do |type|
+          # A Creates a callback wrapper
+          # @param method_name [Symbol, String] method name or nil if we plan to provide a block
+          # @yield A block with a code that should be executed before scheduling
+          define_method type do |method_name = nil, &block|
+            set_callback type, :before, method_name ? method_name : block
+          end
+        end
+      end
+
+      # @param consumer_class [Class] consumer class that we extend with callbacks
+      def self.included(consumer_class)
+        consumer_class.class_eval do
+          extend ClassMethods
+          include ActiveSupport::Callbacks
+
+          # The call method is wrapped with a set of callbacks
+          # We won't run process if any of the callbacks throw abort
+          # @see http://api.rubyonrails.org/classes/ActiveSupport/Callbacks/ClassMethods.html#method-i-get_callbacks
+          TYPES.each { |type| define_callbacks type }
+        end
+      end
+
+      # Executes the default consumer flow, runs callbacks and if not halted will call process
+      # method of a proper backend. It is here because it interacts with the default Karafka
+      # call flow and needs to be overwritten to support callbacks
+      def call
+        run_callbacks :after_fetch do
+          process
+        end
+      end
+    end
+  end
+end
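Because these consumer-level callbacks are opt-in, a consumer must include the module explicitly; a registered after_fetch hook then runs inside call, before processing, and throwing :abort halts it. A hedged sketch:

    class LoggingConsumer < ApplicationConsumer
      include Karafka::Consumers::Callbacks

      # Runs for every fetched params_batch before the processing backend kicks in;
      # throw(:abort) here would skip #consume entirely
      after_fetch do
        Karafka.logger.debug "Fetched #{params_batch.to_a.size} message(s)"
      end

      def consume
        params_batch.each { |params| Karafka.logger.info params }
      end
    end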