karafka 1.2.0
- checksums.yaml +7 -0
- data/.console_irbrc +13 -0
- data/.gitignore +68 -0
- data/.rspec +1 -0
- data/.ruby-gemset +1 -0
- data/.ruby-version +1 -0
- data/.travis.yml +18 -0
- data/CHANGELOG.md +415 -0
- data/CODE_OF_CONDUCT.md +46 -0
- data/CONTRIBUTING.md +41 -0
- data/Gemfile +11 -0
- data/Gemfile.lock +123 -0
- data/MIT-LICENCE +18 -0
- data/README.md +89 -0
- data/bin/karafka +19 -0
- data/config/errors.yml +6 -0
- data/karafka.gemspec +37 -0
- data/lib/karafka.rb +78 -0
- data/lib/karafka/app.rb +45 -0
- data/lib/karafka/attributes_map.rb +67 -0
- data/lib/karafka/backends/inline.rb +16 -0
- data/lib/karafka/base_consumer.rb +68 -0
- data/lib/karafka/base_responder.rb +204 -0
- data/lib/karafka/callbacks.rb +30 -0
- data/lib/karafka/callbacks/config.rb +22 -0
- data/lib/karafka/callbacks/dsl.rb +16 -0
- data/lib/karafka/cli.rb +54 -0
- data/lib/karafka/cli/base.rb +78 -0
- data/lib/karafka/cli/console.rb +29 -0
- data/lib/karafka/cli/flow.rb +46 -0
- data/lib/karafka/cli/info.rb +29 -0
- data/lib/karafka/cli/install.rb +42 -0
- data/lib/karafka/cli/server.rb +66 -0
- data/lib/karafka/connection/client.rb +117 -0
- data/lib/karafka/connection/config_adapter.rb +120 -0
- data/lib/karafka/connection/delegator.rb +46 -0
- data/lib/karafka/connection/listener.rb +60 -0
- data/lib/karafka/consumers/callbacks.rb +54 -0
- data/lib/karafka/consumers/includer.rb +51 -0
- data/lib/karafka/consumers/responders.rb +24 -0
- data/lib/karafka/consumers/single_params.rb +15 -0
- data/lib/karafka/errors.rb +50 -0
- data/lib/karafka/fetcher.rb +44 -0
- data/lib/karafka/helpers/class_matcher.rb +78 -0
- data/lib/karafka/helpers/config_retriever.rb +46 -0
- data/lib/karafka/helpers/multi_delegator.rb +33 -0
- data/lib/karafka/instrumentation/listener.rb +112 -0
- data/lib/karafka/instrumentation/logger.rb +55 -0
- data/lib/karafka/instrumentation/monitor.rb +64 -0
- data/lib/karafka/loader.rb +28 -0
- data/lib/karafka/params/dsl.rb +156 -0
- data/lib/karafka/params/params_batch.rb +46 -0
- data/lib/karafka/parsers/json.rb +38 -0
- data/lib/karafka/patches/dry_configurable.rb +35 -0
- data/lib/karafka/patches/ruby_kafka.rb +34 -0
- data/lib/karafka/persistence/client.rb +25 -0
- data/lib/karafka/persistence/consumer.rb +38 -0
- data/lib/karafka/persistence/topic.rb +29 -0
- data/lib/karafka/process.rb +64 -0
- data/lib/karafka/responders/builder.rb +36 -0
- data/lib/karafka/responders/topic.rb +57 -0
- data/lib/karafka/routing/builder.rb +61 -0
- data/lib/karafka/routing/consumer_group.rb +61 -0
- data/lib/karafka/routing/consumer_mapper.rb +34 -0
- data/lib/karafka/routing/proxy.rb +37 -0
- data/lib/karafka/routing/router.rb +29 -0
- data/lib/karafka/routing/topic.rb +60 -0
- data/lib/karafka/routing/topic_mapper.rb +55 -0
- data/lib/karafka/schemas/config.rb +24 -0
- data/lib/karafka/schemas/consumer_group.rb +77 -0
- data/lib/karafka/schemas/consumer_group_topic.rb +18 -0
- data/lib/karafka/schemas/responder_usage.rb +39 -0
- data/lib/karafka/schemas/server_cli_options.rb +43 -0
- data/lib/karafka/server.rb +94 -0
- data/lib/karafka/setup/config.rb +189 -0
- data/lib/karafka/setup/configurators/base.rb +29 -0
- data/lib/karafka/setup/configurators/params.rb +25 -0
- data/lib/karafka/setup/configurators/water_drop.rb +32 -0
- data/lib/karafka/setup/dsl.rb +22 -0
- data/lib/karafka/status.rb +25 -0
- data/lib/karafka/templates/application_consumer.rb.example +6 -0
- data/lib/karafka/templates/application_responder.rb.example +11 -0
- data/lib/karafka/templates/karafka.rb.example +54 -0
- data/lib/karafka/version.rb +7 -0
- data/log/.gitkeep +0 -0
- metadata +301 -0
data/lib/karafka/schemas/server_cli_options.rb
@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Schemas
+    # Schema for validating correctness of the server cli command options
+    # We validate some basics plus the list of consumer_groups that we want to use, to make
+    # sure that all of them are defined, plus that a pidfile does not exist
+    ServerCliOptions = Dry::Validation.Schema do
+      configure do
+        option :consumer_groups
+
+        def self.messages
+          super.merge(
+            en: {
+              errors: {
+                consumer_groups_inclusion: 'Unknown consumer group.',
+                pid_existence: 'Pidfile already exists.'
+              }
+            }
+          )
+        end
+      end
+
+      optional(:pid).filled(:str?)
+      optional(:daemon).filled(:bool?)
+      optional(:consumer_groups).filled(:array?)
+
+      validate(consumer_groups_inclusion: :consumer_groups) do |consumer_groups|
+        # If there were no consumer_groups declared in the server cli, it means that we will
+        # run all of them and there is no need to validate them here at all
+        if consumer_groups.nil?
+          true
+        else
+          (consumer_groups - Karafka::Routing::Builder.instance.map(&:name)).empty?
+        end
+      end
+
+      validate(pid_existence: :pid) do |pid|
+        pid ? !File.exist?(pid) : true
+      end
+    end
+  end
+end
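Under the dry-validation 0.x API this schema targets, the result object responds to success? and errors. A minimal sketch of how the server CLI could exercise it (the options hash below is hypothetical):

result = Karafka::Schemas::ServerCliOptions.call(
  pid: 'tmp/pids/karafka.pid',
  consumer_groups: %w[events]
)

result.success? # false unless 'events' is a defined group and no pidfile exists yet
result.errors   # e.g. { consumer_groups_inclusion: ['Unknown consumer group.'] }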
data/lib/karafka/server.rb
@@ -0,0 +1,94 @@
+# frozen_string_literal: true
+
+module Karafka
+  # Karafka consuming server class
+  class Server
+    @consumer_threads = Concurrent::Array.new
+
+    # How long should we sleep between checks on shutting down consumers
+    SUPERVISION_SLEEP = 1
+    # What system exit code should we use when we terminate forcefully
+    FORCEFUL_EXIT_CODE = 2
+
+    class << self
+      # Set of consuming threads. Each consumer thread contains a single consumer
+      attr_accessor :consumer_threads
+
+      # Writer for the list of consumer groups that we want to consume in our current process context
+      attr_writer :consumer_groups
+
+      # Method which runs the app
+      def run
+        bind_on_sigint
+        bind_on_sigquit
+        bind_on_sigterm
+        start_supervised
+      end
+
+      # @return [Array<String>] array with names of consumer groups that should be consumed in a
+      #   current server context
+      def consumer_groups
+        # If not specified, a server will listen on all the topics
+        @consumer_groups ||= Karafka::App.consumer_groups.map(&:name).freeze
+      end
+
+      private
+
+      # @return [Karafka::Process] process wrapper instance used to catch system signal calls
+      def process
+        Karafka::Process.instance
+      end
+
+      # What should happen when we decide to quit with sigint
+      def bind_on_sigint
+        process.on_sigint { stop_supervised }
+      end
+
+      # What should happen when we decide to quit with sigquit
+      def bind_on_sigquit
+        process.on_sigquit { stop_supervised }
+      end
+
+      # What should happen when we decide to quit with sigterm
+      def bind_on_sigterm
+        process.on_sigterm { stop_supervised }
+      end
+
+      # Starts Karafka with a supervision
+      # @note We don't need to sleep because Karafka::Fetcher is locking and waiting to
+      #   finish the loop (and that won't happen until we explicitly want to stop)
+      def start_supervised
+        process.supervise do
+          Karafka::App.run!
+          Karafka::Fetcher.call
+        end
+      end
+
+      # Stops Karafka with a supervision (as long as there is a shutdown timeout)
+      # If consumers don't stop in the given timeframe, it will force them to exit
+      def stop_supervised
+        Karafka.monitor.instrument('server.stop', {})
+
+        Karafka::App.stop!
+        # If there is no shutdown timeout, we don't exit but wait until all the consumers
+        # are done with their work
+        return unless Karafka::App.config.shutdown_timeout
+
+        # If there is a timeout, we check every 1 second (for the timeout period) whether all
+        # the threads have finished their work and if so, we can just return and the normal
+        # shutdown process will take place
+        Karafka::App.config.shutdown_timeout.to_i.times do
+          return if consumer_threads.count(&:alive?).zero?
+          sleep SUPERVISION_SLEEP
+        end
+
+        Karafka.monitor.instrument('server.stop.error', {})
+        # We're done waiting, let's kill them!
+        consumer_threads.each(&:terminate)
+
+        # exit is not within the instrumentation as it would not trigger due to exit
+        Kernel.exit FORCEFUL_EXIT_CODE
+      end
+    end
+  end
+end
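The grace-period logic in stop_supervised is a bounded polling loop: check thread liveness once per second, bail out early when everything is done, and only then resort to force. The same pattern in isolation (the threads here are hypothetical stand-ins for consumer threads):

threads = 3.times.map { |i| Thread.new { sleep i } }
timeout = 2 # seconds, plays the role of shutdown_timeout

timeout.times do
  break if threads.none?(&:alive?) # everything finished gracefully
  sleep 1                          # plays the role of SUPERVISION_SLEEP
end

# Whatever survived the grace period gets terminated, as in the server code
threads.select(&:alive?).each(&:terminate)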
data/lib/karafka/setup/config.rb
@@ -0,0 +1,189 @@
+# frozen_string_literal: true
+
+module Karafka
+  # Module containing all Karafka setup related elements like configuration settings,
+  # config validations and configurators for external gems integration
+  module Setup
+    # Configurator for setting up all the framework details that are required to make it work
+    # @note If you want to do some configurations after all of this is done, please add a proper
+    #   file to karafka/config (it needs to inherit from Karafka::Setup::Configurators::Base
+    #   and implement a setup method); after that everything will happen automatically
+    # @note This config object allows creating only 1-level nestings (nodes). This should be
+    #   enough and will still keep the code simple
+    # @see Karafka::Setup::Configurators::Base for more details about the configurators api
+    class Config
+      extend Dry::Configurable
+      extend Callbacks::Config
+
+      # Available settings
+      # option client_id [String] kafka client_id - used to provide
+      #   default Kafka groups namespaces and to identify the app in kafka
+      setting :client_id
+      # What backend do we want to use to process messages
+      setting :backend, :inline
+      # option logger [Instance] logger that we want to use
+      setting :logger, -> { ::Karafka::Instrumentation::Logger.instance }
+      # option monitor [Instance] monitor that we want to use (defaults to Karafka::Monitor)
+      setting :monitor, -> { ::Karafka::Instrumentation::Monitor.instance }
+      # Mapper used to remap consumer groups ids, so in case users migrate from other tools
+      # or they need to maintain their own internal consumer group naming conventions, they
+      # can easily do it, replacing the default client_id + consumer name pattern concept
+      setting :consumer_mapper, -> { Routing::ConsumerMapper }
+      # Mapper used to remap names of topics, so we can have a clean internal topic naming
+      # despite using any Kafka provider that uses namespacing, etc
+      # It needs to implement two methods:
+      #   - #incoming - for remapping from the incoming message to our internal format
+      #   - #outgoing - for remapping from the internal topic name into the outgoing message
+      setting :topic_mapper, -> { Routing::TopicMapper }
+      # Default parser for parsing and unparsing incoming and outgoing data
+      setting :parser, -> { Karafka::Parsers::Json }
+      # If batch_fetching is true, we will fetch kafka messages in batches instead of 1 by 1
+      # @note Fetching does not equal consuming, see batch_consuming description for details
+      setting :batch_fetching, true
+      # If batch_consuming is true, we will have access to #params_batch instead of #params.
+      # #params_batch will contain params received from Kafka (may be more than 1) so we can
+      # process them in batches
+      setting :batch_consuming, false
+      # Should we operate in a single consumer instance across multiple batches of messages,
+      # from the same partition, or should we build a new one for each incoming batch.
+      # Disabling that can be useful when you want to create a new consumer instance for each
+      # incoming batch. Persistence is enabled by default, so we don't create more objects
+      # than needed on each batch
+      setting :persistent, true
+      # option shutdown_timeout [Integer, nil] the number of seconds after which Karafka no
+      #   longer waits for the consumers to stop gracefully but instead forcefully
+      #   terminates everything.
+      # @note Keep in mind that if your business logic takes longer than this timeout,
+      #   the consumer threads will be terminated mid-work.
+      # @note If set to nil, it won't forcefully shutdown the process at all.
+      setting :shutdown_timeout, 60
+      # option params_base_class [Class] base class for params class initialization
+      #   This can be either a Hash or a HashWithIndifferentAccess depending on your
+      #   requirements. Note that by using HashWithIndifferentAccess, you trade some
+      #   performance for convenience. This can be useful especially if you already use
+      #   it with Rails, etc
+      setting :params_base_class, Hash
+
+      # option kafka [Hash] - optional - kafka configuration options
+      setting :kafka do
+        # Array with at least one host
+        setting :seed_brokers
+        # option session_timeout [Integer] the number of seconds after which, if a client
+        #   hasn't contacted the Kafka cluster, it will be kicked out of the group.
+        setting :session_timeout, 30
+        # Time that a given partition will be paused from fetching messages, when message
+        # consumption fails. It allows us to process other partitions, while the error is being
+        # resolved, and also "slows" things down, so it prevents us from "eating" up all the
+        # messages and consuming them with failing code
+        setting :pause_timeout, 10
+        # option offset_commit_interval [Integer] the interval between offset commits,
+        #   in seconds.
+        setting :offset_commit_interval, 10
+        # option offset_commit_threshold [Integer] the number of messages that can be
+        #   processed before their offsets are committed. If zero, offset commits are
+        #   not triggered by message consumption.
+        setting :offset_commit_threshold, 0
+        # option heartbeat_interval [Integer] the interval between heartbeats; must be less
+        #   than the session window.
+        setting :heartbeat_interval, 10
+        # option max_bytes_per_partition [Integer] the maximum amount of data fetched
+        #   from a single partition at a time.
+        setting :max_bytes_per_partition, 1_048_576
+        # Whether to consume messages starting at the beginning or to just consume new messages
+        setting :start_from_beginning, true
+        # option min_bytes [Integer] the minimum number of bytes to read before
+        #   returning messages from the server; if `max_wait_time` is reached, this
+        #   is ignored.
+        setting :min_bytes, 1
+        # option max_bytes [Integer] the maximum number of bytes to read before returning messages
+        #   from each broker.
+        setting :max_bytes, 10_485_760
+        # option max_wait_time [Integer, Float] max_wait_time is the maximum number of seconds to
+        #   wait before returning data from a single message fetch. By setting this high you also
+        #   increase the fetching throughput - and by setting it low you set a bound on latency.
+        #   This configuration overrides `min_bytes`, so you'll _always_ get data back within the
+        #   time specified. The default value is one second. If you want to have at most five
+        #   seconds of latency, set `max_wait_time` to 5. You should make sure
+        #   max_wait_time * num brokers + heartbeat_interval is less than session_timeout.
+        setting :max_wait_time, 1
+        # option automatically_mark_as_consumed [Boolean] should we automatically mark received
+        #   messages as consumed (processed) after non-error consumption
+        setting :automatically_mark_as_consumed, true
+        # option reconnect_timeout [Integer] How long should we wait before trying to reconnect to
+        #   a Kafka cluster that went down (in seconds)
+        setting :reconnect_timeout, 5
+        # option offset_retention_time [Integer] The length of the retention window, known as
+        #   offset retention time
+        setting :offset_retention_time, nil
+        # option connect_timeout [Integer] Sets the number of seconds to wait while connecting to
+        #   a broker for the first time. When ruby-kafka initializes, it needs to connect to at
+        #   least one host.
+        setting :connect_timeout, 10
+        # option socket_timeout [Integer] Sets the number of seconds to wait when reading from or
+        #   writing to a socket connection to a broker. After this timeout expires the connection
+        #   will be killed. Note that some Kafka operations are by definition long-running, such as
+        #   waiting for new messages to arrive in a partition, so don't set this value too low
+        setting :socket_timeout, 30
+
+        # SSL authentication related settings
+        # option ssl_ca_cert [String, nil] SSL CA certificate
+        setting :ssl_ca_cert, nil
+        # option ssl_ca_cert_file_path [String, nil] SSL CA certificate file path
+        setting :ssl_ca_cert_file_path, nil
+        # option ssl_ca_certs_from_system [Boolean] Use the CA certs from your system's default
+        #   certificate store
+        setting :ssl_ca_certs_from_system, false
+        # option ssl_client_cert [String, nil] SSL client certificate
+        setting :ssl_client_cert, nil
+        # option ssl_client_cert_key [String, nil] SSL client certificate password
+        setting :ssl_client_cert_key, nil
+        # option sasl_gssapi_principal [String, nil] sasl principal
+        setting :sasl_gssapi_principal, nil
+        # option sasl_gssapi_keytab [String, nil] sasl keytab
+        setting :sasl_gssapi_keytab, nil
+        # option sasl_plain_authzid [String] The authorization identity to use
+        setting :sasl_plain_authzid, ''
+        # option sasl_plain_username [String, nil] The username used to authenticate
+        setting :sasl_plain_username, nil
+        # option sasl_plain_password [String, nil] The password used to authenticate
+        setting :sasl_plain_password, nil
+        # option sasl_scram_username [String, nil] The username used to authenticate
+        setting :sasl_scram_username, nil
+        # option sasl_scram_password [String, nil] The password used to authenticate
+        setting :sasl_scram_password, nil
+        # option sasl_scram_mechanism [String, nil] Scram mechanism, either 'sha256' or 'sha512'
+        setting :sasl_scram_mechanism, nil
+      end
+
+      class << self
+        # Configuration method
+        # @yield Runs a block of code providing a config singleton instance to it
+        # @yieldparam [Karafka::Setup::Config] Karafka config instance
+        def setup
+          configure { |config| yield(config) }
+        end
+
+        # Everything that should be initialized after the setup
+        # Components are in the karafka/config directory and are all loaded one by one
+        # If you want to configure another component, please add a proper file to the config dir
+        def setup_components
+          [
+            Configurators::Params,
+            Configurators::WaterDrop
+          ].each { |klass| klass.setup(config) }
+        end
+
+        # Validates the config based on the ConfigurationSchema
+        # @return [Boolean] true if configuration is valid
+        # @raise [Karafka::Errors::InvalidConfiguration] raised when the configuration
+        #   doesn't match the ConfigurationSchema
+        def validate!
+          validation_result = Karafka::Schemas::Config.call(config.to_h)
+
+          return true if validation_result.success?
+
+          raise Errors::InvalidConfiguration, validation_result.errors
+        end
+      end
+    end
+  end
+end
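Put together, an application drives this config object through Karafka::App.setup in its boot file. A sketch in the spirit of the karafka.rb.example template shipped with this release (client id and broker addresses are placeholders; adjust to your environment):

Karafka::App.setup do |config|
  config.client_id = 'my_application'
  config.kafka.seed_brokers = %w[kafka://127.0.0.1:9092]
  config.batch_consuming = true  # expose #params_batch instead of #params
  config.shutdown_timeout = 30   # wait at most 30s before a forceful exit
end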
data/lib/karafka/setup/configurators/base.rb
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Setup
+    # Configurators are used to enclose all the configuration of external dependencies
+    # upon which Karafka depends
+    class Configurators
+      # Karafka has some components that it relies on (like Sidekiq)
+      # We need to configure all of them only when the framework was set up.
+      # Any class that descends from this one will be automatically invoked upon setup (after it)
+      # @note This should be used only for internal Karafka dependencies configuration
+      #   End users configuration should go to the after_init block
+      # @example Configure an Example class
+      #   class ExampleConfigurator < Base
+      #     def self.setup(config)
+      #       ExampleClass.logger = Karafka.logger
+      #       ExampleClass.redis = config.redis
+      #     end
+      #   end
+      class Base
+        # @param _config [Karafka::Config] config instance
+        # This method needs to be implemented in a subclass
+        def self.setup(_config)
+          raise NotImplementedError
+        end
+      end
+    end
+  end
+end
data/lib/karafka/setup/configurators/params.rb
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Setup
+    class Configurators
+      # Karafka::Params::Params are dynamically built based on a user defined parent class,
+      # so we cannot just require it; we need to initialize it after the user is done with
+      # the framework configuration. This is a configurator that does exactly that.
+      class Params < Base
+        # Builds up the Karafka::Params::Params class with a user defined parent class
+        # @param config [Karafka::Setup::Config] config we can use to set things up
+        def self.setup(config)
+          return if defined? Karafka::Params::Params
+
+          Karafka::Params.const_set(
+            'Params',
+            Class
+              .new(config.params_base_class)
+              .tap { |klass| klass.include(Karafka::Params::Dsl) }
+          )
+        end
+      end
+    end
+  end
+end
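The trick here is building a class whose superclass is chosen at runtime and registering it under a constant. The same pattern in isolation (Demo and Loud are made-up names):

module Demo; end

module Loud
  def shout
    'hello!'
  end
end

base = Hash # stands in for config.params_base_class
Demo.const_set('Params', Class.new(base).tap { |k| k.include(Loud) })

Demo::Params.ancestors.first(3) # => [Demo::Params, Loud, Hash]
Demo::Params.new.shout          # => "hello!"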
data/lib/karafka/setup/configurators/water_drop.rb
@@ -0,0 +1,32 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Setup
+    class Configurators
+      # Class responsible for setting up the WaterDrop configuration
+      class WaterDrop < Base
+        # Sets up the WaterDrop settings
+        # @param config [Karafka::Setup::Config] config we can use to set things up
+        # @note This will also inject the Karafka monitor as a default monitor into WaterDrop,
+        #   so we have the same monitor within the whole Karafka framework (same with the logger)
+        def self.setup(config)
+          ::WaterDrop.setup do |water_config|
+            water_config.deliver = true
+
+            config.to_h.except(:kafka).each do |k, v|
+              key_assignment = :"#{k}="
+              next unless water_config.respond_to?(key_assignment)
+              water_config.public_send(key_assignment, v)
+            end
+
+            config.kafka.to_h.each do |k, v|
+              key_assignment = :"#{k}="
+              next unless water_config.kafka.respond_to?(key_assignment)
+              water_config.kafka.public_send(key_assignment, v)
+            end
+          end
+        end
+      end
+    end
+  end
+end
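The respond_to?-guarded loop is a defensive forwarding idiom: copy over only the settings the target config actually exposes and silently skip the rest. In isolation (the source hash and target struct are made up):

source = { client_id: 'my_app', logger: nil, unknown_key: 42 }
target = Struct.new(:client_id, :logger).new

source.each do |k, v|
  writer = :"#{k}="
  next unless target.respond_to?(writer) # :unknown_key= is skipped here
  target.public_send(writer, v)
end

target.client_id # => "my_app"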
data/lib/karafka/setup/dsl.rb
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Setup
+    # DSL that allows working with the configuration from the Karafka::App
+    # @note Despite providing methods, everything is still persisted in and fetched
+    #   from the Karafka::Setup::Config
+    module Dsl
+      # Sets up the whole configuration
+      # @param [Block] block configuration block
+      def setup(&block)
+        Setup::Config.setup(&block)
+        initialize!
+      end
+
+      # @return [Karafka::Config] config instance
+      def config
+        Setup::Config.config
+      end
+    end
+  end
+end
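The module is meant to be extended, which is how Karafka::App.setup becomes available. A simplified sketch of that wiring (the initialize! stub mimics the status delegation; see lib/karafka/app.rb for the real thing):

module Karafka
  class App
    extend Setup::Dsl

    # Stub so that Dsl#setup's initialize! call resolves; the actual app
    # class delegates all status transitions to Karafka::Status
    def self.initialize!
      Status.instance.initialize!
    end
  end
end

Karafka::App.setup do |config|
  config.client_id = 'my_app'
end

Karafka::App.config.client_id # => "my_app"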
data/lib/karafka/status.rb
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+module Karafka
+  # App status monitor
+  class Status
+    include Singleton
+
+    # Available states and their transitions
+    STATES = {
+      initializing: :initialize!,
+      running: :run!,
+      stopped: :stop!
+    }.freeze
+
+    STATES.each do |state, transition|
+      define_method :"#{state}?" do
+        @status == state
+      end
+
+      define_method transition do
+        @status = state
+      end
+    end
+  end
+end
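Since the STATES loop generates both a predicate and a bang transition per state, the singleton can be driven like this:

status = Karafka::Status.instance

status.run!
status.running?      # => true
status.initializing? # => false

status.stop!
status.stopped?      # => true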