karafka 1.0.0 → 1.2.0

Files changed (83)
  1. checksums.yaml +5 -5
  2. data/.ruby-version +1 -1
  3. data/.travis.yml +3 -1
  4. data/CHANGELOG.md +90 -3
  5. data/CONTRIBUTING.md +5 -6
  6. data/Gemfile +1 -1
  7. data/Gemfile.lock +59 -64
  8. data/README.md +28 -57
  9. data/bin/karafka +13 -1
  10. data/config/errors.yml +6 -0
  11. data/karafka.gemspec +10 -9
  12. data/lib/karafka.rb +19 -10
  13. data/lib/karafka/app.rb +8 -15
  14. data/lib/karafka/attributes_map.rb +4 -4
  15. data/lib/karafka/backends/inline.rb +2 -3
  16. data/lib/karafka/base_consumer.rb +68 -0
  17. data/lib/karafka/base_responder.rb +41 -17
  18. data/lib/karafka/callbacks.rb +30 -0
  19. data/lib/karafka/callbacks/config.rb +22 -0
  20. data/lib/karafka/callbacks/dsl.rb +16 -0
  21. data/lib/karafka/cli/base.rb +2 -0
  22. data/lib/karafka/cli/flow.rb +1 -1
  23. data/lib/karafka/cli/info.rb +1 -2
  24. data/lib/karafka/cli/install.rb +2 -3
  25. data/lib/karafka/cli/server.rb +9 -12
  26. data/lib/karafka/connection/client.rb +117 -0
  27. data/lib/karafka/connection/config_adapter.rb +30 -14
  28. data/lib/karafka/connection/delegator.rb +46 -0
  29. data/lib/karafka/connection/listener.rb +22 -20
  30. data/lib/karafka/consumers/callbacks.rb +54 -0
  31. data/lib/karafka/consumers/includer.rb +51 -0
  32. data/lib/karafka/consumers/responders.rb +24 -0
  33. data/lib/karafka/{controllers → consumers}/single_params.rb +3 -3
  34. data/lib/karafka/errors.rb +19 -2
  35. data/lib/karafka/fetcher.rb +30 -28
  36. data/lib/karafka/helpers/class_matcher.rb +8 -8
  37. data/lib/karafka/helpers/config_retriever.rb +2 -2
  38. data/lib/karafka/instrumentation/listener.rb +112 -0
  39. data/lib/karafka/instrumentation/logger.rb +55 -0
  40. data/lib/karafka/instrumentation/monitor.rb +64 -0
  41. data/lib/karafka/loader.rb +0 -1
  42. data/lib/karafka/params/dsl.rb +156 -0
  43. data/lib/karafka/params/params_batch.rb +7 -2
  44. data/lib/karafka/patches/dry_configurable.rb +7 -7
  45. data/lib/karafka/patches/ruby_kafka.rb +34 -0
  46. data/lib/karafka/persistence/client.rb +25 -0
  47. data/lib/karafka/persistence/consumer.rb +38 -0
  48. data/lib/karafka/persistence/topic.rb +29 -0
  49. data/lib/karafka/process.rb +6 -5
  50. data/lib/karafka/responders/builder.rb +15 -14
  51. data/lib/karafka/responders/topic.rb +8 -1
  52. data/lib/karafka/routing/builder.rb +2 -2
  53. data/lib/karafka/routing/consumer_group.rb +1 -1
  54. data/lib/karafka/routing/consumer_mapper.rb +34 -0
  55. data/lib/karafka/routing/router.rb +1 -1
  56. data/lib/karafka/routing/topic.rb +5 -11
  57. data/lib/karafka/routing/{mapper.rb → topic_mapper.rb} +2 -2
  58. data/lib/karafka/schemas/config.rb +4 -5
  59. data/lib/karafka/schemas/consumer_group.rb +45 -24
  60. data/lib/karafka/schemas/consumer_group_topic.rb +18 -0
  61. data/lib/karafka/schemas/responder_usage.rb +1 -0
  62. data/lib/karafka/server.rb +39 -20
  63. data/lib/karafka/setup/config.rb +74 -51
  64. data/lib/karafka/setup/configurators/base.rb +6 -12
  65. data/lib/karafka/setup/configurators/params.rb +25 -0
  66. data/lib/karafka/setup/configurators/water_drop.rb +15 -14
  67. data/lib/karafka/setup/dsl.rb +22 -0
  68. data/lib/karafka/templates/{application_controller.rb.example → application_consumer.rb.example} +2 -3
  69. data/lib/karafka/templates/karafka.rb.example +18 -5
  70. data/lib/karafka/version.rb +1 -1
  71. metadata +87 -63
  72. data/.github/ISSUE_TEMPLATE.md +0 -2
  73. data/Rakefile +0 -7
  74. data/lib/karafka/base_controller.rb +0 -118
  75. data/lib/karafka/connection/messages_consumer.rb +0 -106
  76. data/lib/karafka/connection/messages_processor.rb +0 -59
  77. data/lib/karafka/controllers/includer.rb +0 -51
  78. data/lib/karafka/controllers/responders.rb +0 -19
  79. data/lib/karafka/logger.rb +0 -53
  80. data/lib/karafka/monitor.rb +0 -98
  81. data/lib/karafka/params/params.rb +0 -101
  82. data/lib/karafka/persistence.rb +0 -18
  83. data/lib/karafka/setup/configurators/celluloid.rb +0 -22
data/lib/karafka/routing/builder.rb

@@ -6,7 +6,7 @@ module Karafka
     # @example Build a simple (most common) route
     #   consumers do
     #     topic :new_videos do
-    #       controller NewVideosController
+    #       consumer NewVideosConsumer
     #     end
     #   end
     class Builder < Array
@@ -28,7 +28,7 @@ module Karafka
         hashed_group = consumer_group.to_h
         validation_result = Karafka::Schemas::ConsumerGroup.call(hashed_group)
         return if validation_result.success?
-        raise Errors::InvalidConfiguration, [validation_result.errors, hashed_group]
+        raise Errors::InvalidConfiguration, validation_result.errors
       end
     end

data/lib/karafka/routing/consumer_group.rb

@@ -18,7 +18,7 @@ module Karafka
     #   kafka and don't understand the concept of consumer groups.
     def initialize(name)
       @name = name
-      @id = "#{Karafka::App.config.client_id.to_s.underscore}_#{@name}"
+      @id = Karafka::App.config.consumer_mapper.call(name)
       @topics = []
     end

data/lib/karafka/routing/consumer_mapper.rb (new file)

@@ -0,0 +1,34 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Routing
+    # Default consumer mapper that builds consumer ids based on app id and consumer group name
+    # Different mapper can be used in case of preexisting consumer names or for applying
+    # other naming conventions not compatible wiih Karafkas client_id + consumer name concept
+    #
+    # @example Mapper for using consumer groups without a client_id prefix
+    #   module MyMapper
+    #     def self.call(raw_consumer_group_name)
+    #       raw_consumer_group_name
+    #     end
+    #   end
+    #
+    # @example Mapper for replacing "_" with "." in topic names
+    #   module MyMapper
+    #     def self.call(raw_consumer_group_name)
+    #       [
+    #         Dry::Inflector.new.underscore(Karafka::App.config.client_id.to_s),
+    #         raw_consumer_group_name
+    #       ].join('_').gsub('_', '.')
+    #     end
+    #   end
+    module ConsumerMapper
+      # @param raw_consumer_group_name [String, Symbol] string or symbolized consumer group name
+      # @return [String] remapped final consumer group name
+      def self.call(raw_consumer_group_name)
+        client_name = Dry::Inflector.new.underscore(Karafka::App.config.client_id.to_s)
+        "#{client_name}_#{raw_consumer_group_name}"
+      end
+    end
+  end
+end
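Since consumer group ids are now built through the configurable consumer_mapper (see the consumer_group.rb change above and the consumer_mapper setting later in this diff), an app that needs to keep pre-existing group names can swap the default module out. A minimal sketch, assuming the mapper is assigned in the app's setup block; the mapper and app names are made up:

  # Hypothetical mapper that keeps raw group names, skipping the client_id prefix.
  module RawNameConsumerMapper
    def self.call(raw_consumer_group_name)
      raw_consumer_group_name.to_s
    end
  end

  class KarafkaApp < Karafka::App
    setup do |config|
      config.client_id = 'example_app'
      config.consumer_mapper = RawNameConsumerMapper
    end
  end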
data/lib/karafka/routing/router.rb

@@ -3,7 +3,7 @@
 module Karafka
   # Namespace for all elements related to requests routing
   module Routing
-    # Karafka framework Router for routing incoming messages to proper controllers
+    # Karafka framework Router for routing incoming messages to proper consumers
     # @note Since Kafka does not provide namespaces or modules for topics, they all have "flat"
     #   structure so all the routes are being stored in a single level array
     module Router
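Taken together, the routing diffs above mean that 1.0-style `controller` declarations become `consumer` declarations in 1.2, with `Karafka::BaseConsumer` replacing the old base controller. A sketch of the migrated pieces; the topic, payload key, and class names are made up, and the `#consume` method follows the renamed consumer API:

  # Hypothetical consumer replacing a 1.0 controller.
  class NewVideosConsumer < Karafka::BaseConsumer
    def consume
      params_batch.each { |params| puts params['video_id'] } # made-up payload key
    end
  end

  class KarafkaApp < Karafka::App
    consumer_groups.draw do
      topic :new_videos do
        consumer NewVideosConsumer # was: controller NewVideosController
      end
    end
  end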
data/lib/karafka/routing/topic.rb

@@ -9,7 +9,7 @@ module Karafka
     extend Helpers::ConfigRetriever

     attr_reader :id, :consumer_group
-    attr_accessor :controller
+    attr_accessor :consumer

     # @param [String, Symbol] name of a topic on which we want to listen
     # @param consumer_group [Karafka::Routing::ConsumerGroup] owning consumer group of this topic
@@ -29,20 +29,14 @@ module Karafka
     #   example for Sidekiq
     def build
       Karafka::AttributesMap.topic.each { |attr| send(attr) }
-      controller&.topic = self
+      consumer&.topic = self
       self
     end

     # @return [Class, nil] Class (not an instance) of a responder that should respond from
-    #   controller back to Kafka (usefull for piping dataflows)
+    #   consumer back to Kafka (usefull for piping dataflows)
     def responder
-      @responder ||= Karafka::Responders::Builder.new(controller).build
-    end
-
-    # @return [Class] Parser class (not instance) that we want to use to unparse Kafka messages
-    # @note If not provided - will use Json as default
-    def parser
-      @parser ||= Karafka::Parsers::Json
+      @responder ||= Karafka::Responders::Builder.new(consumer).build
     end

     Karafka::AttributesMap.topic.each do |attribute|
@@ -58,7 +52,7 @@ module Karafka

       Hash[map].merge!(
         id: id,
-        controller: controller
+        consumer: consumer
       )
     end
   end
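With the hardcoded per-topic default removed, the parser now defaults from the new global `config.parser` setting, while a per-topic `parser` declaration is still validated by the topic schema later in this diff. A sketch of a custom parser, assuming the same class-level contract as the default `Karafka::Parsers::Json` (`.parse` for incoming payloads, `.generate` for outgoing ones); `Hash.from_xml` stands in for whatever XML helper the app already has:

  # Hypothetical XML parser following the Json parser's contract.
  class XmlParser
    class << self
      def parse(content)
        Hash.from_xml(content) # assumed helper, e.g. from ActiveSupport
      rescue StandardError => e
        raise Karafka::Errors::ParserError, e
      end

      def generate(object)
        object.to_xml # assumes the object responds to #to_xml
      end
    end
  end

  # Used globally via config.parser, or per topic in the routing:
  #   topic :incoming_xml do
  #     consumer IncomingXmlConsumer
  #     parser XmlParser
  #   end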
data/lib/karafka/routing/{mapper.rb → topic_mapper.rb}

@@ -2,7 +2,7 @@

 module Karafka
   module Routing
-    # Default routes mapper that does not remap things
+    # Default topic mapper that does not remap things
     # Mapper can be used for Kafka providers that require namespaced topic names. Instead of being
     # provider dependent, we can then define mapper and use internally "pure" topic names in
     # routes and responders
@@ -32,7 +32,7 @@ module Karafka
     #     topic.to_s.gsub('_', '.')
     #   end
     # end
-    module Mapper
+    module TopicMapper
       class << self
         # @param topic [String, Symbol] topic
         # @return [String, Symbol] same topic as on input
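The renamed default does no remapping; a replacement only needs the #incoming/#outgoing pair described in the setup/config.rb diff below. A sketch for a provider that forces a namespace prefix on topic names; the class and prefix are made up:

  # Hypothetical mapper stripping/adding a provider prefix around the "pure" topic names.
  class PrefixedTopicMapper
    def initialize(prefix)
      @prefix = prefix
    end

    # Strips the provider prefix so routes and responders see the pure name
    def incoming(topic)
      topic.to_s.sub("#{@prefix}.", '')
    end

    # Re-adds the prefix on the way out
    def outgoing(topic)
      "#{@prefix}.#{topic}"
    end
  end

  class KarafkaApp < Karafka::App
    setup do |config|
      config.topic_mapper = PrefixedTopicMapper.new('myprovider')
    end
  end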
data/lib/karafka/schemas/config.rb

@@ -13,13 +13,12 @@ module Karafka
     # so we validate all of that once all the routes are defined and ready
     Config = Dry::Validation.Schema do
       required(:client_id).filled(:str?, format?: Karafka::Schemas::TOPIC_REGEXP)
+      required(:shutdown_timeout) { none? | (int? & gteq?(0)) }
+      required(:consumer_mapper)
+      required(:topic_mapper)
+      required(:params_base_class).filled

       optional(:backend).filled
-
-      optional(:connection_pool).schema do
-        required(:size).filled
-        optional(:timeout).filled(:int?)
-      end
     end
   end
 end
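These dry-validation schemas are called with the configuration hash and the result is checked for success, the same way the routing builder does it in the hunk near the top of this diff. A sketch with placeholder attribute values; the error class mirrors the one the builder raises:

  result = Karafka::Schemas::Config.call(
    client_id: 'example_app',
    shutdown_timeout: 60,
    consumer_mapper: Karafka::Routing::ConsumerMapper,
    topic_mapper: Karafka::Routing::TopicMapper,
    params_base_class: Hash,
    backend: :inline
  )

  raise Karafka::Errors::InvalidConfiguration, result.errors unless result.success?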
data/lib/karafka/schemas/consumer_group.rb

@@ -2,33 +2,46 @@

 module Karafka
   module Schemas
-    # Consumer group topic validation rules
-    ConsumerGroupTopic = Dry::Validation.Schema do
-      required(:id).filled(:str?, format?: Karafka::Schemas::TOPIC_REGEXP)
-      required(:name).filled(:str?, format?: Karafka::Schemas::TOPIC_REGEXP)
-      required(:backend).filled(included_in?: %i[inline sidekiq])
-      required(:controller).filled
-      required(:parser).filled
-      required(:max_bytes_per_partition).filled(:int?, gteq?: 0)
-      required(:start_from_beginning).filled(:bool?)
-      required(:batch_processing).filled(:bool?)
-      required(:persistent).filled(:bool?)
-    end
-
     # Schema for single full route (consumer group + topics) validation.
     ConsumerGroup = Dry::Validation.Schema do
+      # Valid uri schemas of Kafka broker url
+      # The ||= is due to the behavior of require_all that resolves dependencies
+      # but someetimes loads things twice
+      URI_SCHEMES ||= %w[kafka kafka+ssl].freeze
+
+      # Available sasl scram mechanism of authentication (plus nil)
+      SASL_SCRAM_MECHANISMS ||= %w[sha256 sha512].freeze
+
+      configure do
+        config.messages_file = File.join(
+          Karafka.gem_root, 'config', 'errors.yml'
+        )
+
+        # Uri validator to check if uri is in a Karafka acceptable format
+        # @param uri [String] uri we want to validate
+        # @return [Boolean] true if it is a valid uri, otherwise false
+        def broker_schema?(uri)
+          uri = URI.parse(uri)
+          URI_SCHEMES.include?(uri.scheme) && uri.port
+        rescue URI::InvalidURIError
+          false
+        end
+      end
+
       required(:id).filled(:str?, format?: Karafka::Schemas::TOPIC_REGEXP)
-      required(:seed_brokers).filled(:array?)
-      required(:session_timeout).filled(:int?)
-      required(:pause_timeout).filled(:int?, gteq?: 0)
-      required(:offset_commit_interval).filled(:int?)
+      required(:seed_brokers).filled { each(:broker_schema?) }
+      required(:session_timeout).filled { int? | float? }
+      required(:pause_timeout).filled { (int? | float?) & gteq?(0) }
+      required(:offset_commit_interval) { int? | float? }
       required(:offset_commit_threshold).filled(:int?)
       required(:offset_retention_time) { none?.not > int? }
-      required(:heartbeat_interval).filled(:int?, gteq?: 0)
-      required(:connect_timeout).filled(:int?, gt?: 0)
-      required(:socket_timeout).filled(:int?, gt?: 0)
-      required(:max_wait_time).filled(:int?, gteq?: 0)
-      required(:batch_consuming).filled(:bool?)
+      required(:heartbeat_interval).filled { (int? | float?) & gteq?(0) }
+      required(:connect_timeout).filled { (int? | float?) & gt?(0) }
+      required(:socket_timeout).filled { (int? | float?) & gt?(0) }
+      required(:min_bytes).filled(:int?, gt?: 0)
+      required(:max_bytes).filled(:int?, gt?: 0)
+      required(:max_wait_time).filled { (int? | float?) & gteq?(0) }
+      required(:batch_fetching).filled(:bool?)
       required(:topics).filled { each { schema(ConsumerGroupTopic) } }

       # Max wait time cannot exceed socket_timeout - wouldn't make sense
@@ -43,14 +56,22 @@ module Karafka
         ssl_ca_cert_file_path
         ssl_client_cert
         ssl_client_cert_key
+        sasl_gssapi_principal
+        sasl_gssapi_keytab
         sasl_plain_authzid
         sasl_plain_username
         sasl_plain_password
-        sasl_gssapi_principal
-        sasl_gssapi_keytab
+        sasl_scram_username
+        sasl_scram_password
       ].each do |encryption_attribute|
         optional(encryption_attribute).maybe(:str?)
       end
+
+      optional(:ssl_ca_certs_from_system).maybe(:bool?)
+
+      # It's not with other encryptions as it has some more rules
+      optional(:sasl_scram_mechanism)
+        .maybe(:str?, included_in?: Karafka::Schemas::SASL_SCRAM_MECHANISMS)
     end
   end
 end
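A practical consequence of the new broker_schema? predicate: each entry in seed_brokers must now be a full kafka:// or kafka+ssl:// URI with an explicit port, rather than a bare host:port string. For example, in the setup block (addresses are placeholders):

  class KarafkaApp < Karafka::App
    setup do |config|
      config.kafka.seed_brokers = %w[kafka://localhost:9092 kafka+ssl://broker.example.com:9093]
    end
  end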
data/lib/karafka/schemas/consumer_group_topic.rb (new file)

@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Schemas
+    # Consumer group topic validation rules
+    ConsumerGroupTopic = Dry::Validation.Schema do
+      required(:id).filled(:str?, format?: Karafka::Schemas::TOPIC_REGEXP)
+      required(:name).filled(:str?, format?: Karafka::Schemas::TOPIC_REGEXP)
+      required(:backend).filled(included_in?: %i[inline sidekiq])
+      required(:consumer).filled
+      required(:parser).filled
+      required(:max_bytes_per_partition).filled(:int?, gteq?: 0)
+      required(:start_from_beginning).filled(:bool?)
+      required(:batch_consuming).filled(:bool?)
+      required(:persistent).filled(:bool?)
+    end
+  end
+end
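The keys validated here correspond to per-topic routing attributes, so a route can override the global defaults topic by topic. A hedged sketch reusing the hypothetical NewVideosConsumer from the earlier example; the specific values are illustrative only:

  class KarafkaApp < Karafka::App
    consumer_groups.draw do
      topic :new_videos do
        consumer NewVideosConsumer
        backend :inline
        batch_consuming true      # renamed from batch_processing in 1.0
        start_from_beginning true
        persistent true
      end
    end
  end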
data/lib/karafka/schemas/responder_usage.rb

@@ -9,6 +9,7 @@ module Karafka
       required(:multiple_usage).filled(:bool?)
       required(:usage_count).filled(:int?, gteq?: 0)
       required(:registered).filled(eql?: true)
+      required(:async).filled(:bool?)

       rule(
         required_usage: %i[required usage_count]
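The schema now also validates an async flag per registered responder topic. Assuming the responder topic DSL accepts it as an option alongside the other keys in this schema, a responder could opt a topic into asynchronous delivery roughly like this (class and topic names are made up):

  class VideosResponder < Karafka::BaseResponder
    topic :processed_videos, async: true

    def respond(video)
      respond_to :processed_videos, video
    end
  end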
data/lib/karafka/server.rb

@@ -3,17 +3,22 @@
 module Karafka
   # Karafka consuming server class
   class Server
+    @consumer_threads = Concurrent::Array.new
+
+    # How long should we sleep between checks on shutting down consumers
+    SUPERVISION_SLEEP = 1
+    # What system exit code should we use when we terminated forcefully
+    FORCEFUL_EXIT_CODE = 2
+
     class << self
-      # We need to store reference to all the consumers in the main server thread,
-      # So we can have access to them later on and be able to stop them on exit
-      attr_reader :consumers
+      # Set of consuming threads. Each consumer thread contains a single consumer
+      attr_accessor :consumer_threads

       # Writer for list of consumer groups that we want to consume in our current process context
       attr_writer :consumer_groups

       # Method which runs app
       def run
-        @consumers = Concurrent::Array.new
         bind_on_sigint
         bind_on_sigquit
         bind_on_sigterm
@@ -36,29 +41,17 @@ module Karafka

       # What should happen when we decide to quit with sigint
       def bind_on_sigint
-        process.on_sigint do
-          Karafka::App.stop!
-          consumers.map(&:stop)
-          Kernel.exit
-        end
+        process.on_sigint { stop_supervised }
       end

       # What should happen when we decide to quit with sigquit
       def bind_on_sigquit
-        process.on_sigquit do
-          Karafka::App.stop!
-          consumers.map(&:stop)
-          Kernel.exit
-        end
+        process.on_sigquit { stop_supervised }
       end

       # What should happen when we decide to quit with sigterm
       def bind_on_sigterm
-        process.on_sigterm do
-          Karafka::App.stop!
-          consumers.map(&:stop)
-          Kernel.exit
-        end
+        process.on_sigterm { stop_supervised }
       end

       # Starts Karafka with a supervision
@@ -67,8 +60,34 @@ module Karafka
       def start_supervised
         process.supervise do
           Karafka::App.run!
-          Karafka::Fetcher.new.fetch_loop
+          Karafka::Fetcher.call
+        end
+      end
+
+      # Stops Karafka with a supervision (as long as there is a shutdown timeout)
+      # If consumers won't stop in a given timeframe, it will force them to exit
+      def stop_supervised
+        Karafka.monitor.instrument('server.stop', {})
+
+        Karafka::App.stop!
+        # If there is no shutdown timeout, we don't exit and wait until all the consumers
+        # had done their work
+        return unless Karafka::App.config.shutdown_timeout
+
+        # If there is a timeout, we check every 1 second (for the timeout period) if all
+        # the threads finished their work and if so, we can just return and normal
+        # shutdown process will take place
+        Karafka::App.config.shutdown_timeout.to_i.times do
+          return if consumer_threads.count(&:alive?).zero?
+          sleep SUPERVISION_SLEEP
         end
+
+        Karafka.monitor.instrument('server.stop.error', {})
+        # We're done waiting, lets kill them!
+        consumer_threads.each(&:terminate)
+
+        # exit is not within the instrumentation as it would not trigger due to exit
+        Kernel.exit FORCEFUL_EXIT_CODE
       end
     end
   end
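In short, a signal now triggers stop_supervised: the server instruments 'server.stop', stops the app, polls the consumer threads once per SUPERVISION_SLEEP second for up to shutdown_timeout seconds, and only then terminates the threads and exits with code 2. The grace period comes from the new shutdown_timeout setting shown below; a minimal sketch of tuning it:

  class KarafkaApp < Karafka::App
    setup do |config|
      config.shutdown_timeout = 30 # seconds to wait for consumers; nil waits indefinitely
    end
  end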
data/lib/karafka/setup/config.rb

@@ -13,6 +13,7 @@ module Karafka
     # @see Karafka::Setup::Configurators::Base for more details about configurators api
     class Config
       extend Dry::Configurable
+      extend Callbacks::Config

       # Available settings
       # option client_id [String] kafka client_id - used to provide
@@ -21,39 +22,46 @@ module Karafka
       # What backend do we want to use to process messages
       setting :backend, :inline
       # option logger [Instance] logger that we want to use
-      setting :logger, -> { ::Karafka::Logger.instance }
+      setting :logger, -> { ::Karafka::Instrumentation::Logger.instance }
       # option monitor [Instance] monitor that we will to use (defaults to Karafka::Monitor)
-      setting :monitor, -> { ::Karafka::Monitor.instance }
+      setting :monitor, -> { ::Karafka::Instrumentation::Monitor.instance }
+      # Mapper used to remap consumer groups ids, so in case users migrate from other tools
+      # or they need to maintain their own internal consumer group naming conventions, they
+      # can easily do it, replacing the default client_id + consumer name pattern concept
+      setting :consumer_mapper, -> { Routing::ConsumerMapper }
       # Mapper used to remap names of topics, so we can have a clean internal topic namings
       # despite using any Kafka provider that uses namespacing, etc
       # It needs to implement two methods:
       # - #incoming - for remapping from the incoming message to our internal format
       # - #outgoing - for remapping from internal topic name into outgoing message
-      setting :topic_mapper, -> { Routing::Mapper }
-      # If batch_consuming is true, we will consume kafka messages in batches instead of 1 by 1
-      # @note Consuming does not equal processing, see batch_processing description for details
-      setting :batch_consuming, true
-      # If batch_processing is true, we will have access to #params_batch instead of #params.
+      setting :topic_mapper, -> { Routing::TopicMapper }
+      # Default parser for parsing and unparsing incoming and outgoing data
+      setting :parser, -> { Karafka::Parsers::Json }
+      # If batch_fetching is true, we will fetch kafka messages in batches instead of 1 by 1
+      # @note Fetching does not equal consuming, see batch_consuming description for details
+      setting :batch_fetching, true
+      # If batch_consuming is true, we will have access to #params_batch instead of #params.
       # #params_batch will contain params received from Kafka (may be more than 1) so we can
       # process them in batches
-      setting :batch_processing, false
-      # Should we operate in a single controller instance across multiple batches of messages,
-      # from the same partition or should we build a new instance for each incoming batch.
-      # Disabling that can be useful when you want to build a new controller instance for each
-      # incoming batch. It's disabled by default, not to create more objects that needed on
-      # each batch
+      setting :batch_consuming, false
+      # Should we operate in a single consumer instance across multiple batches of messages,
+      # from the same partition or should we build a new one for each incoming batch.
+      # Disabling that can be useful when you want to create a new consumer instance for each
+      # incoming batch. It's disabled by default, not to create more objects that needed
+      # on each batch
       setting :persistent, true
-      # Connection pool options are used for producer (Waterdrop) - by default it will adapt to
-      # number of active actors
-      setting :connection_pool do
-        # Connection pool size for producers. If you use sidekiq or any other multi threaded
-        # backend, you might want to tune it to match number of threads of your background
-        # processing engine
-        setting :size, -> { ::Karafka::App.consumer_groups.active.count }
-        # How long should we wait for a working resource from the pool before rising timeout
-        # With a proper connection pool size, this should never happen
-        setting :timeout, 5
-      end
+      # option shutdown_timeout [Integer, nil] the number of seconds after which Karafka no
+      #   longer wait for the consumers to stop gracefully but instead we force
+      #   terminate everything.
+      # @note Keep in mind, that if your business logic
+      # @note If set to nil, it won't forcefully shutdown the process at all.
+      setting :shutdown_timeout, 60
+      # option params_base_class [Class] base class for params class initialization
+      #   This can be either a Hash or a HashWithIndifferentAccess depending on your
+      #   requirements. Note, that by using HashWithIndifferentAccess, you remove some of the
+      #   performance in favor of convenience. This can be useful especially if you already use
+      #   it with Rails, etc
+      setting :params_base_class, Hash

       # option kafka [Hash] - optional - kafka configuration options
       setting :kafka do
@@ -62,17 +70,17 @@ module Karafka
         # option session_timeout [Integer] the number of seconds after which, if a client
         #   hasn't contacted the Kafka cluster, it will be kicked out of the group.
         setting :session_timeout, 30
-        # Time that a given partition will be paused from processing messages, when message
-        # processing fails. It allows us to process other partitions, while the error is being
+        # Time that a given partition will be paused from fetching messages, when message
+        # consumption fails. It allows us to process other partitions, while the error is being
         # resolved and also "slows" things down, so it prevents from "eating" up all messages and
-        # processing them with failed code
+        # consuming them with failed code
         setting :pause_timeout, 10
         # option offset_commit_interval [Integer] the interval between offset commits,
         #   in seconds.
         setting :offset_commit_interval, 10
         # option offset_commit_threshold [Integer] the number of messages that can be
         #   processed before their offsets are committed. If zero, offset commits are
-        #   not triggered by message processing.
+        #   not triggered by message consumption.
         setting :offset_commit_threshold, 0
         # option heartbeat_interval [Integer] the interval between heartbeats; must be less
         #   than the session window.
@@ -86,9 +94,20 @@ module Karafka
         #   returning messages from the server; if `max_wait_time` is reached, this
         #   is ignored.
         setting :min_bytes, 1
-        # option max_wait_time [Integer, Float] the maximum duration of time to wait before
-        #   returning messages from the server, in seconds.
-        setting :max_wait_time, 5
+        # option max_bytes [Integer] the maximum number of bytes to read before returning messages
+        #   from each broker.
+        setting :max_bytes, 10_485_760
+        # option max_wait_time [Integer, Float] max_wait_time is the maximum number of seconds to
+        #   wait before returning data from a single message fetch. By setting this high you also
+        #   increase the fetching throughput - and by setting it low you set a bound on latency.
+        #   This configuration overrides `min_bytes`, so you'll _always_ get data back within the
+        #   time specified. The default value is one second. If you want to have at most five
+        #   seconds of latency, set `max_wait_time` to 5. You should make sure
+        #   max_wait_time * num brokers + heartbeat_interval is less than session_timeout.
+        setting :max_wait_time, 1
+        # option automatically_mark_as_consumed [Boolean] should we automatically mark received
+        #   messages as consumed (processed) after non-error consumption
+        setting :automatically_mark_as_consumed, true
         # option reconnect_timeout [Integer] How long should we wait before trying to reconnect to
         #   Kafka cluster that went down (in seconds)
         setting :reconnect_timeout, 5
@@ -103,50 +122,54 @@ module Karafka
         #   writing to a socket connection to a broker. After this timeout expires the connection
         #   will be killed. Note that some Kafka operations are by definition long-running, such as
         #   waiting for new messages to arrive in a partition, so don't set this value too low
-        setting :socket_timeout, 10
+        setting :socket_timeout, 30
+
         # SSL authentication related settings
-        # option ca_cert [String] SSL CA certificate
+        # option ca_cert [String, nil] SSL CA certificate
         setting :ssl_ca_cert, nil
-        # option ssl_ca_cert_file_path [String] SSL CA certificate file path
+        # option ssl_ca_cert_file_path [String, nil] SSL CA certificate file path
         setting :ssl_ca_cert_file_path, nil
-        # option client_cert [String] SSL client certificate
+        # option ssl_ca_certs_from_system [Boolean] Use the CA certs from your system's default
+        #   certificate store
+        setting :ssl_ca_certs_from_system, false
+        # option ssl_client_cert [String, nil] SSL client certificate
         setting :ssl_client_cert, nil
-        # option client_cert_key [String] SSL client certificate password
+        # option ssl_client_cert_key [String, nil] SSL client certificate password
        setting :ssl_client_cert_key, nil
-        # option sasl_gssapi_principal [String] sasl principal
+        # option sasl_gssapi_principal [String, nil] sasl principal
        setting :sasl_gssapi_principal, nil
-        # option sasl_gssapi_keytab [String] sasl keytab
+        # option sasl_gssapi_keytab [String, nil] sasl keytab
        setting :sasl_gssapi_keytab, nil
        # option sasl_plain_authzid [String] The authorization identity to use
        setting :sasl_plain_authzid, ''
-        # option sasl_plain_username [String] The username used to authenticate
+        # option sasl_plain_username [String, nil] The username used to authenticate
        setting :sasl_plain_username, nil
-        # option sasl_plain_password [String] The password used to authenticate
+        # option sasl_plain_password [String, nil] The password used to authenticate
        setting :sasl_plain_password, nil
+        # option sasl_scram_username [String, nil] The username used to authenticate
+        setting :sasl_scram_username, nil
+        # option sasl_scram_password [String, nil] The password used to authenticate
+        setting :sasl_scram_password, nil
+        # option sasl_scram_mechanism [String, nil] Scram mechanism, either 'sha256' or 'sha512'
+        setting :sasl_scram_mechanism, nil
       end

-      # This is configured automatically, don't overwrite it!
-      # Each consumer group requires separate thread, so number of threads should be equal to
-      # number of consumer groups
-      setting :concurrency, -> { ::Karafka::App.consumer_groups.count }
-
       class << self
         # Configurating method
         # @yield Runs a block of code providing a config singleton instance to it
         # @yieldparam [Karafka::Setup::Config] Karafka config instance
         def setup
-          configure do |config|
-            yield(config)
-          end
+          configure { |config| yield(config) }
         end

         # Everything that should be initialized after the setup
         # Components are in karafka/config directory and are all loaded one by one
         # If you want to configure a next component, please add a proper file to config dir
         def setup_components
-          Configurators::Base.descendants.each do |klass|
-            klass.new(config).setup
-          end
+          [
+            Configurators::Params,
+            Configurators::WaterDrop
+          ].each { |klass| klass.setup(config) }
         end

         # Validate config based on ConfigurationSchema
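Pulling the renamed and newly added settings from this file together, a consolidated 1.2-style setup might look like the sketch below; the client_id and broker addresses are placeholders, and every value simply echoes a setting visible in the diff above:

  class KarafkaApp < Karafka::App
    setup do |config|
      config.client_id = 'example_app'
      config.backend = :inline
      config.batch_fetching = true      # 1.0 name: batch_consuming
      config.batch_consuming = false    # 1.0 name: batch_processing
      config.shutdown_timeout = 60
      config.params_base_class = Hash
      config.kafka.seed_brokers = %w[kafka://localhost:9092]
      config.kafka.max_bytes = 10_485_760
      config.kafka.max_wait_time = 1
      config.kafka.automatically_mark_as_consumed = true
    end
  end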