karafka 1.4.9 → 2.0.0.alpha1

Files changed (127)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/FUNDING.yml +3 -0
  4. data/.github/workflows/ci.yml +78 -26
  5. data/.ruby-version +1 -1
  6. data/CHANGELOG.md +46 -0
  7. data/Gemfile +6 -0
  8. data/Gemfile.lock +39 -49
  9. data/LICENSE +14 -0
  10. data/LICENSE-COMM +89 -0
  11. data/LICENSE-LGPL +165 -0
  12. data/README.md +16 -48
  13. data/bin/benchmarks +85 -0
  14. data/bin/create_token +28 -0
  15. data/bin/integrations +160 -0
  16. data/bin/stress +13 -0
  17. data/certs/karafka-pro.pem +11 -0
  18. data/config/errors.yml +4 -38
  19. data/docker-compose.yml +11 -3
  20. data/karafka.gemspec +17 -17
  21. data/lib/active_job/consumer.rb +22 -0
  22. data/lib/active_job/karafka.rb +18 -0
  23. data/lib/active_job/queue_adapters/karafka_adapter.rb +29 -0
  24. data/lib/active_job/routing_extensions.rb +15 -0
  25. data/lib/karafka/app.rb +13 -20
  26. data/lib/karafka/base_consumer.rb +103 -34
  27. data/lib/karafka/cli/base.rb +4 -4
  28. data/lib/karafka/cli/info.rb +43 -8
  29. data/lib/karafka/cli/install.rb +3 -8
  30. data/lib/karafka/cli/server.rb +17 -30
  31. data/lib/karafka/cli.rb +4 -11
  32. data/lib/karafka/connection/client.rb +279 -93
  33. data/lib/karafka/connection/listener.rb +137 -38
  34. data/lib/karafka/connection/messages_buffer.rb +57 -0
  35. data/lib/karafka/connection/pauses_manager.rb +46 -0
  36. data/lib/karafka/connection/rebalance_manager.rb +62 -0
  37. data/lib/karafka/contracts/config.rb +25 -7
  38. data/lib/karafka/contracts/consumer_group.rb +0 -173
  39. data/lib/karafka/contracts/consumer_group_topic.rb +17 -7
  40. data/lib/karafka/contracts/server_cli_options.rb +1 -9
  41. data/lib/karafka/contracts.rb +1 -1
  42. data/lib/karafka/env.rb +46 -0
  43. data/lib/karafka/errors.rb +14 -18
  44. data/lib/karafka/helpers/multi_delegator.rb +2 -2
  45. data/lib/karafka/instrumentation/callbacks/error.rb +40 -0
  46. data/lib/karafka/instrumentation/callbacks/statistics.rb +42 -0
  47. data/lib/karafka/instrumentation/monitor.rb +14 -21
  48. data/lib/karafka/instrumentation/stdout_listener.rb +64 -91
  49. data/lib/karafka/instrumentation.rb +21 -0
  50. data/lib/karafka/licenser.rb +65 -0
  51. data/lib/karafka/{params → messages}/batch_metadata.rb +7 -13
  52. data/lib/karafka/messages/builders/batch_metadata.rb +30 -0
  53. data/lib/karafka/messages/builders/message.rb +38 -0
  54. data/lib/karafka/messages/builders/messages.rb +40 -0
  55. data/lib/karafka/{params/params.rb → messages/message.rb} +7 -12
  56. data/lib/karafka/messages/messages.rb +64 -0
  57. data/lib/karafka/{params → messages}/metadata.rb +4 -6
  58. data/lib/karafka/messages/seek.rb +9 -0
  59. data/lib/karafka/patches/rdkafka/consumer.rb +22 -0
  60. data/lib/karafka/processing/executor.rb +96 -0
  61. data/lib/karafka/processing/executors_buffer.rb +49 -0
  62. data/lib/karafka/processing/jobs/base.rb +18 -0
  63. data/lib/karafka/processing/jobs/consume.rb +28 -0
  64. data/lib/karafka/processing/jobs/revoked.rb +22 -0
  65. data/lib/karafka/processing/jobs/shutdown.rb +23 -0
  66. data/lib/karafka/processing/jobs_queue.rb +121 -0
  67. data/lib/karafka/processing/worker.rb +57 -0
  68. data/lib/karafka/processing/workers_batch.rb +22 -0
  69. data/lib/karafka/railtie.rb +65 -0
  70. data/lib/karafka/routing/builder.rb +15 -14
  71. data/lib/karafka/routing/consumer_group.rb +10 -18
  72. data/lib/karafka/routing/consumer_mapper.rb +1 -2
  73. data/lib/karafka/routing/router.rb +1 -1
  74. data/lib/karafka/routing/subscription_group.rb +53 -0
  75. data/lib/karafka/routing/subscription_groups_builder.rb +51 -0
  76. data/lib/karafka/routing/topic.rb +47 -25
  77. data/lib/karafka/runner.rb +59 -0
  78. data/lib/karafka/serialization/json/deserializer.rb +6 -15
  79. data/lib/karafka/server.rb +62 -25
  80. data/lib/karafka/setup/config.rb +86 -159
  81. data/lib/karafka/status.rb +13 -3
  82. data/lib/karafka/templates/example_consumer.rb.erb +16 -0
  83. data/lib/karafka/templates/karafka.rb.erb +14 -50
  84. data/lib/karafka/time_trackers/base.rb +19 -0
  85. data/lib/karafka/time_trackers/pause.rb +84 -0
  86. data/lib/karafka/time_trackers/poll.rb +65 -0
  87. data/lib/karafka/version.rb +1 -1
  88. data/lib/karafka.rb +30 -13
  89. data.tar.gz.sig +0 -0
  90. metadata +78 -108
  91. metadata.gz.sig +0 -0
  92. data/MIT-LICENCE +0 -18
  93. data/lib/karafka/assignment_strategies/round_robin.rb +0 -13
  94. data/lib/karafka/attributes_map.rb +0 -63
  95. data/lib/karafka/backends/inline.rb +0 -16
  96. data/lib/karafka/base_responder.rb +0 -226
  97. data/lib/karafka/cli/flow.rb +0 -48
  98. data/lib/karafka/cli/missingno.rb +0 -19
  99. data/lib/karafka/code_reloader.rb +0 -67
  100. data/lib/karafka/connection/api_adapter.rb +0 -158
  101. data/lib/karafka/connection/batch_delegator.rb +0 -55
  102. data/lib/karafka/connection/builder.rb +0 -23
  103. data/lib/karafka/connection/message_delegator.rb +0 -36
  104. data/lib/karafka/consumers/batch_metadata.rb +0 -10
  105. data/lib/karafka/consumers/callbacks.rb +0 -71
  106. data/lib/karafka/consumers/includer.rb +0 -64
  107. data/lib/karafka/consumers/responders.rb +0 -24
  108. data/lib/karafka/consumers/single_params.rb +0 -15
  109. data/lib/karafka/contracts/responder_usage.rb +0 -54
  110. data/lib/karafka/fetcher.rb +0 -42
  111. data/lib/karafka/helpers/class_matcher.rb +0 -88
  112. data/lib/karafka/helpers/config_retriever.rb +0 -46
  113. data/lib/karafka/helpers/inflector.rb +0 -26
  114. data/lib/karafka/params/builders/batch_metadata.rb +0 -30
  115. data/lib/karafka/params/builders/params.rb +0 -38
  116. data/lib/karafka/params/builders/params_batch.rb +0 -25
  117. data/lib/karafka/params/params_batch.rb +0 -60
  118. data/lib/karafka/patches/ruby_kafka.rb +0 -47
  119. data/lib/karafka/persistence/client.rb +0 -29
  120. data/lib/karafka/persistence/consumers.rb +0 -45
  121. data/lib/karafka/persistence/topics.rb +0 -48
  122. data/lib/karafka/responders/builder.rb +0 -36
  123. data/lib/karafka/responders/topic.rb +0 -55
  124. data/lib/karafka/routing/topic_mapper.rb +0 -53
  125. data/lib/karafka/serialization/json/serializer.rb +0 -31
  126. data/lib/karafka/setup/configurators/water_drop.rb +0 -36
  127. data/lib/karafka/templates/application_responder.rb.erb +0 -11
@@ -0,0 +1,53 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Routing
+    # Object representing a set of single consumer group topics that can be subscribed together
+    # with one connection.
+    #
+    # @note One subscription group will always belong to one consumer group, but one consumer
+    #   group can have multiple subscription groups.
+    class SubscriptionGroup
+      attr_reader :id, :topics
+
+      # @param topics [Array<Topic>] all the topics that share the same key settings
+      # @return [SubscriptionGroup] built subscription group
+      def initialize(topics)
+        @id = SecureRandom.uuid
+        @topics = topics
+        freeze
+      end
+
+      # @return [String] consumer group id
+      def consumer_group_id
+        kafka['group.id']
+      end
+
+      # @return [Integer] max messages fetched in a single go
+      def max_messages
+        @topics.first.max_messages
+      end
+
+      # @return [Integer] max milliseconds we can wait for incoming messages
+      def max_wait_time
+        @topics.first.max_wait_time
+      end
+
+      # @return [Hash] kafka settings are a bit special. They are exactly the same for all of the
+      #   topics but they lack the group.id (unless explicitly provided). To make it compatible
+      #   with our routing engine, we inject it before it goes to the consumer
+      def kafka
+        kafka = @topics.first.kafka.dup
+
+        kafka['client.id'] ||= Karafka::App.config.client_id
+        kafka['group.id'] ||= @topics.first.consumer_group.id
+        kafka['auto.offset.reset'] ||= 'earliest'
+        # Karafka manages the offsets based on the processing state, thus we do not rely on the
+        # rdkafka offset auto-storing
+        kafka['enable.auto.offset.store'] = 'false'
+        kafka.freeze
+        kafka
+      end
+    end
+  end
+end
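
To illustrate the settings injection above: a minimal sketch, assuming a booted and configured Karafka 2.0 app. StubGroup and StubTopic are hypothetical stand-ins for the real routing objects and merely expose the same readers.

  # Hypothetical stand-ins; real code would use Karafka::Routing::Topic instances
  StubGroup = Struct.new(:id)
  StubTopic = Struct.new(:kafka, :max_messages, :max_wait_time, :consumer_group)

  topic = StubTopic.new(
    { 'bootstrap.servers' => 'localhost:9092' }, # no explicit group.id given
    1_000,
    5_000,
    StubGroup.new('orders')
  )

  group = Karafka::Routing::SubscriptionGroup.new([topic])

  group.kafka['group.id']                 #=> 'orders' (injected from the consumer group)
  group.kafka['enable.auto.offset.store'] #=> 'false' (always forced off)
  group.consumer_group_id                 #=> 'orders'
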
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Routing
+    # rdkafka allows us to group topic subscriptions when they have the same settings.
+    # This builder groups topics from a single consumer group into subscription groups that can
+    # be subscribed with one rdkafka connection.
+    # This way we save resources, as having several rdkafka consumers under the hood is not the
+    # cheapest thing in a bigger system.
+    #
+    # In general, if we can, we try to subscribe to as many topics with one rdkafka connection as
+    # possible, but if not possible, we divide.
+    class SubscriptionGroupsBuilder
+      # Keys used to build up a hash for subscription groups distribution.
+      # In order to be able to use the same rdkafka connection for several topics, those keys
+      # need to have the same values.
+      DISTRIBUTION_KEYS = %i[
+        kafka
+        max_messages
+        max_wait_time
+      ].freeze
+
+      private_constant :DISTRIBUTION_KEYS
+
+      # @param topics [Array<Topic>] array with topics based on which we want to build
+      #   subscription groups
+      # @return [Array<SubscriptionGroup>] all subscription groups we need in separate threads
+      def call(topics)
+        topics
+          .map { |topic| [checksum(topic), topic] }
+          .group_by(&:first)
+          .values
+          .map { |value| value.map(&:last) }
+          .map { |grouped_topics| SubscriptionGroup.new(grouped_topics) }
+      end
+
+      private
+
+      # @param topic [Karafka::Routing::Topic] topic for which we compute the grouping checksum
+      # @return [Integer] checksum that we can use to check if topics have the same set of
+      #   settings based on which we group
+      def checksum(topic)
+        accu = {}
+
+        DISTRIBUTION_KEYS.each { |key| accu[key] = topic.public_send(key) }
+
+        accu.hash
+      end
+    end
+  end
+end
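
The grouping behavior can be seen with a small sketch (again hypothetical: StubTopic stands in for Karafka::Routing::Topic and exposes the three distribution readers). Topics whose distribution keys hash identically end up in one subscription group; any difference splits them:

  StubTopic = Struct.new(:kafka, :max_messages, :max_wait_time, :consumer_group)

  shared_kafka = { 'bootstrap.servers' => 'localhost:9092' }

  topics = [
    StubTopic.new(shared_kafka, 100, 1_000, nil), # identical settings, so these
    StubTopic.new(shared_kafka, 100, 1_000, nil), # two share one rdkafka connection
    StubTopic.new(shared_kafka, 500, 1_000, nil)  # different max_messages, so split off
  ]

  groups = Karafka::Routing::SubscriptionGroupsBuilder.new.call(topics)

  groups.size                        #=> 2
  groups.map { |sg| sg.topics.size } #=> [2, 1]
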
@@ -2,17 +2,23 @@
 
 module Karafka
   module Routing
-    # Topic stores all the details on how we should interact with Kafka given topic
+    # Topic stores all the details on how we should interact with Kafka given topic.
     # It belongs to a consumer group as from 0.6 all the topics can work in the same consumer group
-    # It is a part of Karafka's DSL
+    # It is a part of Karafka's DSL.
     class Topic
-      extend Helpers::ConfigRetriever
-      extend Forwardable
+      attr_reader :id, :name, :consumer_group
+      attr_writer :consumer
 
-      attr_reader :id, :consumer_group
-      attr_accessor :consumer
+      # Attributes we can inherit from the root unless they were defined on this level
+      INHERITABLE_ATTRIBUTES = %i[
+        kafka
+        deserializer
+        manual_offset_management
+        max_messages
+        max_wait_time
+      ].freeze
 
-      def_delegator :@consumer_group, :batch_fetching
+      private_constant :INHERITABLE_ATTRIBUTES
 
       # @param [String, Symbol] name of a topic on which we want to listen
       # @param consumer_group [Karafka::Routing::ConsumerGroup] owning consumer group of this topic
@@ -22,40 +28,56 @@ module Karafka
         @attributes = {}
         # @note We use identifier related to the consumer group that owns a topic, because from
         #   Karafka 0.6 we can handle multiple Kafka instances with the same process and we can
-        #   have same topic name across multiple Kafkas
+        #   have same topic name across multiple consumer groups
         @id = "#{consumer_group.id}_#{@name}"
       end
 
-      # Initializes default values for all the options that support defaults if their values are
-      # not yet specified. This needs to be done (cannot be lazy loaded on first use) because
-      # everywhere except Karafka server command, those would not be initialized on time - for
-      # example for Sidekiq
-      def build
-        Karafka::AttributesMap.topic.each { |attr| send(attr) }
-        self
-      end
+      INHERITABLE_ATTRIBUTES.each do |attribute|
+        attr_writer attribute
+
+        define_method attribute do
+          current_value = instance_variable_get(:"@#{attribute}")
+
+          return current_value unless current_value.nil?
 
-      # @return [Class, nil] Class (not an instance) of a responder that should respond from
-      #   consumer back to Kafka (useful for piping data flows)
-      def responder
-        @responder ||= Karafka::Responders::Builder.new(consumer).build
+          value = Karafka::App.config.send(attribute)
+
+          instance_variable_set(:"@#{attribute}", value)
+        end
       end
 
-      Karafka::AttributesMap.topic.each do |attribute|
-        config_retriever_for(attribute)
+      # @return [Class] consumer class that we should use
+      def consumer
+        if Karafka::App.config.consumer_persistence
+          # When persistence of consumers is on, no need to reload them
+          @consumer
+        else
+          # In order to support code reload without having to change the topic api, we re-fetch
+          # the class of a consumer based on its class name. This will support all the cases
+          # where the consumer class is defined with a name. It won't support code reload for
+          # anonymous consumer classes, but this is an edge case
+          begin
+            ::Object.const_get(@consumer.to_s)
+          rescue NameError
+            # It will only fail in the case of anonymous classes
+            @consumer
+          end
+        end
       end
 
       # @return [Hash] hash with all the topic attributes
       # @note This is being used when we validate the consumer_group and its topics
       def to_h
-        map = Karafka::AttributesMap.topic.map do |attribute|
+        map = INHERITABLE_ATTRIBUTES.map do |attribute|
           [attribute, public_send(attribute)]
         end
 
         Hash[map].merge!(
           id: id,
-          consumer: consumer
-        )
+          name: name,
+          consumer: consumer,
+          consumer_group_id: consumer_group.id
+        ).freeze
       end
     end
   end
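
The generated readers make per-topic settings fall back lazily to the root configuration. A short sketch of that behavior (hypothetical names, assuming an app configured with max_messages set to 1_000):

  group = Karafka::Routing::ConsumerGroup.new(:example_group)
  topic = Karafka::Routing::Topic.new(:example_topic, group)

  topic.max_messages      #=> 1_000, inherited from Karafka::App.config and memoized

  topic.max_messages = 50 # explicit per-topic value via the generated attr_writer
  topic.max_messages      #=> 50
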
@@ -0,0 +1,59 @@
+# frozen_string_literal: true
+
+module Karafka
+  # Class used to run the Karafka listeners in separate threads
+  class Runner
+    # Starts listening on all the listeners asynchronously
+    # Fetch loops should never end. If they do, it is a critical error
+    def call
+      # Despite the possibility of having several independent listeners, we aim to have one
+      # queue for jobs across them and one workers pool for that
+      jobs_queue = Processing::JobsQueue.new
+
+      workers = Processing::WorkersBatch.new(jobs_queue)
+      Karafka::Server.workers = workers
+
+      threads = listeners(jobs_queue).map do |listener|
+        # We abort on exception because there should be exception handling developed for
+        # each listener running in separate threads, so the exceptions should never leak
+        # and if that happens, it means that something really bad happened and we should stop
+        # the whole process
+        Thread
+          .new { listener.call }
+          .tap { |thread| thread.abort_on_exception = true }
+      end
+
+      # We aggregate threads here for a supervised shutdown process
+      Karafka::Server.consumer_threads = threads
+
+      # All the listener threads need to finish
+      threads.each(&:join)
+      # All the workers need to stop processing anything before we can stop the runner completely
+      workers.each(&:join)
+    # If anything crashes here, we need to raise the error and crash the runner because it means
+    # that something terrible happened
+    rescue StandardError => e
+      Karafka.monitor.instrument(
+        'error.occurred',
+        caller: self,
+        error: e,
+        type: 'runner.call.error'
+      )
+      Karafka::App.stop!
+      raise e
+    end
+
+    private
+
+    # @param jobs_queue [Processing::JobsQueue] the main processing queue
+    # @return [Array<Karafka::Connection::Listener>] listeners that will consume messages for each
+    #   of the subscription groups
+    def listeners(jobs_queue)
+      App
+        .subscription_groups
+        .map do |subscription_group|
+          Karafka::Connection::Listener.new(subscription_group, jobs_queue)
+        end
+    end
+  end
+end
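
The abort_on_exception flag is what turns a leaked listener exception into a whole-process failure. A plain-Ruby sketch of that semantic (not Karafka-specific):

  listener = Thread.new do
    sleep 0.1
    raise 'listener crashed' # normally rescued inside the listener itself
  end
  listener.abort_on_exception = true

  begin
    sleep # the main thread is interrupted by the listener's exception
  rescue RuntimeError => e
    puts "process-level failure: #{e.message}"
  end
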
@@ -1,25 +1,16 @@
 # frozen_string_literal: true
 
 module Karafka
-  # Module for all supported by default serialization and deserialization ways
+  # Module for all supported by default serialization and deserialization ways.
   module Serialization
-    # Namespace for json ser/der
+    # Namespace for json serializers and deserializers.
     module Json
-      # Default Karafka Json deserializer for loading JSON data
+      # Default Karafka Json deserializer for loading JSON data.
       class Deserializer
-        # @param params [Karafka::Params::Params] Full params object that we want to deserialize
+        # @param message [Karafka::Messages::Message] Message object that we want to deserialize
         # @return [Hash] hash with deserialized JSON data
-        # @example
-        #   params = {
-        #     'payload' => "{\"a\":1}",
-        #     'topic' => 'my-topic',
-        #     'headers' => { 'message_type' => :test }
-        #   }
-        #   Deserializer.call(params) #=> { 'a' => 1 }
-        def call(params)
-          params.raw_payload.nil? ? nil : ::JSON.parse(params.raw_payload)
-        rescue ::JSON::ParserError => e
-          raise ::Karafka::Errors::DeserializationError, e
+        def call(message)
+          message.raw_payload.nil? ? nil : ::JSON.parse(message.raw_payload)
         end
       end
     end
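
Under the new API the deserializer receives a message object instead of params, and the DeserializationError wrapping is gone, so parse failures now surface as JSON::ParserError directly. A sketch (StubMessage is a hypothetical stand-in for Karafka::Messages::Message):

  StubMessage = Struct.new(:raw_payload)

  deserializer = Karafka::Serialization::Json::Deserializer.new

  deserializer.call(StubMessage.new('{"a":1}')) #=> { "a" => 1 }
  deserializer.call(StubMessage.new(nil))       #=> nil (tombstone messages)
  deserializer.call(StubMessage.new('oops'))    # raises JSON::ParserError
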
@@ -3,8 +3,6 @@
 module Karafka
   # Karafka consuming server class
   class Server
-    @consumer_threads = Concurrent::Array.new
-
     # How long should we sleep between checks on shutting down consumers
     SUPERVISION_SLEEP = 0.1
     # What system exit code should we use when we terminate forcefully
@@ -19,53 +17,70 @@ module Karafka
       # Set of consuming threads. Each consumer thread contains a single consumer
       attr_accessor :consumer_threads
 
+      # Set of workers
+      attr_accessor :workers
+
       # Writer for list of consumer groups that we want to consume in our current process context
       attr_writer :consumer_groups
 
       # Method which runs app
       def run
-        process.on_sigint { stop_supervised }
-        process.on_sigquit { stop_supervised }
-        process.on_sigterm { stop_supervised }
-        run_supervised
+        process.on_sigint { stop }
+        process.on_sigquit { stop }
+        process.on_sigterm { stop }
+
+        # Start is blocking until stop is called and when we stop, it will wait until
+        # all of the things are ready to stop
+        start
+
+        # We always need to wait for Karafka to stop here since we should wait for the stop
+        # running in a separate thread (or trap context) to indicate everything is closed
+        Thread.pass until Karafka::App.stopped?
+      # Tries its best to shut down underlying components before re-raising
+      # rubocop:disable Lint/RescueException
+      rescue Exception => e
+        # rubocop:enable Lint/RescueException
+        stop
+
+        raise e
       end
 
       # @return [Array<String>] array with names of consumer groups that should be consumed in a
       #   current server context
       def consumer_groups
-        # If not specified, a server will listed on all the topics
+        # If not specified, a server will listen on all the topics
        @consumer_groups ||= Karafka::App.consumer_groups.map(&:name).freeze
      end
 
-      private
-
-      # @return [Karafka::Process] process wrapper instance used to catch system signal calls
-      def process
-        Karafka::App.config.internal.process
-      end
-
       # Starts Karafka with a supervision
       # @note We don't need to sleep because Karafka::Fetcher is locking and waiting to
       #   finish loop (and it won't happen until we explicitly want to stop)
-      def run_supervised
+      def start
        process.supervise
        Karafka::App.run!
-        Karafka::App.config.internal.fetcher.call
+        Karafka::Runner.new.call
      end
 
      # Stops Karafka with a supervision (as long as there is a shutdown timeout)
-      # If consumers won't stop in a given time frame, it will force them to exit
-      def stop_supervised
+      # If consumers or workers won't stop in a given time frame, it will force them to exit
+      #
+      # @note This method is not async. It should not be executed from the workers as it will
+      #   lock them forever. If you need to run Karafka shutdown from within worker threads,
+      #   please start a separate thread to do so.
+      def stop
        Karafka::App.stop!
 
-        # See https://github.com/dry-rb/dry-configurable/issues/93
        timeout = Thread.new { Karafka::App.config.shutdown_timeout }.join.value
 
        # We check from time to time (for the timeout period) if all the threads finished
        # their work and if so, we can just return and normal shutdown process will take place
-        (timeout * SUPERVISION_CHECK_FACTOR).to_i.times do
-          if consumer_threads.count(&:alive?).zero?
-            Thread.new { Karafka.monitor.instrument('app.stopped') }.join
+        # We divide it by 1000 because we use time in ms.
+        ((timeout / 1_000) * SUPERVISION_CHECK_FACTOR).to_i.times do
+          if consumer_threads.count(&:alive?).zero? &&
+             workers.count(&:alive?).zero?
+
+            Thread.new { Karafka::App.producer.close }.join
+
            return
          end
 
@@ -74,12 +89,34 @@ module Karafka
 
        raise Errors::ForcefulShutdownError
      rescue Errors::ForcefulShutdownError => e
-        Thread.new { Karafka.monitor.instrument('app.stopping.error', error: e) }.join
-        # We're done waiting, lets kill them!
-        consumer_threads.each(&:terminate)
+        thread = Thread.new do
+          Karafka.monitor.instrument(
+            'error.occurred',
+            caller: self,
+            error: e,
+            type: 'app.stopping.error'
+          )
+
+          # We're done waiting, let's kill them!
+          workers.each(&:terminate)
+          consumer_threads.each(&:terminate)
+
+          Karafka::App.producer.close
+        end
+
+        thread.join
 
        # exit! is not within the instrumentation as it would not trigger due to exit
        Kernel.exit! FORCEFUL_EXIT_CODE
+      ensure
+        Karafka::App.stopped!
+      end
+
+      private
+
+      # @return [Karafka::Process] process wrapper instance used to catch system signal calls
+      def process
+        Karafka::App.config.internal.process
      end
    end
  end
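
Because shutdown_timeout is now expressed in milliseconds, the supervision loop divides it by 1_000 before applying the check factor. A worked sketch of the arithmetic, assuming SUPERVISION_CHECK_FACTOR is 10 (its 1.4 value; this hunk does not show it) and the SUPERVISION_SLEEP of 0.1s defined above:

  shutdown_timeout = 60_000 # ms, hypothetical configuration value

  checks   = ((shutdown_timeout / 1_000) * 10).to_i #=> 600 loop iterations
  max_wait = checks * 0.1                           #=> 60.0 seconds of polling at most

  puts "polls #{checks} times, up to #{max_wait}s, before a forceful exit"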