karafka 1.4.15 → 2.0.0.alpha1

Files changed (128)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/FUNDING.yml +3 -0
  4. data/.github/workflows/ci.yml +74 -24
  5. data/.ruby-version +1 -1
  6. data/CHANGELOG.md +38 -39
  7. data/Gemfile +6 -0
  8. data/Gemfile.lock +50 -52
  9. data/LICENSE +14 -0
  10. data/LICENSE-COMM +89 -0
  11. data/LICENSE-LGPL +165 -0
  12. data/README.md +59 -14
  13. data/bin/benchmarks +85 -0
  14. data/bin/create_token +28 -0
  15. data/bin/integrations +160 -0
  16. data/bin/stress +13 -0
  17. data/certs/karafka-pro.pem +11 -0
  18. data/certs/mensfeld.pem +23 -24
  19. data/config/errors.yml +4 -38
  20. data/docker-compose.yml +11 -3
  21. data/karafka.gemspec +10 -20
  22. data/lib/active_job/consumer.rb +22 -0
  23. data/lib/active_job/karafka.rb +18 -0
  24. data/lib/active_job/queue_adapters/karafka_adapter.rb +29 -0
  25. data/lib/active_job/routing_extensions.rb +15 -0
  26. data/lib/karafka/app.rb +13 -20
  27. data/lib/karafka/base_consumer.rb +103 -34
  28. data/lib/karafka/cli/base.rb +4 -4
  29. data/lib/karafka/cli/info.rb +43 -8
  30. data/lib/karafka/cli/install.rb +3 -8
  31. data/lib/karafka/cli/server.rb +17 -30
  32. data/lib/karafka/cli.rb +4 -11
  33. data/lib/karafka/connection/client.rb +279 -93
  34. data/lib/karafka/connection/listener.rb +137 -38
  35. data/lib/karafka/connection/messages_buffer.rb +57 -0
  36. data/lib/karafka/connection/pauses_manager.rb +46 -0
  37. data/lib/karafka/connection/rebalance_manager.rb +62 -0
  38. data/lib/karafka/contracts/config.rb +25 -7
  39. data/lib/karafka/contracts/consumer_group.rb +0 -173
  40. data/lib/karafka/contracts/consumer_group_topic.rb +17 -7
  41. data/lib/karafka/contracts/server_cli_options.rb +1 -9
  42. data/lib/karafka/contracts.rb +1 -1
  43. data/lib/karafka/env.rb +46 -0
  44. data/lib/karafka/errors.rb +14 -18
  45. data/lib/karafka/helpers/multi_delegator.rb +2 -2
  46. data/lib/karafka/instrumentation/callbacks/error.rb +40 -0
  47. data/lib/karafka/instrumentation/callbacks/statistics.rb +42 -0
  48. data/lib/karafka/instrumentation/monitor.rb +14 -21
  49. data/lib/karafka/instrumentation/stdout_listener.rb +64 -91
  50. data/lib/karafka/instrumentation.rb +21 -0
  51. data/lib/karafka/licenser.rb +65 -0
  52. data/lib/karafka/{params → messages}/batch_metadata.rb +7 -13
  53. data/lib/karafka/messages/builders/batch_metadata.rb +30 -0
  54. data/lib/karafka/messages/builders/message.rb +38 -0
  55. data/lib/karafka/messages/builders/messages.rb +40 -0
  56. data/lib/karafka/{params/params.rb → messages/message.rb} +7 -12
  57. data/lib/karafka/messages/messages.rb +64 -0
  58. data/lib/karafka/{params → messages}/metadata.rb +4 -6
  59. data/lib/karafka/messages/seek.rb +9 -0
  60. data/lib/karafka/patches/rdkafka/consumer.rb +22 -0
  61. data/lib/karafka/processing/executor.rb +96 -0
  62. data/lib/karafka/processing/executors_buffer.rb +49 -0
  63. data/lib/karafka/processing/jobs/base.rb +18 -0
  64. data/lib/karafka/processing/jobs/consume.rb +28 -0
  65. data/lib/karafka/processing/jobs/revoked.rb +22 -0
  66. data/lib/karafka/processing/jobs/shutdown.rb +23 -0
  67. data/lib/karafka/processing/jobs_queue.rb +121 -0
  68. data/lib/karafka/processing/worker.rb +57 -0
  69. data/lib/karafka/processing/workers_batch.rb +22 -0
  70. data/lib/karafka/railtie.rb +65 -0
  71. data/lib/karafka/routing/builder.rb +15 -14
  72. data/lib/karafka/routing/consumer_group.rb +10 -18
  73. data/lib/karafka/routing/consumer_mapper.rb +1 -2
  74. data/lib/karafka/routing/router.rb +1 -1
  75. data/lib/karafka/routing/subscription_group.rb +53 -0
  76. data/lib/karafka/routing/subscription_groups_builder.rb +51 -0
  77. data/lib/karafka/routing/topic.rb +47 -25
  78. data/lib/karafka/runner.rb +59 -0
  79. data/lib/karafka/serialization/json/deserializer.rb +6 -15
  80. data/lib/karafka/server.rb +62 -25
  81. data/lib/karafka/setup/config.rb +86 -159
  82. data/lib/karafka/status.rb +13 -3
  83. data/lib/karafka/templates/example_consumer.rb.erb +16 -0
  84. data/lib/karafka/templates/karafka.rb.erb +14 -50
  85. data/lib/karafka/time_trackers/base.rb +19 -0
  86. data/lib/karafka/time_trackers/pause.rb +84 -0
  87. data/lib/karafka/time_trackers/poll.rb +65 -0
  88. data/lib/karafka/version.rb +1 -1
  89. data/lib/karafka.rb +30 -44
  90. data.tar.gz.sig +0 -0
  91. metadata +96 -132
  92. metadata.gz.sig +0 -0
  93. data/MIT-LICENCE +0 -18
  94. data/lib/karafka/assignment_strategies/round_robin.rb +0 -13
  95. data/lib/karafka/attributes_map.rb +0 -63
  96. data/lib/karafka/backends/inline.rb +0 -16
  97. data/lib/karafka/base_responder.rb +0 -226
  98. data/lib/karafka/cli/flow.rb +0 -48
  99. data/lib/karafka/cli/missingno.rb +0 -19
  100. data/lib/karafka/code_reloader.rb +0 -67
  101. data/lib/karafka/connection/api_adapter.rb +0 -158
  102. data/lib/karafka/connection/batch_delegator.rb +0 -55
  103. data/lib/karafka/connection/builder.rb +0 -23
  104. data/lib/karafka/connection/message_delegator.rb +0 -36
  105. data/lib/karafka/consumers/batch_metadata.rb +0 -10
  106. data/lib/karafka/consumers/callbacks.rb +0 -71
  107. data/lib/karafka/consumers/includer.rb +0 -64
  108. data/lib/karafka/consumers/responders.rb +0 -24
  109. data/lib/karafka/consumers/single_params.rb +0 -15
  110. data/lib/karafka/contracts/responder_usage.rb +0 -54
  111. data/lib/karafka/fetcher.rb +0 -42
  112. data/lib/karafka/helpers/class_matcher.rb +0 -88
  113. data/lib/karafka/helpers/config_retriever.rb +0 -46
  114. data/lib/karafka/helpers/inflector.rb +0 -26
  115. data/lib/karafka/params/builders/batch_metadata.rb +0 -30
  116. data/lib/karafka/params/builders/params.rb +0 -38
  117. data/lib/karafka/params/builders/params_batch.rb +0 -25
  118. data/lib/karafka/params/params_batch.rb +0 -60
  119. data/lib/karafka/patches/ruby_kafka.rb +0 -47
  120. data/lib/karafka/persistence/client.rb +0 -29
  121. data/lib/karafka/persistence/consumers.rb +0 -45
  122. data/lib/karafka/persistence/topics.rb +0 -48
  123. data/lib/karafka/responders/builder.rb +0 -36
  124. data/lib/karafka/responders/topic.rb +0 -55
  125. data/lib/karafka/routing/topic_mapper.rb +0 -53
  126. data/lib/karafka/serialization/json/serializer.rb +0 -31
  127. data/lib/karafka/setup/configurators/water_drop.rb +0 -36
  128. data/lib/karafka/templates/application_responder.rb.erb +0 -11
data/lib/karafka/processing/executor.rb
@@ -0,0 +1,96 @@
+# frozen_string_literal: true
+
+module Karafka
+  # Namespace that encapsulates all the logic related to processing data.
+  module Processing
+    # Executors:
+    # - run consumer code with a provided messages batch (via `#call`) or run given teardown
+    #   operations when needed, from separate threads.
+    # - re-create consumer instances for partitions that were revoked and assigned back.
+    #
+    # @note Executors are not removed after a partition is revoked. They are not that big and
+    #   will be re-used in case of a re-claim
+    class Executor
+      # @return [String] unique id of this executor, used for state tracking
+      attr_reader :id
+
+      # @return [String] id of the subscription group to which this executor belongs
+      attr_reader :group_id
+
+      # @param group_id [String] id of the subscription group to which the executor belongs
+      # @param client [Karafka::Connection::Client] kafka client
+      # @param topic [Karafka::Routing::Topic] topic for which this executor will run
+      # @param pause [Karafka::TimeTrackers::Pause] fetch pause object for crash pausing
+      def initialize(group_id, client, topic, pause)
+        @id = SecureRandom.uuid
+        @group_id = group_id
+        @client = client
+        @topic = topic
+        @pause = pause
+      end
+
+      # Runs consumer data processing against a given batch and handles failures and errors.
+      #
+      # @param messages [Array<Rdkafka::Consumer::Message>] raw rdkafka messages
+      # @param received_at [Time] the moment we've received the batch (actually the moment
+      #   we've enqueued it, but good enough)
+      def consume(messages, received_at)
+        # Recreate the consumer with each batch if persistence is not enabled.
+        # We reload the consumers with each batch instead of relying on some external signals
+        # for consistency. That way devs may have it on or off and not in a middle state,
+        # where re-creation of a consumer instance would occur only sometimes
+        @consumer = nil unless ::Karafka::App.config.consumer_persistence
+
+        # First we build the messages batch...
+        consumer.messages = Messages::Builders::Messages.call(
+          messages,
+          @topic,
+          received_at
+        )
+
+        # ...then we run the consumer logic
+        consumer.on_consume
+      end
+
+      # Runs the consumer `#revoked` method that should be triggered when a given consumer is
+      # no longer needed due to partitions reassignment.
+      #
+      # @note Clearing the consumer ensures that, if we get the partition back, it will be
+      #   handled by a consumer with a clean state.
+      #
+      # @note We run it only when the consumer was present, because presence indicates that
+      #   at least a single message has been consumed.
+      def revoked
+        consumer.on_revoked if @consumer
+        @consumer = nil
+      end
+
+      # Runs the consumer `#shutdown` method that should be triggered when a given consumer is
+      # no longer needed as we're closing the process.
+      #
+      # @note While we do not need to clear the consumer here, it's a good habit to clean up
+      #   after the work is done.
+      def shutdown
+        # There is a case where the consumer no longer exists because it was revoked. In a
+        # case like that we do not build a new instance, and shutdown is not triggered.
+        consumer.on_shutdown if @consumer
+        @consumer = nil
+      end
+
+      private
+
+      # @return [Object] cached consumer instance
+      def consumer
+        @consumer ||= begin
+          consumer = @topic.consumer.new
+          consumer.topic = @topic
+          consumer.client = @client
+          consumer.pause = @pause
+          consumer.producer = ::Karafka::App.producer
+          consumer
+        end
+      end
+    end
+  end
+end
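
Taken together, the executor exposes a small lifecycle. A minimal sketch of how the polling side might drive it — all names below (client, topic, pause, raw_messages) are illustrative stand-ins, not actual listener code from this release:

# Hypothetical wiring; in the real framework the listener builds these objects.
executor = Karafka::Processing::Executor.new('group-1', client, topic, pause)

# Each poll hands the raw rdkafka messages to the executor with the enqueue time
executor.consume(raw_messages, Time.now)

# A rebalance took the partition away
executor.revoked

# The process is stopping
executor.shutdown
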
data/lib/karafka/processing/executors_buffer.rb
@@ -0,0 +1,49 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    # Buffer for executors of a given subscription group. It wraps the concept of building
+    # and caching them, so we can re-use them instead of creating new ones each time.
+    class ExecutorsBuffer
+      # @param client [Connection::Client]
+      # @param subscription_group [Routing::SubscriptionGroup]
+      # @return [ExecutorsBuffer]
+      def initialize(client, subscription_group)
+        @subscription_group = subscription_group
+        @client = client
+        @buffer = Hash.new { |h, k| h[k] = {} }
+      end
+
+      # @param topic_name [String] topic name
+      # @param partition [Integer] partition number
+      # @param pause [TimeTrackers::Pause] pause corresponding to the provided topic and
+      #   partition
+      # @return [Executor] consumer executor
+      def fetch(topic_name, partition, pause)
+        topic = @subscription_group.topics.find { |ktopic| ktopic.name == topic_name }
+
+        # Raise with the requested name, not the (nil) lookup result
+        topic || raise(Errors::TopicNotFoundError, topic_name)
+
+        @buffer[topic][partition] ||= Executor.new(
+          @subscription_group.id,
+          @client,
+          topic,
+          pause
+        )
+      end
+
+      # Runs the shutdown on all active executors.
+      def shutdown
+        @buffer.values.map(&:values).flatten.each(&:shutdown)
+      end
+
+      # Clears the executors buffer. Useful for recovery from critical errors.
+      def clear
+        @buffer.clear
+      end
+    end
+  end
+end
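
A hedged usage sketch for the buffer: executors are cached per topic/partition pair, so a partition that is revoked and later re-claimed gets its previous executor back. `client`, `subscription_group`, `pause` and `raw_messages` are stand-ins:

buffer = Karafka::Processing::ExecutorsBuffer.new(client, subscription_group)

# Fetch builds the executor on first use and caches it afterwards; it raises
# Errors::TopicNotFoundError for topics outside the subscription group
executor = buffer.fetch('events', 0, pause)
executor.consume(raw_messages, Time.now)

buffer.shutdown # runs #shutdown on every cached executor
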
data/lib/karafka/processing/jobs/base.rb
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    # Namespace for all the jobs that are supposed to run in workers.
+    module Jobs
+      # Base class for all the job types that are supposed to run in worker threads.
+      class Base
+        extend Forwardable
+
+        # @note Since one job always has exactly one executor, we use the executor's id and
+        #   group id as a reference
+        def_delegators :executor, :id, :group_id
+
+        attr_reader :executor
+      end
+    end
+  end
+end
data/lib/karafka/processing/jobs/consume.rb
@@ -0,0 +1,28 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    module Jobs
+      # The main job type. It runs the executor that triggers a given topic partition's
+      # messages processing in an underlying consumer instance.
+      class Consume < Base
+        # @param executor [Karafka::Processing::Executor] executor that is supposed to run
+        #   a given job
+        # @param messages [Array<Rdkafka::Consumer::Message>] array of raw rdkafka messages
+        #   with which we are supposed to work
+        # @return [Consume]
+        def initialize(executor, messages)
+          @executor = executor
+          @messages = messages
+          @created_at = Time.now
+          super()
+        end
+
+        # Runs the given executor.
+        def call
+          executor.consume(@messages, @created_at)
+        end
+      end
+    end
+  end
+end
data/lib/karafka/processing/jobs/revoked.rb
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    module Jobs
+      # Job that runs the revoked operation when we lose a given partition on a consumer.
+      class Revoked < Base
+        # @param executor [Karafka::Processing::Executor] executor that is supposed to run
+        #   the job
+        # @return [Revoked]
+        def initialize(executor)
+          @executor = executor
+          super()
+        end
+
+        # Runs the revoking job via an executor.
+        def call
+          executor.revoked
+        end
+      end
+    end
+  end
+end
data/lib/karafka/processing/jobs/shutdown.rb
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    module Jobs
+      # Job that runs on each active consumer upon process shutdown (one job per consumer).
+      class Shutdown < Base
+        # @param executor [Karafka::Processing::Executor] executor that is supposed to run
+        #   a given job on an active consumer
+        # @return [Shutdown]
+        def initialize(executor)
+          @executor = executor
+          super()
+        end
+
+        # Runs the shutdown job via an executor.
+        def call
+          executor.shutdown
+        end
+      end
+    end
+  end
+end
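
All three job types reduce to `#call` delegating to one executor operation, which lets workers treat them uniformly. An illustrative enqueue — `jobs_queue`, `executor` and `raw_messages` are stand-ins, and the queue itself is defined in the next file:

jobs_queue << Karafka::Processing::Jobs::Consume.new(executor, raw_messages)
jobs_queue << Karafka::Processing::Jobs::Revoked.new(executor)
jobs_queue << Karafka::Processing::Jobs::Shutdown.new(executor)
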
data/lib/karafka/processing/jobs_queue.rb
@@ -0,0 +1,121 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    # This is the key work component for Karafka jobs distribution. It provides an API for
+    # running jobs in parallel while operating within more than one subscription group.
+    #
+    # We need to take into consideration the fact that more than one subscription group can
+    # operate on this queue, which is why internally we keep track of processing per group.
+    #
+    # We work with the assumption that partitions data is evenly distributed.
+    class JobsQueue
+      # @return [Karafka::Processing::JobsQueue]
+      def initialize
+        @queue = ::Queue.new
+        # These queues act as semaphores internally. Since we need an indicator for waiting,
+        # we could use Thread.pass, but that is expensive. Instead we can just block until
+        # any of the workers finishes its work and then re-check. This means that in the
+        # worst-case scenario, we will context switch 10 times per poll instead of getting
+        # this thread scheduled by Ruby hundreds of thousands of times per group.
+        # We cannot use a single semaphore, as it could block listeners that should proceed
+        # with their data and could unlock when a given group needs to remain locked
+        @semaphores = Hash.new { |h, k| h[k] = Queue.new }
+        @in_processing = Hash.new { |h, k| h[k] = {} }
+        @mutex = Mutex.new
+      end
+
+      # Returns the number of jobs that are either enqueued or in processing (but not finished)
+      # @return [Integer] number of elements in the queue
+      # @note Using `#pop` won't decrease this number; only marking a job as completed does
+      def size
+        @in_processing.values.map(&:size).sum
+      end
+
+      # Adds the job to the internal main queue, scheduling it for execution in a worker, and
+      # marks this job as being in the processing pipeline.
+      #
+      # @param job [Jobs::Base] job that we want to run
+      def <<(job)
+        # We do not push the job if the queue is closed, as it would not be executed anyway
+        return if @queue.closed?
+
+        @mutex.synchronize do
+          group = @in_processing[job.group_id]
+
+          raise(Errors::JobsQueueSynchronizationError, job.group_id) if group.key?(job.id)
+
+          group[job.id] = true
+        end
+
+        @queue << job
+      end
+
+      # @return [Jobs::Base, nil] waits for a job from the main queue and returns it once
+      #   available, or returns nil if the queue has been stopped and there won't be anything
+      #   more to process ever
+      # @note This command is blocking and will wait until a job is available on the main queue
+      def pop
+        @queue.pop
+      end
+
+      # Marks a given job from a given group as completed. When there are no more jobs from a
+      # given group to be executed, we won't wait.
+      #
+      # @param job [Jobs::Base] job that was completed
+      def complete(job)
+        @mutex.synchronize do
+          @in_processing[job.group_id].delete(job.id)
+          @semaphores[job.group_id] << true
+        end
+      end
+
+      # Clears the processing state for a provided group. Useful when a recovery happens and
+      # we need to clean up state, but only for a given subscription group.
+      #
+      # @param group_id [String]
+      def clear(group_id)
+        @mutex.synchronize do
+          @in_processing[group_id].clear
+          # We unlock it just in case it was blocked when clearing started
+          @semaphores[group_id] << true
+        end
+      end
+
+      # Stops the whole processing queue.
+      def close
+        @mutex.synchronize do
+          return if @queue.closed?
+
+          @queue.close
+          @semaphores.values.each(&:close)
+        end
+      end
+
+      # Blocks while there are jobs in the queue for a given group and waits until all the
+      # jobs from that group are completed.
+      # @param group_id [String] id of the group whose jobs we're waiting for
+      # @note This method is blocking.
+      def wait(group_id)
+        # Do other things while we cannot process; wait for anyone to finish their work and
+        # then re-check the wait status
+        @semaphores[group_id].pop while wait?(group_id)
+      end
+
+      private
+
+      # @param group_id [String] id of the group whose jobs we're interested in
+      # @return [Boolean] should we keep waiting or not
+      def wait?(group_id)
+        # If the app is stopping, all the messages being processed at the moment need to
+        # finish. Otherwise we may risk closing the client and committing offsets afterwards
+        return false if Karafka::App.stopping? && @in_processing[group_id].empty?
+        return false if @queue.closed?
+        return false if @in_processing[group_id].empty?
+
+        true
+      end
+    end
+  end
+end
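
The queue-as-semaphore trick that `#wait` and `#complete` rely on can be shown in isolation: `Queue#pop` blocks until another thread pushes, so waking a waiter costs a single push. A standalone Ruby illustration, not Karafka code:

semaphore = Queue.new

worker = Thread.new do
  sleep(0.1)        # simulate running a job
  semaphore << true # what #complete does: signal the waiting side
end

semaphore.pop # what #wait does: block until some worker signals
worker.join
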
data/lib/karafka/processing/worker.rb
@@ -0,0 +1,57 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    # Workers are used to run jobs in separate threads.
+    # Workers are the main processing units of the Karafka framework.
+    class Worker
+      extend Forwardable
+
+      def_delegators :@thread, :join, :terminate, :alive?
+
+      # @param jobs_queue [JobsQueue]
+      # @return [Worker]
+      def initialize(jobs_queue)
+        @jobs_queue = jobs_queue
+        @thread = Thread.new do
+          # If anything goes wrong in this worker thread, it means something went really
+          # wrong and we should terminate
+          Thread.current.abort_on_exception = true
+          loop { break unless process }
+        end
+      end
+
+      private
+
+      # Fetches a single job, processes it and marks it as completed.
+      #
+      # @note We do not have error handling here, as no errors should propagate this far. If
+      #   they do, it is a critical error and should bubble up.
+      #
+      # @note Upon closing the jobs queue, the worker will close its thread
+      def process
+        job = @jobs_queue.pop
+
+        if job
+          job.call
+          true
+        else
+          false
+        end
+      # We signal critical exceptions, notify, and do not allow the worker to fail
+      # rubocop:disable Lint/RescueException
+      rescue Exception => e
+        # rubocop:enable Lint/RescueException
+        Karafka.monitor.instrument(
+          'error.occurred',
+          caller: self,
+          error: e,
+          type: 'worker.process.error'
+        )
+      ensure
+        # job can be nil when the queue is being closed
+        @jobs_queue.complete(job) if job
+      end
+    end
+  end
+end
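
A hedged end-to-end sketch of the queue/worker handshake using the classes from this changeset (`executor` and `raw_messages` remain stand-ins):

queue = Karafka::Processing::JobsQueue.new
worker = Karafka::Processing::Worker.new(queue)

queue << Karafka::Processing::Jobs::Consume.new(executor, raw_messages)
queue.wait(executor.group_id) # returns once the worker marks the job complete

queue.close # a closed queue pops nil, which ends the worker loop
worker.join
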
data/lib/karafka/processing/workers_batch.rb
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    # Abstraction layer around a batch of workers.
+    class WorkersBatch
+      include Enumerable
+
+      # @param jobs_queue [JobsQueue]
+      # @return [WorkersBatch]
+      def initialize(jobs_queue)
+        @batch = Array.new(App.config.concurrency) { Processing::Worker.new(jobs_queue) }
+      end
+
+      # Iterates over the available workers, yielding each worker
+      # @param block [Proc] block we want to run
+      def each(&block)
+        @batch.each(&block)
+      end
+    end
+  end
+end
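
Usage is then just a matter of sizing via config and joining on shutdown; a brief sketch (`jobs_queue` is a stand-in):

workers = Karafka::Processing::WorkersBatch.new(jobs_queue) # config.concurrency threads
jobs_queue.close      # lets each worker's loop terminate
workers.each(&:join)  # WorkersBatch is Enumerable over the worker threads
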
data/lib/karafka/railtie.rb
@@ -0,0 +1,65 @@
+# frozen_string_literal: true
+
+# This file contains the Railtie for auto-configuration
+
+rails = false
+
+begin
+  require 'rails'
+
+  rails = true
+rescue LoadError
+  # Without defining this in some way, Zeitwerk isn't happy, so we define an empty class
+  module Karafka
+    class Railtie
+    end
+  end
+end
+
+if rails
+  # Load Karafka
+  require 'karafka'
+  # Load the ActiveJob adapter
+  require 'active_job/karafka'
+
+  # Set up the env if configured (it may also be configured later, e.g. from the boot file)
+  ENV['KARAFKA_ENV'] ||= ENV['RAILS_ENV'] if ENV.key?('RAILS_ENV')
+
+  module Karafka
+    # Railtie for setting up the Rails integration
+    class Railtie < Rails::Railtie
+      railtie_name :karafka
+
+      initializer 'karafka.configure_rails_initialization' do |app|
+        # Consumers should autoload by default in the Rails app so they are visible
+        app.config.autoload_paths += %w[app/consumers]
+
+        # Make Karafka use the Rails logger
+        ::Karafka::App.config.logger = Rails.logger
+
+        # These lines will make Karafka print to stdout like Puma or Unicorn
+        if Rails.env.development?
+          Rails.logger.extend(
+            ActiveSupport::Logger.broadcast(
+              ActiveSupport::Logger.new($stdout)
+            )
+          )
+
+          # We can have many listeners, but it does not matter in which one we reload the
+          # code, as long as all the consumers are re-created, since the Rails reload is
+          # thread-safe
+          ::Karafka::App.monitor.subscribe('connection.listener.fetch_loop') do
+            # Reload code each time there is a change
+            next unless Rails.application.reloaders.any?(&:updated?)
+
+            Rails.application.reloader.reload!
+          end
+        end
+
+        app.reloader.to_prepare do
+          # Load the Karafka boot file so it can be used in the Rails server context
+          require Rails.root.join(Karafka.boot_file.to_s).to_s
+        end
+      end
+    end
+  end
+end
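
For context, a minimal hypothetical `karafka.rb` boot file of the kind the Railtie requires in `to_prepare`. The option names loosely follow the bundled 2.0 alpha template; `EventsConsumer` and the broker address are made up:

# karafka.rb (app root) - assumed shape, based on the bundled template
class KarafkaApp < Karafka::App
  setup do |config|
    config.kafka = { 'bootstrap.servers': '127.0.0.1:9092' }
    config.client_id = 'my_rails_app'
  end

  routes.draw do
    topic :events do
      consumer EventsConsumer
    end
  end
end
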
data/lib/karafka/routing/builder.rb
@@ -16,8 +16,8 @@ module Karafka
     private_constant :CONTRACT
 
     def initialize
-      super
       @draws = Concurrent::Array.new
+      super
     end
 
     # Used to draw routes for Karafka
@@ -40,6 +40,7 @@ module Karafka
       each do |consumer_group|
         hashed_group = consumer_group.to_h
         validation_result = CONTRACT.call(hashed_group)
+
         next if validation_result.success?
 
         raise Errors::InvalidConfigurationError, validation_result.errors.to_h
@@ -59,30 +60,30 @@ module Karafka
       super
     end
 
-    # Redraws all the routes for in-process code reloading.
-    # @note This won't allow registration of new topics without a process restart, but will
-    #   trigger cache invalidation so all the classes, etc. are re-fetched after a code reload
-    def reload
-      draws = @draws.dup
-      clear
-      draws.each { |block| draw(&block) }
-    end
-
     private
 
     # Builds and saves a given consumer group
     # @param group_id [String, Symbol] name for the consumer group
     # @param block [Proc] proc that should be executed in the proxy context
     def consumer_group(group_id, &block)
-      consumer_group = ConsumerGroup.new(group_id.to_s)
-      self << Proxy.new(consumer_group, &block).target
+      consumer_group = find { |cg| cg.name == group_id.to_s }
+
+      if consumer_group
+        Proxy.new(consumer_group, &block).target
+      else
+        consumer_group = ConsumerGroup.new(group_id.to_s)
+        self << Proxy.new(consumer_group, &block).target
+      end
     end
 
+    # In case we use the simple style of routing, all topics will be assigned to the same
+    # consumer group, derived from the client_id
+    #
     # @param topic_name [String, Symbol] name of a topic from which we want to consume
     # @param block [Proc] proc we want to evaluate in the topic context
    def topic(topic_name, &block)
-      consumer_group(topic_name) do
-        topic(topic_name, &block).tap(&:build)
+      consumer_group('app') do
+        topic(topic_name, &block)
       end
     end
   end
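
The reworked builder thus supports both routing styles and now merges repeated `consumer_group` calls into the existing group instead of duplicating it. An illustrative draw (the consumer classes are made up):

Karafka::App.routes.draw do
  # Simple style: every bare topic lands in the shared 'app' consumer group
  topic :page_views do
    consumer PageViewsConsumer
  end

  # Explicit style: a dedicated consumer group
  consumer_group :analytics do
    topic :clicks do
      consumer ClicksConsumer
    end
  end
end
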
data/lib/karafka/routing/consumer_group.rb
@@ -5,14 +5,10 @@ module Karafka
   # Object used to describe a single consumer group that is going to subscribe to
   # given topics
   # It is a part of Karafka's DSL
+  # @note A single consumer group represents a Kafka consumer group, but it may not map 1:1
+  #   to subscription groups. There can be more subscription groups than consumer groups
   class ConsumerGroup
-    extend Helpers::ConfigRetriever
-
-    attr_reader(
-      :topics,
-      :id,
-      :name
-    )
+    attr_reader :id, :topics, :name
 
     # @param name [String, Symbol] raw name of this consumer group. Raw means that it does
     #   not yet have an application client_id namespace; this will be added here by default.
@@ -35,28 +31,24 @@ module Karafka
     # @return [Karafka::Routing::Topic] newly built topic instance
     def topic=(name, &block)
       topic = Topic.new(name, self)
-      @topics << Proxy.new(topic, &block).target.tap(&:build)
+      @topics << Proxy.new(topic, &block).target
       @topics.last
     end
 
-    Karafka::AttributesMap.consumer_group.each do |attribute|
-      config_retriever_for(attribute)
+    # @return [Array<Routing::SubscriptionGroup>] all the subscription groups built based on
+    #   the consumer group topics
+    def subscription_groups
+      App.config.internal.subscription_groups_builder.call(topics)
     end
 
     # Hashed version of the consumer group that can be used for validation purposes
     # @return [Hash] hash with consumer group attributes, including topics serialized to
     #   hashes inside of it
     def to_h
-      result = {
+      {
        topics: topics.map(&:to_h),
        id: id
-      }
-
-      Karafka::AttributesMap.consumer_group.each do |attribute|
-        result[attribute] = public_send(attribute)
-      end
-
-      result
+      }.freeze
     end
   end
 end
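
A short sketch of what the slimmed-down consumer group now exposes; the return values shown are indicative only:

group = Karafka::App.routes.first
group.to_h                # => frozen hash: { topics: [...], id: '...' }
group.subscription_groups # built on demand via the subscription groups builder
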
data/lib/karafka/routing/consumer_mapper.rb
@@ -26,8 +26,7 @@ module Karafka
     # @param raw_consumer_group_name [String, Symbol] string or symbolized consumer group name
     # @return [String] remapped final consumer group name
     def call(raw_consumer_group_name)
-      client_name = Karafka::Helpers::Inflector.map(Karafka::App.config.client_id.to_s)
-      "#{client_name}_#{raw_consumer_group_name}"
+      "#{Karafka::App.config.client_id}_#{raw_consumer_group_name}"
     end
   end
 end
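
The effect of this simplification, assuming `config.client_id = 'MyApp'` (a hypothetical value): the inflector-based underscoring step is gone, so the client_id is used verbatim as the prefix:

mapper = Karafka::Routing::ConsumerMapper.new
mapper.call('events') # => "MyApp_events" (previously "my_app_events")
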
data/lib/karafka/routing/router.rb
@@ -10,7 +10,7 @@ module Karafka
     # Find a proper topic based on the full topic id
     # @param topic_id [String] proper topic id (already mapped, etc.) for which we want to
     #   find the routing topic
-    # @return [Karafka::Routing::Route] proper route details
+    # @return [Karafka::Routing::Topic] proper route details
     # @raise [Karafka::Topic::NonMatchingTopicError] raised if the topic name does not match
     #   any route defined by the user using routes.draw
     def find(topic_id)