karafka 2.0.0.beta1 → 2.0.0.beta4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +9 -23
  4. data/CHANGELOG.md +47 -0
  5. data/Gemfile.lock +8 -8
  6. data/bin/integrations +36 -14
  7. data/bin/scenario +29 -0
  8. data/bin/wait_for_kafka +20 -0
  9. data/config/errors.yml +1 -0
  10. data/docker-compose.yml +12 -0
  11. data/karafka.gemspec +2 -2
  12. data/lib/active_job/karafka.rb +2 -2
  13. data/lib/karafka/active_job/routing/extensions.rb +31 -0
  14. data/lib/karafka/base_consumer.rb +65 -42
  15. data/lib/karafka/connection/client.rb +65 -19
  16. data/lib/karafka/connection/listener.rb +99 -34
  17. data/lib/karafka/connection/listeners_batch.rb +24 -0
  18. data/lib/karafka/connection/messages_buffer.rb +50 -54
  19. data/lib/karafka/connection/raw_messages_buffer.rb +101 -0
  20. data/lib/karafka/contracts/config.rb +9 -1
  21. data/lib/karafka/helpers/async.rb +33 -0
  22. data/lib/karafka/instrumentation/logger_listener.rb +34 -10
  23. data/lib/karafka/instrumentation/monitor.rb +3 -1
  24. data/lib/karafka/licenser.rb +26 -7
  25. data/lib/karafka/messages/batch_metadata.rb +26 -3
  26. data/lib/karafka/messages/builders/batch_metadata.rb +17 -29
  27. data/lib/karafka/messages/builders/message.rb +1 -0
  28. data/lib/karafka/messages/builders/messages.rb +4 -12
  29. data/lib/karafka/pro/active_job/consumer.rb +49 -0
  30. data/lib/karafka/pro/active_job/dispatcher.rb +10 -10
  31. data/lib/karafka/pro/active_job/job_options_contract.rb +9 -9
  32. data/lib/karafka/pro/base_consumer.rb +76 -0
  33. data/lib/karafka/pro/loader.rb +30 -13
  34. data/lib/karafka/pro/performance_tracker.rb +9 -9
  35. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +37 -0
  36. data/lib/karafka/pro/processing/jobs_builder.rb +31 -0
  37. data/lib/karafka/pro/routing/extensions.rb +32 -0
  38. data/lib/karafka/pro/scheduler.rb +54 -0
  39. data/lib/karafka/processing/executor.rb +34 -7
  40. data/lib/karafka/processing/executors_buffer.rb +15 -7
  41. data/lib/karafka/processing/jobs/base.rb +21 -4
  42. data/lib/karafka/processing/jobs/consume.rb +12 -5
  43. data/lib/karafka/processing/jobs_builder.rb +28 -0
  44. data/lib/karafka/processing/jobs_queue.rb +15 -12
  45. data/lib/karafka/processing/result.rb +34 -0
  46. data/lib/karafka/processing/worker.rb +23 -17
  47. data/lib/karafka/processing/workers_batch.rb +5 -0
  48. data/lib/karafka/routing/consumer_group.rb +1 -1
  49. data/lib/karafka/routing/subscription_group.rb +2 -2
  50. data/lib/karafka/routing/subscription_groups_builder.rb +3 -2
  51. data/lib/karafka/routing/topic.rb +5 -0
  52. data/lib/karafka/routing/topics.rb +38 -0
  53. data/lib/karafka/runner.rb +19 -27
  54. data/lib/karafka/scheduler.rb +10 -11
  55. data/lib/karafka/server.rb +24 -23
  56. data/lib/karafka/setup/config.rb +4 -1
  57. data/lib/karafka/status.rb +1 -3
  58. data/lib/karafka/version.rb +1 -1
  59. data.tar.gz.sig +0 -0
  60. metadata +20 -5
  61. metadata.gz.sig +0 -0
  62. data/lib/karafka/active_job/routing_extensions.rb +0 -18
data/lib/karafka/pro/processing/jobs_builder.rb
@@ -0,0 +1,31 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component.
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     module Processing
+       # Pro jobs builder that supports lrj
+       class JobsBuilder < ::Karafka::Processing::JobsBuilder
+         # @param executor [Karafka::Processing::Executor]
+         # @param messages [Karafka::Messages::Messages] messages batch to be consumed
+         # @return [Karafka::Processing::Jobs::Consume] blocking job
+         # @return [Karafka::Pro::Processing::Jobs::ConsumeNonBlocking] non blocking for lrj
+         def consume(executor, messages)
+           if executor.topic.long_running_job?
+             Jobs::ConsumeNonBlocking.new(executor, messages)
+           else
+             super
+           end
+         end
+       end
+     end
+   end
+ end
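The branch above is the only decision point between regular and long-running-job (lrj) processing: everything downstream just runs whatever job class the builder returned. A minimal sketch of the dispatch, with illustrative executor/messages stand-ins (not real Karafka setup code):

    builder = Karafka::Pro::Processing::JobsBuilder.new

    # Topic flagged as a long running job: consumption gets wrapped in a
    # non-blocking job, so the listener can keep polling while it runs
    builder.consume(lrj_executor, messages)
    # => Karafka::Pro::Processing::Jobs::ConsumeNonBlocking

    # Any other topic falls through to the regular blocking job via super
    builder.consume(regular_executor, messages)
    # => Karafka::Processing::Jobs::Consume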
data/lib/karafka/pro/routing/extensions.rb
@@ -0,0 +1,32 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component.
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     # Pro routing components
+     module Routing
+       # Routing extensions that allow to configure some extra PRO routing options
+       module Extensions
+         class << self
+           # @param base [Class] class we extend
+           def included(base)
+             base.attr_accessor :long_running_job
+           end
+         end
+
+         # @return [Boolean] is a given job on a topic a long running one
+         def long_running_job?
+           @long_running_job || false
+         end
+       end
+     end
+   end
+ end
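Since `included` only defines an `attr_accessor`, a topic is flagged by assigning to that accessor. A hedged sketch, assuming the routing proxy maps `long_running_job true` onto the generated `long_running_job=` setter the way it does for other topic attributes (the consumer class name is made up):

    Karafka::App.routes.draw do
      topic :reports do
        consumer ReportsConsumer # hypothetical consumer class
        long_running_job true    # sets the accessor added by this extension
      end
    end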
data/lib/karafka/pro/scheduler.rb
@@ -0,0 +1,54 @@
+ # frozen_string_literal: true
+
+ # This Karafka component is a Pro component.
+ # All of the commercial components are present in the lib/karafka/pro directory of this
+ # repository and their usage requires commercial license agreement.
+ #
+ # Karafka has also commercial-friendly license, commercial support and commercial components.
+ #
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+ # your code to Maciej Mensfeld.
+
+ module Karafka
+   module Pro
+     # Optimized scheduler that takes into consideration the execution time needed to process
+     # messages from given topic partitions. It uses the non-preemptive LJF algorithm
+     #
+     # This scheduler is designed to optimize execution times on jobs that perform IO operations,
+     # as when taking IO into consideration, it can achieve optimized parallel processing.
+     #
+     # This scheduler can also work with virtual partitions.
+     #
+     # Aside from consumption jobs, other jobs do not run often, thus we can leave them with
+     # the default FIFO scheduler from the default Karafka scheduler
+     class Scheduler < ::Karafka::Scheduler
+       # Schedules jobs in the LJF order for consumption
+       #
+       # @param queue [Karafka::Processing::JobsQueue] queue where we want to put the jobs
+       # @param jobs_array [Array<Karafka::Processing::Jobs::Base>] jobs we want to schedule
+       #
+       def schedule_consumption(queue, jobs_array)
+         pt = PerformanceTracker.instance
+
+         ordered = []
+
+         jobs_array.each do |job|
+           messages = job.messages
+           message = messages.first
+
+           cost = pt.processing_time_p95(message.topic, message.partition) * messages.size
+
+           ordered << [job, cost]
+         end
+
+         ordered.sort_by!(&:last)
+         ordered.reverse!
+         ordered.map!(&:first)
+
+         ordered.each do |job|
+           queue << job
+         end
+       end
+     end
+   end
+ end
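The cost model is deliberately simple: estimated cost = p95 per-message processing time for the topic partition multiplied by batch size, and jobs are enqueued from most to least expensive (longest job first). A toy calculation with made-up numbers:

    # p95 per message (ms) * batch size => estimated cost
    costs = { job_a: 2.0 * 50, job_b: 10.0 * 20, job_c: 1.0 * 500 }
    costs.sort_by { |_, cost| -cost }.map(&:first)
    # => [:job_c, :job_b, :job_a] (costs 500.0, 200.0, 100.0)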
data/lib/karafka/processing/executor.rb
@@ -18,6 +18,15 @@ module Karafka
    # @return [String] subscription group id to which a given executor belongs
    attr_reader :group_id
 
+   # @return [Karafka::Messages::Messages] messages batch
+   attr_reader :messages
+
+   # Topic accessibility may be needed for the jobs builder to be able to build a proper job
+   # based on the topic settings defined by the end user
+   #
+   # @return [Karafka::Routing::Topic] topic of this executor
+   attr_reader :topic
+
    # @param group_id [String] id of the subscription group to which the executor belongs
    # @param client [Karafka::Connection::Client] kafka client
    # @param topic [Karafka::Routing::Topic] topic for which this executor will run
@@ -30,17 +39,26 @@ module Karafka
      @pause_tracker = pause_tracker
    end
 
-   # Builds the consumer instance and sets all that is needed to run the user consumption logic
+   # Builds the consumer instance, builds messages batch and sets all that is needed to run the
+   # user consumption logic
    #
-   # @param messages [Array<Rdkafka::Consumer::Message>] raw rdkafka messages
+   # @param messages [Array<Karafka::Messages::Message>]
    # @param received_at [Time] the moment we've received the batch (actually the moment we've
    #   enqueued it, but good enough)
-   def prepare(messages, received_at)
+   def before_consume(messages, received_at)
      # Recreate consumer with each batch if persistence is not enabled
      # We reload the consumers with each batch instead of relying on some external signals
      # when needed for consistency. That way devs may have it on or off and not in this
      # middle state, where re-creation of a consumer instance would occur only sometimes
-     @consumer = nil unless ::Karafka::App.config.consumer_persistence
+     @recreate = true unless ::Karafka::App.config.consumer_persistence
+
+     # If @recreate was set to true (aside from non persistent), it means that a revocation or
+     # a shutdown happened and we need to have a new instance for running another consume for
+     # this topic partition
+     if @recreate
+       @consumer = nil
+       @recreate = false
+     end
 
      # First we build messages batch...
      consumer.messages = Messages::Builders::Messages.call(
@@ -49,7 +67,7 @@ module Karafka
        received_at
      )
 
-     consumer.on_prepared
+     consumer.on_before_consume
    end
 
    # Runs consumer data processing against given batch and handles failures and errors.
@@ -58,6 +76,11 @@ module Karafka
      consumer.on_consume
    end
 
+   # Runs consumer after consumption code
+   def after_consume
+     consumer.on_after_consume if @consumer
+   end
+
    # Runs the controller `#revoked` method that should be triggered when a given consumer is
    # no longer needed due to partitions reassignment.
    #
@@ -66,9 +89,13 @@ module Karafka
    #
    # @note We run it only when consumer was present, because presence indicates that at least
    #   a single message has been consumed.
+   #
+   # @note We do not reset the consumer but indicate the need for recreation instead, because
+   #   after the revocation, there still may be `#after_consume` running that needs a given
+   #   consumer instance.
    def revoked
      consumer.on_revoked if @consumer
-     @consumer = nil
+     @recreate = true
    end
 
    # Runs the controller `#shutdown` method that should be triggered when a given consumer is
@@ -80,7 +107,7 @@ module Karafka
      # There is a case where the consumer no longer exists because it was revoked; in a case
      # like that we do not build a new instance and shutdown should not be triggered.
      consumer.on_shutdown if @consumer
-     @consumer = nil
+     @recreate = true
    end
 
    private
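Taken together, the executor now exposes a three-phase per-batch lifecycle plus recreation on demand. A simplified sketch of the call order (in Karafka these calls go through the job objects, not directly):

    executor.before_consume(messages, received_at) # build consumer + messages batch
    executor.consume                               # run the user #consume logic
    executor.after_consume                         # post-consumption handling

    # On rebalance or stop the consumer is only marked for recreation, so an
    # in-flight after_consume can still use the current instance:
    executor.revoked  # sets @recreate instead of dropping @consumer
    executor.shutdown # same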
data/lib/karafka/processing/executors_buffer.rb
@@ -23,21 +23,29 @@ module Karafka
        partition,
        pause
      )
-     topic = @subscription_group.topics.find { |ktopic| ktopic.name == topic }
+     ktopic = @subscription_group.topics.find(topic)
 
-     topic || raise(Errors::TopicNotFoundError, topic)
+     ktopic || raise(Errors::TopicNotFoundError, topic)
 
-     @buffer[topic][partition] ||= Executor.new(
+     @buffer[ktopic][partition] ||= Executor.new(
        @subscription_group.id,
        @client,
-       topic,
+       ktopic,
        pause
      )
    end
 
-   # Runs the shutdown on all active executors.
-   def shutdown
-     @buffer.values.map(&:values).flatten.each(&:shutdown)
+   # Iterates over all available executors and yields them together with topic and partition
+   # info
+   # @yieldparam [Routing::Topic] karafka routing topic object
+   # @yieldparam [Integer] partition number
+   # @yieldparam [Executor] given executor
+   def each
+     @buffer.each do |ktopic, partitions|
+       partitions.each do |partition, executor|
+         yield(ktopic, partition, executor)
+       end
+     end
    end
 
    # Clears the executors buffer. Useful for critical errors recovery.
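With the dedicated `#shutdown` gone, callers now drive such sweeps themselves through `#each`. A rough equivalent of the removed method, expressed against the new API (receiver name illustrative):

    executors_buffer.each do |_topic, _partition, executor|
      executor.shutdown
    end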
data/lib/karafka/processing/jobs/base.rb
@@ -5,7 +5,7 @@ module Karafka
    # Namespace for all the jobs that are supposed to run in workers.
    module Jobs
      # Base class for all the job types that are supposed to run in workers threads.
-     # Each job can have 3 main entry-points: `#prepare`, `#call` and `#teardown`
+     # Each job can have 3 main entry-points: `#before_call`, `#call` and `#after_call`
      # Only `#call` is required.
      class Base
        extend Forwardable
@@ -15,18 +15,35 @@ module Karafka
 
        attr_reader :executor
 
+       # Creates a new job instance
+       def initialize
+         # All jobs are blocking by default and they can release the lock when blocking operations
+         # are done (if needed)
+         @non_blocking = false
+       end
+
        # When redefined can run any code that should run before executing the proper code
-       def prepare; end
+       def before_call; end
+
+       # The main entry-point of a job
+       def call
+         raise NotImplementedError, 'Please implement in a subclass'
+       end
 
        # When redefined can run any code that should run after executing the proper code
-       def teardown; end
+       def after_call; end
 
        # @return [Boolean] is this a non-blocking job
+       #
        # @note Blocking job is a job that will cause the job queue to wait until it is finished
        #   before removing the lock on new jobs being added
+       #
        # @note All the jobs are blocking by default
+       #
+       # @note Job **needs** to mark itself as non-blocking only **after** it is done with all
+       #   the blocking things (pausing partition, etc).
        def non_blocking?
-         false
+         @non_blocking
        end
      end
    end
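Subclasses opt into non-blocking behavior by flipping the flag themselves, and only once their blocking groundwork is done. A hedged sketch of such a subclass (modeled on what a job like Pro's ConsumeNonBlocking is expected to do, not copied from it):

    class PauseThenProcessJob < Karafka::Processing::Jobs::Base
      def before_call
        # do all blocking work first, e.g. pausing the topic partition,
        # so nothing races with the poll loop...
        @non_blocking = true # only now may the queue stop waiting on this job
      end

      def call
        # long-running processing happens here while polling continues
      end
    end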
data/lib/karafka/processing/jobs/consume.rb
@@ -6,10 +6,12 @@ module Karafka
    # The main job type. It runs the executor that triggers given topic partition messages
    # processing in an underlying consumer instance.
    class Consume < Base
+     # @return [Array<Rdkafka::Consumer::Message>] array with messages
+     attr_reader :messages
+
      # @param executor [Karafka::Processing::Executor] executor that is supposed to run a given
      #   job
-     # @param messages [Array<dkafka::Consumer::Message>] array with raw rdkafka messages with
-     #   which we are suppose to work
+     # @param messages [Karafka::Messages::Messages] karafka messages batch
      # @return [Consume]
      def initialize(executor, messages)
        @executor = executor
@@ -18,15 +20,20 @@ module Karafka
        super()
      end
 
-     # Runs the preparations on the executor
-     def prepare
-       executor.prepare(@messages, @created_at)
+     # Runs the before consumption preparations on the executor
+     def before_call
+       executor.before_consume(@messages, @created_at)
      end
 
      # Runs the given executor
      def call
        executor.consume
      end
+
+     # Runs any error handling and other post-consumption stuff on the executor
+     def after_call
+       executor.after_consume
+     end
    end
  end
data/lib/karafka/processing/jobs_builder.rb
@@ -0,0 +1,28 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Processing
+     # Class responsible for deciding what type of job we should build to run a given command
+     # and for building a proper job for it.
+     class JobsBuilder
+       # @param executor [Karafka::Processing::Executor]
+       # @param messages [Karafka::Messages::Messages] messages batch to be consumed
+       # @return [Karafka::Processing::Jobs::Consume] consumption job
+       def consume(executor, messages)
+         Jobs::Consume.new(executor, messages)
+       end
+
+       # @param executor [Karafka::Processing::Executor]
+       # @return [Karafka::Processing::Jobs::Revoked] revocation job
+       def revoked(executor)
+         Jobs::Revoked.new(executor)
+       end
+
+       # @param executor [Karafka::Processing::Executor]
+       # @return [Karafka::Processing::Jobs::Shutdown] shutdown job
+       def shutdown(executor)
+         Jobs::Shutdown.new(executor)
+       end
+     end
+   end
+ end
data/lib/karafka/processing/jobs_queue.rb
@@ -12,7 +12,7 @@ module Karafka
    class JobsQueue
      # @return [Karafka::Processing::JobsQueue]
      def initialize
-       @queue = ::Queue.new
+       @queue = Queue.new
        # Those queues will act as semaphores internally. Since we need an indicator for waiting
        # we could use Thread.pass but this is expensive. Instead we can just lock until any
        # of the workers finishes their work and we can re-check. This means that in the worse
@@ -100,8 +100,17 @@ module Karafka
        end
      end
 
-     # Blocks when there are things in the queue in a given group and waits until all the jobs
-     # from a given group are completed
+     # @param group_id [String]
+     #
+     # @return [Boolean] tells us whether we have anything in processing (or for processing)
+     #   from a given group.
+     def empty?(group_id)
+       @in_processing[group_id].empty?
+     end
+
+     # Blocks when there are things in the queue in a given group and waits until all the
+     # blocking jobs from a given group are completed
+     #
      # @param group_id [String] id of the group in which jobs we're interested.
      # @note This method is blocking.
      def wait(group_id)
@@ -114,16 +123,10 @@ module Karafka
 
      # @param group_id [String] id of the group in which jobs we're interested.
      # @return [Boolean] should we keep waiting or not
+     # @note We do not wait for non-blocking jobs. Their flow should allow for `poll` running
+     #   as they may exceed `max.poll.interval`
      def wait?(group_id)
-       group = @in_processing[group_id]
-
-       # If it is stopping, all the previous messages that are processed at the moment need to
-       # finish. Otherwise we may risk closing the client and committing offsets afterwards
-       return false if Karafka::App.stopping? && group.empty?
-       return false if @queue.closed?
-       return false if group.empty?
-
-       !group.all?(&:non_blocking?)
+       !@in_processing[group_id].all?(&:non_blocking?)
      end
    end
  end
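The simplified `wait?` leans on the fact that `Enumerable#all?` is vacuously true for an empty collection, so the removed `group.empty?` guards are redundant: an empty group simply stops waiting. Illustrative check (the job doubles are hypothetical):

    [].all?(&:non_blocking?)                 # => true  -> nothing left, stop waiting
    [blocking_job].all?(&:non_blocking?)     # => false -> keep blocking the listener
    [non_blocking_job].all?(&:non_blocking?) # => true  -> listener may poll again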
data/lib/karafka/processing/result.rb
@@ -0,0 +1,34 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Processing
+     # A simple object that allows us to keep track of processing state.
+     # It allows us to indicate if a given thing moved from success to a failure or the other
+     # way around. Useful for tracking consumption state.
+     class Result
+       def initialize
+         @success = true
+       end
+
+       # @return [Boolean]
+       def failure?
+         !success?
+       end
+
+       # @return [Boolean]
+       def success?
+         @success
+       end
+
+       # Marks state as successful
+       def success!
+         @success = true
+       end
+
+       # Marks state as failure
+       def failure!
+         @success = false
+       end
+     end
+   end
+ end
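A quick usage sketch of the new state tracker:

    result = Karafka::Processing::Result.new
    result.success? # => true (a fresh result starts as successful)
    result.failure!
    result.failure? # => true
    result.success!
    result.success? # => true again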
data/lib/karafka/processing/worker.rb
@@ -17,24 +17,26 @@ module Karafka
    # code. This can be used to unlock certain resources or do other things that are
    # not user code but need to run after user code base is executed.
    class Worker
-     extend Forwardable
+     include Helpers::Async
 
-     def_delegators :@thread, :join, :terminate, :alive?
+     # @return [String] id of this worker
+     attr_reader :id
 
      # @param jobs_queue [JobsQueue]
      # @return [Worker]
      def initialize(jobs_queue)
+       @id = SecureRandom.uuid
        @jobs_queue = jobs_queue
-       @thread = Thread.new do
-         # If anything goes wrong in this worker thread, it means something went really wrong and
-         # we should terminate.
-         Thread.current.abort_on_exception = true
-         loop { break unless process }
-       end
      end
 
      private
 
+     # Runs processing of jobs in a loop
+     # Stops when queue is closed.
+     def call
+       loop { break unless process }
+     end
+
      # Fetches a single job, processes it and marks as completed.
      #
      # @note We do not have error handling here, as no errors should propagate this far. If they
@@ -45,19 +47,23 @@ module Karafka
        job = @jobs_queue.pop
 
        if job
-         job.prepare
+         Karafka.monitor.instrument('worker.process', caller: self, job: job)
+
+         Karafka.monitor.instrument('worker.processed', caller: self, job: job) do
+           job.before_call
 
-         # If a job is marked as non blocking, we can run a tick in the job queue and if there
-         # are no other blocking factors, the job queue will be unlocked.
-         # If this does not run, all the things will be blocking and job queue won't allow to
-         # pass it until done.
-         @jobs_queue.tick(job.group_id) if job.non_blocking?
+           # If a job is marked as non blocking, we can run a tick in the job queue and if there
+           # are no other blocking factors, the job queue will be unlocked.
+           # If this does not run, all the things will be blocking and job queue won't allow to
+           # pass it until done.
+           @jobs_queue.tick(job.group_id) if job.non_blocking?
 
-         job.call
+           job.call
 
-         job.teardown
+           job.after_call
 
-         true
+           true
+         end
        else
          false
        end
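Worker runs are now instrumented. Assuming the `worker.process` and `worker.processed` events are registered with the Karafka monitor (the monitor.rb change in this diff suggests exactly that), a subscription could look roughly like this:

    Karafka.monitor.subscribe('worker.processed') do |event|
      job = event[:job]
      # event[:time] is the block duration the notifications layer adds to
      # instrumented blocks (an assumption worth verifying on your version)
      Karafka.logger.info("#{job.class} finished in #{event[:time]}ms")
    end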
data/lib/karafka/processing/workers_batch.rb
@@ -17,6 +17,11 @@ module Karafka
      def each(&block)
        @batch.each(&block)
      end
+
+     # @return [Integer] number of workers in the batch
+     def size
+       @batch.size
+     end
    end
  end
data/lib/karafka/routing/consumer_group.rb
@@ -17,7 +17,7 @@ module Karafka
    def initialize(name)
      @name = name
      @id = Karafka::App.config.consumer_mapper.call(name)
-     @topics = []
+     @topics = Topics.new([])
    end
 
    # @return [Boolean] true if this consumer group should be active in our current process
data/lib/karafka/routing/subscription_group.rb
@@ -10,7 +10,7 @@ module Karafka
    class SubscriptionGroup
      attr_reader :id, :topics
 
-     # @param topics [Array<Topic>] all the topics that share the same key settings
+     # @param topics [Karafka::Routing::Topics] all the topics that share the same key settings
      # @return [SubscriptionGroup] built subscription group
      def initialize(topics)
        @id = SecureRandom.uuid
@@ -44,7 +44,7 @@ module Karafka
        kafka[:'auto.offset.reset'] ||= @topics.first.initial_offset
        # Karafka manages the offsets based on the processing state, thus we do not rely on the
        # rdkafka offset auto-storing
-       kafka[:'enable.auto.offset.store'] = 'false'
+       kafka[:'enable.auto.offset.store'] = false
        kafka.freeze
        kafka
      end
data/lib/karafka/routing/subscription_groups_builder.rb
@@ -23,8 +23,8 @@ module Karafka
 
    private_constant :DISTRIBUTION_KEYS
 
-   # @param topics [Array<Topic>] array with topics based on which we want to build subscription
-   #   groups
+   # @param topics [Karafka::Routing::Topics] all the topics based on which we want to build
+   #   subscription groups
    # @return [Array<SubscriptionGroup>] all subscription groups we need in separate threads
    def call(topics)
      topics
@@ -32,6 +32,7 @@ module Karafka
        .group_by(&:first)
        .values
        .map { |value| value.map(&:last) }
+       .map { |topics_array| Routing::Topics.new(topics_array) }
        .map { |grouped_topics| SubscriptionGroup.new(grouped_topics) }
    end
 
data/lib/karafka/routing/topic.rb
@@ -66,6 +66,11 @@ module Karafka
      end
    end
 
+   # @return [Boolean] true if this topic offset is handled by the end user
+   def manual_offset_management?
+     manual_offset_management
+   end
+
    # @return [Hash] hash with all the topic attributes
    # @note This is being used when we validate the consumer_group and its topics
    def to_h
data/lib/karafka/routing/topics.rb
@@ -0,0 +1,38 @@
+ # frozen_string_literal: true
+
+ # frozen_string_literal: true
+
+ module Karafka
+   module Routing
+     # Abstraction layer on top of groups of topics
+     class Topics
+       include Enumerable
+       extend Forwardable
+
+       def_delegators :@accumulator, :[], :size, :empty?, :last, :<<
+
+       # @param topics_array [Array<Karafka::Routing::Topic>] array with topics
+       def initialize(topics_array)
+         @accumulator = topics_array.dup
+       end
+
+       # Yields each topic
+       #
+       # @param [Proc] block we want to yield with on each topic
+       def each(&block)
+         @accumulator.each(&block)
+       end
+
+       # Finds topic by its name
+       #
+       # @param topic_name [String] topic name
+       # @return [Karafka::Routing::Topic]
+       # @raise [Karafka::Errors::TopicNotFoundError] this should never happen. If you see it,
+       #   please create an issue.
+       def find(topic_name)
+         @accumulator.find { |topic| topic.name == topic_name } ||
+           raise(Karafka::Errors::TopicNotFoundError, topic_name)
+       end
+     end
+   end
+ end
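The collection keeps the old Array ergonomics via delegation while making name lookup first-class. A short usage sketch (the topic objects are stand-ins):

    topics = Karafka::Routing::Topics.new([orders_topic, payments_topic])
    topics.size            # => 2
    topics.find('orders')  # => orders_topic
    topics.find('unknown') # raises Karafka::Errors::TopicNotFoundError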