karafka 2.0.0.beta3 → 2.0.0.rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +18 -15
  4. data/CHANGELOG.md +37 -0
  5. data/CONTRIBUTING.md +0 -5
  6. data/Gemfile.lock +6 -6
  7. data/README.md +2 -10
  8. data/bin/benchmarks +2 -2
  9. data/bin/integrations +10 -3
  10. data/bin/{stress → stress_many} +1 -1
  11. data/bin/stress_one +13 -0
  12. data/bin/wait_for_kafka +20 -0
  13. data/docker-compose.yml +32 -13
  14. data/karafka.gemspec +1 -1
  15. data/lib/karafka/active_job/routing/extensions.rb +1 -1
  16. data/lib/karafka/app.rb +2 -1
  17. data/lib/karafka/base_consumer.rb +59 -46
  18. data/lib/karafka/connection/client.rb +60 -14
  19. data/lib/karafka/connection/listener.rb +37 -11
  20. data/lib/karafka/connection/rebalance_manager.rb +20 -19
  21. data/lib/karafka/contracts/config.rb +18 -4
  22. data/lib/karafka/contracts/server_cli_options.rb +1 -1
  23. data/lib/karafka/errors.rb +3 -0
  24. data/lib/karafka/instrumentation/logger_listener.rb +0 -3
  25. data/lib/karafka/instrumentation/monitor.rb +0 -1
  26. data/lib/karafka/pro/active_job/consumer.rb +2 -8
  27. data/lib/karafka/pro/base_consumer.rb +82 -0
  28. data/lib/karafka/pro/loader.rb +14 -8
  29. data/lib/karafka/pro/processing/coordinator.rb +63 -0
  30. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +1 -1
  31. data/lib/karafka/pro/processing/jobs_builder.rb +3 -2
  32. data/lib/karafka/pro/processing/partitioner.rb +41 -0
  33. data/lib/karafka/pro/processing/scheduler.rb +56 -0
  34. data/lib/karafka/pro/routing/extensions.rb +6 -0
  35. data/lib/karafka/processing/coordinator.rb +88 -0
  36. data/lib/karafka/processing/coordinators_buffer.rb +54 -0
  37. data/lib/karafka/processing/executor.rb +16 -9
  38. data/lib/karafka/processing/executors_buffer.rb +46 -15
  39. data/lib/karafka/processing/jobs/base.rb +8 -3
  40. data/lib/karafka/processing/jobs/consume.rb +11 -4
  41. data/lib/karafka/processing/jobs_builder.rb +3 -2
  42. data/lib/karafka/processing/partitioner.rb +22 -0
  43. data/lib/karafka/processing/result.rb +29 -0
  44. data/lib/karafka/processing/scheduler.rb +22 -0
  45. data/lib/karafka/processing/worker.rb +2 -2
  46. data/lib/karafka/routing/consumer_group.rb +1 -1
  47. data/lib/karafka/routing/topic.rb +14 -0
  48. data/lib/karafka/setup/config.rb +20 -10
  49. data/lib/karafka/version.rb +1 -1
  50. data.tar.gz.sig +0 -0
  51. metadata +16 -8
  52. metadata.gz.sig +0 -0
  53. data/lib/karafka/pro/base_consumer_extensions.rb +0 -66
  54. data/lib/karafka/pro/scheduler.rb +0 -54
  55. data/lib/karafka/scheduler.rb +0 -20
data/lib/karafka/pro/processing/jobs_builder.rb
@@ -16,11 +16,12 @@ module Karafka
       class JobsBuilder < ::Karafka::Processing::JobsBuilder
         # @param executor [Karafka::Processing::Executor]
         # @param messages [Karafka::Messages::Messages] messages batch to be consumed
+        # @param coordinator [Karafka::Processing::Coordinator]
         # @return [Karafka::Processing::Jobs::Consume] blocking job
         # @return [Karafka::Pro::Processing::Jobs::ConsumeNonBlocking] non blocking for lrj
-        def consume(executor, messages)
+        def consume(executor, messages, coordinator)
           if executor.topic.long_running_job?
-            Jobs::ConsumeNonBlocking.new(executor, messages)
+            Jobs::ConsumeNonBlocking.new(executor, messages, coordinator)
           else
             super
           end
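
For orientation, a minimal sketch of what drives this branch; the topic lookup is illustrative and the exact routing DSL may differ between versions, but `long_running_job` is the Pro routing attribute this diff extends:

# Hypothetical wiring sketch, not code from this diff:
ktopic = subscription_group.topics.find('reports')
ktopic.long_running_job = true

# ...which is what the builder consults when dispatching:
executor.topic.long_running_job? # => true, so Jobs::ConsumeNonBlocking is built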
data/lib/karafka/pro/processing/partitioner.rb (new file)
@@ -0,0 +1,41 @@
+# frozen_string_literal: true
+
+# This Karafka component is a Pro component.
+# All of the commercial components are present in the lib/karafka/pro directory of this
+# repository and their usage requires commercial license agreement.
+#
+# Karafka also has a commercial-friendly license, commercial support and commercial components.
+#
+# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+# your code to Maciej Mensfeld.
+
+module Karafka
+  module Pro
+    module Processing
+      # Pro partitioner that can distribute work based on the virtual partitioner settings
+      class Partitioner < ::Karafka::Processing::Partitioner
+        # @param topic [String] topic name
+        # @param messages [Array<Karafka::Messages::Message>] karafka messages
+        # @yieldparam [Integer] group id
+        # @yieldparam [Array<Karafka::Messages::Message>] karafka messages
+        def call(topic, messages)
+          ktopic = @subscription_group.topics.find(topic)
+
+          @concurrency ||= ::Karafka::App.config.concurrency
+
+          # We only partition work if we have a virtual partitioner and more than one thread to
+          # process the data. With one thread it is not worth partitioning the work, as the work
+          # itself will be assigned to one thread anyway (pointless work)
+          if ktopic.virtual_partitioner? && @concurrency > 1
+            messages
+              .group_by { |msg| ktopic.virtual_partitioner.call(msg).hash.abs % @concurrency }
+              .each { |group_id, messages_group| yield(group_id, messages_group) }
+          else
+            # With no virtual partitioner, this works as the regular one
+            yield(0, messages)
+          end
+        end
+      end
+    end
+  end
+end
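
A minimal sketch of how the work splitting behaves once a partitioner is set, assuming the `virtual_partitioner` topic attribute added by the Pro routing extension later in this diff:

# Hypothetical wiring sketch; keying by message key is just one possible choice.
ktopic.virtual_partitioner = ->(message) { message.key }

# With concurrency = 4, Partitioner#call above routes each message to:
#   group_id = message.key.hash.abs % 4
# so messages with the same key always land in the same parallel group.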
data/lib/karafka/pro/processing/scheduler.rb (new file)
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+# This Karafka component is a Pro component.
+# All of the commercial components are present in the lib/karafka/pro directory of this
+# repository and their usage requires commercial license agreement.
+#
+# Karafka also has a commercial-friendly license, commercial support and commercial components.
+#
+# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+# your code to Maciej Mensfeld.
+
+module Karafka
+  module Pro
+    module Processing
+      # Optimizing scheduler that takes into consideration the execution time needed to process
+      # messages from given topic partitions. It uses the non-preemptive LJF algorithm.
+      #
+      # This scheduler is designed to optimize execution times on jobs that perform IO operations,
+      # as when IO is taken into consideration, they can achieve optimized parallel processing.
+      #
+      # This scheduler can also work with virtual partitions.
+      #
+      # Aside from consumption jobs, other jobs do not run often, thus we can leave them with the
+      # default FIFO behaviour of the standard Karafka scheduler.
+      class Scheduler < ::Karafka::Processing::Scheduler
+        # Schedules jobs in the LJF order for consumption
+        #
+        # @param queue [Karafka::Processing::JobsQueue] queue where we want to put the jobs
+        # @param jobs_array [Array<Karafka::Processing::Jobs::Base>] jobs we want to schedule
+        #
+        def schedule_consumption(queue, jobs_array)
+          pt = PerformanceTracker.instance
+
+          ordered = []
+
+          jobs_array.each do |job|
+            messages = job.messages
+            message = messages.first
+
+            cost = pt.processing_time_p95(message.topic, message.partition) * messages.size
+
+            ordered << [job, cost]
+          end
+
+          ordered.sort_by!(&:last)
+          ordered.reverse!
+          ordered.map!(&:first)
+
+          ordered.each do |job|
+            queue << job
+          end
+        end
+      end
+    end
+  end
+end
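
A small worked example of the LJF (Longest Job First) ordering above, with invented numbers: the cost is the p95 processing time for a topic partition multiplied by the batch size, and the most expensive batch is enqueued first so it overlaps with the cheaper ones instead of becoming the tail of the schedule.

# Illustrative costs as [job, p95_ms * batch_size] pairs:
ordered = [
  [:orders_0,   4.0 * 100], # 400.0
  [:payments_2, 9.0 * 20],  # 180.0
  [:logs_1,     0.5 * 500]  # 250.0
]

# Same steps as in schedule_consumption:
ordered.sort_by!(&:last)
ordered.reverse!
ordered.map!(&:first)
# => [:orders_0, :logs_1, :payments_2]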
data/lib/karafka/pro/routing/extensions.rb
@@ -19,9 +19,15 @@ module Karafka
           # @param base [Class] class we extend
           def included(base)
             base.attr_accessor :long_running_job
+            base.attr_accessor :virtual_partitioner
           end
         end

+        # @return [Boolean] true if virtual partitioner is defined, false otherwise
+        def virtual_partitioner?
+          virtual_partitioner != nil
+        end
+
        # @return [Boolean] is a given job on a topic a long running one
        def long_running_job?
          @long_running_job || false
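
In effect, once the extension is included into the routing topic class, a topic instance gains both accessors and the new predicate (illustrative sketch, not code from this diff):

ktopic.virtual_partitioner               # => nil by default
ktopic.virtual_partitioner?              # => false
ktopic.virtual_partitioner = ->(m) { m.key }
ktopic.virtual_partitioner?              # => true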
data/lib/karafka/processing/coordinator.rb (new file)
@@ -0,0 +1,88 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    # Basic coordinator that allows us to provide coordination objects into consumers.
+    #
+    # This is a wrapping layer to simplify management of work to be handled around consumption.
+    #
+    # @note This coordinator needs to be thread-safe. Some operations are performed only in the
+    #   listener thread, but we go with thread-safe by default for all, not to worry about
+    #   potential future mistakes.
+    class Coordinator
+      # @return [Karafka::TimeTrackers::Pause]
+      attr_reader :pause_tracker
+
+      # @param pause_tracker [Karafka::TimeTrackers::Pause] pause tracker for given topic partition
+      def initialize(pause_tracker)
+        @pause_tracker = pause_tracker
+        @revoked = false
+        @consumptions = {}
+        @running_jobs = 0
+        @mutex = Mutex.new
+      end
+
+      # Starts the coordinator for given consumption jobs
+      # @param _messages [Array<Karafka::Messages::Message>] batch of messages for which we are
+      #   going to coordinate work. Not used with the regular coordinator.
+      def start(_messages)
+        @mutex.synchronize do
+          @running_jobs = 0
+          # We need to clear the consumption results hash here, otherwise we could end up storing
+          # consumption results of consumer instances we no longer control
+          @consumptions.clear
+        end
+      end
+
+      # Increases the number of jobs that we handle with this coordinator
+      def increment
+        @mutex.synchronize { @running_jobs += 1 }
+      end
+
+      # Decrements the number of jobs we handle at the moment
+      def decrement
+        @mutex.synchronize do
+          @running_jobs -= 1
+
+          return @running_jobs unless @running_jobs.negative?
+
+          # This should never happen. If it does, something is heavily out of sync. Please reach
+          # out to us if you encounter this
+          raise Karafka::Errors::InvalidCoordinatorState, 'Was zero before decrementation'
+        end
+      end
+
+      # @param consumer [Object] karafka consumer (normal or pro)
+      # @return [Karafka::Processing::Result] result object which we can use to indicate
+      #   consumption processing state.
+      def consumption(consumer)
+        @mutex.synchronize do
+          @consumptions[consumer] ||= Processing::Result.new
+        end
+      end
+
+      # @return [Boolean] true if all consumption under this coordinator is done and successful
+      def success?
+        @mutex.synchronize { @running_jobs.zero? && @consumptions.values.all?(&:success?) }
+      end
+
+      # Marks given coordinator for a processing group as revoked
+      #
+      # This is invoked in two places:
+      #   - from the main listener loop when we detect revoked partitions
+      #   - from the consumer in case checkpointing fails
+      #
+      # This means we can end up with a consumer being aware that it was revoked prior to the
+      # listener loop dispatching the revocation job. It is ok, as effectively nothing will be
+      # processed until revocation jobs are done.
+      def revoke
+        @mutex.synchronize { @revoked = true }
+      end
+
+      # @return [Boolean] is the partition we are processing revoked or not
+      def revoked?
+        @revoked
+      end
+    end
+  end
+end
data/lib/karafka/processing/coordinators_buffer.rb (new file)
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    # Coordinators buffer used to build and cache coordinators per topic partition
+    #
+    # It provides direct access to the pauses for revocation
+    #
+    # @note This buffer operates only from the listener loop, thus we do not have to make it
+    #   thread-safe.
+    class CoordinatorsBuffer
+      def initialize
+        @pauses_manager = Connection::PausesManager.new
+        @coordinator_class = ::Karafka::App.config.internal.processing.coordinator_class
+        @coordinators = Hash.new { |h, k| h[k] = {} }
+      end
+
+      # @param topic [String] topic name
+      # @param partition [Integer] partition number
+      def find_or_create(topic, partition)
+        @coordinators[topic][partition] ||= @coordinator_class.new(
+          @pauses_manager.fetch(topic, partition)
+        )
+      end
+
+      # Resumes processing of partitions for which the pause time has ended.
+      # @param block we want to run for resumed topic partitions
+      # @yieldparam [String] topic name
+      # @yieldparam [Integer] partition number
+      def resume(&block)
+        @pauses_manager.resume(&block)
+      end
+
+      # @param topic [String] topic name
+      # @param partition [Integer] partition number
+      def revoke(topic, partition)
+        return unless @coordinators[topic].key?(partition)
+
+        # The fact that we delete here does not change the fact that the executor still holds a
+        # reference to this coordinator. We delete it here, as we will no longer process any
+        # new work with it and we may need a new coordinator if we regain this partition, but the
+        # old coordinator may still be in use
+        @coordinators[topic].delete(partition).revoke
+      end
+
+      # Clears coordinators and re-creates the pauses manager.
+      # This should be used only for critical errors recovery
+      def reset
+        @pauses_manager = Connection::PausesManager.new
+        @coordinators.clear
+      end
+    end
+  end
+end
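
A listener-loop sketch of how the buffer above is meant to be used; topic name and partition are invented:

buffer = Karafka::Processing::CoordinatorsBuffer.new

coordinator = buffer.find_or_create('orders', 0) # cached per topic partition

# On rebalance, mark the coordinator revoked and drop it from the cache; a
# fresh one will be built if we regain the partition:
buffer.revoke('orders', 0)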
data/lib/karafka/processing/executor.rb
@@ -30,13 +30,11 @@ module Karafka
     # @param group_id [String] id of the subscription group to which the executor belongs
     # @param client [Karafka::Connection::Client] kafka client
     # @param topic [Karafka::Routing::Topic] topic for which this executor will run
-    # @param pause_tracker [Karafka::TimeTrackers::Pause] fetch pause tracker for pausing
-    def initialize(group_id, client, topic, pause_tracker)
+    def initialize(group_id, client, topic)
       @id = SecureRandom.uuid
       @group_id = group_id
       @client = client
       @topic = topic
-      @pause_tracker = pause_tracker
     end

     # Builds the consumer instance, builds messages batch and sets all that is needed to run the
@@ -45,13 +43,16 @@ module Karafka
     # @param messages [Array<Karafka::Messages::Message>]
     # @param received_at [Time] the moment we've received the batch (actually the moment we've
     #   enqueued it, but good enough)
-    def prepare(messages, received_at)
+    # @param coordinator [Karafka::Processing::Coordinator] coordinator for processing management
+    def before_consume(messages, received_at, coordinator)
       # Recreate consumer with each batch if persistence is not enabled
       # We reload the consumers with each batch instead of relying on some external signals
       # when needed for consistency. That way devs may have it on or off and not in this
       # middle state, where re-creation of a consumer instance would occur only sometimes
       @consumer = nil unless ::Karafka::App.config.consumer_persistence

+      consumer.coordinator = coordinator
+
       # First we build messages batch...
       consumer.messages = Messages::Builders::Messages.call(
         messages,
@@ -59,7 +60,7 @@ module Karafka
         received_at
       )

-      consumer.on_prepare
+      consumer.on_before_consume
     end

     # Runs consumer data processing against given batch and handles failures and errors.
@@ -68,6 +69,11 @@ module Karafka
       consumer.on_consume
     end

+    # Runs consumer after-consumption code
+    def after_consume
+      consumer.on_after_consume
+    end
+
     # Runs the controller `#revoked` method that should be triggered when a given consumer is
     # no longer needed due to partitions reassignment.
     #
@@ -76,9 +82,12 @@ module Karafka
     #
     # @note We run it only when consumer was present, because presence indicates that at least
     #   a single message has been consumed.
+    #
+    # @note We do not reset the consumer but indicate the need for recreation instead, because
+    #   after the revocation there may still be an `#after_consume` running that needs this
+    #   consumer instance.
     def revoked
       consumer.on_revoked if @consumer
-      @consumer = nil
     end

     # Runs the controller `#shutdown` method that should be triggered when a given consumer is
@@ -90,7 +99,6 @@ module Karafka
       # There is a case where the consumer no longer exists because it was revoked; in a case
       # like that we do not build a new instance and shutdown should not be triggered.
       consumer.on_shutdown if @consumer
-      @consumer = nil
     end

     private
@@ -98,10 +106,9 @@ module Karafka
     # @return [Object] cached consumer instance
     def consumer
       @consumer ||= begin
-        consumer = @topic.consumer.new
+        consumer = @topic.consumer_class.new
         consumer.topic = @topic
         consumer.client = @client
-        consumer.pause_tracker = @pause_tracker
         consumer.producer = ::Karafka::App.producer
         consumer
       end
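
Putting the renames together, the per-batch flow driven through an executor now looks roughly like this (sketch; the real invocations happen inside the jobs):

executor.before_consume(messages, received_at, coordinator) # was #prepare; wires the coordinator
executor.consume                                            # runs consumer#on_consume
executor.after_consume                                      # new; runs consumer#on_after_consume
executor.revoked                                            # no longer discards @consumer eagerly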
data/lib/karafka/processing/executors_buffer.rb
@@ -11,30 +11,48 @@ module Karafka
       def initialize(client, subscription_group)
         @subscription_group = subscription_group
         @client = client
-        @buffer = Hash.new { |h, k| h[k] = {} }
+        # We need two layers here to keep track of topics, partitions and processing groups
+        @buffer = Hash.new { |h, k| h[k] = Hash.new { |h2, k2| h2[k2] = {} } }
       end

+      # Finds or creates an executor based on the provided details
+      #
       # @param topic [String] topic name
       # @param partition [Integer] partition number
-      # @param pause [TimeTrackers::Pause] pause corresponding with provided topic and partition
+      # @param parallel_key [String] parallel group key
       # @return [Executor] consumer executor
-      def fetch(
-        topic,
-        partition,
-        pause
-      )
-        ktopic = @subscription_group.topics.find(topic)
+      def find_or_create(topic, partition, parallel_key)
+        ktopic = find_topic(topic)

-        ktopic || raise(Errors::TopicNotFoundError, topic)
-
-        @buffer[ktopic][partition] ||= Executor.new(
+        @buffer[ktopic][partition][parallel_key] ||= Executor.new(
           @subscription_group.id,
           @client,
-          ktopic,
-          pause
+          ktopic
         )
       end

+      # Revokes executors of a given topic partition, so they won't be used anymore for incoming
+      # messages
+      #
+      # @param topic [String] topic name
+      # @param partition [Integer] partition number
+      def revoke(topic, partition)
+        ktopic = find_topic(topic)
+
+        @buffer[ktopic][partition].clear
+      end
+
+      # Finds all the executors available for a given topic partition
+      #
+      # @param topic [String] topic name
+      # @param partition [Integer] partition number
+      # @return [Array<Executor>] executors in use for this topic + partition
+      def find_all(topic, partition)
+        ktopic = find_topic(topic)
+
+        @buffer[ktopic][partition].values
+      end
+
       # Iterates over all available executors and yields them together with topic and partition
       # info
       # @yieldparam [Routing::Topic] karafka routing topic object
@@ -42,8 +60,11 @@ module Karafka
       # @yieldparam [Executor] given executor
       def each
         @buffer.each do |ktopic, partitions|
-          partitions.each do |partition, executor|
-            yield(ktopic, partition, executor)
+          partitions.each do |partition, executors|
+            executors.each do |_parallel_key, executor|
+              # We skip the parallel key here as it does not provide any value when iterating
+              yield(ktopic, partition, executor)
+            end
           end
         end
       end
@@ -52,6 +73,16 @@ module Karafka
       def clear
         @buffer.clear
       end
+
+      private
+
+      # Finds topic based on its name
+      #
+      # @param topic [String] topic we're looking for
+      # @return [Karafka::Routing::Topic] topic we're interested in
+      def find_topic(topic)
+        @subscription_group.topics.find(topic) || raise(Errors::TopicNotFoundError, topic)
+      end
     end
   end
 end
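
A sketch of what the third buffer layer enables: with virtual partitions, several executors can coexist for one topic partition, keyed by the partitioner's group id used as the `parallel_key`. Here `executors` stands in for an ExecutorsBuffer instance and the values are invented:

ex0 = executors.find_or_create('orders', 0, '0')
ex1 = executors.find_or_create('orders', 0, '1') # distinct executor, same partition

executors.find_all('orders', 0) # => [ex0, ex1]
executors.revoke('orders', 0)   # both dropped for future messages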
data/lib/karafka/processing/jobs/base.rb
@@ -5,7 +5,7 @@ module Karafka
     # Namespace for all the jobs that are supposed to run in workers.
     module Jobs
       # Base class for all the job types that are supposed to run in workers threads.
-      # Each job can have 3 main entry-points: `#prepare`, `#call` and `#teardown`
+      # Each job can have 3 main entry-points: `#before_call`, `#call` and `#after_call`
       # Only `#call` is required.
       class Base
         extend Forwardable
@@ -23,10 +23,15 @@ module Karafka
        end

        # When redefined can run any code that should run before executing the proper code
-        def prepare; end
+        def before_call; end
+
+        # The main entry-point of a job
+        def call
+          raise NotImplementedError, 'Please implement in a subclass'
+        end

        # When redefined can run any code that should run after executing the proper code
-        def teardown; end
+        def after_call; end

        # @return [Boolean] is this a non-blocking job
        #
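
A hypothetical subclass illustrating the renamed hooks; per the base class, only `#call` must be implemented:

class MyJob < Karafka::Processing::Jobs::Base
  # Optional setup hook (was #prepare)
  def before_call; end

  # Mandatory; Base#call raises NotImplementedError otherwise
  def call
    # the actual work goes here
  end

  # Optional teardown hook (was #teardown)
  def after_call; end
end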
data/lib/karafka/processing/jobs/consume.rb
@@ -12,23 +12,30 @@ module Karafka
        # @param executor [Karafka::Processing::Executor] executor that is supposed to run a given
        #   job
        # @param messages [Karafka::Messages::Messages] karafka messages batch
+        # @param coordinator [Karafka::Processing::Coordinator] processing coordinator
        # @return [Consume]
-        def initialize(executor, messages)
+        def initialize(executor, messages, coordinator)
          @executor = executor
          @messages = messages
+          @coordinator = coordinator
          @created_at = Time.now
          super()
        end

-        # Runs the preparations on the executor
-        def prepare
-          executor.prepare(@messages, @created_at)
+        # Runs the before-consumption preparations on the executor
+        def before_call
+          executor.before_consume(@messages, @created_at, @coordinator)
        end

        # Runs the given executor
        def call
          executor.consume
        end
+
+        # Runs any error handling and other post-consumption work on the executor
+        def after_call
+          executor.after_consume
+        end
      end
    end
  end
data/lib/karafka/processing/jobs_builder.rb
@@ -7,9 +7,10 @@ module Karafka
    class JobsBuilder
      # @param executor [Karafka::Processing::Executor]
      # @param messages [Karafka::Messages::Messages] messages batch to be consumed
+      # @param coordinator [Karafka::Processing::Coordinator]
      # @return [Karafka::Processing::Jobs::Consume] consumption job
-      def consume(executor, messages)
-        Jobs::Consume.new(executor, messages)
+      def consume(executor, messages, coordinator)
+        Jobs::Consume.new(executor, messages, coordinator)
      end

      # @param executor [Karafka::Processing::Executor]
data/lib/karafka/processing/partitioner.rb (new file)
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    # Basic partitioner for work division
+    # It does not divide any work.
+    class Partitioner
+      # @param subscription_group [Karafka::Routing::SubscriptionGroup] subscription group
+      def initialize(subscription_group)
+        @subscription_group = subscription_group
+      end
+
+      # @param _topic [String] topic name
+      # @param messages [Array<Karafka::Messages::Message>] karafka messages
+      # @yieldparam [Integer] group id
+      # @yieldparam [Array<Karafka::Messages::Message>] karafka messages
+      def call(_topic, messages)
+        yield(0, messages)
+      end
+    end
+  end
+end
data/lib/karafka/processing/result.rb (new file)
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    # A simple object that allows us to keep track of processing state.
+    # It allows us to indicate if a given thing moved from success to failure or the other way
+    # around. Useful for tracking consumption state.
+    class Result
+      def initialize
+        @success = true
+      end
+
+      # @return [Boolean]
+      def success?
+        @success
+      end
+
+      # Marks state as successful
+      def success!
+        @success = true
+      end
+
+      # Marks state as failure
+      def failure!
+        @success = false
+      end
+    end
+  end
+end
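
Usage sketch of the result object:

result = Karafka::Processing::Result.new
result.success? # => true (initial state)
result.failure!
result.success? # => false until #success! is called again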
data/lib/karafka/processing/scheduler.rb (new file)
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Processing
+    # FIFO scheduler for messages coming from various topics and partitions
+    class Scheduler
+      # Schedules jobs in the FIFO order
+      #
+      # @param queue [Karafka::Processing::JobsQueue] queue where we want to put the jobs
+      # @param jobs_array [Array<Karafka::Processing::Jobs::Base>] jobs we want to schedule
+      def schedule_consumption(queue, jobs_array)
+        jobs_array.each do |job|
+          queue << job
+        end
+      end
+
+      # Both revocation and shutdown jobs can also run in FIFO by default
+      alias schedule_revocation schedule_consumption
+      alias schedule_shutdown schedule_consumption
+    end
+  end
+end
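
Usage sketch: jobs reach the queue in arrival order, with no cost-based reordering (contrast with the Pro LJF scheduler above); `jobs_queue`, `job_a` and `job_b` are illustrative.

scheduler = Karafka::Processing::Scheduler.new
scheduler.schedule_consumption(jobs_queue, [job_a, job_b]) # enqueues job_a, then job_b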
data/lib/karafka/processing/worker.rb
@@ -50,7 +50,7 @@ module Karafka
          Karafka.monitor.instrument('worker.process', caller: self, job: job)

          Karafka.monitor.instrument('worker.processed', caller: self, job: job) do
-            job.prepare
+            job.before_call

            # If a job is marked as non blocking, we can run a tick in the job queue and if there
            # are no other blocking factors, the job queue will be unlocked.
@@ -60,7 +60,7 @@ module Karafka

            job.call

-            job.teardown
+            job.after_call

            true
          end
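
With the worker change in place, the renamed hooks line up end to end (sketch of the call chain, not code from this diff):

# worker:   job.before_call          -> job.call         -> job.after_call
# job:      executor.before_consume  -> executor.consume -> executor.after_consume
# consumer: on_before_consume        -> on_consume       -> on_after_consume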
data/lib/karafka/routing/consumer_group.rb
@@ -38,7 +38,7 @@ module Karafka
    # @return [Array<Routing::SubscriptionGroup>] all the subscription groups built based on
    #   the consumer group topics
    def subscription_groups
-      App.config.internal.subscription_groups_builder.call(topics)
+      App.config.internal.routing.subscription_groups_builder.call(topics)
    end

    # Hashed version of consumer group that can be used for validation purposes
data/lib/karafka/routing/topic.rb
@@ -66,6 +66,20 @@ module Karafka
        end
      end

+      # @return [Class] consumer class that we should use
+      # @note This is just an alias to the `#consumer` method. We however want to use it
+      #   internally instead of referencing `#consumer`. We use this to indicate that this method
+      #   returns a class and not an instance. In the routing we want to keep the
+      #   `consumer Consumer` routing syntax, but for references outside, we should use this one.
+      def consumer_class
+        consumer
+      end
+
+      # @return [Boolean] true if this topic's offsets are handled by the end user
+      def manual_offset_management?
+        manual_offset_management
+      end
+
      # @return [Hash] hash with all the topic attributes
      # @note This is being used when we validate the consumer_group and its topics
      def to_h
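
Sketch of the two new readers in use; internal code can now signal intent explicitly:

ktopic.consumer_class            # => the consumer Class (not an instance), alias of #consumer
ktopic.manual_offset_management? # => true when the end user handles offsets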