karafka 2.0.21 → 2.0.23

Files changed (54)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +6 -0
  4. data/.ruby-version +1 -1
  5. data/CHANGELOG.md +21 -0
  6. data/Gemfile.lock +7 -8
  7. data/README.md +1 -1
  8. data/bin/integrations +2 -2
  9. data/bin/rspecs +2 -0
  10. data/config/errors.yml +5 -2
  11. data/karafka.gemspec +2 -3
  12. data/lib/karafka/admin.rb +84 -4
  13. data/lib/karafka/app.rb +12 -2
  14. data/lib/karafka/base_consumer.rb +8 -11
  15. data/lib/karafka/cli/info.rb +2 -1
  16. data/lib/karafka/cli/server.rb +7 -6
  17. data/lib/karafka/connection/client.rb +4 -4
  18. data/lib/karafka/contracts/server_cli_options.rb +60 -3
  19. data/lib/karafka/contracts/topic.rb +1 -1
  20. data/lib/karafka/licenser.rb +53 -50
  21. data/lib/karafka/pro/loader.rb +1 -3
  22. data/lib/karafka/pro/processing/strategies/aj_dlq_mom.rb +2 -2
  23. data/lib/karafka/pro/processing/strategies/aj_lrj_mom_vp.rb +2 -2
  24. data/lib/karafka/pro/processing/strategies/aj_mom_vp.rb +1 -1
  25. data/lib/karafka/pro/processing/strategies/default.rb +31 -1
  26. data/lib/karafka/pro/processing/strategies/dlq.rb +4 -2
  27. data/lib/karafka/pro/processing/strategies/dlq_lrj.rb +3 -1
  28. data/lib/karafka/pro/processing/strategies/dlq_lrj_mom.rb +1 -1
  29. data/lib/karafka/pro/processing/strategies/dlq_mom.rb +2 -2
  30. data/lib/karafka/pro/processing/strategies/lrj.rb +4 -2
  31. data/lib/karafka/pro/processing/strategies/lrj_mom.rb +2 -2
  32. data/lib/karafka/pro/processing/strategies/mom.rb +1 -1
  33. data/lib/karafka/processing/coordinator.rb +15 -0
  34. data/lib/karafka/processing/jobs_queue.rb +1 -1
  35. data/lib/karafka/processing/strategies/aj_dlq_mom.rb +2 -2
  36. data/lib/karafka/processing/strategies/base.rb +5 -0
  37. data/lib/karafka/processing/strategies/default.rb +26 -1
  38. data/lib/karafka/processing/strategies/dlq.rb +4 -2
  39. data/lib/karafka/processing/strategies/dlq_mom.rb +2 -2
  40. data/lib/karafka/processing/strategies/mom.rb +1 -1
  41. data/lib/karafka/railtie.rb +3 -0
  42. data/lib/karafka/routing/builder.rb +1 -1
  43. data/lib/karafka/routing/consumer_group.rb +6 -3
  44. data/lib/karafka/routing/subscription_group.rb +11 -2
  45. data/lib/karafka/routing/topic.rb +8 -0
  46. data/lib/karafka/routing/topics.rb +8 -0
  47. data/lib/karafka/server.rb +11 -10
  48. data/lib/karafka/setup/config.rb +15 -11
  49. data/lib/karafka/version.rb +1 -1
  50. data/lib/karafka.rb +9 -0
  51. data.tar.gz.sig +0 -0
  52. metadata +7 -22
  53. metadata.gz.sig +0 -0
  54. data/lib/karafka/instrumentation.rb +0 -21
data/lib/karafka/pro/processing/strategies/aj_dlq_mom.rb CHANGED
@@ -42,7 +42,7 @@ module Karafka
  # Do NOT commit offsets, they are comitted after each job in the AJ consumer.
  coordinator.pause_tracker.reset
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  else
  coordinator.pause_tracker.reset
  skippable_message = find_skippable_message
@@ -51,7 +51,7 @@ module Karafka
  # since AJ consumer commits the offset after each job, we also know that the
  # previous job was successful
  mark_as_consumed(skippable_message)
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
  end
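
Note: the change above recurs across nearly every strategy in this release: internal back-off calls gain a third argument, `pause(coordinator.seek_offset, nil, false)`. Together with the coordinator changes further down, this reads as a flag separating system pauses from user-initiated ones. A minimal sketch of the assumed consumer-side signature (base_consumer.rb is listed as changed above, but its hunk is not shown here, so the parameter name and call shape are assumptions):

  # Assumed shape of BaseConsumer#pause after this release (illustrative only)
  def pause(offset, timeout = nil, manual_pause = true)
    # Pause tracking with or without an explicit timeout
    timeout ? coordinator.pause_tracker.pause(timeout) : coordinator.pause_tracker.pause
    client.pause(messages.metadata.topic, messages.metadata.partition, offset)
    # Strategies pass `false` so that error back-offs are not recorded as
    # user-initiated pauses on the coordinator
    coordinator.manual_pause if manual_pause
  end
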
data/lib/karafka/pro/processing/strategies/aj_lrj_mom_vp.rb CHANGED
@@ -33,7 +33,7 @@ module Karafka
  # No actions needed for the standard flow here
  def handle_before_enqueue
  coordinator.on_enqueued do
- pause(coordinator.seek_offset, Lrj::MAX_PAUSE_TIME)
+ pause(coordinator.seek_offset, Lrj::MAX_PAUSE_TIME, false)
  end
  end
 
@@ -51,7 +51,7 @@ module Karafka
  # If processing failed, we need to pause
  # For long running job this will overwrite the default never-ending pause and will
  # cause the processing to keep going after the error backoff
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
  end
data/lib/karafka/pro/processing/strategies/aj_mom_vp.rb CHANGED
@@ -47,7 +47,7 @@ module Karafka
 
  mark_as_consumed(last_group_message)
  else
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
  end
data/lib/karafka/pro/processing/strategies/default.rb CHANGED
@@ -22,6 +22,7 @@ module Karafka
  # Nothing. Just standard, automatic flow
  module Default
  include Base
+ include ::Karafka::Processing::Strategies::Default
 
  # Apply strategy for a non-feature based flow
  FEATURES = %i[].freeze
@@ -39,6 +40,31 @@ module Karafka
  end
  end
 
+ # Run the user consumption code
+ def handle_consume
+ # We should not run the work at all on a partition that was revoked
+ # This can happen primarily when an LRJ job gets to the internal worker queue and
+ # this partition is revoked prior processing.
+ unless revoked?
+ Karafka.monitor.instrument('consumer.consumed', caller: self) do
+ consume
+ end
+ end
+
+ # Mark job as successful
+ coordinator.consumption(self).success!
+ rescue StandardError => e
+ # If failed, mark as failed
+ coordinator.consumption(self).failure!(e)
+
+ # Re-raise so reported in the consumer
+ raise e
+ ensure
+ # We need to decrease number of jobs that this coordinator coordinates as it has
+ # finished
+ coordinator.decrement
+ end
+
  # Standard flow without any features
  def handle_after_consume
  coordinator.on_finished do |last_group_message|
@@ -47,9 +73,13 @@ module Karafka
  if coordinator.success?
  coordinator.pause_tracker.reset
 
+ # Do not mark last message if pause happened. This prevents a scenario where pause
+ # is overridden upon rebalance by marking
+ return if coordinator.manual_pause?
+
  mark_as_consumed(last_group_message)
  else
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
  end
data/lib/karafka/pro/processing/strategies/dlq.rb CHANGED
@@ -32,9 +32,11 @@ module Karafka
  if coordinator.success?
  coordinator.pause_tracker.reset
 
+ return if coordinator.manual_pause?
+
  mark_as_consumed(messages.last)
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  # If we've reached number of retries that we could, we need to skip the first message
  # that was not marked as consumed, pause and continue, while also moving this message
  # to the dead topic
@@ -44,7 +46,7 @@ module Karafka
  skippable_message = find_skippable_message
  dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
  mark_as_consumed(skippable_message)
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
  end
data/lib/karafka/pro/processing/strategies/dlq_lrj.rb CHANGED
@@ -34,12 +34,14 @@ module Karafka
  if coordinator.success?
  coordinator.pause_tracker.reset
 
+ return if coordinator.manual_pause?
+
  mark_as_consumed(last_group_message) unless revoked?
  seek(coordinator.seek_offset) unless revoked?
 
  resume
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  else
  coordinator.pause_tracker.reset
 
data/lib/karafka/pro/processing/strategies/dlq_lrj_mom.rb CHANGED
@@ -38,7 +38,7 @@ module Karafka
 
  resume
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  else
  coordinator.pause_tracker.reset
 
data/lib/karafka/pro/processing/strategies/dlq_mom.rb CHANGED
@@ -35,7 +35,7 @@ module Karafka
  if coordinator.success?
  coordinator.pause_tracker.reset
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  # If we've reached number of retries that we could, we need to skip the first message
  # that was not marked as consumed, pause and continue, while also moving this message
  # to the dead topic.
@@ -51,7 +51,7 @@ module Karafka
  dispatch_to_dlq(skippable_message)
  end
 
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
  end
data/lib/karafka/pro/processing/strategies/lrj.rb CHANGED
@@ -37,7 +37,7 @@ module Karafka
  #
  # For VP it applies the same way and since VP cannot be used with MOM we should not
  # have any edge cases here.
- pause(coordinator.seek_offset, MAX_PAUSE_TIME)
+ pause(coordinator.seek_offset, MAX_PAUSE_TIME, false)
  end
  end
 
@@ -47,6 +47,8 @@ module Karafka
  if coordinator.success?
  coordinator.pause_tracker.reset
 
+ return if coordinator.manual_pause?
+
  mark_as_consumed(last_group_message) unless revoked?
  seek(coordinator.seek_offset) unless revoked?
 
@@ -55,7 +57,7 @@ module Karafka
  # If processing failed, we need to pause
  # For long running job this will overwrite the default never-ending pause and will
  # cause the processing to keep going after the error backoff
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
  end
data/lib/karafka/pro/processing/strategies/lrj_mom.rb CHANGED
@@ -36,7 +36,7 @@ module Karafka
  #
  # For VP it applies the same way and since VP cannot be used with MOM we should not
  # have any edge cases here.
- pause(coordinator.seek_offset, Lrj::MAX_PAUSE_TIME)
+ pause(coordinator.seek_offset, Lrj::MAX_PAUSE_TIME, false)
  end
  end
 
@@ -50,7 +50,7 @@ module Karafka
 
  resume
  else
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, false)
  end
  end
  end
data/lib/karafka/pro/processing/strategies/mom.rb CHANGED
@@ -32,7 +32,7 @@ module Karafka
  if coordinator.success?
  coordinator.pause_tracker.reset
  else
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
  end
data/lib/karafka/processing/coordinator.rb CHANGED
@@ -21,6 +21,7 @@ module Karafka
  @revoked = false
  @consumptions = {}
  @running_jobs = 0
+ @manual_pause = false
  @mutex = Mutex.new
  end
 
@@ -34,6 +35,9 @@ module Karafka
  # consumption results of consumer instances we no longer control
  @consumptions.clear
 
+ # When starting to run, no pause is expected and no manual pause as well
+ @manual_pause = false
+
  # We set it on the first encounter and never again, because then the offset setting
  # should be up to the consumers logic (our or the end user)
  # Seek offset needs to be always initialized as for case where manual offset management
@@ -98,6 +102,17 @@ module Karafka
  def revoked?
  @revoked
  end
+
+ # Store in the coordinator info, that this pause was done manually by the end user and not
+ # by the system itself
+ def manual_pause
+ @mutex.synchronize { @manual_pause = true }
+ end
+
+ # @return [Boolean] are we in a pause that was initiated by the user
+ def manual_pause?
+ @pause_tracker.paused? && @manual_pause
+ end
  end
  end
  end
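
The coordinator now remembers whether the current pause came from the end user. A short illustration of how the flag behaves (the consumer-side calls are assumptions, since the base_consumer.rb hunk is not shown in this extract):

  # System back-off inside a strategy: third argument false, flag stays unset
  pause(coordinator.seek_offset, nil, false)
  coordinator.manual_pause? # => false

  # User pause inside #consume: assumed to default to a manual pause
  pause(messages.last.offset + 1)
  coordinator.manual_pause? # => true for as long as the pause_tracker is paused
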
data/lib/karafka/processing/jobs_queue.rb CHANGED
@@ -100,7 +100,7 @@ module Karafka
  return if @queue.closed?
 
  @queue.close
- @semaphores.values.each(&:close)
+ @semaphores.each_value(&:close)
  end
  end
 
data/lib/karafka/processing/strategies/aj_dlq_mom.rb CHANGED
@@ -26,7 +26,7 @@ module Karafka
  # Do NOT commit offsets, they are comitted after each job in the AJ consumer.
  coordinator.pause_tracker.reset
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  else
  coordinator.pause_tracker.reset
  skippable_message = find_skippable_message
@@ -35,7 +35,7 @@ module Karafka
  # since AJ consumer commits the offset after each job, we also know that the
  # previous job was successful
  mark_as_consumed(skippable_message)
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
  end
data/lib/karafka/processing/strategies/base.rb CHANGED
@@ -22,6 +22,11 @@ module Karafka
  raise NotImplementedError, 'Implement in a subclass'
  end
 
+ # What should happen in the processing
+ def handle_consume
+ raise NotImplementedError, 'Implement in a subclass'
+ end
+
  # Post-consumption handling
  def handle_after_consume
  raise NotImplementedError, 'Implement in a subclass'
data/lib/karafka/processing/strategies/default.rb CHANGED
@@ -23,6 +23,25 @@ module Karafka
  coordinator.pause_tracker.increment
  end
 
+ # Run the user consumption code
+ def handle_consume
+ Karafka.monitor.instrument('consumer.consumed', caller: self) do
+ consume
+ end
+
+ # Mark job as successful
+ coordinator.consumption(self).success!
+ rescue StandardError => e
+ # If failed, mark as failed
+ coordinator.consumption(self).failure!(e)
+
+ # Re-raise so reported in the consumer
+ raise e
+ ensure
+ # We need to decrease number of jobs that this coordinator coordinates as it has finished
+ coordinator.decrement
+ end
+
  # Standard flow marks work as consumed and moves on if everything went ok.
  # If there was a processing error, we will pause and continue from the next message
  # (next that is +1 from the last one that was successfully marked as consumed)
@@ -32,9 +51,15 @@ module Karafka
  if coordinator.success?
  coordinator.pause_tracker.reset
 
+ # We should not move the offset automatically when the partition was paused
+ # If we would not do this upon a revocation during the pause time, a different process
+ # would pick not from the place where we paused but from the offset that would be
+ # automatically committed here
+ return if coordinator.manual_pause?
+
  mark_as_consumed(messages.last)
  else
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
 
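The `consumer.consumed` event name above comes straight from this diff, so it can be observed through the standard monitor. A minimal sketch of a subscriber (the :caller payload key mirrors the `caller: self` argument in the instrument call; the rest is illustrative):

  Karafka.monitor.subscribe('consumer.consumed') do |event|
    # The consumer instance is passed as :caller in handle_consume above
    consumer = event[:caller]
    puts "Consumed #{consumer.messages.count} messages from #{consumer.topic.name}"
  end
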
data/lib/karafka/processing/strategies/dlq.rb CHANGED
@@ -22,9 +22,11 @@ module Karafka
  if coordinator.success?
  coordinator.pause_tracker.reset
 
+ return if coordinator.manual_pause?
+
  mark_as_consumed(messages.last)
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  # If we've reached number of retries that we could, we need to skip the first message
  # that was not marked as consumed, pause and continue, while also moving this message
  # to the dead topic
@@ -43,7 +45,7 @@ module Karafka
  return if revoked?
 
  # We pause to backoff once just in case.
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
 
data/lib/karafka/processing/strategies/dlq_mom.rb CHANGED
@@ -21,7 +21,7 @@ module Karafka
  if coordinator.success?
  coordinator.pause_tracker.reset
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  # If we've reached number of retries that we could, we need to skip the first message
  # that was not marked as consumed, pause and continue, while also moving this message
  # to the dead topic
@@ -33,7 +33,7 @@ module Karafka
  dispatch_to_dlq(skippable_message)
 
  # We pause to backoff once just in case.
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
  end
data/lib/karafka/processing/strategies/mom.rb CHANGED
@@ -20,7 +20,7 @@ module Karafka
  if coordinator.success?
  coordinator.pause_tracker.reset
  else
- pause(coordinator.seek_offset)
+ pause(coordinator.seek_offset, nil, false)
  end
  end
  end
data/lib/karafka/railtie.rb CHANGED
@@ -45,6 +45,9 @@ if rails
 
  next unless Rails.env.development?
  next unless ENV.key?('KARAFKA_CLI')
+ # If we are already publishing to STDOUT, no need to add it again.
+ # If added again, would print stuff twice
+ next if ActiveSupport::Logger.logger_outputs_to?(Rails.logger, $stdout)
 
  logger = ActiveSupport::Logger.new($stdout)
  # Inherit the logger level from Rails, otherwise would always run with the debug level
data/lib/karafka/routing/builder.rb CHANGED
@@ -82,7 +82,7 @@ module Karafka
  # @param block [Proc] further topics definitions
  def subscription_group(subscription_group_name = SecureRandom.uuid, &block)
  consumer_group('app') do
- target.public_send(:subscription_group=, subscription_group_name, &block)
+ target.public_send(:subscription_group=, subscription_group_name.to_s, &block)
  end
  end
 
data/lib/karafka/routing/consumer_group.rb CHANGED
@@ -31,7 +31,10 @@ module Karafka
 
  # @return [Boolean] true if this consumer group should be active in our current process
  def active?
- Karafka::Server.consumer_groups.include?(name)
+ cgs = Karafka::App.config.internal.routing.active.consumer_groups
+
+ # When empty it means no groups were specified, hence all should be used
+ cgs.empty? || cgs.include?(name)
  end
 
  # Builds a topic representation inside of a current consumer group route
@@ -50,9 +53,9 @@ module Karafka
 
  # Assigns the current subscription group id based on the defined one and allows for further
  # topic definition
- # @param name [String, Symbol]
+ # @param name [String, Symbol] name of the current subscription group
  # @param block [Proc] block that may include topics definitions
- def subscription_group=(name, &block)
+ def subscription_group=(name = SecureRandom.uuid, &block)
  # We cast it here, so the routing supports symbol based but that's anyhow later on
  # validated as a string
  @current_subscription_group_id = name
data/lib/karafka/routing/subscription_group.rb CHANGED
@@ -8,7 +8,7 @@ module Karafka
  # @note One subscription group will always belong to one consumer group, but one consumer
  # group can have multiple subscription groups.
  class SubscriptionGroup
- attr_reader :id, :topics, :kafka
+ attr_reader :id, :name, :topics, :kafka
 
  # @param position [Integer] position of this subscription group in all the subscriptions
  # groups array. We need to have this value for sake of static group memberships, where
@@ -16,7 +16,8 @@ module Karafka
  # @param topics [Karafka::Routing::Topics] all the topics that share the same key settings
  # @return [SubscriptionGroup] built subscription group
  def initialize(position, topics)
- @id = "#{topics.first.subscription_group}_#{position}"
+ @name = topics.first.subscription_group
+ @id = "#{@name}_#{position}"
  @position = position
  @topics = topics
  @kafka = build_kafka
@@ -38,6 +39,14 @@ module Karafka
  @topics.first.max_wait_time
  end
 
+ # @return [Boolean] is this subscription group one of active once
+ def active?
+ sgs = Karafka::App.config.internal.routing.active.subscription_groups
+
+ # When empty it means no groups were specified, hence all should be used
+ sgs.empty? || sgs.include?(name)
+ end
+
  private
 
  # @return [Hash] kafka settings are a bit special. They are exactly the same for all of the
data/lib/karafka/routing/topic.rb CHANGED
@@ -75,6 +75,14 @@ module Karafka
  consumer
  end
 
+ # @return [Boolean] should this topic be in use
+ def active?
+ topics = Karafka::App.config.internal.routing.active.topics
+
+ # When empty it means no topics were specified, hence all should be used
+ topics.empty? || topics.include?(name)
+ end
+
  # @return [Hash] hash with all the topic attributes
  # @note This is being used when we validate the consumer_group and its topics
  def to_h
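
Consumer groups, subscription groups and topics now share one pattern: an active? predicate backed by the new internal.routing.active settings (see the config.rb hunk further below), where an empty list means "run everything". A sketch of narrowing one process to selected topics (mutating the internal setting directly is for illustration only; per the config comment below, the CLI is what normally populates it):

  # Run only these topics in the current process
  Karafka::App.config.internal.routing.active.topics = %w[orders payments]

  Karafka::App.routes.each do |consumer_group|
    consumer_group.topics.each do |topic|
      puts "#{topic.name}: #{topic.active? ? 'active' : 'skipped'}"
    end
  end
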
data/lib/karafka/routing/topics.rb CHANGED
@@ -23,6 +23,14 @@ module Karafka
  @accumulator.each(&block)
  end
 
+ # Allows us to remove elements from the topics
+ #
+ # Block to decide what to delete
+ # @param block [Proc]
+ def delete_if(&block)
+ @accumulator.delete_if(&block)
+ end
+
  # Finds topic by its name
  #
  # @param topic_name [String] topic name
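
Topics#delete_if simply delegates to the underlying array. Presumably it exists so the routing layer can prune entries that are not active in the current process; an illustrative (assumed) use:

  # Hypothetical pruning step combining delete_if with the new active? predicate
  consumer_group.topics.delete_if { |topic| !topic.active? }
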
data/lib/karafka/server.rb CHANGED
@@ -20,11 +20,19 @@ module Karafka
  # Set of workers
  attr_accessor :workers
 
- # Writer for list of consumer groups that we want to consume in our current process context
- attr_writer :consumer_groups
-
  # Method which runs app
  def run
+ self.listeners = []
+ self.workers = []
+
+ # We need to validate this prior to running because it may be executed also from the
+ # embedded
+ # We cannot validate this during the start because config needs to be populated and routes
+ # need to be defined.
+ Contracts::ServerCliOptions.new.validate!(
+ Karafka::App.config.internal.routing.active.to_h
+ )
+
  process.on_sigint { stop }
  process.on_sigquit { stop }
  process.on_sigterm { stop }
@@ -49,13 +57,6 @@ module Karafka
  raise e
  end
 
- # @return [Array<String>] array with names of consumer groups that should be consumed in a
- # current server context
- def consumer_groups
- # If not specified, a server will listen on all the topics
- @consumer_groups ||= Karafka::App.consumer_groups.map(&:name).freeze
- end
-
  # Starts Karafka with a supervision
  # @note We don't need to sleep because Karafka::Fetcher is locking and waiting to
  # finish loop (and it won't happen until we explicitly want to stop)
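
With both the reader and writer removed, Karafka::Server.consumer_groups is no longer part of the API; the per-process selection lives in the internal routing settings that Server#run now validates. An assumed migration for code that used the old writer:

  # Before (2.0.21):
  # Karafka::Server.consumer_groups = %w[group_name]

  # After (2.0.23), assumed equivalent via the new internal setting:
  Karafka::App.config.internal.routing.active.consumer_groups = %w[group_name]
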
data/lib/karafka/setup/config.rb CHANGED
@@ -107,6 +107,14 @@ module Karafka
  # option subscription_groups_builder [Routing::SubscriptionGroupsBuilder] subscription
  # group builder
  setting :subscription_groups_builder, default: Routing::SubscriptionGroupsBuilder.new
+
+ # Internally assigned list of limits on routings active for the current process
+ # This should be overwritten by the CLI command
+ setting :active do
+ setting :consumer_groups, default: [].freeze
+ setting :subscription_groups, default: [].freeze
+ setting :topics, default: [].freeze
+ end
  end
 
  setting :processing do
@@ -142,16 +150,18 @@ module Karafka
  # Configuring method
  # @param block [Proc] block we want to execute with the config instance
  def setup(&block)
+ # Will prepare and verify license if present
+ Licenser.prepare_and_verify(config.license)
+ # Will configure all the pro components
+ # This needs to happen before end user configuration as the end user may overwrite some
+ # of the pro defaults with custom components
+ Pro::Loader.setup(config) if Karafka.pro?
+
  configure(&block)
  merge_kafka_defaults!(config)
 
  Contracts::Config.new.validate!(config.to_h)
 
- licenser = Licenser.new
-
- # Tries to load our license gem and if present will try to load the correct license
- licenser.prepare_and_verify(config.license)
-
  configure_components
 
  Karafka::App.initialized!
@@ -188,12 +198,6 @@ module Karafka
  producer_config.kafka = AttributesMap.producer(config.kafka.dup)
  producer_config.logger = config.logger
  end
-
- return unless Karafka.pro?
-
- # Runs the pro loader that includes all the pro components
- require 'karafka/pro/loader'
- Pro::Loader.setup(config)
  end
  end
  end
data/lib/karafka/version.rb CHANGED
@@ -3,5 +3,5 @@
  # Main module namespace
  module Karafka
  # Current Karafka version
- VERSION = '2.0.21'
+ VERSION = '2.0.23'
  end
data/lib/karafka.rb CHANGED
@@ -100,5 +100,14 @@ loader.eager_load
  # nor included here
  ::Karafka::Routing::Features::Base.load_all
 
+ # We need to detect and require (not setup) Pro components during the gem load, because we need
+ # to make pro components available in case anyone wants to use them as a base to their own
+ # custom components. Otherwise inheritance would not work.
+ Karafka::Licenser.detect do
+ require 'karafka/pro/loader'
+
+ Karafka::Pro::Loader.require_all
+ end
+
  # Load railtie after everything else is ready so we know we can rely on it.
  require 'karafka/railtie'
data.tar.gz.sig CHANGED
Binary file