karafka 2.0.23 → 2.0.26

Sign up to get free protection for your applications and to get access to all the features.
Files changed (87) hide show
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +24 -3
  4. data/.ruby-version +1 -1
  5. data/CHANGELOG.md +52 -1
  6. data/Gemfile.lock +14 -12
  7. data/README.md +6 -4
  8. data/bin/integrations +8 -0
  9. data/bin/verify_license_integrity +35 -0
  10. data/config/{errors.yml → locales/errors.yml} +2 -1
  11. data/config/locales/pro_errors.yml +18 -0
  12. data/docker-compose.yml +3 -0
  13. data/karafka.gemspec +3 -3
  14. data/lib/karafka/active_job/job_options_contract.rb +1 -1
  15. data/lib/karafka/admin.rb +16 -14
  16. data/lib/karafka/app.rb +16 -4
  17. data/lib/karafka/base_consumer.rb +37 -7
  18. data/lib/karafka/connection/client.rb +21 -0
  19. data/lib/karafka/connection/consumer_group_coordinator.rb +7 -1
  20. data/lib/karafka/connection/listener.rb +5 -4
  21. data/lib/karafka/connection/listeners_batch.rb +6 -0
  22. data/lib/karafka/contracts/config.rb +1 -1
  23. data/lib/karafka/contracts/consumer_group.rb +1 -1
  24. data/lib/karafka/contracts/server_cli_options.rb +2 -1
  25. data/lib/karafka/contracts/topic.rb +13 -2
  26. data/lib/karafka/instrumentation/logger_listener.rb +50 -2
  27. data/lib/karafka/instrumentation/notifications.rb +17 -7
  28. data/lib/karafka/instrumentation/proctitle_listener.rb +7 -16
  29. data/lib/karafka/instrumentation/vendors/datadog/listener.rb +2 -2
  30. data/lib/karafka/messages/message.rb +14 -2
  31. data/lib/karafka/messages/parser.rb +14 -0
  32. data/lib/karafka/pro/active_job/job_options_contract.rb +1 -1
  33. data/lib/karafka/pro/encryption/cipher.rb +58 -0
  34. data/lib/karafka/pro/encryption/contracts/config.rb +79 -0
  35. data/lib/karafka/pro/encryption/errors.rb +24 -0
  36. data/lib/karafka/pro/encryption/messages/middleware.rb +46 -0
  37. data/lib/karafka/pro/encryption/messages/parser.rb +56 -0
  38. data/lib/karafka/pro/encryption/setup/config.rb +48 -0
  39. data/lib/karafka/pro/encryption.rb +47 -0
  40. data/lib/karafka/pro/loader.rb +22 -1
  41. data/lib/karafka/pro/processing/strategies/aj_dlq_mom.rb +1 -1
  42. data/lib/karafka/pro/processing/strategies/aj_lrj_mom_vp.rb +6 -1
  43. data/lib/karafka/pro/processing/strategies/aj_mom_vp.rb +1 -1
  44. data/lib/karafka/pro/processing/strategies/default.rb +7 -1
  45. data/lib/karafka/pro/processing/strategies/dlq.rb +1 -1
  46. data/lib/karafka/pro/processing/strategies/dlq_lrj.rb +1 -1
  47. data/lib/karafka/pro/processing/strategies/dlq_lrj_mom.rb +1 -1
  48. data/lib/karafka/pro/processing/strategies/dlq_mom.rb +1 -1
  49. data/lib/karafka/pro/processing/strategies/lrj.rb +6 -1
  50. data/lib/karafka/pro/processing/strategies/lrj_mom.rb +6 -1
  51. data/lib/karafka/pro/processing/strategies/mom.rb +1 -1
  52. data/lib/karafka/pro/routing/features/dead_letter_queue/contract.rb +2 -2
  53. data/lib/karafka/pro/routing/features/long_running_job/contract.rb +2 -2
  54. data/lib/karafka/pro/routing/features/virtual_partitions/contract.rb +2 -2
  55. data/lib/karafka/process.rb +3 -1
  56. data/lib/karafka/processing/executor.rb +1 -1
  57. data/lib/karafka/processing/jobs_queue.rb +2 -2
  58. data/lib/karafka/processing/strategies/aj_dlq_mom.rb +1 -1
  59. data/lib/karafka/processing/strategies/base.rb +5 -0
  60. data/lib/karafka/processing/strategies/default.rb +15 -1
  61. data/lib/karafka/processing/strategies/dlq.rb +1 -1
  62. data/lib/karafka/processing/strategies/dlq_mom.rb +1 -1
  63. data/lib/karafka/processing/strategies/mom.rb +1 -1
  64. data/lib/karafka/processing/worker.rb +3 -1
  65. data/lib/karafka/railtie.rb +3 -0
  66. data/lib/karafka/routing/builder.rb +1 -1
  67. data/lib/karafka/routing/consumer_group.rb +3 -3
  68. data/lib/karafka/routing/consumer_mapper.rb +0 -10
  69. data/lib/karafka/routing/features/active_job/contract.rb +1 -1
  70. data/lib/karafka/routing/features/dead_letter_queue/contract.rb +1 -1
  71. data/lib/karafka/routing/features/manual_offset_management/contract.rb +1 -1
  72. data/lib/karafka/routing/router.rb +12 -2
  73. data/lib/karafka/routing/subscription_group.rb +18 -1
  74. data/lib/karafka/routing/topic.rb +11 -0
  75. data/lib/karafka/runner.rb +1 -0
  76. data/lib/karafka/server.rb +27 -18
  77. data/lib/karafka/setup/config.rb +15 -2
  78. data/lib/karafka/status.rb +33 -9
  79. data/lib/karafka/templates/karafka.rb.erb +1 -2
  80. data/lib/karafka/time_trackers/base.rb +1 -6
  81. data/lib/karafka/time_trackers/pause.rb +5 -3
  82. data/lib/karafka/time_trackers/poll.rb +2 -2
  83. data/lib/karafka/version.rb +1 -1
  84. data/lib/karafka.rb +2 -0
  85. data.tar.gz.sig +0 -0
  86. metadata +18 -8
  87. metadata.gz.sig +0 -0
@@ -0,0 +1,47 @@
1
+ # frozen_string_literal: true
2
+
3
+ # This Karafka component is a Pro component under a commercial license.
4
+ # This Karafka component is NOT licensed under LGPL.
5
+ #
6
+ # All of the commercial components are present in the lib/karafka/pro directory of this
7
+ # repository and their usage requires commercial license agreement.
8
+ #
9
+ # Karafka also has a commercial-friendly license, commercial support and commercial components.
10
+ #
11
+ # By sending a pull request to the pro components, you are agreeing to transfer the copyright of
12
+ # your code to Maciej Mensfeld.
13
+
14
+ module Karafka
15
+ module Pro
16
+ # Out of the box encryption engine for both Karafka and WaterDrop
17
+ # It uses asymmetric encryption via RSA. We use asymmetric so we can have producers that won't
18
+ # have the ability (when the private key is not added) to decrypt messages.
19
+ module Encryption
20
+ class << self
21
+ # Sets up additional config scope, validations and other things
22
+ #
23
+ # @param config [Karafka::Core::Configurable::Node] root node config
24
+ def pre_setup(config)
25
+ # Expand the config with this feature specific stuff
26
+ config.instance_eval do
27
+ setting(:encryption, default: Setup::Config.config)
28
+ end
29
+ end
30
+
31
+ # @param config [Karafka::Core::Configurable::Node] root node config
32
+ def post_setup(config)
33
+ Encryption::Contracts::Config.new.validate!(config.to_h)
34
+
35
+ # Don't inject extra components if encryption is not active
36
+ return unless config.encryption.active
37
+
38
+ # This parser is encryption aware
39
+ config.internal.messages.parser = Messages::Parser.new
40
+
41
+ # Encryption for WaterDrop
42
+ config.producer.middleware.append(Messages::Middleware.new)
43
+ end
44
+ end
45
+ end
46
+ end
47
+ end
@@ -23,6 +23,11 @@ module Karafka
23
23
  processing/jobs/consume_non_blocking
24
24
  processing/strategies/base
25
25
  routing/features/base
26
+ encryption
27
+ encryption/cipher
28
+ encryption/setup/config
29
+ encryption/contracts/config
30
+ encryption/messages/parser
26
31
  ].freeze
27
32
 
28
33
  # Zeitwerk pro loader
@@ -44,14 +49,30 @@ module Karafka
44
49
  # Loads all the pro components and configures them wherever it is expected
45
50
  # @param config [Karafka::Core::Configurable::Node] app config that we can alter with pro
46
51
  # components
47
- def setup(config)
52
+ def pre_setup(config)
53
+ features.each { |feature| feature.pre_setup(config) }
54
+
48
55
  reconfigure(config)
49
56
 
50
57
  load_topic_features
51
58
  end
52
59
 
60
+ # Runs post setup features configuration operations
61
+ #
62
+ # @param config [Karafka::Core::Configurable::Node]
63
+ def post_setup(config)
64
+ features.each { |feature| feature.post_setup(config) }
65
+ end
66
+
53
67
  private
54
68
 
69
+ # @return [Array<Module>] extra non-routing related pro features
70
+ def features
71
+ [
72
+ Encryption
73
+ ]
74
+ end
75
+
55
76
  # Sets proper config options to use pro components
56
77
  # @param config [::Karafka::Core::Configurable::Node] root config node
57
78
  def reconfigure(config)
@@ -42,7 +42,7 @@ module Karafka
42
42
  # Do NOT commit offsets, they are committed after each job in the AJ consumer.
43
43
  coordinator.pause_tracker.reset
44
44
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
45
- pause(coordinator.seek_offset, nil, false)
45
+ retry_after_pause
46
46
  else
47
47
  coordinator.pause_tracker.reset
48
48
  skippable_message = find_skippable_message
@@ -51,7 +51,7 @@ module Karafka
51
51
  # If processing failed, we need to pause
52
52
  # For long running job this will overwrite the default never-ending pause and will
53
53
  # cause the processing to keep going after the error backoff
54
- pause(coordinator.seek_offset, nil, false)
54
+ retry_after_pause
55
55
  end
56
56
  end
57
57
  end
@@ -61,6 +61,11 @@ module Karafka
61
61
  coordinator.on_revoked do
62
62
  coordinator.revoke
63
63
  end
64
+
65
+ Karafka.monitor.instrument('consumer.revoke', caller: self)
66
+ Karafka.monitor.instrument('consumer.revoked', caller: self) do
67
+ revoked
68
+ end
64
69
  end
65
70
  end
66
71
  end
@@ -47,7 +47,7 @@ module Karafka
47
47
 
48
48
  mark_as_consumed(last_group_message)
49
49
  else
50
- pause(coordinator.seek_offset, nil, false)
50
+ retry_after_pause
51
51
  end
52
52
  end
53
53
  end
@@ -46,6 +46,7 @@ module Karafka
46
46
  # This can happen primarily when an LRJ job gets to the internal worker queue and
47
47
  # this partition is revoked prior processing.
48
48
  unless revoked?
49
+ Karafka.monitor.instrument('consumer.consume', caller: self)
49
50
  Karafka.monitor.instrument('consumer.consumed', caller: self) do
50
51
  consume
51
52
  end
@@ -79,7 +80,7 @@ module Karafka
79
80
 
80
81
  mark_as_consumed(last_group_message)
81
82
  else
82
- pause(coordinator.seek_offset, nil, false)
83
+ retry_after_pause
83
84
  end
84
85
  end
85
86
  end
@@ -91,6 +92,11 @@ module Karafka
91
92
 
92
93
  coordinator.revoke
93
94
  end
95
+
96
+ Karafka.monitor.instrument('consumer.revoke', caller: self)
97
+ Karafka.monitor.instrument('consumer.revoked', caller: self) do
98
+ revoked
99
+ end
94
100
  end
95
101
  end
96
102
  end
@@ -36,7 +36,7 @@ module Karafka
36
36
 
37
37
  mark_as_consumed(messages.last)
38
38
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
39
- pause(coordinator.seek_offset, nil, false)
39
+ retry_after_pause
40
40
  # If we've reached number of retries that we could, we need to skip the first message
41
41
  # that was not marked as consumed, pause and continue, while also moving this message
42
42
  # to the dead topic
@@ -41,7 +41,7 @@ module Karafka
41
41
 
42
42
  resume
43
43
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
44
- pause(coordinator.seek_offset, nil, false)
44
+ retry_after_pause
45
45
  else
46
46
  coordinator.pause_tracker.reset
47
47
 
@@ -38,7 +38,7 @@ module Karafka
38
38
 
39
39
  resume
40
40
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
41
- pause(coordinator.seek_offset, nil, false)
41
+ retry_after_pause
42
42
  else
43
43
  coordinator.pause_tracker.reset
44
44
 
@@ -35,7 +35,7 @@ module Karafka
35
35
  if coordinator.success?
36
36
  coordinator.pause_tracker.reset
37
37
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
38
- pause(coordinator.seek_offset, nil, false)
38
+ retry_after_pause
39
39
  # If we've reached number of retries that we could, we need to skip the first message
40
40
  # that was not marked as consumed, pause and continue, while also moving this message
41
41
  # to the dead topic.
@@ -57,7 +57,7 @@ module Karafka
57
57
  # If processing failed, we need to pause
58
58
  # For long running job this will overwrite the default never-ending pause and will
59
59
  # cause the processing to keep going after the error backoff
60
- pause(coordinator.seek_offset, nil, false)
60
+ retry_after_pause
61
61
  end
62
62
  end
63
63
  end
@@ -70,6 +70,11 @@ module Karafka
70
70
  # a failure. Double non-blocking resume could cause problems in coordination.
71
71
  coordinator.revoke
72
72
  end
73
+
74
+ Karafka.monitor.instrument('consumer.revoke', caller: self)
75
+ Karafka.monitor.instrument('consumer.revoked', caller: self) do
76
+ revoked
77
+ end
73
78
  end
74
79
  end
75
80
  end
@@ -50,7 +50,7 @@ module Karafka
50
50
 
51
51
  resume
52
52
  else
53
- pause(coordinator.seek_offset, false)
53
+ retry_after_pause
54
54
  end
55
55
  end
56
56
  end
@@ -60,6 +60,11 @@ module Karafka
60
60
  coordinator.on_revoked do
61
61
  coordinator.revoke
62
62
  end
63
+
64
+ Karafka.monitor.instrument('consumer.revoke', caller: self)
65
+ Karafka.monitor.instrument('consumer.revoked', caller: self) do
66
+ revoked
67
+ end
63
68
  end
64
69
  end
65
70
  end
@@ -32,7 +32,7 @@ module Karafka
32
32
  if coordinator.success?
33
33
  coordinator.pause_tracker.reset
34
34
  else
35
- pause(coordinator.seek_offset, nil, false)
35
+ retry_after_pause
36
36
  end
37
37
  end
38
38
  end
@@ -21,9 +21,9 @@ module Karafka
21
21
  configure do |config|
22
22
  config.error_messages = YAML.safe_load(
23
23
  File.read(
24
- File.join(Karafka.gem_root, 'config', 'errors.yml')
24
+ File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
25
25
  )
26
- ).fetch('en').fetch('validations').fetch('pro_topic')
26
+ ).fetch('en').fetch('validations').fetch('topic')
27
27
  end
28
28
 
29
29
  # Make sure that we don't use DLQ with VP
@@ -21,9 +21,9 @@ module Karafka
21
21
  configure do |config|
22
22
  config.error_messages = YAML.safe_load(
23
23
  File.read(
24
- File.join(Karafka.gem_root, 'config', 'errors.yml')
24
+ File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
25
25
  )
26
- ).fetch('en').fetch('validations').fetch('pro_topic')
26
+ ).fetch('en').fetch('validations').fetch('topic')
27
27
  end
28
28
 
29
29
  nested(:long_running_job) do
@@ -21,9 +21,9 @@ module Karafka
21
21
  configure do |config|
22
22
  config.error_messages = YAML.safe_load(
23
23
  File.read(
24
- File.join(Karafka.gem_root, 'config', 'errors.yml')
24
+ File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
25
25
  )
26
- ).fetch('en').fetch('validations').fetch('pro_topic')
26
+ ).fetch('en').fetch('validations').fetch('topic')
27
27
  end
28
28
 
29
29
  nested(:virtual_partitions) do
@@ -53,12 +53,14 @@ module Karafka
53
53
  # trap context as some things may not work there as expected, that is why we spawn a separate
54
54
  # thread to handle the signals process
55
55
  def trap_signal(signal)
56
- trap(signal) do
56
+ previous_handler = ::Signal.trap(signal) do
57
57
  Thread.new do
58
58
  notice_signal(signal)
59
59
 
60
60
  (@callbacks[signal] || []).each(&:call)
61
61
  end
62
+
63
+ previous_handler.call if previous_handler.respond_to?(:call)
62
64
  end
63
65
  end
64
66
 
@@ -31,7 +31,7 @@ module Karafka
31
31
  # @param client [Karafka::Connection::Client] kafka client
32
32
  # @param topic [Karafka::Routing::Topic] topic for which this executor will run
33
33
  def initialize(group_id, client, topic)
34
- @id = SecureRandom.uuid
34
+ @id = SecureRandom.hex(6)
35
35
  @group_id = group_id
36
36
  @client = client
37
37
  @topic = topic
@@ -125,13 +125,13 @@ module Karafka
125
125
  @semaphores[group_id].pop while wait?(group_id)
126
126
  end
127
127
 
128
- # - `processing` - number of jobs that are currently being processed (active work)
128
+ # - `busy` - number of jobs that are currently being processed (active work)
129
129
  # - `enqueued` - number of jobs in the queue that are waiting to be picked up by a worker
130
130
  #
131
131
  # @return [Hash] hash with basic usage statistics of this queue.
132
132
  def statistics
133
133
  {
134
- processing: size - @queue.size,
134
+ busy: size - @queue.size,
135
135
  enqueued: @queue.size
136
136
  }.freeze
137
137
  end
@@ -26,7 +26,7 @@ module Karafka
26
26
  # Do NOT commit offsets, they are comitted after each job in the AJ consumer.
27
27
  coordinator.pause_tracker.reset
28
28
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
29
- pause(coordinator.seek_offset, nil, false)
29
+ retry_after_pause
30
30
  else
31
31
  coordinator.pause_tracker.reset
32
32
  skippable_message = find_skippable_message
@@ -36,6 +36,11 @@ module Karafka
36
36
  def handle_revoked
37
37
  raise NotImplementedError, 'Implement in a subclass'
38
38
  end
39
+
40
+ # Shutdown handling
41
+ def handle_shutdown
42
+ raise NotImplementedError, 'Implement in a subclass'
43
+ end
39
44
  end
40
45
  end
41
46
  end
@@ -25,6 +25,7 @@ module Karafka
25
25
 
26
26
  # Run the user consumption code
27
27
  def handle_consume
28
+ Karafka.monitor.instrument('consumer.consume', caller: self)
28
29
  Karafka.monitor.instrument('consumer.consumed', caller: self) do
29
30
  consume
30
31
  end
@@ -59,7 +60,7 @@ module Karafka
59
60
 
60
61
  mark_as_consumed(messages.last)
61
62
  else
62
- pause(coordinator.seek_offset, nil, false)
63
+ retry_after_pause
63
64
  end
64
65
  end
65
66
 
@@ -70,6 +71,19 @@ module Karafka
70
71
  resume
71
72
 
72
73
  coordinator.revoke
74
+
75
+ Karafka.monitor.instrument('consumer.revoke', caller: self)
76
+ Karafka.monitor.instrument('consumer.revoked', caller: self) do
77
+ revoked
78
+ end
79
+ end
80
+
81
+ # Runs the shutdown code
82
+ def handle_shutdown
83
+ Karafka.monitor.instrument('consumer.shutting_down', caller: self)
84
+ Karafka.monitor.instrument('consumer.shutdown', caller: self) do
85
+ shutdown
86
+ end
73
87
  end
74
88
  end
75
89
  end
@@ -26,7 +26,7 @@ module Karafka
26
26
 
27
27
  mark_as_consumed(messages.last)
28
28
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
29
- pause(coordinator.seek_offset, nil, false)
29
+ retry_after_pause
30
30
  # If we've reached number of retries that we could, we need to skip the first message
31
31
  # that was not marked as consumed, pause and continue, while also moving this message
32
32
  # to the dead topic
@@ -21,7 +21,7 @@ module Karafka
21
21
  if coordinator.success?
22
22
  coordinator.pause_tracker.reset
23
23
  elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
24
- pause(coordinator.seek_offset, nil, false)
24
+ retry_after_pause
25
25
  # If we've reached number of retries that we could, we need to skip the first message
26
26
  # that was not marked as consumed, pause and continue, while also moving this message
27
27
  # to the dead topic
@@ -20,7 +20,7 @@ module Karafka
20
20
  if coordinator.success?
21
21
  coordinator.pause_tracker.reset
22
22
  else
23
- pause(coordinator.seek_offset, nil, false)
23
+ retry_after_pause
24
24
  end
25
25
  end
26
26
  end
@@ -25,7 +25,7 @@ module Karafka
25
25
  # @param jobs_queue [JobsQueue]
26
26
  # @return [Worker]
27
27
  def initialize(jobs_queue)
28
- @id = SecureRandom.uuid
28
+ @id = SecureRandom.hex(6)
29
29
  @jobs_queue = jobs_queue
30
30
  end
31
31
 
@@ -76,6 +76,8 @@ module Karafka
76
76
  Karafka.monitor.instrument(
77
77
  'error.occurred',
78
78
  caller: self,
79
+ job: job,
80
+ jobs_queue: @jobs_queue,
79
81
  error: e,
80
82
  type: 'worker.process.error'
81
83
  )
@@ -79,6 +79,9 @@ if rails
79
79
  ::Karafka::App.monitor.subscribe('connection.listener.fetch_loop') do
80
80
  # Reload code each time there is a change in the code
81
81
  next unless Rails.application.reloaders.any?(&:updated?)
82
+ # If consumer persistence is enabled, no reason to reload because we will still keep
83
+ # old consumer instances in memory.
84
+ next if Karafka::App.config.consumer_persistence
82
85
 
83
86
  Rails.application.reloader.reload!
84
87
  end
@@ -80,7 +80,7 @@ module Karafka
80
80
  # @param subscription_group_name [String, Symbol] subscription group id. When not provided,
81
81
  # a random uuid will be used
82
82
  # @param block [Proc] further topics definitions
83
- def subscription_group(subscription_group_name = SecureRandom.uuid, &block)
83
+ def subscription_group(subscription_group_name = SubscriptionGroup.id, &block)
84
84
  consumer_group('app') do
85
85
  target.public_send(:subscription_group=, subscription_group_name.to_s, &block)
86
86
  end
@@ -26,7 +26,7 @@ module Karafka
26
26
  @topics = Topics.new([])
27
27
  # Initialize the subscription group so there's always a value for it, since even if not
28
28
  # defined directly, a subscription group will be created
29
- @current_subscription_group_id = SecureRandom.uuid
29
+ @current_subscription_group_id = SubscriptionGroup.id
30
30
  end
31
31
 
32
32
  # @return [Boolean] true if this consumer group should be active in our current process
@@ -55,7 +55,7 @@ module Karafka
55
55
  # topic definition
56
56
  # @param name [String, Symbol] name of the current subscription group
57
57
  # @param block [Proc] block that may include topics definitions
58
- def subscription_group=(name = SecureRandom.uuid, &block)
58
+ def subscription_group=(name = SubscriptionGroup.id, &block)
59
59
  # We cast it here, so the routing supports symbol based but that's anyhow later on
60
60
  # validated as a string
61
61
  @current_subscription_group_id = name
@@ -64,7 +64,7 @@ module Karafka
64
64
 
65
65
  # We need to reset the current subscription group after it is used, so it won't leak
66
66
  # outside to other topics that would be defined without a defined subscription group
67
- @current_subscription_group_id = SecureRandom.uuid
67
+ @current_subscription_group_id = SubscriptionGroup.id
68
68
  end
69
69
 
70
70
  # @return [Array<Routing::SubscriptionGroup>] all the subscription groups build based on
@@ -12,16 +12,6 @@ module Karafka
12
12
  # raw_consumer_group_name
13
13
  # end
14
14
  # end
15
- #
16
- # @example Mapper for replacing "_" with "." in topic names
17
- # class MyMapper
18
- # def call(raw_consumer_group_name)
19
- # [
20
- # Karafka::Helpers::Inflector.map(Karafka::App.config.client_id.to_s),
21
- # raw_consumer_group_name
22
- # ].join('_').gsub('_', '.')
23
- # end
24
- # end
25
15
  class ConsumerMapper
26
16
  # @param raw_consumer_group_name [String, Symbol] string or symbolized consumer group name
27
17
  # @return [String] remapped final consumer group name
@@ -10,7 +10,7 @@ module Karafka
10
10
  configure do |config|
11
11
  config.error_messages = YAML.safe_load(
12
12
  File.read(
13
- File.join(Karafka.gem_root, 'config', 'errors.yml')
13
+ File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
14
14
  )
15
15
  ).fetch('en').fetch('validations').fetch('topic')
16
16
  end
@@ -9,7 +9,7 @@ module Karafka
9
9
  configure do |config|
10
10
  config.error_messages = YAML.safe_load(
11
11
  File.read(
12
- File.join(Karafka.gem_root, 'config', 'errors.yml')
12
+ File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
13
13
  )
14
14
  ).fetch('en').fetch('validations').fetch('topic')
15
15
  end
@@ -9,7 +9,7 @@ module Karafka
9
9
  configure do |config|
10
10
  config.error_messages = YAML.safe_load(
11
11
  File.read(
12
- File.join(Karafka.gem_root, 'config', 'errors.yml')
12
+ File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
13
13
  )
14
14
  ).fetch('en').fetch('validations').fetch('topic')
15
15
  end
@@ -14,16 +14,26 @@ module Karafka
14
14
  # @raise [Karafka::Topic::NonMatchingTopicError] raised if topic name does not match
15
15
  # any route defined by user using routes.draw
16
16
  def find(topic_id)
17
+ find_by(id: topic_id) || raise(Errors::NonMatchingRouteError, topic_id)
18
+ end
19
+
20
+ # Finds first reference of a given topic based on provided lookup attribute
21
+ # @param lookup [Hash<Symbol, String>] hash with attribute - value key pairs
22
+ # @return [Karafka::Routing::Topic, nil] proper route details or nil if not found
23
+ def find_by(lookup)
17
24
  App.consumer_groups.each do |consumer_group|
18
25
  consumer_group.topics.each do |topic|
19
- return topic if topic.id == topic_id
26
+ return topic if lookup.all? do |attribute, value|
27
+ topic.public_send(attribute) == value
28
+ end
20
29
  end
21
30
  end
22
31
 
23
- raise(Errors::NonMatchingRouteError, topic_id)
32
+ nil
24
33
  end
25
34
 
26
35
  module_function :find
36
+ module_function :find_by
27
37
  end
28
38
  end
29
39
  end
@@ -8,7 +8,23 @@ module Karafka
8
8
  # @note One subscription group will always belong to one consumer group, but one consumer
9
9
  # group can have multiple subscription groups.
10
10
  class SubscriptionGroup
11
- attr_reader :id, :name, :topics, :kafka
11
+ attr_reader :id, :name, :topics, :kafka, :consumer_group
12
+
13
+ # Numeric for counting groups
14
+ GROUP_COUNT = Concurrent::AtomicFixnum.new
15
+
16
+ private_constant :GROUP_COUNT
17
+
18
+ class << self
19
+ # Generates new subscription group id that will be used in case of anonymous subscription
20
+ # groups
21
+ # @return [String] hex(6) compatible reproducible id
22
+ def id
23
+ ::Digest::MD5.hexdigest(
24
+ GROUP_COUNT.increment.to_s
25
+ )[0..11]
26
+ end
27
+ end
12
28
 
13
29
  # @param position [Integer] position of this subscription group in all the subscriptions
14
30
  # groups array. We need to have this value for sake of static group memberships, where
@@ -17,6 +33,7 @@ module Karafka
17
33
  # @return [SubscriptionGroup] built subscription group
18
34
  def initialize(position, topics)
19
35
  @name = topics.first.subscription_group
36
+ @consumer_group = topics.first.consumer_group
20
37
  @id = "#{@name}_#{position}"
21
38
  @position = position
22
39
  @topics = topics
@@ -27,6 +27,7 @@ module Karafka
27
27
  @name = name.to_s
28
28
  @consumer_group = consumer_group
29
29
  @attributes = {}
30
+ @active = true
30
31
  # @note We use identifier related to the consumer group that owns a topic, because from
31
32
  # Karafka 0.6 we can handle multiple Kafka instances with the same process and we can
32
33
  # have same topic name across multiple consumer groups
@@ -66,6 +67,12 @@ module Karafka
66
67
  end
67
68
  end
68
69
 
70
+ # Allows disabling the topic by invoking this method and setting it to `false`.
71
+ # @param active [Boolean] should this topic be consumed or not
72
+ def active(active)
73
+ @active = active
74
+ end
75
+
69
76
  # @return [Class] consumer class that we should use
70
77
  # @note This is just an alias to the `#consumer` method. We however want to use it internally
71
78
  # instead of referencing the `#consumer`. We use this to indicate that this method returns
@@ -77,6 +84,9 @@ module Karafka
77
84
 
78
85
  # @return [Boolean] should this topic be in use
79
86
  def active?
87
+ # Never active if disabled via routing
88
+ return false unless @active
89
+
80
90
  topics = Karafka::App.config.internal.routing.active.topics
81
91
 
82
92
  # When empty it means no topics were specified, hence all should be used
@@ -93,6 +103,7 @@ module Karafka
93
103
  Hash[map].merge!(
94
104
  id: id,
95
105
  name: name,
106
+ active: active?,
96
107
  consumer: consumer,
97
108
  consumer_group_id: consumer_group.id,
98
109
  subscription_group: subscription_group