karafka 2.5.2 → 2.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml +18 -0
  3. data/.yard-lint.yml +174 -0
  4. data/CHANGELOG.md +6 -0
  5. data/Gemfile +1 -0
  6. data/Gemfile.lock +24 -19
  7. data/examples/payloads/json/sample_set_03/event_type_1.json +1 -1
  8. data/examples/payloads/json/sample_set_03/event_type_2.json +1 -1
  9. data/examples/payloads/json/sample_set_03/event_type_3.json +1 -1
  10. data/karafka.gemspec +2 -2
  11. data/lib/active_job/queue_adapters/karafka_adapter.rb +2 -2
  12. data/lib/karafka/active_job/consumer.rb +2 -2
  13. data/lib/karafka/active_job/current_attributes.rb +2 -2
  14. data/lib/karafka/active_job/deserializer.rb +1 -1
  15. data/lib/karafka/active_job/dispatcher.rb +2 -2
  16. data/lib/karafka/admin/configs/resource.rb +7 -1
  17. data/lib/karafka/admin/consumer_groups.rb +6 -8
  18. data/lib/karafka/admin/topics.rb +5 -4
  19. data/lib/karafka/admin.rb +10 -10
  20. data/lib/karafka/app.rb +3 -3
  21. data/lib/karafka/base_consumer.rb +1 -1
  22. data/lib/karafka/cli/base.rb +1 -1
  23. data/lib/karafka/cli/console.rb +1 -1
  24. data/lib/karafka/cli/contracts/server.rb +1 -1
  25. data/lib/karafka/cli/help.rb +1 -1
  26. data/lib/karafka/cli/install.rb +2 -1
  27. data/lib/karafka/cli/server.rb +1 -1
  28. data/lib/karafka/cli/swarm.rb +1 -1
  29. data/lib/karafka/connection/client.rb +19 -18
  30. data/lib/karafka/connection/manager.rb +1 -0
  31. data/lib/karafka/connection/proxy.rb +1 -1
  32. data/lib/karafka/connection/rebalance_manager.rb +1 -1
  33. data/lib/karafka/connection/status.rb +1 -0
  34. data/lib/karafka/constraints.rb +1 -1
  35. data/lib/karafka/contracts/base.rb +1 -1
  36. data/lib/karafka/deserializers/payload.rb +1 -1
  37. data/lib/karafka/helpers/async.rb +1 -1
  38. data/lib/karafka/helpers/config_importer.rb +3 -3
  39. data/lib/karafka/helpers/multi_delegator.rb +3 -0
  40. data/lib/karafka/instrumentation/assignments_tracker.rb +2 -1
  41. data/lib/karafka/instrumentation/callbacks/error.rb +2 -2
  42. data/lib/karafka/instrumentation/callbacks/statistics.rb +3 -3
  43. data/lib/karafka/instrumentation/logger.rb +6 -6
  44. data/lib/karafka/instrumentation/monitor.rb +2 -2
  45. data/lib/karafka/instrumentation/vendors/appsignal/base.rb +1 -1
  46. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +1 -1
  47. data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +2 -2
  48. data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb +1 -1
  49. data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +3 -15
  50. data/lib/karafka/messages/builders/batch_metadata.rb +1 -1
  51. data/lib/karafka/pro/active_job/consumer.rb +2 -2
  52. data/lib/karafka/pro/active_job/dispatcher.rb +3 -3
  53. data/lib/karafka/pro/cleaner.rb +3 -3
  54. data/lib/karafka/pro/cli/contracts/server.rb +1 -1
  55. data/lib/karafka/pro/cli/parallel_segments/base.rb +4 -3
  56. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +1 -1
  57. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +1 -1
  58. data/lib/karafka/pro/cli/parallel_segments.rb +1 -1
  59. data/lib/karafka/pro/connection/manager.rb +1 -2
  60. data/lib/karafka/pro/connection/multiplexing/listener.rb +1 -0
  61. data/lib/karafka/pro/contracts/base.rb +1 -1
  62. data/lib/karafka/pro/encryption/cipher.rb +3 -2
  63. data/lib/karafka/pro/encryption/contracts/config.rb +1 -1
  64. data/lib/karafka/pro/encryption/messages/parser.rb +1 -1
  65. data/lib/karafka/pro/encryption/setup/config.rb +1 -1
  66. data/lib/karafka/pro/iterator/tpl_builder.rb +1 -1
  67. data/lib/karafka/pro/iterator.rb +1 -1
  68. data/lib/karafka/pro/loader.rb +1 -1
  69. data/lib/karafka/pro/processing/coordinator.rb +1 -1
  70. data/lib/karafka/pro/processing/filters/base.rb +1 -0
  71. data/lib/karafka/pro/processing/filters/delayer.rb +1 -1
  72. data/lib/karafka/pro/processing/filters/expirer.rb +1 -1
  73. data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +1 -1
  74. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +1 -1
  75. data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +1 -1
  76. data/lib/karafka/pro/processing/jobs/periodic.rb +1 -1
  77. data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +1 -1
  78. data/lib/karafka/pro/processing/jobs_builder.rb +1 -1
  79. data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +1 -0
  80. data/lib/karafka/pro/processing/partitioner.rb +1 -1
  81. data/lib/karafka/pro/processing/strategies/base.rb +1 -1
  82. data/lib/karafka/pro/processing/strategies/default.rb +2 -2
  83. data/lib/karafka/pro/processing/strategy_selector.rb +1 -0
  84. data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +4 -2
  85. data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +4 -2
  86. data/lib/karafka/pro/recurring_tasks/consumer.rb +3 -2
  87. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +2 -2
  88. data/lib/karafka/pro/recurring_tasks/contracts/task.rb +1 -1
  89. data/lib/karafka/pro/recurring_tasks/deserializer.rb +1 -1
  90. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +1 -1
  91. data/lib/karafka/pro/recurring_tasks/executor.rb +2 -1
  92. data/lib/karafka/pro/recurring_tasks/schedule.rb +5 -2
  93. data/lib/karafka/pro/recurring_tasks/serializer.rb +6 -5
  94. data/lib/karafka/pro/recurring_tasks/setup/config.rb +2 -2
  95. data/lib/karafka/pro/recurring_tasks/task.rb +1 -1
  96. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +3 -0
  97. data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +1 -1
  98. data/lib/karafka/pro/routing/features/multiplexing.rb +5 -5
  99. data/lib/karafka/pro/routing/features/offset_metadata.rb +4 -4
  100. data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +1 -1
  101. data/lib/karafka/pro/routing/features/patterns/patterns.rb +1 -1
  102. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +1 -1
  103. data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +1 -1
  104. data/lib/karafka/pro/routing/features/swarm.rb +1 -1
  105. data/lib/karafka/pro/routing/features/throttling/topic.rb +3 -1
  106. data/lib/karafka/pro/scheduled_messages/consumer.rb +1 -1
  107. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +2 -2
  108. data/lib/karafka/pro/scheduled_messages/contracts/message.rb +1 -1
  109. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +3 -2
  110. data/lib/karafka/pro/scheduled_messages/day.rb +1 -0
  111. data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +1 -1
  112. data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +1 -1
  113. data/lib/karafka/pro/scheduled_messages/max_epoch.rb +1 -0
  114. data/lib/karafka/pro/scheduled_messages/proxy.rb +1 -1
  115. data/lib/karafka/pro/scheduled_messages/serializer.rb +3 -3
  116. data/lib/karafka/pro/scheduled_messages/setup/config.rb +2 -2
  117. data/lib/karafka/pro/scheduled_messages/state.rb +1 -0
  118. data/lib/karafka/pro/scheduled_messages/tracker.rb +1 -0
  119. data/lib/karafka/process.rb +4 -4
  120. data/lib/karafka/processing/executor.rb +1 -1
  121. data/lib/karafka/processing/inline_insights/tracker.rb +1 -0
  122. data/lib/karafka/processing/jobs_queue.rb +1 -1
  123. data/lib/karafka/processing/result.rb +1 -0
  124. data/lib/karafka/processing/strategy_selector.rb +1 -0
  125. data/lib/karafka/routing/activity_manager.rb +1 -0
  126. data/lib/karafka/routing/builder.rb +3 -1
  127. data/lib/karafka/routing/contracts/consumer_group.rb +3 -2
  128. data/lib/karafka/routing/contracts/topic.rb +5 -2
  129. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
  130. data/lib/karafka/routing/features/declaratives/topic.rb +5 -2
  131. data/lib/karafka/routing/features/deserializers/topic.rb +3 -3
  132. data/lib/karafka/routing/features/inline_insights.rb +5 -5
  133. data/lib/karafka/routing/router.rb +1 -1
  134. data/lib/karafka/routing/subscription_group.rb +1 -1
  135. data/lib/karafka/routing/subscription_groups_builder.rb +1 -0
  136. data/lib/karafka/routing/topic.rb +3 -3
  137. data/lib/karafka/server.rb +1 -1
  138. data/lib/karafka/setup/attributes_map.rb +4 -2
  139. data/lib/karafka/setup/config.rb +21 -10
  140. data/lib/karafka/setup/config_proxy.rb +209 -0
  141. data/lib/karafka/setup/contracts/config.rb +1 -1
  142. data/lib/karafka/swarm/liveness_listener.rb +1 -0
  143. data/lib/karafka/swarm/manager.rb +7 -6
  144. data/lib/karafka/swarm/node.rb +1 -1
  145. data/lib/karafka/swarm/supervisor.rb +1 -0
  146. data/lib/karafka/time_trackers/base.rb +1 -1
  147. data/lib/karafka/version.rb +1 -1
  148. data/lib/karafka.rb +2 -2
  149. metadata +7 -5
data/lib/karafka/cli/contracts/server.rb

@@ -5,7 +5,7 @@ module Karafka
     # CLI related contracts
     module Contracts
       # Contract for validating correctness of the server cli command options.
-      class Server < ::Karafka::Contracts::Base
+      class Server < Karafka::Contracts::Base
         configure do |config|
           config.error_messages = YAML.safe_load_file(
             File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')

data/lib/karafka/cli/help.rb

@@ -10,7 +10,7 @@ module Karafka
       # Print available commands
       def call
         # Find the longest command for alignment purposes
-        max_command_length = self.class.commands.map(&:name).map(&:size).max
+        max_command_length = self.class.commands.map { |command| command.name.size }.max

         puts 'Karafka commands:'

data/lib/karafka/cli/install.rb

@@ -24,6 +24,7 @@ module Karafka
         'example_consumer.rb.erb' => 'app/consumers/example_consumer.rb'
       }.freeze

+      # Initializes the install command
       def initialize
         super

@@ -52,7 +53,7 @@ module Karafka
           FileUtils.mkdir_p File.dirname(pathed_target)

           template = File.read(Karafka.core_root.join("templates/#{source}"))
-          render = ::ERB.new(template, trim_mode: '-').result(binding)
+          render = ERB.new(template, trim_mode: '-').result(binding)

           File.write(pathed_target, render)

data/lib/karafka/cli/server.rb

@@ -10,7 +10,7 @@ module Karafka
       )

       # Types of things we can include / exclude from the routing via the CLI options
-      SUPPORTED_TYPES = ::Karafka::Routing::ActivityManager::SUPPORTED_TYPES
+      SUPPORTED_TYPES = Karafka::Routing::ActivityManager::SUPPORTED_TYPES

       private_constant :SUPPORTED_TYPES

data/lib/karafka/cli/swarm.rb

@@ -13,7 +13,7 @@ module Karafka

       # Starts the swarm
       def call
-        ::Karafka::Swarm.ensure_supported!
+        Karafka::Swarm.ensure_supported!

         # Print our banner and info in the dev mode
         print_marketing_info if Karafka::App.env.development?

data/lib/karafka/connection/client.rb

@@ -8,7 +8,7 @@ module Karafka
     # It is threadsafe and provides some security measures so we won't end up operating on a
     # closed consumer instance as it causes Ruby VM process to crash.
     class Client
-      include ::Karafka::Core::Helpers::Time
+      include Karafka::Core::Helpers::Time
       include Helpers::ConfigImporter.new(
         logger: %i[logger],
         tick_interval: %i[internal tick_interval],

@@ -37,7 +37,7 @@ module Karafka
      COOP_UNSUBSCRIBE_FACTOR = 0.5

      # Errors upon which we early report that something is off without retrying prior to the
-     # report
+     # report. Aside from those we ALWAYS early report on any fatal error.
      EARLY_REPORT_ERRORS = [
        :inconsistent_group_protocol, # 23
        :max_poll_exceeded, # -147

@@ -48,10 +48,7 @@ module Karafka
        :cluster_authorization_failed, # 31
        :illegal_generation,
        # this will not recover as fencing is permanent
-       :fenced, # -144
        :auto_offset_reset, # -140
-       # This can happen for many reasons, including issues with static membership being fenced
-       :fatal, # -150,
        # This can happen with new rebalance protocol and same group.instance.id in use
        :unreleased_instance_id # 111
      ].freeze

@@ -489,7 +486,7 @@ module Karafka
        # If the seek message offset is in a time format, we need to find the closest "real"
        # offset matching before we seek
        if message.offset.is_a?(Time)
-         tpl = ::Rdkafka::Consumer::TopicPartitionList.new
+         tpl = Rdkafka::Consumer::TopicPartitionList.new
          tpl.add_topic_and_partitions_with_offsets(
            message.topic,
            message.partition => message.offset

@@ -546,9 +543,9 @@ module Karafka
        sg_id = @subscription_group.id

        # Remove callbacks runners that were registered
-       ::Karafka::Core::Instrumentation.statistics_callbacks.delete(sg_id)
-       ::Karafka::Core::Instrumentation.error_callbacks.delete(sg_id)
-       ::Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.delete(sg_id)
+       Karafka::Core::Instrumentation.statistics_callbacks.delete(sg_id)
+       Karafka::Core::Instrumentation.error_callbacks.delete(sg_id)
+       Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.delete(sg_id)

        kafka.close
        @kafka = nil

@@ -564,7 +561,7 @@ module Karafka
      # ignored. We do however want to instrument on it
      def unsubscribe
        kafka.unsubscribe
-     rescue ::Rdkafka::RdkafkaError => e
+     rescue Rdkafka::RdkafkaError => e
        Karafka.monitor.instrument(
          'error.occurred',
          caller: self,

@@ -593,8 +590,8 @@ module Karafka
      # established. It may be `-1` in case we lost the assignment or we did not yet fetch data
      # for this topic partition
      def topic_partition_position(topic, partition)
-       rd_partition = ::Rdkafka::Consumer::Partition.new(partition, nil, 0)
-       tpl = ::Rdkafka::Consumer::TopicPartitionList.new(topic => [rd_partition])
+       rd_partition = Rdkafka::Consumer::Partition.new(partition, nil, 0)
+       tpl = Rdkafka::Consumer::TopicPartitionList.new(topic => [rd_partition])

        kafka.position(tpl).to_h.fetch(topic).first.offset || -1
      end

@@ -645,7 +642,7 @@ module Karafka
        # If we did not exceed total time allocated, it means that we finished because of the
        # tick interval time limitations and not because time run out without any data
        time_poll.exceeded? ? nil : :tick_time
-     rescue ::Rdkafka::RdkafkaError => e
+     rescue Rdkafka::RdkafkaError => e
        early_report = false

        retryable = time_poll.attempts <= MAX_POLL_RETRIES && time_poll.retryable?

@@ -655,6 +652,7 @@ module Karafka
        # Those are mainly network issues and exceeding the max poll interval
        # We want to report early on max poll interval exceeding because it may mean that the
        # underlying processing is taking too much time and it is not LRJ
+
        case e.code
        when *EARLY_REPORT_ERRORS
          early_report = true

@@ -678,6 +676,9 @@ module Karafka
            return e.details
          end

+       # Any fatal error should always cause early report
+       early_report = true if e.fatal?
+
        if early_report || !retryable
          Karafka.monitor.instrument(
            'error.occurred',
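
Note: the fatal-related client hunks above belong together. `:fenced` (-144) and `:fatal` (-150) leave `EARLY_REPORT_ERRORS` because the rescued `Rdkafka::RdkafkaError` can now be asked directly whether it is fatal, and fenced instances surface as fatal errors. A minimal sketch of the resulting decision flow, condensed from the rescue branch above (`early_report?` is a hypothetical helper name, not part of the client):

    def early_report?(e, retryable)
      # Listed, known-bad codes still trigger an early report
      early_report = EARLY_REPORT_ERRORS.include?(e.code)

      # Anything librdkafka marks as fatal (fencing included) reports early
      # too, without needing its own list entry
      early_report = true if e.fatal?

      early_report || !retryable
    end
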
data/lib/karafka/connection/client.rb

@@ -704,7 +705,7 @@ module Karafka
      # Builds a new rdkafka consumer instance based on the subscription group configuration
      # @return [Rdkafka::Consumer]
      def build_consumer
-       ::Rdkafka::Config.logger = logger
+       Rdkafka::Config.logger = logger

        # We need to refresh the setup of this subscription group in case we started running in a
        # swarm. The initial configuration for validation comes from the parent node, but it needs

@@ -712,7 +713,7 @@ module Karafka
        # group instance id.
        @subscription_group.refresh

-       config = ::Rdkafka::Config.new(@subscription_group.kafka)
+       config = Rdkafka::Config.new(@subscription_group.kafka)
        config.consumer_rebalance_listener = @rebalance_callback
        # We want to manage the events queue independently from the messages queue. Thanks to that
        # we can ensure, that we get statistics and errors often enough even when not polling

@@ -724,7 +725,7 @@ module Karafka
        @name = consumer.name

        # Register statistics runner for this particular type of callbacks
-       ::Karafka::Core::Instrumentation.statistics_callbacks.add(
+       Karafka::Core::Instrumentation.statistics_callbacks.add(
          @subscription_group.id,
          Instrumentation::Callbacks::Statistics.new(
            @subscription_group.id,

@@ -734,7 +735,7 @@ module Karafka
        )

        # Register error tracking callback
-       ::Karafka::Core::Instrumentation.error_callbacks.add(
+       Karafka::Core::Instrumentation.error_callbacks.add(
          @subscription_group.id,
          Instrumentation::Callbacks::Error.new(
            @subscription_group.id,

@@ -743,7 +744,7 @@ module Karafka
          )
        )

-       ::Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.add(
+       Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.add(
          @subscription_group.id,
          Instrumentation::Callbacks::OauthbearerTokenRefresh.new(
            consumer

data/lib/karafka/connection/manager.rb

@@ -8,6 +8,7 @@ module Karafka
     # In the OSS version it starts listeners as they are without any connection management or
     # resources utilization supervision and shuts them down or quiets when time has come
     class Manager
+      # Initializes the connection manager
       def initialize
         @once_executions = Set.new
       end

data/lib/karafka/connection/proxy.rb

@@ -155,7 +155,7 @@ module Karafka

       # @param tpl [Rdkafka::Consumer::TopicPartitionList] list of topics and partitions for which
       #   we want to get the lag on the defined CG
-      # @return [Hash<String, Hash>] hash with topics and their partitions lags
+      # @return [Hash{String => Hash}] hash with topics and their partitions lags
       def lag(tpl)
         l_config = proxy_config.committed

data/lib/karafka/connection/rebalance_manager.rb

@@ -43,7 +43,7 @@ module Karafka
        @buffer = buffer

        # Connects itself to the instrumentation pipeline so rebalances can be tracked
-       ::Karafka.monitor.subscribe(self)
+       Karafka.monitor.subscribe(self)
      end

      # Resets the rebalance manager state

data/lib/karafka/connection/status.rb

@@ -46,6 +46,7 @@ module Karafka
        RUBY
      end

+     # Initializes the connection status and sets it to pending
      def initialize
        @mutex = Mutex.new
        pending!

data/lib/karafka/constraints.rb

@@ -42,7 +42,7 @@ module Karafka
      # @param string [String]
      # @return [::Gem::Version]
      def version(string)
-       ::Gem::Version.new(string)
+       Gem::Version.new(string)
      end
    end
  end

data/lib/karafka/contracts/base.rb

@@ -3,7 +3,7 @@
 module Karafka
   module Contracts
     # Base contract for all Karafka contracts
-    class Base < ::Karafka::Core::Contractable::Contract
+    class Base < Karafka::Core::Contractable::Contract
       # @param data [Hash] data for validation
       # @param scope [Array<String>] nested scope if in use
       # @return [Boolean] true if all good

data/lib/karafka/deserializers/payload.rb

@@ -9,7 +9,7 @@ module Karafka
      # @return [Hash] hash with deserialized JSON data
      def call(message)
        # nil payload can be present for example for tombstone messages
-       message.raw_payload.nil? ? nil : ::JSON.parse(message.raw_payload)
+       message.raw_payload.nil? ? nil : JSON.parse(message.raw_payload)
      end
    end
  end

data/lib/karafka/helpers/async.rb

@@ -20,7 +20,7 @@ module Karafka
      #
      # @param base [Class] class we're including this module in
      def included(base)
-       base.extend ::Forwardable
+       base.extend Forwardable

        base.def_delegators :@thread, :join, :terminate, :name
      end

data/lib/karafka/helpers/config_importer.rb

@@ -5,7 +5,7 @@ module Karafka
     # Module allowing for configuration injections. By default injects whole app config
     # Allows for granular config injection
     class ConfigImporter < Module
-      # @param attributes [Hash<Symbol, Array<Symbol>>] map defining what we want to inject.
+      # @param attributes [Hash{Symbol => Array<Symbol>}] map defining what we want to inject.
       #   The key is the name under which attribute will be visible and the value is the full
       #   path to the attribute
       def initialize(attributes = { config: %i[itself] })

@@ -20,7 +20,7 @@ module Karafka
        @attributes.each do |name, path|
          model.class_eval <<~RUBY, __FILE__, __LINE__ + 1
            def #{name}
-             @#{name} ||= ::Karafka::App.config.#{path.join('.')}
+             @#{name} ||= Karafka::App.config.#{path.join('.')}
            end
          RUBY
        end

@@ -33,7 +33,7 @@ module Karafka
        @attributes.each do |name, path|
          model.class_eval <<~RUBY, __FILE__, __LINE__ + 1
            def self.#{name}
-             @#{name} ||= ::Karafka::App.config.#{path.join('.')}
+             @#{name} ||= Karafka::App.config.#{path.join('.')}
            end
          RUBY
        end
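
Note: to see what these heredoc templates generate, take the `Client` include from earlier in this diff: `include Helpers::ConfigImporter.new(tick_interval: %i[internal tick_interval])`. Per the template above, the module then defines a memoized reader equivalent to this sketch:

    # Generated on the including class (instance flavor shown)
    def tick_interval
      @tick_interval ||= Karafka::App.config.internal.tick_interval
    end
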
data/lib/karafka/helpers/multi_delegator.rb

@@ -25,6 +25,9 @@ module Karafka
          self
        end

+       # Creates an alias `to` for the `new` method to enable nice chaining API
+       # @return [MultiDelegator] new instance of the multi delegator
+       # @see #delegate
        alias to new
      end
    end
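
Note: the aliased `to` is what lets the delegator read fluently at the call site: `delegate` defines the forwarding methods and returns the class, and `to` (alias of `new`) then instantiates it with the targets. Karafka's logger uses this pattern to write to several IO targets at once; a usage sketch with hypothetical targets:

    multi_io = Karafka::Helpers::MultiDelegator
               .delegate(:write, :close)
               .to($stdout, File.open('karafka.log', 'a'))

    multi_io.write("this line reaches every target\n")
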
data/lib/karafka/instrumentation/assignments_tracker.rb

@@ -14,6 +14,7 @@ module Karafka
     class AssignmentsTracker
       include Singleton

+      # Initializes the assignments tracker with empty assignments
       def initialize
         @mutex = Mutex.new
         @assignments = Hash.new { |hash, key| hash[key] = [] }

@@ -21,7 +22,7 @@ module Karafka

      # Returns all the active/current assignments of this given process
      #
-     # @return [Hash<Karafka::Routing::Topic, Array<Integer>>]
+     # @return [Hash{Karafka::Routing::Topic => Array<Integer>}]
      #
      # @note Keep in mind, that those assignments can change any time, especially when working
      #   with multiple consumer groups or subscription groups.

data/lib/karafka/instrumentation/callbacks/error.rb

@@ -10,8 +10,8 @@ module Karafka
          monitor: %i[monitor]
        )

-       # @param subscription_group_id [String] id of the current subscription group instance
-       # @param consumer_group_id [String] id of the current consumer group
+       # @param subscription_group_id [String]
+       # @param consumer_group_id [String]
        # @param client_name [String] rdkafka client name
        def initialize(subscription_group_id, consumer_group_id, client_name)
          @subscription_group_id = subscription_group_id

data/lib/karafka/instrumentation/callbacks/statistics.rb

@@ -11,14 +11,14 @@ module Karafka
          monitor: %i[monitor]
        )

-       # @param subscription_group_id [String] id of the current subscription group
-       # @param consumer_group_id [String] id of the current consumer group
+       # @param subscription_group_id [String]
+       # @param consumer_group_id [String]
        # @param client_name [String] rdkafka client name
        def initialize(subscription_group_id, consumer_group_id, client_name)
          @subscription_group_id = subscription_group_id
          @consumer_group_id = consumer_group_id
          @client_name = client_name
-         @statistics_decorator = ::Karafka::Core::Monitoring::StatisticsDecorator.new
+         @statistics_decorator = Karafka::Core::Monitoring::StatisticsDecorator.new
        end

        # Emits decorated statistics to the monitor

data/lib/karafka/instrumentation/logger.rb

@@ -3,15 +3,15 @@
 module Karafka
   module Instrumentation
     # Default logger for Event Delegator
-    # @note It uses ::Logger features - providing basic logging
+    # @note It uses Logger features - providing basic logging
     class Logger < ::Logger
       # Map containing information about log level for given environment
       ENV_MAP = {
-        'production' => ::Logger::ERROR,
-        'test' => ::Logger::ERROR,
-        'development' => ::Logger::INFO,
-        'debug' => ::Logger::DEBUG,
-        'default' => ::Logger::INFO
+        'production' => Logger::ERROR,
+        'test' => Logger::ERROR,
+        'development' => Logger::INFO,
+        'debug' => Logger::DEBUG,
+        'default' => Logger::INFO
       }.freeze

       private_constant :ENV_MAP

data/lib/karafka/instrumentation/monitor.rb

@@ -5,14 +5,14 @@ module Karafka
     # Karafka instrumentation monitor that we use to publish events
     # By default uses our internal notifications bus but can be used with
     # `ActiveSupport::Notifications` as well
-    class Monitor < ::Karafka::Core::Monitoring::Monitor
+    class Monitor < Karafka::Core::Monitoring::Monitor
      attr_reader :notifications_bus

      # @param notifications_bus [Object] either our internal notifications bus or
      #   `ActiveSupport::Notifications`
      # @param namespace [String, nil] namespace for events or nil if no namespace
      def initialize(
-       notifications_bus = ::Karafka::Instrumentation::Notifications.new,
+       notifications_bus = Karafka::Instrumentation::Notifications.new,
        namespace = nil
      )
        super

data/lib/karafka/instrumentation/vendors/appsignal/base.rb

@@ -9,7 +9,7 @@ module Karafka
        module Appsignal
          # Base for all the instrumentation listeners
          class Base
-           include ::Karafka::Core::Configurable
+           include Karafka::Core::Configurable
            extend Forwardable

            # @param block [Proc] configuration block

data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb

@@ -9,7 +9,7 @@ module Karafka
        # A karafka's logger listener for Datadog
        # It depends on the 'ddtrace' gem
        class LoggerListener
-         include ::Karafka::Core::Configurable
+         include Karafka::Core::Configurable
          extend Forwardable

          def_delegators :config, :client, :service_name

data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb

@@ -11,7 +11,7 @@ module Karafka
        #
        # @note You need to setup the `dogstatsd-ruby` client and assign it
        class MetricsListener
-         include ::Karafka::Core::Configurable
+         include Karafka::Core::Configurable
          extend Forwardable

          def_delegators(

@@ -212,7 +212,7 @@ module Karafka
            )
          else
            raise(
-             ::ArgumentError,
+             ArgumentError,
              'distribution_mode setting value must be either :histogram or :distribution'
            )
          end

data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb

@@ -10,7 +10,7 @@ module Karafka
        # Base Kubernetes Listener providing basic HTTP server capabilities to respond with health
        # statuses
        class BaseListener
-         include ::Karafka::Core::Helpers::Time
+         include Karafka::Core::Helpers::Time

          # All good with Karafka
          OK_CODE = '200 OK'

data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb

@@ -26,19 +26,6 @@ module Karafka
        #
        # @note Please use `Kubernetes::SwarmLivenessListener` when operating in the swarm mode
        class LivenessListener < BaseListener
-         # When any of those occurs, it means something went wrong in a way that cannot be
-         # recovered. In such cases we should report that the consumer process is not healthy.
-         # - `fenced` - This instance has been fenced by a newer instance and will not do any
-         #   processing at all never. Fencing most of the time means the instance.group.id has
-         #   been reused without properly terminating the previous consumer process first
-         # - `fatal` - any fatal error that halts the processing forever
-         UNRECOVERABLE_RDKAFKA_ERRORS = [
-           :fenced, # -144
-           :fatal # -150
-         ].freeze
-
-         private_constant :UNRECOVERABLE_RDKAFKA_ERRORS
-
          # @param hostname [String, nil] hostname or nil to bind on all
          # @param port [Integer] TCP port on which we want to run our HTTP status server
          # @param consuming_ttl [Integer] time in ms after which we consider consumption hanging.

@@ -113,8 +100,9 @@ module Karafka

            # We are only interested in the rdkafka errors
            return unless error.is_a?(Rdkafka::RdkafkaError)
-           # We mark as unrecoverable only on certain errors that will not be fixed by retrying
-           return unless UNRECOVERABLE_RDKAFKA_ERRORS.include?(error.code)
+           # When any of those occurs, it means something went wrong in a way that cannot be
+           # recovered. In such cases we should report that the consumer process is not healthy.
+           return unless error.fatal?

            @unrecoverable = error.code
          end
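
Note: the listener now defers to librdkafka's own fatal classification instead of a hard-coded pair of codes, so anything the client marks unrecoverable (fencing included) trips the same gate. A sketch of the presumed probe-side consequence (the `healthy?` helper is hypothetical; the actual HTTP handling lives in `BaseListener`):

    # Once @unrecoverable holds an error code, the status endpoint should stop
    # answering with OK_CODE ('200 OK'), making Kubernetes restart the process
    def healthy?
      @unrecoverable.nil?
    end
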
data/lib/karafka/messages/builders/batch_metadata.rb

@@ -46,7 +46,7 @@ module Karafka
        # @note Message can be from the future in case consumer machine and Kafka cluster drift
        #   apart and the machine is behind the cluster.
        def local_created_at(last_message)
-         now = ::Time.now
+         now = Time.now

          return now unless last_message

data/lib/karafka/pro/active_job/consumer.rb

@@ -14,7 +14,7 @@ module Karafka
      #
      # It contains slightly better revocation warranties than the regular blocking consumer as
      # it can stop processing batch of jobs in the middle after the revocation.
-     class Consumer < ::Karafka::ActiveJob::Consumer
+     class Consumer < Karafka::ActiveJob::Consumer
        # Runs ActiveJob jobs processing and handles lrj if needed
        def consume
          messages.each(clean: true) do |message|

@@ -25,7 +25,7 @@ module Karafka
            # We cannot early stop when running virtual partitions because the intermediate state
            # would force us not to commit the offsets. This would cause extensive
            # double-processing
-           break if Karafka::App.stopping? && !topic.virtual_partitions?
+           break if ::Karafka::App.stopping? && !topic.virtual_partitions?

            consume_job(message)

            mark_as_consumed(message)

data/lib/karafka/pro/active_job/dispatcher.rb

@@ -10,7 +10,7 @@ module Karafka
      # Pro dispatcher that sends the ActiveJob job to a proper topic based on the queue name
      # and that allows to inject additional options into the producer, effectively allowing for a
      # much better and more granular control over the dispatch and consumption process.
-     class Dispatcher < ::Karafka::ActiveJob::Dispatcher
+     class Dispatcher < Karafka::ActiveJob::Dispatcher
        include Helpers::ConfigImporter.new(
          deserializer: %i[internal active_job deserializer]
        )

@@ -31,7 +31,7 @@ module Karafka
        # Allows for setting a callable producer since at the moment of defining the class,
        # variants may not be available
        #
-       # We do not initialize it with `-> { ::Karafka.producer }` so we do not have to call it
+       # We do not initialize it with `-> { Karafka.producer }` so we do not have to call it
        # each time for the defaults to preserve CPU cycles.
        #
        # We also do **not** cache the execution of this producer lambda because we want to

@@ -122,7 +122,7 @@ module Karafka
        def producer(job)
          dynamic_producer = fetch_option(job, :producer, DEFAULTS)

-         dynamic_producer ? dynamic_producer.call(job) : ::Karafka.producer
+         dynamic_producer ? dynamic_producer.call(job) : Karafka.producer
        end

        # @param job [ActiveJob::Base] job instance
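
Note: what this per-job resolution enables: a job class can select its producer at dispatch time through `karafka_options`, falling back to the default `Karafka.producer` when no callable is set. A sketch (the job class and `CRITICAL_PRODUCER` are hypothetical):

    class CriticalReportJob < ActiveJob::Base
      queue_as :reports

      karafka_options(
        # Called with the job at dispatch time, so the producer can be
        # picked lazily, e.g. a dedicated one for critical traffic
        producer: ->(job) { job.queue_name == 'reports' ? CRITICAL_PRODUCER : Karafka.producer }
      )
    end
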
data/lib/karafka/pro/cleaner.rb

@@ -19,9 +19,9 @@ module Karafka
      class << self
        # @param _config [Karafka::Core::Configurable::Node] root node config
        def pre_setup(_config)
-         ::Karafka::Messages::Message.prepend(Messages::Message)
-         ::Karafka::Messages::Metadata.prepend(Messages::Metadata)
-         ::Karafka::Messages::Messages.prepend(Messages::Messages)
+         Karafka::Messages::Message.prepend(Messages::Message)
+         Karafka::Messages::Metadata.prepend(Messages::Metadata)
+         Karafka::Messages::Messages.prepend(Messages::Messages)
        end

        # @param _config [Karafka::Core::Configurable::Node] root node config

data/lib/karafka/pro/cli/contracts/server.rb

@@ -10,7 +10,7 @@ module Karafka
        module Contracts
          # Contract for validating correctness of the server cli command options.
          # It differs slightly from the OSS one because it is aware of the routing patterns
-         class Server < ::Karafka::Cli::Contracts::Server
+         class Server < Karafka::Cli::Contracts::Server
            configure do |config|
              config.error_messages = YAML.safe_load_file(
                File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')

data/lib/karafka/pro/cli/parallel_segments/base.rb

@@ -12,6 +12,7 @@ module Karafka
          include Helpers::Colorize

          # @param options [Hash] cli flags options
+         # @option options [Array<String>] :groups consumer group names to work with
          def initialize(options)
            @options = options
          end

@@ -23,12 +24,12 @@ module Karafka

          # Returns consumer groups for parallel segments with which we should be working
          #
-         # @return [Hash<String, Array<Karafka::Routing::ConsumerGroup>>] hash with all parallel
+         # @return [Hash{String => Array<Karafka::Routing::ConsumerGroup>}] hash with all parallel
          #   consumer groups as values and names of segments origin consumer group as the key.
          def applicable_groups
            requested_groups = options[:groups].dup || []

-           workable_groups = ::Karafka::App
+           workable_groups = Karafka::App
              .routes
              .select(&:parallel_segments?)
              .group_by(&:segment_origin)

@@ -46,7 +47,7 @@ module Karafka
              applicable_groups[requested_group] = workable_group
            else
              raise(
-               ::Karafka::Errors::ConsumerGroupNotFoundError,
+               Karafka::Errors::ConsumerGroupNotFoundError,
                "Consumer group #{requested_group} was not found"
              )
            end

data/lib/karafka/pro/cli/parallel_segments/collapse.rb

@@ -132,7 +132,7 @@ module Karafka
            return unless inconclusive

            raise(
-             ::Karafka::Errors::CommandValidationError,
+             Karafka::Errors::CommandValidationError,
              "Parallel segments for #{red(segment_origin)} have #{red('inconclusive')} offsets"
            )
          end

data/lib/karafka/pro/cli/parallel_segments/distribute.rb

@@ -91,7 +91,7 @@ module Karafka
              next unless offset.to_i.positive?

              raise(
-               ::Karafka::Errors::CommandValidationError,
+               Karafka::Errors::CommandValidationError,
                "Parallel segment #{red(cg_name)} already has offset #{red(offset)} " \
                "set for #{red("#{topic_name}##{partition_id}")}"
              )

data/lib/karafka/pro/cli/parallel_segments.rb

@@ -51,7 +51,7 @@ module Karafka
            Collapse.new(options).call
            Distribute.new(options).call
          else
-           raise ::ArgumentError, "Invalid topics action: #{action}"
+           raise ArgumentError, "Invalid topics action: #{action}"
          end
        end
      end

data/lib/karafka/pro/connection/manager.rb

@@ -184,8 +184,7 @@ module Karafka
            .assignments
            .select { |_, partitions| partitions.size > 1 }
            .keys
-           .map(&:subscription_group)
-           .map(&:name)
+           .map { |sg| sg.subscription_group.name }
            .uniq

          # Select connections for scaling up