karafka 2.5.1 → 2.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (238)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml +21 -29
  3. data/.github/workflows/ci_macos_arm64.yml +1 -1
  4. data/.github/workflows/push.yml +2 -2
  5. data/.github/workflows/trigger-wiki-refresh.yml +1 -1
  6. data/.ruby-version +1 -1
  7. data/.yard-lint.yml +174 -0
  8. data/CHANGELOG.md +20 -4
  9. data/Gemfile +1 -2
  10. data/Gemfile.lock +45 -41
  11. data/bin/integrations +2 -1
  12. data/bin/rspecs +4 -0
  13. data/config/locales/errors.yml +6 -4
  14. data/config/locales/pro_errors.yml +5 -4
  15. data/docker-compose.yml +1 -1
  16. data/examples/payloads/json/sample_set_02/download.json +191 -0
  17. data/examples/payloads/json/sample_set_03/event_type_1.json +18 -0
  18. data/examples/payloads/json/sample_set_03/event_type_2.json +263 -0
  19. data/examples/payloads/json/sample_set_03/event_type_3.json +41 -0
  20. data/karafka.gemspec +3 -3
  21. data/lib/active_job/queue_adapters/karafka_adapter.rb +3 -3
  22. data/lib/karafka/active_job/consumer.rb +7 -3
  23. data/lib/karafka/active_job/current_attributes/job_wrapper.rb +45 -0
  24. data/lib/karafka/active_job/current_attributes/loading.rb +1 -1
  25. data/lib/karafka/active_job/current_attributes/persistence.rb +19 -7
  26. data/lib/karafka/active_job/current_attributes.rb +3 -2
  27. data/lib/karafka/active_job/deserializer.rb +61 -0
  28. data/lib/karafka/active_job/dispatcher.rb +34 -14
  29. data/lib/karafka/active_job/job_options_contract.rb +2 -4
  30. data/lib/karafka/admin/acl.rb +8 -4
  31. data/lib/karafka/admin/configs/config.rb +6 -4
  32. data/lib/karafka/admin/configs/resource.rb +7 -1
  33. data/lib/karafka/admin/consumer_groups.rb +80 -12
  34. data/lib/karafka/admin/topics.rb +43 -9
  35. data/lib/karafka/admin.rb +23 -14
  36. data/lib/karafka/app.rb +3 -3
  37. data/lib/karafka/base_consumer.rb +6 -6
  38. data/lib/karafka/cli/base.rb +2 -2
  39. data/lib/karafka/cli/console.rb +1 -1
  40. data/lib/karafka/cli/contracts/server.rb +3 -5
  41. data/lib/karafka/cli/help.rb +1 -1
  42. data/lib/karafka/cli/install.rb +3 -2
  43. data/lib/karafka/cli/server.rb +1 -1
  44. data/lib/karafka/cli/swarm.rb +1 -1
  45. data/lib/karafka/cli/topics/align.rb +1 -1
  46. data/lib/karafka/cli/topics/repartition.rb +2 -2
  47. data/lib/karafka/connection/client.rb +30 -19
  48. data/lib/karafka/connection/listeners_batch.rb +2 -3
  49. data/lib/karafka/connection/manager.rb +1 -0
  50. data/lib/karafka/connection/proxy.rb +12 -8
  51. data/lib/karafka/connection/rebalance_manager.rb +1 -1
  52. data/lib/karafka/connection/status.rb +1 -0
  53. data/lib/karafka/constraints.rb +1 -1
  54. data/lib/karafka/contracts/base.rb +1 -1
  55. data/lib/karafka/deserializers/payload.rb +1 -1
  56. data/lib/karafka/env.rb +1 -2
  57. data/lib/karafka/helpers/async.rb +1 -1
  58. data/lib/karafka/helpers/config_importer.rb +3 -3
  59. data/lib/karafka/helpers/interval_runner.rb +4 -1
  60. data/lib/karafka/helpers/multi_delegator.rb +3 -0
  61. data/lib/karafka/instrumentation/assignments_tracker.rb +19 -1
  62. data/lib/karafka/instrumentation/callbacks/error.rb +2 -2
  63. data/lib/karafka/instrumentation/callbacks/statistics.rb +3 -3
  64. data/lib/karafka/instrumentation/logger.rb +6 -6
  65. data/lib/karafka/instrumentation/monitor.rb +3 -3
  66. data/lib/karafka/instrumentation/notifications.rb +1 -0
  67. data/lib/karafka/instrumentation/vendors/appsignal/base.rb +3 -4
  68. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +3 -4
  69. data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +10 -11
  70. data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb +1 -1
  71. data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +5 -18
  72. data/lib/karafka/messages/builders/batch_metadata.rb +2 -2
  73. data/lib/karafka/messages/builders/message.rb +1 -1
  74. data/lib/karafka/messages/messages.rb +2 -3
  75. data/lib/karafka/patches/rdkafka/bindings.rb +6 -6
  76. data/lib/karafka/patches/rdkafka/opaque.rb +1 -1
  77. data/lib/karafka/pro/active_job/consumer.rb +2 -2
  78. data/lib/karafka/pro/active_job/dispatcher.rb +10 -6
  79. data/lib/karafka/pro/active_job/job_options_contract.rb +2 -4
  80. data/lib/karafka/pro/cleaner/messages/messages.rb +2 -3
  81. data/lib/karafka/pro/cleaner.rb +3 -3
  82. data/lib/karafka/pro/cli/contracts/server.rb +3 -5
  83. data/lib/karafka/pro/cli/parallel_segments/base.rb +5 -5
  84. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +3 -3
  85. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +3 -3
  86. data/lib/karafka/pro/cli/parallel_segments.rb +1 -1
  87. data/lib/karafka/pro/connection/manager.rb +3 -4
  88. data/lib/karafka/pro/connection/multiplexing/listener.rb +1 -0
  89. data/lib/karafka/pro/contracts/base.rb +1 -1
  90. data/lib/karafka/pro/encryption/cipher.rb +3 -2
  91. data/lib/karafka/pro/encryption/contracts/config.rb +5 -7
  92. data/lib/karafka/pro/encryption/messages/parser.rb +4 -4
  93. data/lib/karafka/pro/encryption/setup/config.rb +1 -1
  94. data/lib/karafka/pro/instrumentation/performance_tracker.rb +3 -3
  95. data/lib/karafka/pro/iterator/expander.rb +1 -1
  96. data/lib/karafka/pro/iterator/tpl_builder.rb +2 -2
  97. data/lib/karafka/pro/iterator.rb +3 -3
  98. data/lib/karafka/pro/loader.rb +1 -1
  99. data/lib/karafka/pro/processing/coordinator.rb +1 -1
  100. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +2 -3
  101. data/lib/karafka/pro/processing/coordinators/filters_applier.rb +3 -3
  102. data/lib/karafka/pro/processing/filters/base.rb +1 -0
  103. data/lib/karafka/pro/processing/filters/delayer.rb +1 -1
  104. data/lib/karafka/pro/processing/filters/expirer.rb +1 -1
  105. data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +1 -1
  106. data/lib/karafka/pro/processing/filters/throttler.rb +1 -1
  107. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +1 -1
  108. data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +1 -1
  109. data/lib/karafka/pro/processing/jobs/periodic.rb +1 -1
  110. data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +1 -1
  111. data/lib/karafka/pro/processing/jobs_builder.rb +1 -1
  112. data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +1 -0
  113. data/lib/karafka/pro/processing/partitioner.rb +1 -1
  114. data/lib/karafka/pro/processing/schedulers/default.rb +2 -4
  115. data/lib/karafka/pro/processing/strategies/base.rb +1 -1
  116. data/lib/karafka/pro/processing/strategies/default.rb +2 -2
  117. data/lib/karafka/pro/processing/strategies/lrj/default.rb +2 -4
  118. data/lib/karafka/pro/processing/strategies/vp/default.rb +2 -4
  119. data/lib/karafka/pro/processing/strategy_selector.rb +1 -0
  120. data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +2 -3
  121. data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +4 -2
  122. data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +4 -2
  123. data/lib/karafka/pro/recurring_tasks/consumer.rb +3 -2
  124. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +4 -6
  125. data/lib/karafka/pro/recurring_tasks/contracts/task.rb +3 -5
  126. data/lib/karafka/pro/recurring_tasks/deserializer.rb +1 -1
  127. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +7 -6
  128. data/lib/karafka/pro/recurring_tasks/executor.rb +2 -1
  129. data/lib/karafka/pro/recurring_tasks/schedule.rb +9 -8
  130. data/lib/karafka/pro/recurring_tasks/serializer.rb +6 -5
  131. data/lib/karafka/pro/recurring_tasks/setup/config.rb +2 -2
  132. data/lib/karafka/pro/recurring_tasks/task.rb +1 -1
  133. data/lib/karafka/pro/recurring_tasks.rb +8 -5
  134. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +2 -4
  135. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +2 -4
  136. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +3 -0
  137. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +2 -4
  138. data/lib/karafka/pro/routing/features/delaying/topic.rb +2 -4
  139. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +4 -8
  140. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +5 -7
  141. data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +7 -6
  142. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +2 -2
  143. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +2 -4
  144. data/lib/karafka/pro/routing/features/expiring/topic.rb +2 -4
  145. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +2 -4
  146. data/lib/karafka/pro/routing/features/filtering/topic.rb +2 -3
  147. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +2 -4
  148. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +2 -4
  149. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +3 -5
  150. data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +1 -1
  151. data/lib/karafka/pro/routing/features/multiplexing.rb +5 -5
  152. data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +3 -3
  153. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +2 -4
  154. data/lib/karafka/pro/routing/features/offset_metadata.rb +4 -4
  155. data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +1 -1
  156. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +2 -4
  157. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +3 -5
  158. data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +2 -4
  159. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +2 -4
  160. data/lib/karafka/pro/routing/features/patterns/patterns.rb +1 -1
  161. data/lib/karafka/pro/routing/features/pausing/config.rb +26 -0
  162. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +17 -11
  163. data/lib/karafka/pro/routing/features/pausing/topic.rb +69 -8
  164. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +2 -4
  165. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +1 -1
  166. data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +1 -1
  167. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +2 -4
  168. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +2 -4
  169. data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +2 -4
  170. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +6 -8
  171. data/lib/karafka/pro/routing/features/swarm.rb +1 -1
  172. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +2 -4
  173. data/lib/karafka/pro/routing/features/throttling/topic.rb +3 -1
  174. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +2 -4
  175. data/lib/karafka/pro/scheduled_messages/consumer.rb +1 -1
  176. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +4 -6
  177. data/lib/karafka/pro/scheduled_messages/contracts/message.rb +3 -5
  178. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +3 -2
  179. data/lib/karafka/pro/scheduled_messages/day.rb +1 -0
  180. data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +1 -1
  181. data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +1 -1
  182. data/lib/karafka/pro/scheduled_messages/max_epoch.rb +1 -0
  183. data/lib/karafka/pro/scheduled_messages/proxy.rb +1 -1
  184. data/lib/karafka/pro/scheduled_messages/serializer.rb +3 -3
  185. data/lib/karafka/pro/scheduled_messages/setup/config.rb +2 -2
  186. data/lib/karafka/pro/scheduled_messages/state.rb +1 -0
  187. data/lib/karafka/pro/scheduled_messages/tracker.rb +1 -0
  188. data/lib/karafka/pro/scheduled_messages.rb +4 -6
  189. data/lib/karafka/pro/swarm/liveness_listener.rb +2 -2
  190. data/lib/karafka/process.rb +4 -4
  191. data/lib/karafka/processing/coordinator.rb +2 -4
  192. data/lib/karafka/processing/coordinators_buffer.rb +2 -3
  193. data/lib/karafka/processing/executor.rb +3 -4
  194. data/lib/karafka/processing/inline_insights/tracker.rb +1 -0
  195. data/lib/karafka/processing/jobs/base.rb +2 -3
  196. data/lib/karafka/processing/jobs_queue.rb +1 -1
  197. data/lib/karafka/processing/result.rb +1 -0
  198. data/lib/karafka/processing/strategy_selector.rb +1 -0
  199. data/lib/karafka/processing/workers_batch.rb +2 -3
  200. data/lib/karafka/railtie.rb +1 -0
  201. data/lib/karafka/routing/activity_manager.rb +3 -2
  202. data/lib/karafka/routing/builder.rb +8 -8
  203. data/lib/karafka/routing/consumer_group.rb +4 -6
  204. data/lib/karafka/routing/contracts/consumer_group.rb +6 -7
  205. data/lib/karafka/routing/contracts/routing.rb +2 -4
  206. data/lib/karafka/routing/contracts/topic.rb +7 -6
  207. data/lib/karafka/routing/features/active_job/contracts/topic.rb +2 -4
  208. data/lib/karafka/routing/features/active_job/topic.rb +6 -0
  209. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +3 -5
  210. data/lib/karafka/routing/features/declaratives/contracts/topic.rb +3 -5
  211. data/lib/karafka/routing/features/declaratives/topic.rb +5 -2
  212. data/lib/karafka/routing/features/deserializers/contracts/topic.rb +2 -4
  213. data/lib/karafka/routing/features/deserializers/topic.rb +3 -3
  214. data/lib/karafka/routing/features/eofed/contracts/topic.rb +2 -4
  215. data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +2 -4
  216. data/lib/karafka/routing/features/inline_insights.rb +5 -5
  217. data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +2 -4
  218. data/lib/karafka/routing/router.rb +1 -1
  219. data/lib/karafka/routing/subscription_group.rb +1 -1
  220. data/lib/karafka/routing/subscription_groups_builder.rb +1 -0
  221. data/lib/karafka/routing/topic.rb +3 -3
  222. data/lib/karafka/routing/topics.rb +4 -9
  223. data/lib/karafka/server.rb +2 -2
  224. data/lib/karafka/setup/attributes_map.rb +4 -2
  225. data/lib/karafka/setup/config.rb +85 -17
  226. data/lib/karafka/setup/config_proxy.rb +209 -0
  227. data/lib/karafka/setup/contracts/config.rb +13 -11
  228. data/lib/karafka/setup/defaults_injector.rb +3 -2
  229. data/lib/karafka/setup/dsl.rb +2 -3
  230. data/lib/karafka/swarm/liveness_listener.rb +3 -3
  231. data/lib/karafka/swarm/manager.rb +7 -6
  232. data/lib/karafka/swarm/node.rb +1 -1
  233. data/lib/karafka/swarm/supervisor.rb +2 -1
  234. data/lib/karafka/time_trackers/base.rb +1 -1
  235. data/lib/karafka/version.rb +1 -1
  236. data/lib/karafka.rb +4 -4
  237. metadata +14 -6
  238. data/.diffend.yml +0 -3
@@ -10,7 +10,7 @@ module Karafka
10
10
  # Print available commands
11
11
  def call
12
12
  # Find the longest command for alignment purposes
13
- max_command_length = self.class.commands.map(&:name).map(&:size).max
13
+ max_command_length = self.class.commands.map { |command| command.name.size }.max
14
14
 
15
15
  puts 'Karafka commands:'
16
16
 
@@ -24,6 +24,7 @@ module Karafka
24
24
  'example_consumer.rb.erb' => 'app/consumers/example_consumer.rb'
25
25
  }.freeze
26
26
 
27
+ # Initializes the install command
27
28
  def initialize
28
29
  super
29
30
 
@@ -52,9 +53,9 @@ module Karafka
52
53
  FileUtils.mkdir_p File.dirname(pathed_target)
53
54
 
54
55
  template = File.read(Karafka.core_root.join("templates/#{source}"))
55
- render = ::ERB.new(template, trim_mode: '-').result(binding)
56
+ render = ERB.new(template, trim_mode: '-').result(binding)
56
57
 
57
- File.open(pathed_target, 'w') { |file| file.write(render) }
58
+ File.write(pathed_target, render)
58
59
 
59
60
  puts "#{green('Created')} #{target}"
60
61
  end
@@ -10,7 +10,7 @@ module Karafka
10
10
  )
11
11
 
12
12
  # Types of things we can include / exclude from the routing via the CLI options
13
- SUPPORTED_TYPES = ::Karafka::Routing::ActivityManager::SUPPORTED_TYPES
13
+ SUPPORTED_TYPES = Karafka::Routing::ActivityManager::SUPPORTED_TYPES
14
14
 
15
15
  private_constant :SUPPORTED_TYPES
16
16
 
@@ -13,7 +13,7 @@ module Karafka
13
13
 
14
14
  # Starts the swarm
15
15
  def call
16
- ::Karafka::Swarm.ensure_supported!
16
+ Karafka::Swarm.ensure_supported!
17
17
 
18
18
  # Print our banner and info in the dev mode
19
19
  print_marketing_info if Karafka::App.env.development?
@@ -95,7 +95,7 @@ module Karafka
95
95
  names = config.synonyms.map(&:name) << config.name
96
96
 
97
97
  # We move forward only if given topic config is for altering
98
- next if (desired_configs.keys & names).empty?
98
+ next unless desired_configs.keys.intersect?(names)
99
99
 
100
100
  desired_config = nil
101
101
 
@@ -10,9 +10,9 @@ module Karafka
10
10
  def call
11
11
  any_repartitioned = false
12
12
 
13
- existing_partitions = existing_topics.map do |topic|
13
+ existing_partitions = existing_topics.to_h do |topic|
14
14
  [topic.fetch(:topic_name), topic.fetch(:partition_count)]
15
- end.to_h
15
+ end
16
16
 
17
17
  declaratives_routing_topics.each do |topic|
18
18
  name = topic.name
@@ -8,7 +8,7 @@ module Karafka
8
8
  # It is threadsafe and provides some security measures so we won't end up operating on a
9
9
  # closed consumer instance as it causes Ruby VM process to crash.
10
10
  class Client
11
- include ::Karafka::Core::Helpers::Time
11
+ include Karafka::Core::Helpers::Time
12
12
  include Helpers::ConfigImporter.new(
13
13
  logger: %i[logger],
14
14
  tick_interval: %i[internal tick_interval],
@@ -37,7 +37,7 @@ module Karafka
37
37
  COOP_UNSUBSCRIBE_FACTOR = 0.5
38
38
 
39
39
  # Errors upon which we early report that something is off without retrying prior to the
40
- # report
40
+ # report. Aside from those we ALWAYS early report on any fatal error.
41
41
  EARLY_REPORT_ERRORS = [
42
42
  :inconsistent_group_protocol, # 23
43
43
  :max_poll_exceeded, # -147
@@ -48,10 +48,9 @@ module Karafka
48
48
  :cluster_authorization_failed, # 31
49
49
  :illegal_generation,
50
50
  # this will not recover as fencing is permanent
51
- :fenced, # -144
52
51
  :auto_offset_reset, # -140
53
- # This can happen for many reasons, including issues with static membership being fenced
54
- :fatal # -150
52
+ # This can happen with new rebalance protocol and same group.instance.id in use
53
+ :unreleased_instance_id # 111
55
54
  ].freeze
56
55
 
57
56
  private_constant :MAX_POLL_RETRIES, :COOP_UNSUBSCRIBE_FACTOR, :EARLY_REPORT_ERRORS
@@ -407,6 +406,14 @@ module Karafka
407
406
  # 2ms when no callbacks are triggered.
408
407
  def events_poll(timeout = 0)
409
408
  kafka.events_poll(timeout)
409
+
410
+ # Emit event for monitoring - happens once per tick_interval (default 5s)
411
+ # Listeners can check assignment_lost?, track polling health, etc.
412
+ Karafka.monitor.instrument(
413
+ 'client.events_poll',
414
+ caller: self,
415
+ subscription_group: @subscription_group
416
+ )
410
417
  end
411
418
 
412
419
  # Returns pointer to the consumer group metadata. It is used only in the context of
@@ -479,7 +486,7 @@ module Karafka
479
486
  # If the seek message offset is in a time format, we need to find the closest "real"
480
487
  # offset matching before we seek
481
488
  if message.offset.is_a?(Time)
482
- tpl = ::Rdkafka::Consumer::TopicPartitionList.new
489
+ tpl = Rdkafka::Consumer::TopicPartitionList.new
483
490
  tpl.add_topic_and_partitions_with_offsets(
484
491
  message.topic,
485
492
  message.partition => message.offset
@@ -536,9 +543,9 @@ module Karafka
536
543
  sg_id = @subscription_group.id
537
544
 
538
545
  # Remove callbacks runners that were registered
539
- ::Karafka::Core::Instrumentation.statistics_callbacks.delete(sg_id)
540
- ::Karafka::Core::Instrumentation.error_callbacks.delete(sg_id)
541
- ::Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.delete(sg_id)
546
+ Karafka::Core::Instrumentation.statistics_callbacks.delete(sg_id)
547
+ Karafka::Core::Instrumentation.error_callbacks.delete(sg_id)
548
+ Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.delete(sg_id)
542
549
 
543
550
  kafka.close
544
551
  @kafka = nil
@@ -554,7 +561,7 @@ module Karafka
554
561
  # ignored. We do however want to instrument on it
555
562
  def unsubscribe
556
563
  kafka.unsubscribe
557
- rescue ::Rdkafka::RdkafkaError => e
564
+ rescue Rdkafka::RdkafkaError => e
558
565
  Karafka.monitor.instrument(
559
566
  'error.occurred',
560
567
  caller: self,
@@ -583,8 +590,8 @@ module Karafka
583
590
  # established. It may be `-1` in case we lost the assignment or we did not yet fetch data
584
591
  # for this topic partition
585
592
  def topic_partition_position(topic, partition)
586
- rd_partition = ::Rdkafka::Consumer::Partition.new(partition, nil, 0)
587
- tpl = ::Rdkafka::Consumer::TopicPartitionList.new(topic => [rd_partition])
593
+ rd_partition = Rdkafka::Consumer::Partition.new(partition, nil, 0)
594
+ tpl = Rdkafka::Consumer::TopicPartitionList.new(topic => [rd_partition])
588
595
 
589
596
  kafka.position(tpl).to_h.fetch(topic).first.offset || -1
590
597
  end
@@ -612,7 +619,7 @@ module Karafka
612
619
  # We should not run a single poll longer than the tick frequency. Otherwise during a single
613
620
  # `#batch_poll` we would not be able to run `#events_poll` often enough effectively
614
621
  # blocking events from being handled.
615
- poll_tick = timeout > tick_interval ? tick_interval : timeout
622
+ poll_tick = [timeout, tick_interval].min
616
623
 
617
624
  result = kafka.poll(poll_tick)
618
625
 
@@ -635,7 +642,7 @@ module Karafka
635
642
  # If we did not exceed total time allocated, it means that we finished because of the
636
643
  # tick interval time limitations and not because time run out without any data
637
644
  time_poll.exceeded? ? nil : :tick_time
638
- rescue ::Rdkafka::RdkafkaError => e
645
+ rescue Rdkafka::RdkafkaError => e
639
646
  early_report = false
640
647
 
641
648
  retryable = time_poll.attempts <= MAX_POLL_RETRIES && time_poll.retryable?
@@ -645,6 +652,7 @@ module Karafka
645
652
  # Those are mainly network issues and exceeding the max poll interval
646
653
  # We want to report early on max poll interval exceeding because it may mean that the
647
654
  # underlying processing is taking too much time and it is not LRJ
655
+
648
656
  case e.code
649
657
  when *EARLY_REPORT_ERRORS
650
658
  early_report = true
@@ -668,6 +676,9 @@ module Karafka
668
676
  return e.details
669
677
  end
670
678
 
679
+ # Any fatal error should always cause early report
680
+ early_report = true if e.fatal?
681
+
671
682
  if early_report || !retryable
672
683
  Karafka.monitor.instrument(
673
684
  'error.occurred',
@@ -694,7 +705,7 @@ module Karafka
694
705
  # Builds a new rdkafka consumer instance based on the subscription group configuration
695
706
  # @return [Rdkafka::Consumer]
696
707
  def build_consumer
697
- ::Rdkafka::Config.logger = logger
708
+ Rdkafka::Config.logger = logger
698
709
 
699
710
  # We need to refresh the setup of this subscription group in case we started running in a
700
711
  # swarm. The initial configuration for validation comes from the parent node, but it needs
@@ -702,7 +713,7 @@ module Karafka
702
713
  # group instance id.
703
714
  @subscription_group.refresh
704
715
 
705
- config = ::Rdkafka::Config.new(@subscription_group.kafka)
716
+ config = Rdkafka::Config.new(@subscription_group.kafka)
706
717
  config.consumer_rebalance_listener = @rebalance_callback
707
718
  # We want to manage the events queue independently from the messages queue. Thanks to that
708
719
  # we can ensure, that we get statistics and errors often enough even when not polling
@@ -714,7 +725,7 @@ module Karafka
714
725
  @name = consumer.name
715
726
 
716
727
  # Register statistics runner for this particular type of callbacks
717
- ::Karafka::Core::Instrumentation.statistics_callbacks.add(
728
+ Karafka::Core::Instrumentation.statistics_callbacks.add(
718
729
  @subscription_group.id,
719
730
  Instrumentation::Callbacks::Statistics.new(
720
731
  @subscription_group.id,
@@ -724,7 +735,7 @@ module Karafka
724
735
  )
725
736
 
726
737
  # Register error tracking callback
727
- ::Karafka::Core::Instrumentation.error_callbacks.add(
738
+ Karafka::Core::Instrumentation.error_callbacks.add(
728
739
  @subscription_group.id,
729
740
  Instrumentation::Callbacks::Error.new(
730
741
  @subscription_group.id,
@@ -733,7 +744,7 @@ module Karafka
733
744
  )
734
745
  )
735
746
 
736
- ::Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.add(
747
+ Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.add(
737
748
  @subscription_group.id,
738
749
  Instrumentation::Callbacks::OauthbearerTokenRefresh.new(
739
750
  consumer
@@ -25,9 +25,8 @@ module Karafka
25
25
  end
26
26
 
27
27
  # Iterates over available listeners and yields each listener
28
- # @param block [Proc] block we want to run
29
- def each(&block)
30
- @batch.each(&block)
28
+ def each(&)
29
+ @batch.each(&)
31
30
  end
32
31
 
33
32
  # @return [Array<Listener>] active listeners
@@ -8,6 +8,7 @@ module Karafka
8
8
  # In the OSS version it starts listeners as they are without any connection management or
9
9
  # resources utilization supervision and shuts them down or quiets when time has come
10
10
  class Manager
11
+ # Initializes the connection manager
11
12
  def initialize
12
13
  @once_executions = Set.new
13
14
  end
@@ -10,6 +10,10 @@ module Karafka
10
10
  # do still want to be able to alter some functionalities. This wrapper helps us do it when
11
11
  # it would be needed
12
12
  class Proxy < SimpleDelegator
13
+ include Helpers::ConfigImporter.new(
14
+ proxy_config: %i[internal connection proxy]
15
+ )
16
+
13
17
  # Errors on which we want to retry
14
18
  # Includes temporary errors related to node not being (or not yet being) coordinator or a
15
19
  # leader to a given set of partitions. Usually goes away after a retry
@@ -21,6 +25,7 @@ module Karafka
21
25
  not_coordinator
22
26
  not_leader_for_partition
23
27
  coordinator_load_in_progress
28
+ stale_member_epoch
24
29
  ].freeze
25
30
 
26
31
  private_constant :RETRYABLE_DEFAULT_ERRORS
@@ -36,7 +41,6 @@ module Karafka
36
41
  # wrap an already wrapped object with another proxy level. Simplifies passing consumers
37
42
  # and makes it safe to wrap without type checking
38
43
  @wrapped = obj.is_a?(self.class) ? obj.wrapped : obj
39
- @config = ::Karafka::App.config.internal.connection.proxy
40
44
  end
41
45
 
42
46
  # Proxies the `#query_watermark_offsets` with extra recovery from timeout problems.
@@ -47,7 +51,7 @@ module Karafka
47
51
  # @param partition [Integer] partition number
48
52
  # @return [Array<Integer, Integer>] watermark offsets
49
53
  def query_watermark_offsets(topic, partition)
50
- l_config = @config.query_watermark_offsets
54
+ l_config = proxy_config.query_watermark_offsets
51
55
 
52
56
  # For newly created topics or in cases where we're trying to get them but there is no
53
57
  # leader, this can fail. It happens more often for new topics under KRaft, however we
@@ -67,7 +71,7 @@ module Karafka
67
71
  # @param tpl [Rdkafka::Consumer::TopicPartitionList] tpl to get time offsets
68
72
  # @return [Rdkafka::Consumer::TopicPartitionList] tpl with time offsets
69
73
  def offsets_for_times(tpl)
70
- l_config = @config.offsets_for_times
74
+ l_config = proxy_config.offsets_for_times
71
75
 
72
76
  with_broker_errors_retry(
73
77
  # required to be in seconds, not ms
@@ -84,7 +88,7 @@ module Karafka
84
88
  # assignment tpl usage
85
89
  # @return [Rdkafka::Consumer::TopicPartitionList] tpl with committed offsets and metadata
86
90
  def committed(tpl = nil)
87
- c_config = @config.committed
91
+ c_config = proxy_config.committed
88
92
 
89
93
  with_broker_errors_retry(
90
94
  # required to be in seconds, not ms
@@ -121,7 +125,7 @@ module Karafka
121
125
  # even when no stored, because with sync commit, it refreshes the ownership state of the
122
126
  # consumer in a sync way.
123
127
  def commit_offsets(tpl = nil, async: true)
124
- c_config = @config.commit
128
+ c_config = proxy_config.commit
125
129
 
126
130
  with_broker_errors_retry(
127
131
  wait_time: c_config.wait_time / 1_000.to_f,
@@ -151,9 +155,9 @@ module Karafka
151
155
 
152
156
  # @param tpl [Rdkafka::Consumer::TopicPartitionList] list of topics and partitions for which
153
157
  # we want to get the lag on the defined CG
154
- # @return [Hash<String, Hash>] hash with topics and their partitions lags
158
+ # @return [Hash{String => Hash}] hash with topics and their partitions lags
155
159
  def lag(tpl)
156
- l_config = @config.committed
160
+ l_config = proxy_config.committed
157
161
 
158
162
  with_broker_errors_retry(
159
163
  # required to be in seconds, not ms
@@ -168,7 +172,7 @@ module Karafka
168
172
  # get info on all topics
169
173
  # @return [Rdkafka::Metadata] rdkafka metadata object with the requested details
170
174
  def metadata(topic_name = nil)
171
- m_config = @config.metadata
175
+ m_config = proxy_config.metadata
172
176
 
173
177
  with_broker_errors_retry(
174
178
  # required to be in seconds, not ms
@@ -43,7 +43,7 @@ module Karafka
43
43
  @buffer = buffer
44
44
 
45
45
  # Connects itself to the instrumentation pipeline so rebalances can be tracked
46
- ::Karafka.monitor.subscribe(self)
46
+ Karafka.monitor.subscribe(self)
47
47
  end
48
48
 
49
49
  # Resets the rebalance manager state
@@ -46,6 +46,7 @@ module Karafka
46
46
  RUBY
47
47
  end
48
48
 
49
+ # Initializes the connection status and sets it to pending
49
50
  def initialize
50
51
  @mutex = Mutex.new
51
52
  pending!
@@ -42,7 +42,7 @@ module Karafka
42
42
  # @param string [String]
43
43
  # @return [::Gem::Version]
44
44
  def version(string)
45
- ::Gem::Version.new(string)
45
+ Gem::Version.new(string)
46
46
  end
47
47
  end
48
48
  end
@@ -3,7 +3,7 @@
3
3
  module Karafka
4
4
  module Contracts
5
5
  # Base contract for all Karafka contracts
6
- class Base < ::Karafka::Core::Contractable::Contract
6
+ class Base < Karafka::Core::Contractable::Contract
7
7
  # @param data [Hash] data for validation
8
8
  # @param scope [Array<String>] nested scope if in use
9
9
  # @return [Boolean] true if all good
@@ -9,7 +9,7 @@ module Karafka
9
9
  # @return [Hash] hash with deserialized JSON data
10
10
  def call(message)
11
11
  # nil payload can be present for example for tombstone messages
12
- message.raw_payload.nil? ? nil : ::JSON.parse(message.raw_payload)
12
+ message.raw_payload.nil? ? nil : JSON.parse(message.raw_payload)
13
13
  end
14
14
  end
15
15
  end
data/lib/karafka/env.rb CHANGED
@@ -21,8 +21,7 @@ module Karafka
21
21
  super('')
22
22
 
23
23
  LOOKUP_ENV_KEYS
24
- .map { |key| ENV[key] }
25
- .compact
24
+ .filter_map { |key| ENV.fetch(key, nil) }
26
25
  .first
27
26
  .then { |env| env || DEFAULT_ENV }
28
27
  .then { |env| replace(env) }
@@ -20,7 +20,7 @@ module Karafka
20
20
  #
21
21
  # @param base [Class] class we're including this module in
22
22
  def included(base)
23
- base.extend ::Forwardable
23
+ base.extend Forwardable
24
24
 
25
25
  base.def_delegators :@thread, :join, :terminate, :name
26
26
  end
@@ -5,7 +5,7 @@ module Karafka
5
5
  # Module allowing for configuration injections. By default injects whole app config
6
6
  # Allows for granular config injection
7
7
  class ConfigImporter < Module
8
- # @param attributes [Hash<Symbol, Array<Symbol>>] map defining what we want to inject.
8
+ # @param attributes [Hash{Symbol => Array<Symbol>}] map defining what we want to inject.
9
9
  # The key is the name under which attribute will be visible and the value is the full
10
10
  # path to the attribute
11
11
  def initialize(attributes = { config: %i[itself] })
@@ -20,7 +20,7 @@ module Karafka
20
20
  @attributes.each do |name, path|
21
21
  model.class_eval <<~RUBY, __FILE__, __LINE__ + 1
22
22
  def #{name}
23
- @#{name} ||= ::Karafka::App.config.#{path.join('.')}
23
+ @#{name} ||= Karafka::App.config.#{path.join('.')}
24
24
  end
25
25
  RUBY
26
26
  end
@@ -33,7 +33,7 @@ module Karafka
33
33
  @attributes.each do |name, path|
34
34
  model.class_eval <<~RUBY, __FILE__, __LINE__ + 1
35
35
  def self.#{name}
36
- @#{name} ||= ::Karafka::App.config.#{path.join('.')}
36
+ @#{name} ||= Karafka::App.config.#{path.join('.')}
37
37
  end
38
38
  RUBY
39
39
  end
@@ -11,11 +11,14 @@ module Karafka
11
11
  # or other places but would only slow things down if would run with each tick.
12
12
  class IntervalRunner
13
13
  include Karafka::Core::Helpers::Time
14
+ include Helpers::ConfigImporter.new(
15
+ tick_interval: %i[internal tick_interval]
16
+ )
14
17
 
15
18
  # @param interval [Integer] interval in ms for running the provided code. Defaults to the
16
19
  # `internal.tick_interval` value
17
20
  # @param block [Proc] block of code we want to run once in a while
18
- def initialize(interval: ::Karafka::App.config.internal.tick_interval, &block)
21
+ def initialize(interval: tick_interval, &block)
19
22
  @block = block
20
23
  @interval = interval
21
24
  @last_called_at = monotonic_now - @interval
@@ -25,6 +25,9 @@ module Karafka
25
25
  self
26
26
  end
27
27
 
28
+ # Creates an alias `to` for the `new` method to enable nice chaining API
29
+ # @return [MultiDelegator] new instance of the multi delegator
30
+ # @see #delegate
28
31
  alias to new
29
32
  end
30
33
  end
@@ -14,6 +14,7 @@ module Karafka
14
14
  class AssignmentsTracker
15
15
  include Singleton
16
16
 
17
+ # Initializes the assignments tracker with empty assignments
17
18
  def initialize
18
19
  @mutex = Mutex.new
19
20
  @assignments = Hash.new { |hash, key| hash[key] = [] }
@@ -21,7 +22,7 @@ module Karafka
21
22
 
22
23
  # Returns all the active/current assignments of this given process
23
24
  #
24
- # @return [Hash<Karafka::Routing::Topic, Array<Integer>>]
25
+ # @return [Hash{Karafka::Routing::Topic => Array<Integer>}]
25
26
  #
26
27
  # @note Keep in mind, that those assignments can change any time, especially when working
27
28
  # with multiple consumer groups or subscription groups.
@@ -79,6 +80,23 @@ module Karafka
79
80
  end
80
81
  end
81
82
 
83
+ # Handles events_poll notification to detect assignment loss
84
+ # This is called regularly (every tick_interval) so we check if assignment was lost
85
+ #
86
+ # @param event [Karafka::Core::Monitoring::Event]
87
+ # @note We can run the `#assignment_lost?` on each events poll because they happen once every
88
+ # 5 seconds during processing plus prior to each messages poll. It takes
89
+ # 0.6 microseconds per call.
90
+ def on_client_events_poll(event)
91
+ client = event[:caller]
92
+
93
+ # Only clear assignments if they were actually lost
94
+ return unless client.assignment_lost?
95
+
96
+ # Cleaning happens the same way as with the consumer reset
97
+ on_client_reset(event)
98
+ end
99
+
82
100
  # Removes partitions from the current assignments hash
83
101
  #
84
102
  # @param event [Karafka::Core::Monitoring::Event]
@@ -10,8 +10,8 @@ module Karafka
10
10
  monitor: %i[monitor]
11
11
  )
12
12
 
13
- # @param subscription_group_id [String] id of the current subscription group instance
14
- # @param consumer_group_id [String] id of the current consumer group
13
+ # @param subscription_group_id [String]
14
+ # @param consumer_group_id [String]
15
15
  # @param client_name [String] rdkafka client name
16
16
  def initialize(subscription_group_id, consumer_group_id, client_name)
17
17
  @subscription_group_id = subscription_group_id
@@ -11,14 +11,14 @@ module Karafka
11
11
  monitor: %i[monitor]
12
12
  )
13
13
 
14
- # @param subscription_group_id [String] id of the current subscription group
15
- # @param consumer_group_id [String] id of the current consumer group
14
+ # @param subscription_group_id [String]
15
+ # @param consumer_group_id [String]
16
16
  # @param client_name [String] rdkafka client name
17
17
  def initialize(subscription_group_id, consumer_group_id, client_name)
18
18
  @subscription_group_id = subscription_group_id
19
19
  @consumer_group_id = consumer_group_id
20
20
  @client_name = client_name
21
- @statistics_decorator = ::Karafka::Core::Monitoring::StatisticsDecorator.new
21
+ @statistics_decorator = Karafka::Core::Monitoring::StatisticsDecorator.new
22
22
  end
23
23
 
24
24
  # Emits decorated statistics to the monitor
@@ -3,15 +3,15 @@
3
3
  module Karafka
4
4
  module Instrumentation
5
5
  # Default logger for Event Delegator
6
- # @note It uses ::Logger features - providing basic logging
6
+ # @note It uses Logger features - providing basic logging
7
7
  class Logger < ::Logger
8
8
  # Map containing information about log level for given environment
9
9
  ENV_MAP = {
10
- 'production' => ::Logger::ERROR,
11
- 'test' => ::Logger::ERROR,
12
- 'development' => ::Logger::INFO,
13
- 'debug' => ::Logger::DEBUG,
14
- 'default' => ::Logger::INFO
10
+ 'production' => Logger::ERROR,
11
+ 'test' => Logger::ERROR,
12
+ 'development' => Logger::INFO,
13
+ 'debug' => Logger::DEBUG,
14
+ 'default' => Logger::INFO
15
15
  }.freeze
16
16
 
17
17
  private_constant :ENV_MAP
@@ -5,17 +5,17 @@ module Karafka
5
5
  # Karafka instrumentation monitor that we use to publish events
6
6
  # By default uses our internal notifications bus but can be used with
7
7
  # `ActiveSupport::Notifications` as well
8
- class Monitor < ::Karafka::Core::Monitoring::Monitor
8
+ class Monitor < Karafka::Core::Monitoring::Monitor
9
9
  attr_reader :notifications_bus
10
10
 
11
11
  # @param notifications_bus [Object] either our internal notifications bus or
12
12
  # `ActiveSupport::Notifications`
13
13
  # @param namespace [String, nil] namespace for events or nil if no namespace
14
14
  def initialize(
15
- notifications_bus = ::Karafka::Instrumentation::Notifications.new,
15
+ notifications_bus = Karafka::Instrumentation::Notifications.new,
16
16
  namespace = nil
17
17
  )
18
- super(notifications_bus, namespace)
18
+ super
19
19
  end
20
20
  end
21
21
  end
@@ -34,6 +34,7 @@ module Karafka
34
34
  client.pause
35
35
  client.resume
36
36
  client.reset
37
+ client.events_poll
37
38
 
38
39
  connection.listener.before_fetch_loop
39
40
  connection.listener.fetch_loop
@@ -9,7 +9,7 @@ module Karafka
9
9
  module Appsignal
10
10
  # Base for all the instrumentation listeners
11
11
  class Base
12
- include ::Karafka::Core::Configurable
12
+ include Karafka::Core::Configurable
13
13
  extend Forwardable
14
14
 
15
15
  # @param block [Proc] configuration block
@@ -18,10 +18,9 @@ module Karafka
18
18
  setup(&block) if block
19
19
  end
20
20
 
21
- # @param block [Proc] configuration block
22
21
  # @note We define this alias to be consistent with `Karafka#setup`
23
- def setup(&block)
24
- configure(&block)
22
+ def setup(&)
23
+ configure(&)
25
24
  end
26
25
  end
27
26
  end
@@ -9,7 +9,7 @@ module Karafka
9
9
  # A karafka's logger listener for Datadog
10
10
  # It depends on the 'ddtrace' gem
11
11
  class LoggerListener
12
- include ::Karafka::Core::Configurable
12
+ include Karafka::Core::Configurable
13
13
  extend Forwardable
14
14
 
15
15
  def_delegators :config, :client, :service_name
@@ -38,10 +38,9 @@ module Karafka
38
38
  @job_types_cache = {}
39
39
  end
40
40
 
41
- # @param block [Proc] configuration block
42
41
  # @note We define this alias to be consistent with `WaterDrop#setup`
43
- def setup(&block)
44
- configure(&block)
42
+ def setup(&)
43
+ configure(&)
45
44
  end
46
45
 
47
46
  # Prints info about the fact that a given job has started