karafka 2.5.1 → 2.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (238)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml +21 -29
  3. data/.github/workflows/ci_macos_arm64.yml +1 -1
  4. data/.github/workflows/push.yml +2 -2
  5. data/.github/workflows/trigger-wiki-refresh.yml +1 -1
  6. data/.ruby-version +1 -1
  7. data/.yard-lint.yml +174 -0
  8. data/CHANGELOG.md +20 -4
  9. data/Gemfile +1 -2
  10. data/Gemfile.lock +45 -41
  11. data/bin/integrations +2 -1
  12. data/bin/rspecs +4 -0
  13. data/config/locales/errors.yml +6 -4
  14. data/config/locales/pro_errors.yml +5 -4
  15. data/docker-compose.yml +1 -1
  16. data/examples/payloads/json/sample_set_02/download.json +191 -0
  17. data/examples/payloads/json/sample_set_03/event_type_1.json +18 -0
  18. data/examples/payloads/json/sample_set_03/event_type_2.json +263 -0
  19. data/examples/payloads/json/sample_set_03/event_type_3.json +41 -0
  20. data/karafka.gemspec +3 -3
  21. data/lib/active_job/queue_adapters/karafka_adapter.rb +3 -3
  22. data/lib/karafka/active_job/consumer.rb +7 -3
  23. data/lib/karafka/active_job/current_attributes/job_wrapper.rb +45 -0
  24. data/lib/karafka/active_job/current_attributes/loading.rb +1 -1
  25. data/lib/karafka/active_job/current_attributes/persistence.rb +19 -7
  26. data/lib/karafka/active_job/current_attributes.rb +3 -2
  27. data/lib/karafka/active_job/deserializer.rb +61 -0
  28. data/lib/karafka/active_job/dispatcher.rb +34 -14
  29. data/lib/karafka/active_job/job_options_contract.rb +2 -4
  30. data/lib/karafka/admin/acl.rb +8 -4
  31. data/lib/karafka/admin/configs/config.rb +6 -4
  32. data/lib/karafka/admin/configs/resource.rb +7 -1
  33. data/lib/karafka/admin/consumer_groups.rb +80 -12
  34. data/lib/karafka/admin/topics.rb +43 -9
  35. data/lib/karafka/admin.rb +23 -14
  36. data/lib/karafka/app.rb +3 -3
  37. data/lib/karafka/base_consumer.rb +6 -6
  38. data/lib/karafka/cli/base.rb +2 -2
  39. data/lib/karafka/cli/console.rb +1 -1
  40. data/lib/karafka/cli/contracts/server.rb +3 -5
  41. data/lib/karafka/cli/help.rb +1 -1
  42. data/lib/karafka/cli/install.rb +3 -2
  43. data/lib/karafka/cli/server.rb +1 -1
  44. data/lib/karafka/cli/swarm.rb +1 -1
  45. data/lib/karafka/cli/topics/align.rb +1 -1
  46. data/lib/karafka/cli/topics/repartition.rb +2 -2
  47. data/lib/karafka/connection/client.rb +30 -19
  48. data/lib/karafka/connection/listeners_batch.rb +2 -3
  49. data/lib/karafka/connection/manager.rb +1 -0
  50. data/lib/karafka/connection/proxy.rb +12 -8
  51. data/lib/karafka/connection/rebalance_manager.rb +1 -1
  52. data/lib/karafka/connection/status.rb +1 -0
  53. data/lib/karafka/constraints.rb +1 -1
  54. data/lib/karafka/contracts/base.rb +1 -1
  55. data/lib/karafka/deserializers/payload.rb +1 -1
  56. data/lib/karafka/env.rb +1 -2
  57. data/lib/karafka/helpers/async.rb +1 -1
  58. data/lib/karafka/helpers/config_importer.rb +3 -3
  59. data/lib/karafka/helpers/interval_runner.rb +4 -1
  60. data/lib/karafka/helpers/multi_delegator.rb +3 -0
  61. data/lib/karafka/instrumentation/assignments_tracker.rb +19 -1
  62. data/lib/karafka/instrumentation/callbacks/error.rb +2 -2
  63. data/lib/karafka/instrumentation/callbacks/statistics.rb +3 -3
  64. data/lib/karafka/instrumentation/logger.rb +6 -6
  65. data/lib/karafka/instrumentation/monitor.rb +3 -3
  66. data/lib/karafka/instrumentation/notifications.rb +1 -0
  67. data/lib/karafka/instrumentation/vendors/appsignal/base.rb +3 -4
  68. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +3 -4
  69. data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +10 -11
  70. data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb +1 -1
  71. data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +5 -18
  72. data/lib/karafka/messages/builders/batch_metadata.rb +2 -2
  73. data/lib/karafka/messages/builders/message.rb +1 -1
  74. data/lib/karafka/messages/messages.rb +2 -3
  75. data/lib/karafka/patches/rdkafka/bindings.rb +6 -6
  76. data/lib/karafka/patches/rdkafka/opaque.rb +1 -1
  77. data/lib/karafka/pro/active_job/consumer.rb +2 -2
  78. data/lib/karafka/pro/active_job/dispatcher.rb +10 -6
  79. data/lib/karafka/pro/active_job/job_options_contract.rb +2 -4
  80. data/lib/karafka/pro/cleaner/messages/messages.rb +2 -3
  81. data/lib/karafka/pro/cleaner.rb +3 -3
  82. data/lib/karafka/pro/cli/contracts/server.rb +3 -5
  83. data/lib/karafka/pro/cli/parallel_segments/base.rb +5 -5
  84. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +3 -3
  85. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +3 -3
  86. data/lib/karafka/pro/cli/parallel_segments.rb +1 -1
  87. data/lib/karafka/pro/connection/manager.rb +3 -4
  88. data/lib/karafka/pro/connection/multiplexing/listener.rb +1 -0
  89. data/lib/karafka/pro/contracts/base.rb +1 -1
  90. data/lib/karafka/pro/encryption/cipher.rb +3 -2
  91. data/lib/karafka/pro/encryption/contracts/config.rb +5 -7
  92. data/lib/karafka/pro/encryption/messages/parser.rb +4 -4
  93. data/lib/karafka/pro/encryption/setup/config.rb +1 -1
  94. data/lib/karafka/pro/instrumentation/performance_tracker.rb +3 -3
  95. data/lib/karafka/pro/iterator/expander.rb +1 -1
  96. data/lib/karafka/pro/iterator/tpl_builder.rb +2 -2
  97. data/lib/karafka/pro/iterator.rb +3 -3
  98. data/lib/karafka/pro/loader.rb +1 -1
  99. data/lib/karafka/pro/processing/coordinator.rb +1 -1
  100. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +2 -3
  101. data/lib/karafka/pro/processing/coordinators/filters_applier.rb +3 -3
  102. data/lib/karafka/pro/processing/filters/base.rb +1 -0
  103. data/lib/karafka/pro/processing/filters/delayer.rb +1 -1
  104. data/lib/karafka/pro/processing/filters/expirer.rb +1 -1
  105. data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +1 -1
  106. data/lib/karafka/pro/processing/filters/throttler.rb +1 -1
  107. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +1 -1
  108. data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +1 -1
  109. data/lib/karafka/pro/processing/jobs/periodic.rb +1 -1
  110. data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +1 -1
  111. data/lib/karafka/pro/processing/jobs_builder.rb +1 -1
  112. data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +1 -0
  113. data/lib/karafka/pro/processing/partitioner.rb +1 -1
  114. data/lib/karafka/pro/processing/schedulers/default.rb +2 -4
  115. data/lib/karafka/pro/processing/strategies/base.rb +1 -1
  116. data/lib/karafka/pro/processing/strategies/default.rb +2 -2
  117. data/lib/karafka/pro/processing/strategies/lrj/default.rb +2 -4
  118. data/lib/karafka/pro/processing/strategies/vp/default.rb +2 -4
  119. data/lib/karafka/pro/processing/strategy_selector.rb +1 -0
  120. data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +2 -3
  121. data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +4 -2
  122. data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +4 -2
  123. data/lib/karafka/pro/recurring_tasks/consumer.rb +3 -2
  124. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +4 -6
  125. data/lib/karafka/pro/recurring_tasks/contracts/task.rb +3 -5
  126. data/lib/karafka/pro/recurring_tasks/deserializer.rb +1 -1
  127. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +7 -6
  128. data/lib/karafka/pro/recurring_tasks/executor.rb +2 -1
  129. data/lib/karafka/pro/recurring_tasks/schedule.rb +9 -8
  130. data/lib/karafka/pro/recurring_tasks/serializer.rb +6 -5
  131. data/lib/karafka/pro/recurring_tasks/setup/config.rb +2 -2
  132. data/lib/karafka/pro/recurring_tasks/task.rb +1 -1
  133. data/lib/karafka/pro/recurring_tasks.rb +8 -5
  134. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +2 -4
  135. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +2 -4
  136. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +3 -0
  137. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +2 -4
  138. data/lib/karafka/pro/routing/features/delaying/topic.rb +2 -4
  139. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +4 -8
  140. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +5 -7
  141. data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +7 -6
  142. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +2 -2
  143. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +2 -4
  144. data/lib/karafka/pro/routing/features/expiring/topic.rb +2 -4
  145. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +2 -4
  146. data/lib/karafka/pro/routing/features/filtering/topic.rb +2 -3
  147. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +2 -4
  148. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +2 -4
  149. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +3 -5
  150. data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +1 -1
  151. data/lib/karafka/pro/routing/features/multiplexing.rb +5 -5
  152. data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +3 -3
  153. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +2 -4
  154. data/lib/karafka/pro/routing/features/offset_metadata.rb +4 -4
  155. data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +1 -1
  156. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +2 -4
  157. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +3 -5
  158. data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +2 -4
  159. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +2 -4
  160. data/lib/karafka/pro/routing/features/patterns/patterns.rb +1 -1
  161. data/lib/karafka/pro/routing/features/pausing/config.rb +26 -0
  162. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +17 -11
  163. data/lib/karafka/pro/routing/features/pausing/topic.rb +69 -8
  164. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +2 -4
  165. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +1 -1
  166. data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +1 -1
  167. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +2 -4
  168. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +2 -4
  169. data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +2 -4
  170. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +6 -8
  171. data/lib/karafka/pro/routing/features/swarm.rb +1 -1
  172. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +2 -4
  173. data/lib/karafka/pro/routing/features/throttling/topic.rb +3 -1
  174. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +2 -4
  175. data/lib/karafka/pro/scheduled_messages/consumer.rb +1 -1
  176. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +4 -6
  177. data/lib/karafka/pro/scheduled_messages/contracts/message.rb +3 -5
  178. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +3 -2
  179. data/lib/karafka/pro/scheduled_messages/day.rb +1 -0
  180. data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +1 -1
  181. data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +1 -1
  182. data/lib/karafka/pro/scheduled_messages/max_epoch.rb +1 -0
  183. data/lib/karafka/pro/scheduled_messages/proxy.rb +1 -1
  184. data/lib/karafka/pro/scheduled_messages/serializer.rb +3 -3
  185. data/lib/karafka/pro/scheduled_messages/setup/config.rb +2 -2
  186. data/lib/karafka/pro/scheduled_messages/state.rb +1 -0
  187. data/lib/karafka/pro/scheduled_messages/tracker.rb +1 -0
  188. data/lib/karafka/pro/scheduled_messages.rb +4 -6
  189. data/lib/karafka/pro/swarm/liveness_listener.rb +2 -2
  190. data/lib/karafka/process.rb +4 -4
  191. data/lib/karafka/processing/coordinator.rb +2 -4
  192. data/lib/karafka/processing/coordinators_buffer.rb +2 -3
  193. data/lib/karafka/processing/executor.rb +3 -4
  194. data/lib/karafka/processing/inline_insights/tracker.rb +1 -0
  195. data/lib/karafka/processing/jobs/base.rb +2 -3
  196. data/lib/karafka/processing/jobs_queue.rb +1 -1
  197. data/lib/karafka/processing/result.rb +1 -0
  198. data/lib/karafka/processing/strategy_selector.rb +1 -0
  199. data/lib/karafka/processing/workers_batch.rb +2 -3
  200. data/lib/karafka/railtie.rb +1 -0
  201. data/lib/karafka/routing/activity_manager.rb +3 -2
  202. data/lib/karafka/routing/builder.rb +8 -8
  203. data/lib/karafka/routing/consumer_group.rb +4 -6
  204. data/lib/karafka/routing/contracts/consumer_group.rb +6 -7
  205. data/lib/karafka/routing/contracts/routing.rb +2 -4
  206. data/lib/karafka/routing/contracts/topic.rb +7 -6
  207. data/lib/karafka/routing/features/active_job/contracts/topic.rb +2 -4
  208. data/lib/karafka/routing/features/active_job/topic.rb +6 -0
  209. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +3 -5
  210. data/lib/karafka/routing/features/declaratives/contracts/topic.rb +3 -5
  211. data/lib/karafka/routing/features/declaratives/topic.rb +5 -2
  212. data/lib/karafka/routing/features/deserializers/contracts/topic.rb +2 -4
  213. data/lib/karafka/routing/features/deserializers/topic.rb +3 -3
  214. data/lib/karafka/routing/features/eofed/contracts/topic.rb +2 -4
  215. data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +2 -4
  216. data/lib/karafka/routing/features/inline_insights.rb +5 -5
  217. data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +2 -4
  218. data/lib/karafka/routing/router.rb +1 -1
  219. data/lib/karafka/routing/subscription_group.rb +1 -1
  220. data/lib/karafka/routing/subscription_groups_builder.rb +1 -0
  221. data/lib/karafka/routing/topic.rb +3 -3
  222. data/lib/karafka/routing/topics.rb +4 -9
  223. data/lib/karafka/server.rb +2 -2
  224. data/lib/karafka/setup/attributes_map.rb +4 -2
  225. data/lib/karafka/setup/config.rb +85 -17
  226. data/lib/karafka/setup/config_proxy.rb +209 -0
  227. data/lib/karafka/setup/contracts/config.rb +13 -11
  228. data/lib/karafka/setup/defaults_injector.rb +3 -2
  229. data/lib/karafka/setup/dsl.rb +2 -3
  230. data/lib/karafka/swarm/liveness_listener.rb +3 -3
  231. data/lib/karafka/swarm/manager.rb +7 -6
  232. data/lib/karafka/swarm/node.rb +1 -1
  233. data/lib/karafka/swarm/supervisor.rb +2 -1
  234. data/lib/karafka/time_trackers/base.rb +1 -1
  235. data/lib/karafka/version.rb +1 -1
  236. data/lib/karafka.rb +4 -4
  237. metadata +14 -6
  238. data/.diffend.yml +0 -3
@@ -8,7 +8,7 @@ module Karafka
  module Processing
  # Pro coordinator that provides extra orchestration methods useful for parallel processing
  # within the same partition
- class Coordinator < ::Karafka::Processing::Coordinator
+ class Coordinator < Karafka::Processing::Coordinator
  extend Forwardable
  include Helpers::ConfigImporter.new(
  errors_tracker_class: %i[internal processing errors_tracker_class]
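Note: a recurring theme in this release is dropping the explicit top-level `::` qualifier inside the `Karafka` namespace wherever constant lookup is unambiguous. A minimal sketch of why both spellings resolve identically (hypothetical modules mirroring the layout above, not the real gem):

```ruby
# Ruby resolves a bare constant through the lexical scope chain first,
# so inside Karafka::Pro::Processing both `Karafka::...` and
# `::Karafka::...` find the same class; the leading `::` only matters
# when a nested constant shadows a top-level one.
module Karafka
  module Processing
    class Coordinator; end
  end

  module Pro
    module Processing
      class Coordinator < Karafka::Processing::Coordinator; end
    end
  end
end

puts Karafka::Pro::Processing::Coordinator.superclass
# => Karafka::Processing::Coordinator
```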
@@ -79,9 +79,8 @@ module Karafka
  end

  # Iterates over errors
- # @param block [Proc] code we want to run on each error
- def each(&block)
- @errors.each(&block)
+ def each(&)
+ @errors.each(&)
  end

  # @return [Array<StandardError>] array with all the errors that occurred
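Note: `def each(&block)` becomes `def each(&)`, Ruby 3.1's anonymous block parameter. A self-contained sketch of the pattern (a stand-in class, not the gem's real errors tracker):

```ruby
# Requires Ruby 3.1+: a bare `&` declares an anonymous block parameter
# that can only be forwarded onward, never called or referenced by name.
class ErrorsTracker
  include Enumerable

  def initialize
    @errors = []
  end

  def <<(error)
    @errors << error
  end

  # Behaves exactly like `def each(&block); @errors.each(&block); end`
  def each(&)
    @errors.each(&)
  end
end

tracker = ErrorsTracker.new
tracker << StandardError.new('boom')
tracker.each { |error| puts error.message } # => boom
```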
@@ -68,7 +68,7 @@ module Karafka
  # @return [Integer] minimum timeout we need to pause. This is the minimum for all the
  # filters to satisfy all of them.
  def timeout
- applied.map(&:timeout).compact.min || 0
+ applied.filter_map(&:timeout).min || 0
  end

  # The first message we do need to get next time we poll. We use the minimum not to jump
@@ -78,7 +78,7 @@ module Karafka
  def cursor
  return nil unless active?

- applied.map(&:cursor).compact.min_by(&:offset)
+ applied.filter_map(&:cursor).min_by(&:offset)
  end

  # @return [Boolean] did any of the filters requested offset storage during filter
@@ -106,7 +106,7 @@ module Karafka
  def marking_cursor
  return nil unless active?

- applied.map(&:marking_cursor).compact.min_by(&:offset)
+ applied.filter_map(&:marking_cursor).min_by(&:offset)
  end

  private
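Note: `map(&:x).compact` collapses into `filter_map(&:x)` (Ruby 2.7+), which maps and drops `nil` results in a single pass. One nuance: `filter_map` also drops `false` results, which is safe here because timeouts and cursors are never `false`. Illustration with a stand-in filter struct:

```ruby
# Stand-in struct; the real filters respond to #timeout, #cursor, etc.
Filter = Struct.new(:timeout)

applied = [Filter.new(5_000), Filter.new(nil), Filter.new(1_000)]

# Two passes (map, then compact) versus one:
applied.map(&:timeout).compact.min # => 1000
applied.filter_map(&:timeout).min  # => 1000

# Caveat: filter_map also drops `false`, compact does not
[true, false, nil].compact              # => [true, false]
[true, false, nil].filter_map { |v| v } # => [true]
```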
@@ -20,6 +20,7 @@ module Karafka

  include Karafka::Core::Helpers::Time

+ # Initializes the filter as not yet applied
  def initialize
  @applied = false
  @cursor = nil
@@ -25,7 +25,7 @@ module Karafka

  # Time on message is in seconds with ms precision, so we need to convert the ttl that
  # is in ms to this format
- border = ::Time.now.utc - @delay / 1_000.0
+ border = Time.now.utc - (@delay / 1_000.0)

  messages.delete_if do |message|
  too_young = message.timestamp > border
@@ -26,7 +26,7 @@ module Karafka

  # Time on message is in seconds with ms precision, so we need to convert the ttl that
  # is in ms to this format
- border = ::Time.now.utc - @ttl / 1_000.to_f
+ border = Time.now.utc - (@ttl / 1_000.to_f)

  messages.delete_if do |message|
  too_old = message.timestamp < border
@@ -41,7 +41,7 @@ module Karafka
  # This can happen when we chain filters
  return unless @cursor

- insights = ::Karafka::Processing::InlineInsights::Tracker.find(
+ insights = Karafka::Processing::InlineInsights::Tracker.find(
  @topic,
  @partition
  )
@@ -67,7 +67,7 @@ module Karafka
  # so we are no longer throttled and so we can process at least one message
  def timeout
  timeout = @interval - (monotonic_now - @time)
- timeout <= 0 ? 0 : timeout
+ [timeout, 0].max
  end
  end
  end
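Note: the ternary floor-clamp becomes `[timeout, 0].max`. Equivalent spellings:

```ruby
timeout = -12.5

timeout <= 0 ? 0 : timeout        # => 0 (removed form)
[timeout, 0].max                  # => 0 (new form)
timeout.clamp(0, Float::INFINITY) # => 0 (two-sided general form)
```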
@@ -17,7 +17,7 @@ module Karafka
  #
  # @note It needs to be working with a proper consumer that will handle the partition
  # management. This layer of the framework knows nothing about Kafka messages consumption.
- class ConsumeNonBlocking < ::Karafka::Processing::Jobs::Consume
+ class ConsumeNonBlocking < Karafka::Processing::Jobs::Consume
  self.action = :consume

  # Makes this job non-blocking from the start
@@ -11,7 +11,7 @@ module Karafka
  # We use this version for LRJ topics for cases where saturated resources would not allow
  # to run this job for extended period of time. Under such scenarios, if we would not use
  # a non-blocking one, we would reach max.poll.interval.ms.
- class EofedNonBlocking < ::Karafka::Processing::Jobs::Eofed
+ class EofedNonBlocking < Karafka::Processing::Jobs::Eofed
  self.action = :eofed

  # @param args [Array] any arguments accepted by `::Karafka::Processing::Jobs::Eofed`
@@ -9,7 +9,7 @@ module Karafka
  module Jobs
  # Job that represents a "ticking" work. Work that we run periodically for the Periodics
  # enabled topics.
- class Periodic < ::Karafka::Processing::Jobs::Base
+ class Periodic < Karafka::Processing::Jobs::Base
  self.action = :tick

  # @param executor [Karafka::Pro::Processing::Executor] pro executor that is suppose to
@@ -15,7 +15,7 @@ module Karafka
  # It can be useful when having long lasting jobs that would exceed `max.poll.interval`
  # in scenarios where there are more jobs than threads, without this being async we
  # would potentially stop polling
- class RevokedNonBlocking < ::Karafka::Processing::Jobs::Revoked
+ class RevokedNonBlocking < Karafka::Processing::Jobs::Revoked
  self.action = :revoked

  # Makes this job non-blocking from the start
@@ -7,7 +7,7 @@ module Karafka
  module Pro
  module Processing
  # Pro jobs builder that supports lrj
- class JobsBuilder < ::Karafka::Processing::JobsBuilder
+ class JobsBuilder < Karafka::Processing::JobsBuilder
  # @param executor [Karafka::Pro::Processing::Executor]
  def idle(executor)
  Karafka::Processing::Jobs::Idle.new(executor)
@@ -28,6 +28,7 @@ module Karafka
  def_delegators :instance, :register, :clear, :find
  end

+ # Initializes the fetcher with empty caches
  def initialize
  @mutexes = {}
  @clients = {}
@@ -7,7 +7,7 @@ module Karafka
  module Pro
  module Processing
  # Pro partitioner that can distribute work based on the virtual partitioner settings
- class Partitioner < ::Karafka::Processing::Partitioner
+ class Partitioner < Karafka::Processing::Partitioner
  # @param topic [String] topic name
  # @param messages [Array<Karafka::Messages::Message>] karafka messages
  # @param coordinator [Karafka::Pro::Processing::Coordinator] processing coordinator that
@@ -29,10 +29,8 @@ module Karafka
  def on_schedule_consumption(jobs_array)
  perf_tracker = Instrumentation::PerformanceTracker.instance

- ordered = []
-
- jobs_array.each do |job|
- ordered << [
+ ordered = jobs_array.map do |job|
+ [
  job,
  processing_cost(perf_tracker, job)
  ]
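Note: the accumulator loop is rewritten as a single `map` expression. The same refactor in isolation (stand-in values, not the scheduler's real job objects):

```ruby
jobs_array = %w[job_a job_xyz]
processing_cost = ->(job) { job.length }

# Before: build via side effects on an accumulator
ordered = []
jobs_array.each do |job|
  ordered << [job, processing_cost.call(job)]
end

# After: one expression, no mutable temporary
ordered = jobs_array.map do |job|
  [job, processing_cost.call(job)]
end
# => [["job_a", 5], ["job_xyz", 7]]
```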
@@ -10,7 +10,7 @@ module Karafka
  module Strategies
  # Base strategy for Pro
  module Base
- include ::Karafka::Processing::Strategies::Base
+ include Karafka::Processing::Strategies::Base
  end
  end
  end
@@ -14,7 +14,7 @@ module Karafka
  # Nothing. Just standard, automatic flow
  module Default
  include Base
- include ::Karafka::Processing::Strategies::Default
+ include Karafka::Processing::Strategies::Default

  # Apply strategy for a non-feature based flow
  FEATURES = %i[].freeze
@@ -263,7 +263,7 @@ module Karafka
  # the post-user code execution marking with transactional producer to result in a
  # boolean state of marking for further framework flow. This is a normalization to make it
  # behave the same way as it would behave with a non-transactional one
- rescue ::Rdkafka::RdkafkaError, Errors::AssignmentLostError
+ rescue Rdkafka::RdkafkaError, Errors::AssignmentLostError
  false
  ensure
  @_transaction_internal = false
@@ -78,10 +78,8 @@ module Karafka
  # Allows for LRJ to synchronize its work. It may be needed because LRJ can run
  # lifecycle events like revocation while the LRJ work is running and there may be a
  # need for a critical section.
- #
- # @param block [Proc] block we want to run in a mutex to prevent race-conditions
- def synchronize(&block)
- coordinator.shared_mutex.synchronize(&block)
+ def synchronize(&)
+ coordinator.shared_mutex.synchronize(&)
  end
  end
  end
@@ -134,10 +134,8 @@ module Karafka
  # the end users. With LRJ it is needed and provided in the `LRJ::Default` strategy,
  # because lifecycle events on revocation can run in parallel to the LRJ job as it is
  # non-blocking.
- #
- # @param block [Proc] block we want to run in a mutex to prevent race-conditions
- def synchronize(&block)
- coordinator.shared_mutex.synchronize(&block)
+ def synchronize(&)
+ coordinator.shared_mutex.synchronize(&)
  end

  private
@@ -23,6 +23,7 @@ module Karafka
  filtering
  ].freeze

+ # Initializes the strategy selector and preloads all strategies
  def initialize
  # Preload the strategies
  # We load them once for performance reasons not to do too many lookups
@@ -16,12 +16,11 @@ module Karafka
  # want to pause
  # @param lock_id [Object] key we want to use if we want to set multiple locks on the same
  # subscription group
- # @param kwargs [Object] Any keyword arguments accepted by the jobs queue lock.
- def pause(subscription_group, lock_id = nil, **kwargs)
+ def pause(subscription_group, lock_id = nil, **)
  jobs_queue.lock_async(
  subscription_group.id,
  lock_id,
- **kwargs
+ **
  )
  end

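Note: `**kwargs` becomes bare `**`, Ruby 3.2's anonymous keyword-rest forwarding. A hypothetical stand-in for the lock call above:

```ruby
# Requires Ruby 3.2+. JobsQueue here is a simplified stand-in.
class JobsQueue
  def lock_async(id, lock_id, timeout: nil)
    puts "locked #{id} (lock: #{lock_id.inspect}, timeout: #{timeout.inspect})"
  end
end

class SubscriptionGroupsCoordinator
  def initialize(jobs_queue)
    @jobs_queue = jobs_queue
  end

  # Same behavior as `def pause(id, lock_id = nil, **kwargs)` forwarding
  # `**kwargs`, without naming the keyword rest
  def pause(id, lock_id = nil, **)
    @jobs_queue.lock_async(id, lock_id, **)
  end
end

SubscriptionGroupsCoordinator.new(JobsQueue.new).pause('sg_0', nil, timeout: 5_000)
# => locked sg_0 (lock: nil, timeout: 5000)
```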
@@ -11,8 +11,10 @@ module Karafka
  # Balanced distributor that groups messages by partition key
  # and processes larger groups first while maintaining message order within groups
  class Balanced < Base
- # @param messages [Array<Karafka::Messages::Message>] messages to distribute
- # @return [Hash<Integer, Array<Karafka::Messages::Message>>] hash with group ids as
+ # Distributes messages to virtual partitions ensuring balanced load across workers
+ # by grouping messages by partition key and assigning larger groups first
+ # @param messages [Array<Karafka::Messages::Message>]
+ # @return [Hash{Integer => Array<Karafka::Messages::Message>}] hash with group ids as
  # keys and message groups as values
  def call(messages)
  # Group messages by partition key
@@ -11,8 +11,10 @@ module Karafka
  # Consistent distributor that ensures messages with the same partition key
  # are always processed in the same virtual partition
  class Consistent < Base
- # @param messages [Array<Karafka::Messages::Message>] messages to distribute
- # @return [Hash<Integer, Array<Karafka::Messages::Message>>] hash with group ids as
+ # Distributes messages ensuring consistent routing where messages with the same
+ # partition key always go to the same virtual partition
+ # @param messages [Array<Karafka::Messages::Message>]
+ # @return [Hash{Integer => Array<Karafka::Messages::Message>}] hash with group ids as
  # keys and message groups as values
  def call(messages)
  messages
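Note: the return types switch from `Hash<Integer, ...>` to `Hash{Integer => ...}`, the hash syntax YARD actually documents (angle brackets are for ordered collections), most likely driven by the `.yard-lint.yml` added in this release. In YARD terms:

```ruby
# Angle brackets document element types of ordered collections;
# curly braces document key => value types of hashes.

# @param messages [Array<Karafka::Messages::Message>]
# @return [Hash{Integer => Array<Karafka::Messages::Message>}]
def call(messages)
  # ...
end
```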
@@ -11,13 +11,14 @@ module Karafka
  # - we only run schedules that are of same or newer version
  # - we always mark as consumed in such a way, that the first message received after
  # assignment (if any) is a state
- class Consumer < ::Karafka::BaseConsumer
+ class Consumer < Karafka::BaseConsumer
  # @param args [Array] all arguments accepted by the consumer
  def initialize(*args)
  super
  @executor = Executor.new
  end

+ # Consumes messages and manages recurring tasks execution
  def consume
  # There is nothing we can do if we operate on a newer schedule. In such cases we should
  # just wait and re-raise error hoping someone will notice or that this will be
@@ -54,7 +55,7 @@ module Karafka
  # that collectively have a different outcome
  @executor.call
  else
- raise ::Karafka::Errors::UnsupportedCaseError, type
+ raise Karafka::Errors::UnsupportedCaseError, type
  end
  end

@@ -9,17 +9,15 @@ module Karafka
  # Recurring Tasks related contracts
  module Contracts
  # Makes sure, all the expected config is defined as it should be
- class Config < ::Karafka::Contracts::Base
+ class Config < Karafka::Contracts::Base
  configure do |config|
- config.error_messages = YAML.safe_load(
- File.read(
- File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
- )
+ config.error_messages = YAML.safe_load_file(
+ File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
  ).fetch('en').fetch('validations').fetch('setup').fetch('config')
  end

  nested(:recurring_tasks) do
- required(:consumer_class) { |val| val < ::Karafka::BaseConsumer }
+ required(:consumer_class) { |val| val < Karafka::BaseConsumer }
  required(:deserializer) { |val| !val.nil? }
  required(:logging) { |val| [true, false].include?(val) }
  # Do not allow to run more often than every 5 seconds
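Note: `YAML.safe_load(File.read(path))` collapses into `YAML.safe_load_file(path)`, available since Psych 3.3 (bundled with Ruby 3.0+) and identical in parsing behavior. A quick self-contained check:

```ruby
require 'tempfile'
require 'yaml'

file = Tempfile.new(['pro_errors', '.yml'])
file.write("en:\n  validations:\n    setup:\n      config: {}\n")
file.flush

# Same safe parsing rules (no arbitrary object deserialization), one call
YAML.safe_load(File.read(file.path)) == YAML.safe_load_file(file.path)
# => true
```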
@@ -9,12 +9,10 @@ module Karafka
  # Recurring Tasks related contracts
  module Contracts
  # Ensures that task details are as expected
- class Task < ::Karafka::Contracts::Base
+ class Task < Karafka::Contracts::Base
  configure do |config|
- config.error_messages = YAML.safe_load(
- File.read(
- File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
- )
+ config.error_messages = YAML.safe_load_file(
+ File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
  ).fetch('en').fetch('validations').fetch('recurring_tasks')
  end

@@ -16,7 +16,7 @@ module Karafka
  # @param message [::Karafka::Messages::Message]
  # @return [Hash] deserialized data
  def call(message)
- ::JSON.parse(
+ JSON.parse(
  Zlib::Inflate.inflate(message.raw_payload),
  symbolize_names: true
  )
@@ -8,6 +8,10 @@ module Karafka
  module RecurringTasks
  # Dispatches appropriate recurring tasks related messages to expected topics
  class Dispatcher
+ extend Helpers::ConfigImporter.new(
+ topics: %i[recurring_tasks topics]
+ )
+
  class << self
  # Snapshots to Kafka current schedule state
  def schedule
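Note: the dispatcher now pulls `topics` through `Helpers::ConfigImporter` instead of a private reader. A rough, hypothetical sketch of how such a mixin can work (the real Karafka implementation may differ):

```ruby
require 'ostruct'

# Hypothetical config tree; karafka resolves against its real app config
AppConfig = OpenStruct.new(
  recurring_tasks: OpenStruct.new(topics: 'karafka_recurring_tasks')
)

# Sketch only: builds a module whose readers dig each configured path
class ConfigImporter < Module
  def initialize(**attributes)
    super()
    attributes.each do |name, path|
      define_method(name) do
        path.reduce(AppConfig) { |node, key| node.public_send(key) }
      end
    end
  end
end

class Dispatcher
  extend ConfigImporter.new(topics: %i[recurring_tasks topics])
end

puts Dispatcher.topics # => karafka_recurring_tasks
```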
@@ -44,13 +48,10 @@ module Karafka
  private

  # @return [::WaterDrop::Producer] web ui producer
+ # @note We do not fetch it via the ConfigImporter not to cache it so we can re-use it
+ # if needed
  def producer
- ::Karafka::App.config.recurring_tasks.producer
- end
-
- # @return [String] consumers commands topic
- def topics
- ::Karafka::App.config.recurring_tasks.topics
+ Karafka::App.config.recurring_tasks.producer
  end

  # @return [Serializer]
@@ -17,6 +17,7 @@ module Karafka
  trigger
  ].freeze

+ # Initializes the executor in replaying mode
  def initialize
  @replaying = true
  @incompatible = false
@@ -131,7 +132,7 @@ module Karafka

  # @return [Karafka::Pro::RecurringTasks::Schedule] current in-memory schedule
  def schedule
- ::Karafka::Pro::RecurringTasks.schedule
+ Karafka::Pro::RecurringTasks.schedule
  end

  # Dispatches the current schedule state to Kafka
@@ -13,7 +13,7 @@ module Karafka
  # @return [String]
  attr_reader :version

- # @return [Hash<String, Task>]
+ # @return [Hash{String => Task}]
  attr_reader :tasks

  # @param version [String] schedule version. In case of usage of versioning it is used to
@@ -32,9 +32,8 @@ module Karafka
  end

  # Iterates over tasks yielding them one after another
- # @param block [Proc] block that will be executed with each task
- def each(&block)
- @tasks.each_value(&block)
+ def each(&)
+ @tasks.each_value(&)
  end

  # @param id [String] id of a particular recurring task
@@ -44,10 +43,12 @@ module Karafka
  end

  # Allows us to have a nice DSL for defining schedules
- # @param args [Array] attributes accepted by the task initializer
- # @param block [Proc] block to execute
- def schedule(**args, &block)
- self << Task.new(**args, &block)
+ # @param args [Hash] attributes accepted by the task initializer
+ # @option args [String] :id unique task identifier
+ # @option args [String] :cron cron expression for task scheduling
+ # @option args [Proc] :previous_time optional lambda returning previous execution time
+ def schedule(**args, &)
+ self << Task.new(**args, &)
  end
  end
  end
@@ -11,7 +11,8 @@ module Karafka
  # Current recurring tasks related schema structure
  SCHEMA_VERSION = '1.0'

- # @param schedule [Karafka::Pro::RecurringTasks::Schedule] schedule to serialize
+ # Serializes and compresses the schedule with all its tasks and their execution state
+ # @param schedule [Karafka::Pro::RecurringTasks::Schedule]
  # @return [String] serialized and compressed current schedule data with its tasks and their
  # current state.
  def schedule(schedule)
@@ -46,7 +47,7 @@ module Karafka
  def command(command_name, task_id)
  data = {
  schema_version: SCHEMA_VERSION,
- schedule_version: ::Karafka::Pro::RecurringTasks.schedule.version,
+ schedule_version: Karafka::Pro::RecurringTasks.schedule.version,
  dispatched_at: Time.now.to_f,
  type: 'command',
  command: {
@@ -69,7 +70,7 @@ module Karafka

  data = {
  schema_version: SCHEMA_VERSION,
- schedule_version: ::Karafka::Pro::RecurringTasks.schedule.version,
+ schedule_version: Karafka::Pro::RecurringTasks.schedule.version,
  dispatched_at: Time.now.to_f,
  type: 'log',
  task: {
@@ -92,9 +93,9 @@ module Karafka
  hash.to_json
  end

- # Compresses the provided data
+ # Compresses the provided data using Zlib deflate algorithm
  #
- # @param data [String] data to compress
+ # @param data [String]
  # @return [String] compressed data
  def compress(data)
  Zlib::Deflate.deflate(data)
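Note: this `Zlib::Deflate.deflate` pairs with the `Zlib::Inflate.inflate` call in the deserializer hunk earlier in this diff. Round trip:

```ruby
require 'json'
require 'zlib'

payload = { schema_version: '1.0', type: 'schedule' }.to_json

compressed = Zlib::Deflate.deflate(payload)
Zlib::Inflate.inflate(compressed) == payload # => true

# Mirrors the deserializer shown earlier in this diff:
JSON.parse(Zlib::Inflate.inflate(compressed), symbolize_names: true)
# => {:schema_version=>"1.0", :type=>"schedule"}
```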
@@ -10,7 +10,7 @@ module Karafka
  module Setup
  # Config for recurring tasks
  class Config
- extend ::Karafka::Core::Configurable
+ extend Karafka::Core::Configurable

  setting(:consumer_class, default: Consumer)
  setting(:deserializer, default: Deserializer.new)
@@ -27,7 +27,7 @@ module Karafka
  # a separate instance in case of heavy usage of the transactional producer, etc.
  setting(
  :producer,
- constructor: -> { ::Karafka.producer },
+ constructor: -> { Karafka.producer },
  lazy: true
  )

@@ -31,7 +31,7 @@ module Karafka
  # @param block [Proc] code to execute.
  def initialize(id:, cron:, previous_time: 0, enabled: true, &block)
  @id = id
- @cron = ::Fugit::Cron.do_parse(cron)
+ @cron = Fugit::Cron.do_parse(cron)
  @previous_time = previous_time
  @start_time = Time.now
  @executable = block
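Note: `Fugit::Cron.do_parse` (from the fugit gem) raises on malformed expressions, while `Fugit::Cron.parse` returns `nil`; the raising form is the right fit here since an invalid cron should fail loudly at definition time. Usage sketch:

```ruby
require 'fugit'

cron = Fugit::Cron.do_parse('*/5 * * * *')
cron.next_time.to_t # => next matching Time (every 5 minutes)

Fugit::Cron.parse('not a cron')    # => nil (silent form)
Fugit::Cron.do_parse('not a cron') # => raises ArgumentError
```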
@@ -7,16 +7,19 @@ module Karafka
  module Pro
  # Recurring tasks functionality
  module RecurringTasks
+ extend Helpers::ConfigImporter.new(
+ recurring_tasks_logging: %i[recurring_tasks logging]
+ )
+
  class << self
  # @return [Schedule, nil] current defined schedule or nil if not defined
  def schedule
- @schedule || define('0.0.0') {}
+ @schedule || define('0.0.0') { nil }
  end

  # Simplified API for schedules definitions and validates the tasks data
  #
  # @param version [String]
- # @param block [Proc]
  #
  # @example
  # Karafka::Pro::RecurringTasks.define('1.0.1') do
@@ -24,9 +27,9 @@ module Karafka
  # MailingJob.perform_async
  # end
  # end
- def define(version = '1.0.0', &block)
+ def define(version = '1.0.0', &)
  @schedule = Schedule.new(version: version)
- @schedule.instance_exec(&block)
+ @schedule.instance_exec(&)

  @schedule.each do |task|
  Contracts::Task.new.validate!(
@@ -75,7 +78,7 @@ module Karafka
  RecurringTasks.schedule

  # User can disable logging of executions, in which case we don't track them
- return unless Karafka::App.config.recurring_tasks.logging
+ return unless recurring_tasks_logging
  Karafka.monitor.subscribe(Listener.new)
  end
  end
@@ -13,10 +13,8 @@ module Karafka
  # Contract to validate configuration of the adaptive iterator feature
  class Topic < Karafka::Contracts::Base
  configure do |config|
- config.error_messages = YAML.safe_load(
- File.read(
- File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
- )
+ config.error_messages = YAML.safe_load_file(
+ File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
  ).fetch('en').fetch('validations').fetch('routing').fetch('topic')
  end

@@ -13,10 +13,8 @@ module Karafka
  # Extended rules for dead letter queue settings
  class Topic < Karafka::Contracts::Base
  configure do |config|
- config.error_messages = YAML.safe_load(
- File.read(
- File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
- )
+ config.error_messages = YAML.safe_load_file(
+ File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
  ).fetch('en').fetch('validations').fetch('routing').fetch('topic')
  end

@@ -22,6 +22,9 @@ module Karafka
  # @param strategy [#call, nil] Strategy we want to use or nil if a default strategy
  # (same as in OSS) should be applied
  # @param args [Hash] Pro DLQ arguments
+ # @option args [String, nil] :topic name of the dead letter queue topic
+ # @option args [Integer] :max_retries maximum number of retries before dispatch to DLQ
+ # @option args [Boolean] :independent whether DLQ runs independently
  def dead_letter_queue(strategy: nil, **args)
  return @dead_letter_queue if @dead_letter_queue

@@ -13,10 +13,8 @@ module Karafka
  # Contract to validate configuration of the expiring feature
  class Topic < Karafka::Contracts::Base
  configure do |config|
- config.error_messages = YAML.safe_load(
- File.read(
- File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
- )
+ config.error_messages = YAML.safe_load_file(
+ File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
  ).fetch('en').fetch('validations').fetch('routing').fetch('topic')
  end

@@ -35,10 +35,8 @@ module Karafka
  end

  # Just an alias for nice API
- #
- # @param args [Array] Anything `#delaying` accepts
- def delay_by(*args)
- delaying(*args)
+ def delay_by(*)
+ delaying(*)
  end

  # @return [Boolean] is a given job delaying
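Note: `*args` becomes bare `*`, Ruby 3.2's anonymous positional forwarding, completing the set alongside the `**` and `&` forms seen throughout this diff. A simplified stand-in for the routing topic above:

```ruby
# Requires Ruby 3.2+. Topic here is a stand-in, not the real routing class.
class Topic
  def delaying(delay_ms = nil)
    @delay_ms = delay_ms
  end

  # Previously `def delay_by(*args); delaying(*args); end`
  def delay_by(*)
    delaying(*)
  end
end

topic = Topic.new
topic.delay_by(60_000)
puts topic.instance_variable_get(:@delay_ms) # => 60000
```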