karafka 2.5.1 → 2.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (151)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml +3 -29
  3. data/.github/workflows/ci_macos_arm64.yml +1 -1
  4. data/.github/workflows/push.yml +2 -2
  5. data/.github/workflows/trigger-wiki-refresh.yml +1 -1
  6. data/.ruby-version +1 -1
  7. data/CHANGELOG.md +14 -4
  8. data/Gemfile +0 -2
  9. data/Gemfile.lock +30 -31
  10. data/bin/integrations +2 -1
  11. data/bin/rspecs +4 -0
  12. data/config/locales/errors.yml +6 -4
  13. data/config/locales/pro_errors.yml +5 -4
  14. data/docker-compose.yml +1 -1
  15. data/examples/payloads/json/sample_set_02/download.json +191 -0
  16. data/examples/payloads/json/sample_set_03/event_type_1.json +18 -0
  17. data/examples/payloads/json/sample_set_03/event_type_2.json +263 -0
  18. data/examples/payloads/json/sample_set_03/event_type_3.json +41 -0
  19. data/karafka.gemspec +1 -1
  20. data/lib/active_job/queue_adapters/karafka_adapter.rb +1 -1
  21. data/lib/karafka/active_job/consumer.rb +5 -1
  22. data/lib/karafka/active_job/current_attributes/job_wrapper.rb +45 -0
  23. data/lib/karafka/active_job/current_attributes/loading.rb +1 -1
  24. data/lib/karafka/active_job/current_attributes/persistence.rb +19 -7
  25. data/lib/karafka/active_job/current_attributes.rb +1 -0
  26. data/lib/karafka/active_job/deserializer.rb +61 -0
  27. data/lib/karafka/active_job/dispatcher.rb +32 -12
  28. data/lib/karafka/active_job/job_options_contract.rb +2 -4
  29. data/lib/karafka/admin/acl.rb +8 -4
  30. data/lib/karafka/admin/configs/config.rb +6 -4
  31. data/lib/karafka/admin/consumer_groups.rb +74 -4
  32. data/lib/karafka/admin/topics.rb +40 -7
  33. data/lib/karafka/admin.rb +13 -4
  34. data/lib/karafka/base_consumer.rb +5 -5
  35. data/lib/karafka/cli/base.rb +1 -1
  36. data/lib/karafka/cli/contracts/server.rb +2 -4
  37. data/lib/karafka/cli/install.rb +1 -1
  38. data/lib/karafka/cli/topics/align.rb +1 -1
  39. data/lib/karafka/cli/topics/repartition.rb +2 -2
  40. data/lib/karafka/connection/client.rb +12 -2
  41. data/lib/karafka/connection/listeners_batch.rb +2 -3
  42. data/lib/karafka/connection/proxy.rb +11 -7
  43. data/lib/karafka/env.rb +1 -2
  44. data/lib/karafka/helpers/interval_runner.rb +4 -1
  45. data/lib/karafka/instrumentation/assignments_tracker.rb +17 -0
  46. data/lib/karafka/instrumentation/monitor.rb +1 -1
  47. data/lib/karafka/instrumentation/notifications.rb +1 -0
  48. data/lib/karafka/instrumentation/vendors/appsignal/base.rb +2 -3
  49. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +2 -3
  50. data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +8 -9
  51. data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +2 -3
  52. data/lib/karafka/messages/builders/batch_metadata.rb +1 -1
  53. data/lib/karafka/messages/builders/message.rb +1 -1
  54. data/lib/karafka/messages/messages.rb +2 -3
  55. data/lib/karafka/patches/rdkafka/bindings.rb +6 -6
  56. data/lib/karafka/patches/rdkafka/opaque.rb +1 -1
  57. data/lib/karafka/pro/active_job/dispatcher.rb +7 -3
  58. data/lib/karafka/pro/active_job/job_options_contract.rb +2 -4
  59. data/lib/karafka/pro/cleaner/messages/messages.rb +2 -3
  60. data/lib/karafka/pro/cli/contracts/server.rb +2 -4
  61. data/lib/karafka/pro/cli/parallel_segments/base.rb +1 -2
  62. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +2 -2
  63. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +2 -2
  64. data/lib/karafka/pro/connection/manager.rb +2 -2
  65. data/lib/karafka/pro/encryption/contracts/config.rb +4 -6
  66. data/lib/karafka/pro/encryption/messages/parser.rb +3 -3
  67. data/lib/karafka/pro/instrumentation/performance_tracker.rb +3 -3
  68. data/lib/karafka/pro/iterator/expander.rb +1 -1
  69. data/lib/karafka/pro/iterator/tpl_builder.rb +1 -1
  70. data/lib/karafka/pro/iterator.rb +2 -2
  71. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +2 -3
  72. data/lib/karafka/pro/processing/coordinators/filters_applier.rb +3 -3
  73. data/lib/karafka/pro/processing/filters/delayer.rb +1 -1
  74. data/lib/karafka/pro/processing/filters/expirer.rb +1 -1
  75. data/lib/karafka/pro/processing/filters/throttler.rb +1 -1
  76. data/lib/karafka/pro/processing/schedulers/default.rb +2 -4
  77. data/lib/karafka/pro/processing/strategies/lrj/default.rb +2 -4
  78. data/lib/karafka/pro/processing/strategies/vp/default.rb +2 -4
  79. data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +2 -3
  80. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +2 -4
  81. data/lib/karafka/pro/recurring_tasks/contracts/task.rb +2 -4
  82. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +6 -5
  83. data/lib/karafka/pro/recurring_tasks/schedule.rb +4 -6
  84. data/lib/karafka/pro/recurring_tasks.rb +8 -5
  85. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +2 -4
  86. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +2 -4
  87. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +2 -4
  88. data/lib/karafka/pro/routing/features/delaying/topic.rb +2 -4
  89. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +4 -8
  90. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +5 -7
  91. data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +7 -6
  92. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +2 -2
  93. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +2 -4
  94. data/lib/karafka/pro/routing/features/expiring/topic.rb +2 -4
  95. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +2 -4
  96. data/lib/karafka/pro/routing/features/filtering/topic.rb +2 -3
  97. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +2 -4
  98. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +2 -4
  99. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +3 -5
  100. data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +3 -3
  101. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +2 -4
  102. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +2 -4
  103. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +3 -5
  104. data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +2 -4
  105. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +2 -4
  106. data/lib/karafka/pro/routing/features/pausing/config.rb +26 -0
  107. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +17 -11
  108. data/lib/karafka/pro/routing/features/pausing/topic.rb +69 -8
  109. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +2 -4
  110. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +2 -4
  111. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +2 -4
  112. data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +2 -4
  113. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +6 -8
  114. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +2 -4
  115. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +2 -4
  116. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +2 -4
  117. data/lib/karafka/pro/scheduled_messages/contracts/message.rb +2 -4
  118. data/lib/karafka/pro/scheduled_messages.rb +4 -6
  119. data/lib/karafka/pro/swarm/liveness_listener.rb +2 -2
  120. data/lib/karafka/processing/coordinator.rb +2 -4
  121. data/lib/karafka/processing/coordinators_buffer.rb +2 -3
  122. data/lib/karafka/processing/executor.rb +2 -3
  123. data/lib/karafka/processing/jobs/base.rb +2 -3
  124. data/lib/karafka/processing/workers_batch.rb +2 -3
  125. data/lib/karafka/railtie.rb +1 -0
  126. data/lib/karafka/routing/activity_manager.rb +2 -2
  127. data/lib/karafka/routing/builder.rb +5 -7
  128. data/lib/karafka/routing/consumer_group.rb +4 -6
  129. data/lib/karafka/routing/contracts/consumer_group.rb +3 -5
  130. data/lib/karafka/routing/contracts/routing.rb +2 -4
  131. data/lib/karafka/routing/contracts/topic.rb +2 -4
  132. data/lib/karafka/routing/features/active_job/contracts/topic.rb +2 -4
  133. data/lib/karafka/routing/features/active_job/topic.rb +6 -0
  134. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +2 -4
  135. data/lib/karafka/routing/features/declaratives/contracts/topic.rb +3 -5
  136. data/lib/karafka/routing/features/deserializers/contracts/topic.rb +2 -4
  137. data/lib/karafka/routing/features/eofed/contracts/topic.rb +2 -4
  138. data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +2 -4
  139. data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +2 -4
  140. data/lib/karafka/routing/topics.rb +4 -9
  141. data/lib/karafka/server.rb +1 -1
  142. data/lib/karafka/setup/config.rb +66 -9
  143. data/lib/karafka/setup/contracts/config.rb +12 -10
  144. data/lib/karafka/setup/defaults_injector.rb +3 -2
  145. data/lib/karafka/setup/dsl.rb +2 -3
  146. data/lib/karafka/swarm/liveness_listener.rb +2 -3
  147. data/lib/karafka/swarm/supervisor.rb +1 -1
  148. data/lib/karafka/version.rb +1 -1
  149. data/lib/karafka.rb +2 -2
  150. metadata +8 -2
  151. data/.diffend.yml +0 -3

data/lib/karafka/pro/active_job/dispatcher.rb
@@ -11,6 +11,10 @@ module Karafka
   # and that allows to inject additional options into the producer, effectively allowing for a
   # much better and more granular control over the dispatch and consumption process.
   class Dispatcher < ::Karafka::ActiveJob::Dispatcher
+    include Helpers::ConfigImporter.new(
+      deserializer: %i[internal active_job deserializer]
+    )
+
     # Defaults for dispatching
     # They can be updated by using `#karafka_options` on the job
     DEFAULTS = {
@@ -43,7 +47,7 @@ module Karafka
       fetch_option(job, :dispatch_method, DEFAULTS),
       dispatch_details(job).merge!(
         topic: job.queue_name,
-        payload: ::ActiveSupport::JSON.encode(serialize_job(job))
+        payload: serialize_job(job)
       )
     )
   end
@@ -64,7 +68,7 @@ module Karafka

     dispatches[d_method][producer] << dispatch_details(job).merge!(
       topic: job.queue_name,
-      payload: ::ActiveSupport::JSON.encode(serialize_job(job))
+      payload: serialize_job(job)
     )
   end

@@ -90,7 +94,7 @@ module Karafka

     target_message = dispatch_details(job).merge!(
       topic: job.queue_name,
-      payload: ::ActiveSupport::JSON.encode(serialize_job(job))
+      payload: serialize_job(job)
     )

     proxy_message = Pro::ScheduledMessages.schedule(
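
The new `include Helpers::ConfigImporter.new(...)` line replaces repeated `::Karafka::App.config` digging with declared readers. A minimal, self-contained sketch of that idea, not Karafka's exact implementation (the `APP_CONFIG` stub and its contents are made up):

require 'ostruct'

# Stand-in for a framework-wide nested configuration tree
APP_CONFIG = OpenStruct.new(
  internal: OpenStruct.new(active_job: OpenStruct.new(deserializer: :json))
)

# A Module subclass: each mapping becomes a reader method that walks
# the config tree along its symbol path
class ConfigImporter < Module
  def initialize(mappings)
    super()
    mappings.each do |name, path|
      define_method(name) do
        path.reduce(APP_CONFIG) { |node, key| node.public_send(key) }
      end
    end
  end
end

class Dispatcher
  include ConfigImporter.new(deserializer: %i[internal active_job deserializer])
end

puts Dispatcher.new.deserializer # => json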

data/lib/karafka/pro/active_job/job_options_contract.rb
@@ -10,10 +10,8 @@ module Karafka
   # class that works with Pro features.
   class JobOptionsContract < Contracts::Base
     configure do |config|
-      config.error_messages = YAML.safe_load(
-        File.read(
-          File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
-        )
+      config.error_messages = YAML.safe_load_file(
+        File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
       ).fetch('en').fetch('validations').fetch('job_options')
     end


data/lib/karafka/pro/cleaner/messages/messages.rb
@@ -17,7 +17,6 @@ module Karafka
   module Messages
     # @param clean [Boolean] do we want to clean each message after we're done working with
     #   it.
-    # @param block [Proc] block we want to execute per each message
     #
     # @note Cleaning messages after we're done with each of them and did not fail does not
     #   affect any other functionalities. The only thing that is crucial is to make sure,
@@ -27,14 +26,14 @@ module Karafka
     # @note This method calls super() to ensure compatibility with other libraries that
     #   may have prepended modules to modify #each behavior. This preserves the method
     #   chain and allows instrumentation libraries to function correctly.
-    def each(clean: false, &block)
+    def each(clean: false, &)
       if clean
         super() do |message|
           yield(message)
           message.clean!
         end
       else
-        super(&block)
+        super(&)
       end
     end
   end
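
The `&block` to `&` rewrites that recur through this release use Ruby 3.1's anonymous block forwarding: a bare `&` in the signature forwards the block to another call without binding it to a name. A tiny illustration:

# Requires Ruby >= 3.1
def each_upcased(&)
  %w[a b c].map(&:upcase).each(&)
end

each_upcased { |letter| puts letter }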

data/lib/karafka/pro/cli/contracts/server.rb
@@ -12,10 +12,8 @@ module Karafka
   # It differs slightly from the OSS one because it is aware of the routing patterns
   class Server < ::Karafka::Cli::Contracts::Server
     configure do |config|
-      config.error_messages = YAML.safe_load(
-        File.read(
-          File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
-        )
+      config.error_messages = YAML.safe_load_file(
+        File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
       ).fetch('en').fetch('validations').fetch('cli').fetch('server')
     end


data/lib/karafka/pro/cli/parallel_segments/base.rb
@@ -67,8 +67,7 @@ module Karafka
   consumer_groups = [segment_origin, segments.map(&:name)].flatten

   consumer_groups_with_topics = consumer_groups
-    .map { |name| [name, topics_names] }
-    .to_h
+    .to_h { |name| [name, topics_names] }

   lags_with_offsets = Karafka::Admin.read_lags_with_offsets(
     consumer_groups_with_topics
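
`map { ... }.to_h` becomes `to_h { ... }`: since Ruby 2.6, `Enumerable#to_h` accepts a pair-returning block, building the hash in one pass without the intermediate array of pairs:

consumer_groups = %w[group_a group_b]
topics_names = %w[orders payments]

two_pass = consumer_groups.map { |name| [name, topics_names] }.to_h
one_pass = consumer_groups.to_h { |name| [name, topics_names] }

raise 'mismatch' unless two_pass == one_pass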

data/lib/karafka/pro/cli/parallel_segments/collapse.rb
@@ -123,8 +123,8 @@ module Karafka
   inconclusive = true

   puts(
-    " Inconclusive offsets for #{red(topic_name)}##{red(partition_id)}:" \
-    " #{parallel_offsets.to_a.join(', ')}"
+    " Inconclusive offsets for #{red(topic_name)}##{red(partition_id)}: " \
+    "#{parallel_offsets.to_a.join(', ')}"
   )
 end
end

data/lib/karafka/pro/cli/parallel_segments/distribute.rb
@@ -92,8 +92,8 @@ module Karafka

   raise(
     ::Karafka::Errors::CommandValidationError,
-    "Parallel segment #{red(cg_name)} already has offset #{red(offset)}" \
-    " set for #{red("#{topic_name}##{partition_id}")}"
+    "Parallel segment #{red(cg_name)} already has offset #{red(offset)} " \
+    "set for #{red("#{topic_name}##{partition_id}")}"
   )
 end
end

data/lib/karafka/pro/connection/manager.rb
@@ -21,7 +21,7 @@ module Karafka

   # Creates new manager instance
   def initialize
-    super()
+    super
     @mutex = Mutex.new
     @changes = Hash.new do |h, k|
       h[k] = {
@@ -69,7 +69,7 @@ module Karafka
   times = []
   # stateage is in microseconds
   # We monitor broker changes to make sure we do not introduce extra friction
-  times << statistics['brokers'].values.map { |stats| stats['stateage'] }.min / 1_000
+  times << (statistics['brokers'].values.map { |stats| stats['stateage'] }.min / 1_000)
   times << statistics['cgrp']['rebalance_age']
   times << statistics['cgrp']['stateage']


data/lib/karafka/pro/encryption/contracts/config.rb
@@ -11,10 +11,8 @@ module Karafka
   # Makes sure, all the expected config is defined as it should be
   class Config < ::Karafka::Contracts::Base
     configure do |config|
-      config.error_messages = YAML.safe_load(
-        File.read(
-          File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
-        )
+      config.error_messages = YAML.safe_load_file(
+        File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
       ).fetch('en').fetch('validations').fetch('setup').fetch('config')
     end

@@ -26,8 +24,8 @@ module Karafka

     required(:private_keys) do |val|
       val.is_a?(Hash) &&
-        val.keys.all? { |key| key.is_a?(String) } &&
-        val.values.all? { |key| key.is_a?(String) }
+        val.keys.all?(String) &&
+        val.values.all?(String)
     end
   end

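
`all?` with an argument (Ruby 2.5+) tests each element via `===`, so `all?(String)` matches the explicit `is_a?` block; the same trick covers the `all?(Integer)` and `all?(true)` rewrites later in this diff:

private_keys = { 'v1' => 'PEM...', 'v2' => 'PEM...' }

p private_keys.keys.all? { |key| key.is_a?(String) } # => true
p private_keys.keys.all?(String)                     # => true, uses String === key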

data/lib/karafka/pro/encryption/messages/parser.rb
@@ -25,7 +25,7 @@ module Karafka
   encryption = headers['encryption']
   fingerprint = headers['encryption_fingerprint']

-  return super(message) unless active && encryption
+  return super unless active && encryption

   # Decrypt raw payload so it can be handled by the default parser logic
   decrypted_payload = cipher.decrypt(
@@ -35,11 +35,11 @@ module Karafka

   message.raw_payload = decrypted_payload

-  return super(message) unless fingerprint && fingerprinter
+  return super unless fingerprint && fingerprinter

   message_fingerprint = fingerprinter.hexdigest(decrypted_payload)

-  return super(message) if message_fingerprint == fingerprint
+  return super if message_fingerprint == fingerprint

   raise(Errors::FingerprintVerificationError, message.to_s)
 end
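
Dropping the argument from `super(message)` works because a bare `super` re-passes the current method's arguments unchanged. A tiny illustration with hypothetical classes:

class Parser
  def call(message)
    "parsed: #{message}"
  end
end

class EncryptedParser < Parser
  def call(message)
    return super if message.start_with?('plain:') # same as super(message)

    super("<decrypted> #{message}")               # explicit arguments still allowed
  end
end

puts EncryptedParser.new.call('plain: hello') # => parsed: plain: hello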

data/lib/karafka/pro/instrumentation/performance_tracker.rb
@@ -50,7 +50,7 @@ module Karafka
   partition = messages.metadata.partition

   samples = @processing_times[topic][partition]
-  samples << event[:time] / messages.size
+  samples << (event[:time] / messages.size)

   return unless samples.size > SAMPLES_COUNT

@@ -66,8 +66,8 @@ module Karafka
 def percentile(percentile, values)
   values_sorted = values.sort

-  floor = (percentile * (values_sorted.length - 1) + 1).floor - 1
-  mod = (percentile * (values_sorted.length - 1) + 1).modulo(1)
+  floor = ((percentile * (values_sorted.length - 1)) + 1).floor - 1
+  mod = ((percentile * (values_sorted.length - 1)) + 1).modulo(1)

   values_sorted[floor] + (mod * (values_sorted[floor + 1] - values_sorted[floor]))
 end
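
The added parentheses only silence operator-precedence lint warnings; the math is unchanged linear interpolation at rank p * (n - 1). A worked check of the formula above:

def percentile(percentile, values)
  values_sorted = values.sort
  floor = ((percentile * (values_sorted.length - 1)) + 1).floor - 1
  mod = ((percentile * (values_sorted.length - 1)) + 1).modulo(1)
  values_sorted[floor] + (mod * (values_sorted[floor + 1] - values_sorted[floor]))
end

# rank = 0.95 * 3 = 2.85, i.e. 85% of the way from 30 to 40
puts percentile(0.95, [10, 20, 30, 40]) # => 38.5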

data/lib/karafka/pro/iterator/expander.rb
@@ -62,7 +62,7 @@ module Karafka

   # If we've got just array with topics, we need to convert that into a representation
   # that we can expand with offsets
-  topics = topics.map { |name| [name, false] }.to_h if topics.is_a?(Array)
+  topics = topics.to_h { |name| [name, false] } if topics.is_a?(Array)
   # We remap by creating new hash, just in case the hash came as the argument for this
   # expanded. We do not want to modify user provided hash
   topics.transform_keys(&:to_s)

data/lib/karafka/pro/iterator/tpl_builder.rb
@@ -56,7 +56,7 @@ module Karafka
   next unless partitions.is_a?(Array) || partitions.is_a?(Range)

   # When no offsets defined, we just start from zero
-  @mapped_topics[name] = partitions.map { |partition| [partition, 0] }.to_h
+  @mapped_topics[name] = partitions.to_h { |partition| [partition, 0] }
 end
end


data/lib/karafka/pro/iterator.rb
@@ -43,9 +43,9 @@ module Karafka
   )
   @topics_with_partitions = Expander.new.call(topics)

-  @routing_topics = @topics_with_partitions.map do |name, _|
+  @routing_topics = @topics_with_partitions.to_h do |name, _|
     [name, ::Karafka::Routing::Router.find_or_initialize_by_name(name)]
-  end.to_h
+  end

   @total_partitions = @topics_with_partitions.map(&:last).sum(&:count)


data/lib/karafka/pro/processing/coordinators/errors_tracker.rb
@@ -79,9 +79,8 @@ module Karafka
 end

 # Iterates over errors
-# @param block [Proc] code we want to run on each error
-def each(&block)
-  @errors.each(&block)
+def each(&)
+  @errors.each(&)
 end

 # @return [Array<StandardError>] array with all the errors that occurred

data/lib/karafka/pro/processing/coordinators/filters_applier.rb
@@ -68,7 +68,7 @@ module Karafka
   # @return [Integer] minimum timeout we need to pause. This is the minimum for all the
   #   filters to satisfy all of them.
   def timeout
-    applied.map(&:timeout).compact.min || 0
+    applied.filter_map(&:timeout).min || 0
   end

   # The first message we do need to get next time we poll. We use the minimum not to jump
@@ -78,7 +78,7 @@ module Karafka
   def cursor
     return nil unless active?

-    applied.map(&:cursor).compact.min_by(&:offset)
+    applied.filter_map(&:cursor).min_by(&:offset)
   end

   # @return [Boolean] did any of the filters requested offset storage during filter
@@ -106,7 +106,7 @@ module Karafka
   def marking_cursor
     return nil unless active?

-    applied.map(&:marking_cursor).compact.min_by(&:offset)
+    applied.filter_map(&:marking_cursor).min_by(&:offset)
   end

   private
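
`map(&:timeout).compact.min` becomes `filter_map(&:timeout).min`: Ruby 2.7's `filter_map` maps and drops nil/false results in a single pass:

Filter = Struct.new(:timeout)
applied = [Filter.new(500), Filter.new(nil), Filter.new(100)]

p applied.map(&:timeout).compact.min # => 100, two passes
p applied.filter_map(&:timeout).min  # => 100, one pass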

data/lib/karafka/pro/processing/filters/delayer.rb
@@ -25,7 +25,7 @@ module Karafka

   # Time on message is in seconds with ms precision, so we need to convert the ttl that
   # is in ms to this format
-  border = ::Time.now.utc - @delay / 1_000.0
+  border = ::Time.now.utc - (@delay / 1_000.0)

   messages.delete_if do |message|
     too_young = message.timestamp > border

data/lib/karafka/pro/processing/filters/expirer.rb
@@ -26,7 +26,7 @@ module Karafka

   # Time on message is in seconds with ms precision, so we need to convert the ttl that
   # is in ms to this format
-  border = ::Time.now.utc - @ttl / 1_000.to_f
+  border = ::Time.now.utc - (@ttl / 1_000.to_f)

   messages.delete_if do |message|
     too_old = message.timestamp < border

data/lib/karafka/pro/processing/filters/throttler.rb
@@ -67,7 +67,7 @@ module Karafka
     # so we are no longer throttled and so we can process at least one message
     def timeout
       timeout = @interval - (monotonic_now - @time)
-      timeout <= 0 ? 0 : timeout
+      [timeout, 0].max
     end
   end
 end
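
`[timeout, 0].max` clamps a possibly negative remainder to zero and reads as a clamp rather than a branch:

timeout = 250 - 320 # the poll came in later than the interval

p(timeout <= 0 ? 0 : timeout) # => 0
p [timeout, 0].max            # => 0, same result
p timeout.clamp(0..)          # => 0, another spelling (Ruby 2.7+)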

data/lib/karafka/pro/processing/schedulers/default.rb
@@ -29,10 +29,8 @@ module Karafka
 def on_schedule_consumption(jobs_array)
   perf_tracker = Instrumentation::PerformanceTracker.instance

-  ordered = []
-
-  jobs_array.each do |job|
-    ordered << [
+  ordered = jobs_array.map do |job|
+    [
       job,
       processing_cost(perf_tracker, job)
     ]

data/lib/karafka/pro/processing/strategies/lrj/default.rb
@@ -78,10 +78,8 @@ module Karafka
     # Allows for LRJ to synchronize its work. It may be needed because LRJ can run
     # lifecycle events like revocation while the LRJ work is running and there may be a
     # need for a critical section.
-    #
-    # @param block [Proc] block we want to run in a mutex to prevent race-conditions
-    def synchronize(&block)
-      coordinator.shared_mutex.synchronize(&block)
+    def synchronize(&)
+      coordinator.shared_mutex.synchronize(&)
     end
   end
 end

data/lib/karafka/pro/processing/strategies/vp/default.rb
@@ -134,10 +134,8 @@ module Karafka
   # the end users. With LRJ it is needed and provided in the `LRJ::Default` strategy,
   # because lifecycle events on revocation can run in parallel to the LRJ job as it is
   # non-blocking.
-  #
-  # @param block [Proc] block we want to run in a mutex to prevent race-conditions
-  def synchronize(&block)
-    coordinator.shared_mutex.synchronize(&block)
+  def synchronize(&)
+    coordinator.shared_mutex.synchronize(&)
   end

   private

data/lib/karafka/pro/processing/subscription_groups_coordinator.rb
@@ -16,12 +16,11 @@ module Karafka
 #   want to pause
 # @param lock_id [Object] key we want to use if we want to set multiple locks on the same
 #   subscription group
-# @param kwargs [Object] Any keyword arguments accepted by the jobs queue lock.
-def pause(subscription_group, lock_id = nil, **kwargs)
+def pause(subscription_group, lock_id = nil, **)
   jobs_queue.lock_async(
     subscription_group.id,
     lock_id,
-    **kwargs
+    **
   )
 end

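
The `**kwargs` to `**` change uses Ruby 3.2's anonymous keyword-rest forwarding, which passes every keyword option straight through; the standalone `lock_async` signature below is made up for illustration:

# Requires Ruby >= 3.2
def pause(subscription_group_id, lock_id = nil, **)
  lock_async(subscription_group_id, lock_id, **)
end

def lock_async(id, lock_id, timeout: nil)
  [id, lock_id, timeout]
end

p pause('sg-1', 'lock-1', timeout: 5_000) # => ["sg-1", "lock-1", 5000]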

data/lib/karafka/pro/recurring_tasks/contracts/config.rb
@@ -11,10 +11,8 @@ module Karafka
   # Makes sure, all the expected config is defined as it should be
   class Config < ::Karafka::Contracts::Base
     configure do |config|
-      config.error_messages = YAML.safe_load(
-        File.read(
-          File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
-        )
+      config.error_messages = YAML.safe_load_file(
+        File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
       ).fetch('en').fetch('validations').fetch('setup').fetch('config')
     end


data/lib/karafka/pro/recurring_tasks/contracts/task.rb
@@ -11,10 +11,8 @@ module Karafka
   # Ensures that task details are as expected
   class Task < ::Karafka::Contracts::Base
     configure do |config|
-      config.error_messages = YAML.safe_load(
-        File.read(
-          File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
-        )
+      config.error_messages = YAML.safe_load_file(
+        File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
       ).fetch('en').fetch('validations').fetch('recurring_tasks')
     end


data/lib/karafka/pro/recurring_tasks/dispatcher.rb
@@ -8,6 +8,10 @@ module Karafka
   module RecurringTasks
     # Dispatches appropriate recurring tasks related messages to expected topics
     class Dispatcher
+      extend Helpers::ConfigImporter.new(
+        topics: %i[recurring_tasks topics]
+      )
+
       class << self
         # Snapshots to Kafka current schedule state
         def schedule
@@ -44,15 +48,12 @@ module Karafka
         private

         # @return [::WaterDrop::Producer] web ui producer
+        # @note We do not fetch it via the ConfigImporter not to cache it so we can re-use it
+        #   if needed
         def producer
           ::Karafka::App.config.recurring_tasks.producer
         end

-        # @return [String] consumers commands topic
-        def topics
-          ::Karafka::App.config.recurring_tasks.topics
-        end
-
         # @return [Serializer]
         def serializer
           Serializer.new
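
Here the importer is `extend`ed rather than `include`d: the replaced `topics` method lived under `class << self`, so the generated reader has to exist at the class level. The include/extend difference in one sketch (the `Readers` module and its values are made up):

module Readers
  def topics
    %w[schedules logs]
  end
end

class InstanceLevel
  include Readers # reader becomes an instance method
end

class ClassLevel
  extend Readers # reader becomes a class-level method
end

p InstanceLevel.new.topics
p ClassLevel.topics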

data/lib/karafka/pro/recurring_tasks/schedule.rb
@@ -32,9 +32,8 @@ module Karafka
   end

   # Iterates over tasks yielding them one after another
-  # @param block [Proc] block that will be executed with each task
-  def each(&block)
-    @tasks.each_value(&block)
+  def each(&)
+    @tasks.each_value(&)
   end

   # @param id [String] id of a particular recurring task
@@ -45,9 +44,8 @@ module Karafka

   # Allows us to have a nice DSL for defining schedules
   # @param args [Array] attributes accepted by the task initializer
-  # @param block [Proc] block to execute
-  def schedule(**args, &block)
-    self << Task.new(**args, &block)
+  def schedule(**args, &)
+    self << Task.new(**args, &)
   end
 end
end

data/lib/karafka/pro/recurring_tasks.rb
@@ -7,16 +7,19 @@ module Karafka
   module Pro
     # Recurring tasks functionality
     module RecurringTasks
+      extend Helpers::ConfigImporter.new(
+        recurring_tasks_logging: %i[recurring_tasks logging]
+      )
+
       class << self
         # @return [Schedule, nil] current defined schedule or nil if not defined
         def schedule
-          @schedule || define('0.0.0') {}
+          @schedule || define('0.0.0') { nil }
         end

         # Simplified API for schedules definitions and validates the tasks data
         #
         # @param version [String]
-        # @param block [Proc]
         #
         # @example
         #   Karafka::Pro::RecurringTasks.define('1.0.1') do
@@ -24,9 +27,9 @@ module Karafka
         #       MailingJob.perform_async
         #     end
         #   end
-        def define(version = '1.0.0', &block)
+        def define(version = '1.0.0', &)
           @schedule = Schedule.new(version: version)
-          @schedule.instance_exec(&block)
+          @schedule.instance_exec(&)

           @schedule.each do |task|
             Contracts::Task.new.validate!(
@@ -75,7 +78,7 @@ module Karafka
   RecurringTasks.schedule

   # User can disable logging of executions, in which case we don't track them
-  return unless Karafka::App.config.recurring_tasks.logging
+  return unless recurring_tasks_logging

   Karafka.monitor.subscribe(Listener.new)
 end
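
`define` is the usual `instance_exec` DSL pattern: the block runs with the fresh `Schedule` as `self`, so bare `schedule(...)` calls land on it. A reduced sketch of the same shape, with `Task` collapsed to a plain id:

class Schedule
  attr_reader :tasks

  def initialize
    @tasks = []
  end

  def schedule(id:)
    @tasks << id
  end
end

def define(&)
  schedule = Schedule.new
  schedule.instance_exec(&) # block executes with `schedule` as self
  schedule
end

p define { schedule(id: 'cleanup') }.tasks # => ["cleanup"]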

data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb
@@ -13,10 +13,8 @@ module Karafka
   # Contract to validate configuration of the adaptive iterator feature
   class Topic < Karafka::Contracts::Base
     configure do |config|
-      config.error_messages = YAML.safe_load(
-        File.read(
-          File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
-        )
+      config.error_messages = YAML.safe_load_file(
+        File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
       ).fetch('en').fetch('validations').fetch('routing').fetch('topic')
     end


data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb
@@ -13,10 +13,8 @@ module Karafka
   # Extended rules for dead letter queue settings
   class Topic < Karafka::Contracts::Base
     configure do |config|
-      config.error_messages = YAML.safe_load(
-        File.read(
-          File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
-        )
+      config.error_messages = YAML.safe_load_file(
+        File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
      ).fetch('en').fetch('validations').fetch('routing').fetch('topic')
     end


data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb
@@ -13,10 +13,8 @@ module Karafka
   # Contract to validate configuration of the expiring feature
   class Topic < Karafka::Contracts::Base
     configure do |config|
-      config.error_messages = YAML.safe_load(
-        File.read(
-          File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
-        )
+      config.error_messages = YAML.safe_load_file(
+        File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
       ).fetch('en').fetch('validations').fetch('routing').fetch('topic')
     end


data/lib/karafka/pro/routing/features/delaying/topic.rb
@@ -35,10 +35,8 @@ module Karafka
 end

 # Just an alias for nice API
-#
-# @param args [Array] Anything `#delaying` accepts
-def delay_by(*args)
-  delaying(*args)
+def delay_by(*)
+  delaying(*)
 end

 # @return [Boolean] is a given job delaying
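
And the positional counterpart: `delay_by(*)` forwards all positional arguments anonymously (`*`, Ruby 3.2) to `#delaying`:

# Requires Ruby >= 3.2
def delaying(delay_ms)
  "delayed by #{delay_ms}ms"
end

def delay_by(*)
  delaying(*)
end

puts delay_by(5_000) # => delayed by 5000ms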

data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb
@@ -12,19 +12,15 @@ module Karafka
   # Contract to validate configuration of the direct assignments feature
   class ConsumerGroup < Karafka::Contracts::Base
     configure do |config|
-      config.error_messages = YAML.safe_load(
-        File.read(
-          File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
-        )
+      config.error_messages = YAML.safe_load_file(
+        File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
       ).fetch('en').fetch('validations').fetch('routing').fetch('consumer_group')
     end

     virtual do |data, errors|
       next unless errors.empty?

-      active = []
-
-      data[:topics].each do |topic|
-        active << topic[:direct_assignments][:active]
+      active = data[:topics].map do |topic|
+        topic[:direct_assignments][:active]
       end

       # If none active we use standard subscriptions

data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb
@@ -13,10 +13,8 @@ module Karafka
   # Contract to validate configuration of the direct assignments topic feature
   class Topic < Karafka::Contracts::Base
     configure do |config|
-      config.error_messages = YAML.safe_load(
-        File.read(
-          File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
-        )
+      config.error_messages = YAML.safe_load_file(
+        File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
       ).fetch('en').fetch('validations').fetch('routing').fetch('topic')
     end

@@ -26,8 +24,8 @@ module Karafka
     required(:partitions) do |val|
       next true if val == true
       next false unless val.is_a?(Hash)
-      next false unless val.keys.all? { |part| part.is_a?(Integer) }
-      next false unless val.values.all? { |flag| flag == true }
+      next false unless val.keys.all?(Integer)
+      next false unless val.values.all?(true)

       true
     end
@@ -67,7 +65,7 @@ module Karafka
     direct_partitions = direct_assignments[:partitions].keys
     swarm_partitions = nodes.values.flatten

-    next unless swarm_partitions.all? { |partition| partition.is_a?(Integer) }
+    next unless swarm_partitions.all?(Integer)
     next if direct_partitions.sort == swarm_partitions.sort

     # If we assigned more partitions than we distributed in swarm