karafka 2.5.1 → 2.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (238)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci_linux_ubuntu_x86_64_gnu.yml +21 -29
  3. data/.github/workflows/ci_macos_arm64.yml +1 -1
  4. data/.github/workflows/push.yml +2 -2
  5. data/.github/workflows/trigger-wiki-refresh.yml +1 -1
  6. data/.ruby-version +1 -1
  7. data/.yard-lint.yml +174 -0
  8. data/CHANGELOG.md +20 -4
  9. data/Gemfile +1 -2
  10. data/Gemfile.lock +45 -41
  11. data/bin/integrations +2 -1
  12. data/bin/rspecs +4 -0
  13. data/config/locales/errors.yml +6 -4
  14. data/config/locales/pro_errors.yml +5 -4
  15. data/docker-compose.yml +1 -1
  16. data/examples/payloads/json/sample_set_02/download.json +191 -0
  17. data/examples/payloads/json/sample_set_03/event_type_1.json +18 -0
  18. data/examples/payloads/json/sample_set_03/event_type_2.json +263 -0
  19. data/examples/payloads/json/sample_set_03/event_type_3.json +41 -0
  20. data/karafka.gemspec +3 -3
  21. data/lib/active_job/queue_adapters/karafka_adapter.rb +3 -3
  22. data/lib/karafka/active_job/consumer.rb +7 -3
  23. data/lib/karafka/active_job/current_attributes/job_wrapper.rb +45 -0
  24. data/lib/karafka/active_job/current_attributes/loading.rb +1 -1
  25. data/lib/karafka/active_job/current_attributes/persistence.rb +19 -7
  26. data/lib/karafka/active_job/current_attributes.rb +3 -2
  27. data/lib/karafka/active_job/deserializer.rb +61 -0
  28. data/lib/karafka/active_job/dispatcher.rb +34 -14
  29. data/lib/karafka/active_job/job_options_contract.rb +2 -4
  30. data/lib/karafka/admin/acl.rb +8 -4
  31. data/lib/karafka/admin/configs/config.rb +6 -4
  32. data/lib/karafka/admin/configs/resource.rb +7 -1
  33. data/lib/karafka/admin/consumer_groups.rb +80 -12
  34. data/lib/karafka/admin/topics.rb +43 -9
  35. data/lib/karafka/admin.rb +23 -14
  36. data/lib/karafka/app.rb +3 -3
  37. data/lib/karafka/base_consumer.rb +6 -6
  38. data/lib/karafka/cli/base.rb +2 -2
  39. data/lib/karafka/cli/console.rb +1 -1
  40. data/lib/karafka/cli/contracts/server.rb +3 -5
  41. data/lib/karafka/cli/help.rb +1 -1
  42. data/lib/karafka/cli/install.rb +3 -2
  43. data/lib/karafka/cli/server.rb +1 -1
  44. data/lib/karafka/cli/swarm.rb +1 -1
  45. data/lib/karafka/cli/topics/align.rb +1 -1
  46. data/lib/karafka/cli/topics/repartition.rb +2 -2
  47. data/lib/karafka/connection/client.rb +30 -19
  48. data/lib/karafka/connection/listeners_batch.rb +2 -3
  49. data/lib/karafka/connection/manager.rb +1 -0
  50. data/lib/karafka/connection/proxy.rb +12 -8
  51. data/lib/karafka/connection/rebalance_manager.rb +1 -1
  52. data/lib/karafka/connection/status.rb +1 -0
  53. data/lib/karafka/constraints.rb +1 -1
  54. data/lib/karafka/contracts/base.rb +1 -1
  55. data/lib/karafka/deserializers/payload.rb +1 -1
  56. data/lib/karafka/env.rb +1 -2
  57. data/lib/karafka/helpers/async.rb +1 -1
  58. data/lib/karafka/helpers/config_importer.rb +3 -3
  59. data/lib/karafka/helpers/interval_runner.rb +4 -1
  60. data/lib/karafka/helpers/multi_delegator.rb +3 -0
  61. data/lib/karafka/instrumentation/assignments_tracker.rb +19 -1
  62. data/lib/karafka/instrumentation/callbacks/error.rb +2 -2
  63. data/lib/karafka/instrumentation/callbacks/statistics.rb +3 -3
  64. data/lib/karafka/instrumentation/logger.rb +6 -6
  65. data/lib/karafka/instrumentation/monitor.rb +3 -3
  66. data/lib/karafka/instrumentation/notifications.rb +1 -0
  67. data/lib/karafka/instrumentation/vendors/appsignal/base.rb +3 -4
  68. data/lib/karafka/instrumentation/vendors/datadog/logger_listener.rb +3 -4
  69. data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb +10 -11
  70. data/lib/karafka/instrumentation/vendors/kubernetes/base_listener.rb +1 -1
  71. data/lib/karafka/instrumentation/vendors/kubernetes/liveness_listener.rb +5 -18
  72. data/lib/karafka/messages/builders/batch_metadata.rb +2 -2
  73. data/lib/karafka/messages/builders/message.rb +1 -1
  74. data/lib/karafka/messages/messages.rb +2 -3
  75. data/lib/karafka/patches/rdkafka/bindings.rb +6 -6
  76. data/lib/karafka/patches/rdkafka/opaque.rb +1 -1
  77. data/lib/karafka/pro/active_job/consumer.rb +2 -2
  78. data/lib/karafka/pro/active_job/dispatcher.rb +10 -6
  79. data/lib/karafka/pro/active_job/job_options_contract.rb +2 -4
  80. data/lib/karafka/pro/cleaner/messages/messages.rb +2 -3
  81. data/lib/karafka/pro/cleaner.rb +3 -3
  82. data/lib/karafka/pro/cli/contracts/server.rb +3 -5
  83. data/lib/karafka/pro/cli/parallel_segments/base.rb +5 -5
  84. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +3 -3
  85. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +3 -3
  86. data/lib/karafka/pro/cli/parallel_segments.rb +1 -1
  87. data/lib/karafka/pro/connection/manager.rb +3 -4
  88. data/lib/karafka/pro/connection/multiplexing/listener.rb +1 -0
  89. data/lib/karafka/pro/contracts/base.rb +1 -1
  90. data/lib/karafka/pro/encryption/cipher.rb +3 -2
  91. data/lib/karafka/pro/encryption/contracts/config.rb +5 -7
  92. data/lib/karafka/pro/encryption/messages/parser.rb +4 -4
  93. data/lib/karafka/pro/encryption/setup/config.rb +1 -1
  94. data/lib/karafka/pro/instrumentation/performance_tracker.rb +3 -3
  95. data/lib/karafka/pro/iterator/expander.rb +1 -1
  96. data/lib/karafka/pro/iterator/tpl_builder.rb +2 -2
  97. data/lib/karafka/pro/iterator.rb +3 -3
  98. data/lib/karafka/pro/loader.rb +1 -1
  99. data/lib/karafka/pro/processing/coordinator.rb +1 -1
  100. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +2 -3
  101. data/lib/karafka/pro/processing/coordinators/filters_applier.rb +3 -3
  102. data/lib/karafka/pro/processing/filters/base.rb +1 -0
  103. data/lib/karafka/pro/processing/filters/delayer.rb +1 -1
  104. data/lib/karafka/pro/processing/filters/expirer.rb +1 -1
  105. data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +1 -1
  106. data/lib/karafka/pro/processing/filters/throttler.rb +1 -1
  107. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +1 -1
  108. data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +1 -1
  109. data/lib/karafka/pro/processing/jobs/periodic.rb +1 -1
  110. data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +1 -1
  111. data/lib/karafka/pro/processing/jobs_builder.rb +1 -1
  112. data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +1 -0
  113. data/lib/karafka/pro/processing/partitioner.rb +1 -1
  114. data/lib/karafka/pro/processing/schedulers/default.rb +2 -4
  115. data/lib/karafka/pro/processing/strategies/base.rb +1 -1
  116. data/lib/karafka/pro/processing/strategies/default.rb +2 -2
  117. data/lib/karafka/pro/processing/strategies/lrj/default.rb +2 -4
  118. data/lib/karafka/pro/processing/strategies/vp/default.rb +2 -4
  119. data/lib/karafka/pro/processing/strategy_selector.rb +1 -0
  120. data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +2 -3
  121. data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +4 -2
  122. data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +4 -2
  123. data/lib/karafka/pro/recurring_tasks/consumer.rb +3 -2
  124. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +4 -6
  125. data/lib/karafka/pro/recurring_tasks/contracts/task.rb +3 -5
  126. data/lib/karafka/pro/recurring_tasks/deserializer.rb +1 -1
  127. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +7 -6
  128. data/lib/karafka/pro/recurring_tasks/executor.rb +2 -1
  129. data/lib/karafka/pro/recurring_tasks/schedule.rb +9 -8
  130. data/lib/karafka/pro/recurring_tasks/serializer.rb +6 -5
  131. data/lib/karafka/pro/recurring_tasks/setup/config.rb +2 -2
  132. data/lib/karafka/pro/recurring_tasks/task.rb +1 -1
  133. data/lib/karafka/pro/recurring_tasks.rb +8 -5
  134. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +2 -4
  135. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +2 -4
  136. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +3 -0
  137. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +2 -4
  138. data/lib/karafka/pro/routing/features/delaying/topic.rb +2 -4
  139. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +4 -8
  140. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +5 -7
  141. data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +7 -6
  142. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +2 -2
  143. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +2 -4
  144. data/lib/karafka/pro/routing/features/expiring/topic.rb +2 -4
  145. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +2 -4
  146. data/lib/karafka/pro/routing/features/filtering/topic.rb +2 -3
  147. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +2 -4
  148. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +2 -4
  149. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +3 -5
  150. data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +1 -1
  151. data/lib/karafka/pro/routing/features/multiplexing.rb +5 -5
  152. data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +3 -3
  153. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +2 -4
  154. data/lib/karafka/pro/routing/features/offset_metadata.rb +4 -4
  155. data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +1 -1
  156. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +2 -4
  157. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +3 -5
  158. data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +2 -4
  159. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +2 -4
  160. data/lib/karafka/pro/routing/features/patterns/patterns.rb +1 -1
  161. data/lib/karafka/pro/routing/features/pausing/config.rb +26 -0
  162. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +17 -11
  163. data/lib/karafka/pro/routing/features/pausing/topic.rb +69 -8
  164. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +2 -4
  165. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +1 -1
  166. data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +1 -1
  167. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +2 -4
  168. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +2 -4
  169. data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +2 -4
  170. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +6 -8
  171. data/lib/karafka/pro/routing/features/swarm.rb +1 -1
  172. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +2 -4
  173. data/lib/karafka/pro/routing/features/throttling/topic.rb +3 -1
  174. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +2 -4
  175. data/lib/karafka/pro/scheduled_messages/consumer.rb +1 -1
  176. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +4 -6
  177. data/lib/karafka/pro/scheduled_messages/contracts/message.rb +3 -5
  178. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +3 -2
  179. data/lib/karafka/pro/scheduled_messages/day.rb +1 -0
  180. data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +1 -1
  181. data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +1 -1
  182. data/lib/karafka/pro/scheduled_messages/max_epoch.rb +1 -0
  183. data/lib/karafka/pro/scheduled_messages/proxy.rb +1 -1
  184. data/lib/karafka/pro/scheduled_messages/serializer.rb +3 -3
  185. data/lib/karafka/pro/scheduled_messages/setup/config.rb +2 -2
  186. data/lib/karafka/pro/scheduled_messages/state.rb +1 -0
  187. data/lib/karafka/pro/scheduled_messages/tracker.rb +1 -0
  188. data/lib/karafka/pro/scheduled_messages.rb +4 -6
  189. data/lib/karafka/pro/swarm/liveness_listener.rb +2 -2
  190. data/lib/karafka/process.rb +4 -4
  191. data/lib/karafka/processing/coordinator.rb +2 -4
  192. data/lib/karafka/processing/coordinators_buffer.rb +2 -3
  193. data/lib/karafka/processing/executor.rb +3 -4
  194. data/lib/karafka/processing/inline_insights/tracker.rb +1 -0
  195. data/lib/karafka/processing/jobs/base.rb +2 -3
  196. data/lib/karafka/processing/jobs_queue.rb +1 -1
  197. data/lib/karafka/processing/result.rb +1 -0
  198. data/lib/karafka/processing/strategy_selector.rb +1 -0
  199. data/lib/karafka/processing/workers_batch.rb +2 -3
  200. data/lib/karafka/railtie.rb +1 -0
  201. data/lib/karafka/routing/activity_manager.rb +3 -2
  202. data/lib/karafka/routing/builder.rb +8 -8
  203. data/lib/karafka/routing/consumer_group.rb +4 -6
  204. data/lib/karafka/routing/contracts/consumer_group.rb +6 -7
  205. data/lib/karafka/routing/contracts/routing.rb +2 -4
  206. data/lib/karafka/routing/contracts/topic.rb +7 -6
  207. data/lib/karafka/routing/features/active_job/contracts/topic.rb +2 -4
  208. data/lib/karafka/routing/features/active_job/topic.rb +6 -0
  209. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +3 -5
  210. data/lib/karafka/routing/features/declaratives/contracts/topic.rb +3 -5
  211. data/lib/karafka/routing/features/declaratives/topic.rb +5 -2
  212. data/lib/karafka/routing/features/deserializers/contracts/topic.rb +2 -4
  213. data/lib/karafka/routing/features/deserializers/topic.rb +3 -3
  214. data/lib/karafka/routing/features/eofed/contracts/topic.rb +2 -4
  215. data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +2 -4
  216. data/lib/karafka/routing/features/inline_insights.rb +5 -5
  217. data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +2 -4
  218. data/lib/karafka/routing/router.rb +1 -1
  219. data/lib/karafka/routing/subscription_group.rb +1 -1
  220. data/lib/karafka/routing/subscription_groups_builder.rb +1 -0
  221. data/lib/karafka/routing/topic.rb +3 -3
  222. data/lib/karafka/routing/topics.rb +4 -9
  223. data/lib/karafka/server.rb +2 -2
  224. data/lib/karafka/setup/attributes_map.rb +4 -2
  225. data/lib/karafka/setup/config.rb +85 -17
  226. data/lib/karafka/setup/config_proxy.rb +209 -0
  227. data/lib/karafka/setup/contracts/config.rb +13 -11
  228. data/lib/karafka/setup/defaults_injector.rb +3 -2
  229. data/lib/karafka/setup/dsl.rb +2 -3
  230. data/lib/karafka/swarm/liveness_listener.rb +3 -3
  231. data/lib/karafka/swarm/manager.rb +7 -6
  232. data/lib/karafka/swarm/node.rb +1 -1
  233. data/lib/karafka/swarm/supervisor.rb +2 -1
  234. data/lib/karafka/time_trackers/base.rb +1 -1
  235. data/lib/karafka/version.rb +1 -1
  236. data/lib/karafka.rb +4 -4
  237. metadata +14 -6
  238. data/.diffend.yml +0 -3
@@ -11,11 +11,12 @@ module Karafka
   #
   # @note You need to setup the `dogstatsd-ruby` client and assign it
   class MetricsListener
-    include ::Karafka::Core::Configurable
+    include Karafka::Core::Configurable
     extend Forwardable

-    def_delegators :config, :client, :rd_kafka_metrics, :namespace,
-                   :default_tags, :distribution_mode
+    def_delegators(
+      :config, :client, :rd_kafka_metrics, :namespace, :default_tags, :distribution_mode
+    )

     # Value object for storing a single rdkafka metric publishing details
     RdKafkaMetric = Struct.new(:type, :scope, :name, :key_location)
@@ -69,10 +70,9 @@ module Karafka
       setup(&block) if block
     end

-    # @param block [Proc] configuration block
     # @note We define this alias to be consistent with `WaterDrop#setup`
-    def setup(&block)
-      configure(&block)
+    def setup(&)
+      configure(&)
     end

     # Hooks up to Karafka instrumentation for emitted statistics
@@ -198,22 +198,21 @@ module Karafka

     # Selects the histogram mode configured and uses it to report to DD client
     # @param key [String] non-namespaced key
-    # @param args [Array] extra arguments to pass to the client
-    def histogram(key, *args)
+    def histogram(key, *)
       case distribution_mode
       when :histogram
         client.histogram(
           namespaced_metric(key),
-          *args
+          *
         )
       when :distribution
         client.distribution(
           namespaced_metric(key),
-          *args
+          *
         )
       else
         raise(
-          ::ArgumentError,
+          ArgumentError,
           'distribution_mode setting value must be either :histogram or :distribution'
         )
       end
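Both hunks above use Ruby's anonymous parameter forwarding: a bare `&` in a method signature forwards the block without naming it (Ruby 3.1+), and a bare `*` re-forwards the positional arguments (Ruby 3.2+). A minimal standalone sketch of the idiom, with illustrative names that are not Karafka's:

    class Reporter
      # Bare `*` re-forwards positional arguments without naming them (Ruby 3.2+)
      def histogram(key, *)
        emit("histogram.#{key}", *)
      end

      # Bare `&` re-forwards the block without naming it (Ruby 3.1+)
      def setup(&)
        configure(&)
      end

      private

      def emit(name, *args)
        puts "#{name} -> #{args.inspect}"
      end

      def configure
        yield self
      end
    end

    r = Reporter.new
    r.histogram('latency', 120, 0.99)                # histogram.latency -> [120, 0.99]
    r.setup { |obj| puts "configured #{obj.class}" } # configured Reporter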
@@ -10,7 +10,7 @@ module Karafka
   # Base Kubernetes Listener providing basic HTTP server capabilities to respond with health
   # statuses
   class BaseListener
-    include ::Karafka::Core::Helpers::Time
+    include Karafka::Core::Helpers::Time

     # All good with Karafka
     OK_CODE = '200 OK'
@@ -26,19 +26,6 @@ module Karafka
   #
   # @note Please use `Kubernetes::SwarmLivenessListener` when operating in the swarm mode
   class LivenessListener < BaseListener
-    # When any of those occurs, it means something went wrong in a way that cannot be
-    # recovered. In such cases we should report that the consumer process is not healthy.
-    # - `fenced` - This instance has been fenced by a newer instance and will not do any
-    #   processing at all never. Fencing most of the time means the instance.group.id has
-    #   been reused without properly terminating the previous consumer process first
-    # - `fatal` - any fatal error that halts the processing forever
-    UNRECOVERABLE_RDKAFKA_ERRORS = [
-      :fenced, # -144
-      :fatal # -150
-    ].freeze
-
-    private_constant :UNRECOVERABLE_RDKAFKA_ERRORS
-
     # @param hostname [String, nil] hostname or nil to bind on all
     # @param port [Integer] TCP port on which we want to run our HTTP status server
     # @param consuming_ttl [Integer] time in ms after which we consider consumption hanging.
@@ -113,8 +100,9 @@ module Karafka

     # We are only interested in the rdkafka errors
     return unless error.is_a?(Rdkafka::RdkafkaError)
-    # We mark as unrecoverable only on certain errors that will not be fixed by retrying
-    return unless UNRECOVERABLE_RDKAFKA_ERRORS.include?(error.code)
+    # When any of those occurs, it means something went wrong in a way that cannot be
+    # recovered. In such cases we should report that the consumer process is not healthy.
+    return unless error.fatal?

     @unrecoverable = error.code
   end
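The removed constant gives way to `error.fatal?`, which defers the "is this recoverable?" decision to librdkafka itself, so fatal codes added in future releases are covered without maintaining a list by hand. A hedged sketch of the same check as a standalone subscriber on Karafka's `error.occurred` notification (the `mark_unhealthy` hook is hypothetical):

    Karafka.monitor.subscribe('error.occurred') do |event|
      error = event[:error]

      # Only rdkafka transport errors are relevant to this health check
      next unless error.is_a?(Rdkafka::RdkafkaError)
      # Fatal errors (e.g. :fenced / -144, :fatal / -150) will not be fixed by retrying
      next unless error.fatal?

      mark_unhealthy(error.code) # hypothetical reporting hook
    end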
@@ -166,9 +154,8 @@ module Karafka
   end

   # Wraps the logic with a mutex
-  # @param block [Proc] code we want to run in mutex
-  def synchronize(&block)
-    @mutex.synchronize(&block)
+  def synchronize(&)
+    @mutex.synchronize(&)
   end

   # @return [Integer] object id of the current thread
@@ -46,12 +46,12 @@ module Karafka
       # @note Message can be from the future in case consumer machine and Kafka cluster drift
       #   apart and the machine is behind the cluster.
       def local_created_at(last_message)
-        now = ::Time.now
+        now = Time.now

         return now unless last_message

         timestamp = last_message.timestamp
-        timestamp > now ? now : timestamp
+        [timestamp, now].min
       end
     end
   end
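`[timestamp, now].min` expresses the clamp directly: the earlier of the two times wins, so a message timestamp from the future is pinned to the local clock. A quick check of the equivalence with illustrative values:

    require 'time'

    now    = Time.parse('2025-01-01 12:00:00 UTC')
    future = now + 60
    past   = now - 60

    [future, now].min == (future > now ? now : future) # => true, clamped to now
    [past, now].min   == (past > now ? now : past)     # => true, past kept as-is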
@@ -28,7 +28,7 @@ module Karafka

       # And nullify it in the kafka message. This can save a lot of memory when used with
       # the Pro Cleaner API
-      kafka_message.instance_variable_set('@payload', nil)
+      kafka_message.instance_variable_set(:@payload, nil)

       # Karafka messages cannot be frozen because of the lazy deserialization feature
       message = Karafka::Messages::Message.new(payload, metadata)
@@ -16,10 +16,9 @@ module Karafka
       @metadata = metadata
     end

-    # @param block [Proc] block we want to execute per each message
     # @note Invocation of this method will not cause loading and deserializing of messages.
-    def each(&block)
-      @messages_array.each(&block)
+    def each(&)
+      @messages_array.each(&)
     end

     # Runs deserialization of all the messages and returns them
@@ -93,29 +93,29 @@ end

 # We need to replace the original callback with ours.
 # At the moment there is no API in rdkafka-ruby to do so
-::Rdkafka::Bindings.send(
+Rdkafka::Bindings.send(
   :remove_const,
   'RebalanceCallback'
 )

-::Rdkafka::Bindings.const_set(
-  'RebalanceCallback',
+Rdkafka::Bindings.const_set(
+  :RebalanceCallback,
   Karafka::Patches::Rdkafka::Bindings::RebalanceCallback
 )

-::Rdkafka::Bindings.attach_function(
+Rdkafka::Bindings.attach_function(
   :rd_kafka_rebalance_protocol,
   %i[pointer],
   :string
 )

-::Rdkafka::Bindings.attach_function(
+Rdkafka::Bindings.attach_function(
   :rd_kafka_incremental_assign,
   %i[pointer pointer],
   :string
 )

-::Rdkafka::Bindings.attach_function(
+Rdkafka::Bindings.attach_function(
   :rd_kafka_incremental_unassign,
   %i[pointer pointer],
   :string
@@ -31,6 +31,6 @@ module Karafka
   end
 end

-::Rdkafka::Opaque.include(
+Rdkafka::Opaque.include(
   Karafka::Patches::Rdkafka::Opaque
 )
@@ -14,7 +14,7 @@ module Karafka
       #
       # It contains slightly better revocation warranties than the regular blocking consumer as
       # it can stop processing batch of jobs in the middle after the revocation.
-      class Consumer < ::Karafka::ActiveJob::Consumer
+      class Consumer < Karafka::ActiveJob::Consumer
         # Runs ActiveJob jobs processing and handles lrj if needed
         def consume
           messages.each(clean: true) do |message|
@@ -25,7 +25,7 @@ module Karafka
             # We cannot early stop when running virtual partitions because the intermediate state
             # would force us not to commit the offsets. This would cause extensive
             # double-processing
-            break if Karafka::App.stopping? && !topic.virtual_partitions?
+            break if ::Karafka::App.stopping? && !topic.virtual_partitions?

             consume_job(message)

@@ -10,7 +10,11 @@ module Karafka
       # Pro dispatcher that sends the ActiveJob job to a proper topic based on the queue name
       # and that allows to inject additional options into the producer, effectively allowing for a
       # much better and more granular control over the dispatch and consumption process.
-      class Dispatcher < ::Karafka::ActiveJob::Dispatcher
+      class Dispatcher < Karafka::ActiveJob::Dispatcher
+        include Helpers::ConfigImporter.new(
+          deserializer: %i[internal active_job deserializer]
+        )
+
         # Defaults for dispatching
         # They can be updated by using `#karafka_options` on the job
         DEFAULTS = {
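`Helpers::ConfigImporter.new(...)` is a module builder: `include` receives an instance of a `Module` subclass that defines one reader per mapping, each resolving a nested path in the framework configuration. A rough sketch of the general technique under hypothetical names (not Karafka's actual implementation):

    # A Module subclass whose instances generate config readers when included
    class ConfigImporter < Module
      def initialize(mappings)
        super()
        mappings.each do |name, path|
          define_method(name) do
            # Walk the nested config node by node along the path
            path.reduce(AppConfig.root) { |node, key| node.public_send(key) }
          end
        end
      end
    end

    # Hypothetical nested config tree standing in for Karafka::App.config
    module AppConfig
      def self.root
        Struct.new(:internal).new(
          Struct.new(:active_job).new(
            Struct.new(:deserializer).new(:my_deserializer)
          )
        )
      end
    end

    class Dispatcher
      include ConfigImporter.new(deserializer: %i[internal active_job deserializer])
    end

    Dispatcher.new.deserializer # => :my_deserializer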
@@ -27,7 +31,7 @@ module Karafka
         # Allows for setting a callable producer since at the moment of defining the class,
         # variants may not be available
         #
-        # We do not initialize it with `-> { ::Karafka.producer }` so we do not have to call it
+        # We do not initialize it with `-> { Karafka.producer }` so we do not have to call it
         # each time for the defaults to preserve CPU cycles.
         #
         # We also do **not** cache the execution of this producer lambda because we want to
@@ -43,7 +47,7 @@ module Karafka
             fetch_option(job, :dispatch_method, DEFAULTS),
             dispatch_details(job).merge!(
               topic: job.queue_name,
-              payload: ::ActiveSupport::JSON.encode(serialize_job(job))
+              payload: serialize_job(job)
             )
           )
         end
@@ -64,7 +68,7 @@ module Karafka

             dispatches[d_method][producer] << dispatch_details(job).merge!(
               topic: job.queue_name,
-              payload: ::ActiveSupport::JSON.encode(serialize_job(job))
+              payload: serialize_job(job)
             )
           end

@@ -90,7 +94,7 @@ module Karafka

           target_message = dispatch_details(job).merge!(
             topic: job.queue_name,
-            payload: ::ActiveSupport::JSON.encode(serialize_job(job))
+            payload: serialize_job(job)
           )

           proxy_message = Pro::ScheduledMessages.schedule(
@@ -118,7 +122,7 @@ module Karafka
         def producer(job)
           dynamic_producer = fetch_option(job, :producer, DEFAULTS)

-          dynamic_producer ? dynamic_producer.call(job) : ::Karafka.producer
+          dynamic_producer ? dynamic_producer.call(job) : Karafka.producer
         end

         # @param job [ActiveJob::Base] job instance
@@ -10,10 +10,8 @@ module Karafka
       # class that works with Pro features.
       class JobOptionsContract < Contracts::Base
         configure do |config|
-          config.error_messages = YAML.safe_load(
-            File.read(
-              File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
-            )
+          config.error_messages = YAML.safe_load_file(
+            File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
           ).fetch('en').fetch('validations').fetch('job_options')
         end

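This refactor recurs across the contract classes below: `YAML.safe_load_file` (Psych 3.3+, bundled with Ruby 3.0+) folds the read-then-parse pair into a single call with the same safe-parsing semantics:

    require 'yaml'

    path = File.join('config', 'locales', 'errors.yml')

    # Before: read the whole file, then parse the string
    YAML.safe_load(File.read(path))

    # After: one call, same restricted (safe) parsing rules
    YAML.safe_load_file(path)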
@@ -17,7 +17,6 @@ module Karafka
       module Messages
         # @param clean [Boolean] do we want to clean each message after we're done working with
         #   it.
-        # @param block [Proc] block we want to execute per each message
         #
         # @note Cleaning messages after we're done with each of them and did not fail does not
         #   affect any other functionalities. The only thing that is crucial is to make sure,
@@ -27,14 +26,14 @@ module Karafka
         # @note This method calls super() to ensure compatibility with other libraries that
         #   may have prepended modules to modify #each behavior. This preserves the method
         #   chain and allows instrumentation libraries to function correctly.
-        def each(clean: false, &block)
+        def each(clean: false, &)
           if clean
             super() do |message|
               yield(message)
               message.clean!
             end
           else
-            super(&block)
+            super(&)
           end
         end
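The branch above hinges on the difference between bare `super` and `super()`: bare `super` implicitly re-passes the current method's arguments, while `super()` passes none, which matters here because the parent `#each` accepts no `clean:` keyword and must receive the wrapping block instead. A toy illustration of the argument behavior:

    class Parent
      def greet(name = 'world')
        puts "hello #{name}"
      end
    end

    class Child < Parent
      def greet(name = 'world')
        super    # bare super: re-passes `name` automatically
        super()  # super(): passes nothing, parent falls back to its default
      end
    end

    Child.new.greet('ruby')
    # hello ruby   <- from bare super
    # hello world  <- from super()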
@@ -19,9 +19,9 @@ module Karafka
       class << self
         # @param _config [Karafka::Core::Configurable::Node] root node config
         def pre_setup(_config)
-          ::Karafka::Messages::Message.prepend(Messages::Message)
-          ::Karafka::Messages::Metadata.prepend(Messages::Metadata)
-          ::Karafka::Messages::Messages.prepend(Messages::Messages)
+          Karafka::Messages::Message.prepend(Messages::Message)
+          Karafka::Messages::Metadata.prepend(Messages::Metadata)
+          Karafka::Messages::Messages.prepend(Messages::Messages)
         end

         # @param _config [Karafka::Core::Configurable::Node] root node config
@@ -10,12 +10,10 @@ module Karafka
       module Contracts
         # Contract for validating correctness of the server cli command options.
         # It differs slightly from the OSS one because it is aware of the routing patterns
-        class Server < ::Karafka::Cli::Contracts::Server
+        class Server < Karafka::Cli::Contracts::Server
           configure do |config|
-            config.error_messages = YAML.safe_load(
-              File.read(
-                File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
-              )
+            config.error_messages = YAML.safe_load_file(
+              File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
             ).fetch('en').fetch('validations').fetch('cli').fetch('server')
           end

@@ -12,6 +12,7 @@ module Karafka
           include Helpers::Colorize

           # @param options [Hash] cli flags options
+          # @option options [Array<String>] :groups consumer group names to work with
           def initialize(options)
             @options = options
           end
@@ -23,12 +24,12 @@ module Karafka

           # Returns consumer groups for parallel segments with which we should be working
           #
-          # @return [Hash<String, Array<Karafka::Routing::ConsumerGroup>>] hash with all parallel
+          # @return [Hash{String => Array<Karafka::Routing::ConsumerGroup>}] hash with all parallel
           #   consumer groups as values and names of segments origin consumer group as the key.
           def applicable_groups
             requested_groups = options[:groups].dup || []

-            workable_groups = ::Karafka::App
+            workable_groups = Karafka::App
               .routes
               .select(&:parallel_segments?)
               .group_by(&:segment_origin)
@@ -46,7 +47,7 @@ module Karafka
                 applicable_groups[requested_group] = workable_group
               else
                 raise(
-                  ::Karafka::Errors::ConsumerGroupNotFoundError,
+                  Karafka::Errors::ConsumerGroupNotFoundError,
                   "Consumer group #{requested_group} was not found"
                 )
               end
@@ -67,8 +68,7 @@ module Karafka
             consumer_groups = [segment_origin, segments.map(&:name)].flatten

             consumer_groups_with_topics = consumer_groups
-              .map { |name| [name, topics_names] }
-              .to_h
+              .to_h { |name| [name, topics_names] }

             lags_with_offsets = Karafka::Admin.read_lags_with_offsets(
               consumer_groups_with_topics
@@ -123,8 +123,8 @@ module Karafka
               inconclusive = true

               puts(
-                " Inconclusive offsets for #{red(topic_name)}##{red(partition_id)}:" \
-                " #{parallel_offsets.to_a.join(', ')}"
+                " Inconclusive offsets for #{red(topic_name)}##{red(partition_id)}: " \
+                "#{parallel_offsets.to_a.join(', ')}"
               )
             end
           end
@@ -132,7 +132,7 @@ module Karafka
             return unless inconclusive

             raise(
-              ::Karafka::Errors::CommandValidationError,
+              Karafka::Errors::CommandValidationError,
               "Parallel segments for #{red(segment_origin)} have #{red('inconclusive')} offsets"
             )
           end
@@ -91,9 +91,9 @@ module Karafka
               next unless offset.to_i.positive?

               raise(
-                ::Karafka::Errors::CommandValidationError,
-                "Parallel segment #{red(cg_name)} already has offset #{red(offset)}" \
-                " set for #{red("#{topic_name}##{partition_id}")}"
+                Karafka::Errors::CommandValidationError,
+                "Parallel segment #{red(cg_name)} already has offset #{red(offset)} " \
+                "set for #{red("#{topic_name}##{partition_id}")}"
               )
             end
           end
@@ -51,7 +51,7 @@ module Karafka
             Collapse.new(options).call
             Distribute.new(options).call
           else
-            raise ::ArgumentError, "Invalid topics action: #{action}"
+            raise ArgumentError, "Invalid topics action: #{action}"
           end
         end
       end
@@ -21,7 +21,7 @@ module Karafka

        # Creates new manager instance
        def initialize
-         super()
+         super
          @mutex = Mutex.new
          @changes = Hash.new do |h, k|
            h[k] = {
@@ -69,7 +69,7 @@ module Karafka
          times = []
          # stateage is in microseconds
          # We monitor broker changes to make sure we do not introduce extra friction
-         times << statistics['brokers'].values.map { |stats| stats['stateage'] }.min / 1_000
+         times << (statistics['brokers'].values.map { |stats| stats['stateage'] }.min / 1_000)
          times << statistics['cgrp']['rebalance_age']
          times << statistics['cgrp']['stateage']

@@ -184,8 +184,7 @@ module Karafka
            .assignments
            .select { |_, partitions| partitions.size > 1 }
            .keys
-           .map(&:subscription_group)
-           .map(&:name)
+           .map { |sg| sg.subscription_group.name }
            .uniq

          # Select connections for scaling up
@@ -12,6 +12,7 @@ module Karafka
        # Listener used to connect listeners manager to the lifecycle events that are significant
        # to its operations
        class Listener
+         # Initializes the multiplexing listener with the connection manager
          def initialize
            @manager = App.config.internal.connection.manager
          end
@@ -8,7 +8,7 @@ module Karafka
     # Pro contracts that aim to replace or complement the general framework contracts
     module Contracts
       # Base for all the Pro contracts
-      class Base < ::Karafka::Contracts::Base
+      class Base < Karafka::Contracts::Base
       end
     end
   end
@@ -12,6 +12,7 @@ module Karafka
          encryption: %i[encryption]
        )

+       # Initializes the cipher with empty private keys cache
        def initialize
          @private_pems = {}
        end
@@ -35,7 +36,7 @@ module Karafka

        # @return [::OpenSSL::PKey::RSA] rsa public key
        def public_pem
-         @public_pem ||= ::OpenSSL::PKey::RSA.new(encryption.public_key)
+         @public_pem ||= OpenSSL::PKey::RSA.new(encryption.public_key)
        end

        # @param version [String] version for which we want to get the rsa key
@@ -46,7 +47,7 @@ module Karafka
          key_string = encryption.private_keys[version]
          key_string || raise(Errors::PrivateKeyNotFoundError, version)

-         @private_pems[version] = ::OpenSSL::PKey::RSA.new(key_string)
+         @private_pems[version] = OpenSSL::PKey::RSA.new(key_string)
        end
      end
    end
@@ -9,12 +9,10 @@ module Karafka
      # Encryption related contracts
      module Contracts
        # Makes sure, all the expected config is defined as it should be
-       class Config < ::Karafka::Contracts::Base
+       class Config < Karafka::Contracts::Base
          configure do |config|
-           config.error_messages = YAML.safe_load(
-             File.read(
-               File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
-             )
+           config.error_messages = YAML.safe_load_file(
+             File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
            ).fetch('en').fetch('validations').fetch('setup').fetch('config')
          end

@@ -26,8 +24,8 @@ module Karafka

          required(:private_keys) do |val|
            val.is_a?(Hash) &&
-             val.keys.all? { |key| key.is_a?(String) } &&
-             val.values.all? { |key| key.is_a?(String) }
+             val.keys.all?(String) &&
+             val.values.all?(String)
          end
        end

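`all?(String)` is the pattern form of the predicate: the argument is matched against every element with `===`, so a class argument performs an `is_a?` check without a per-element block. The forms are equivalent:

    keys = %w[v1 v2]

    keys.all?(String)                     # => true
    keys.all? { |key| key.is_a?(String) } # => true, same result

    # Any case-equality pattern works the same way:
    keys.all?(/\Av\d\z/)                  # => true
    [1, 2, 3].all?(1..10)                 # => true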
@@ -11,7 +11,7 @@ module Karafka
      # @note There may be a case where someone decides not to encrypt data and we start getting
      #   unencrypted payloads. That is why we always rely on message headers for encryption
      #   indication.
-     class Parser < ::Karafka::Messages::Parser
+     class Parser < Karafka::Messages::Parser
        include Helpers::ConfigImporter.new(
          cipher: %i[encryption cipher],
          active: %i[encryption active],
@@ -25,7 +25,7 @@ module Karafka
          encryption = headers['encryption']
          fingerprint = headers['encryption_fingerprint']

-         return super(message) unless active && encryption
+         return super unless active && encryption

          # Decrypt raw payload so it can be handled by the default parser logic
          decrypted_payload = cipher.decrypt(
@@ -35,11 +35,11 @@ module Karafka

          message.raw_payload = decrypted_payload

-         return super(message) unless fingerprint && fingerprinter
+         return super unless fingerprint && fingerprinter

          message_fingerprint = fingerprinter.hexdigest(decrypted_payload)

-         return super(message) if message_fingerprint == fingerprint
+         return super if message_fingerprint == fingerprint

          raise(Errors::FingerprintVerificationError, message.to_s)
        end
@@ -10,7 +10,7 @@ module Karafka
      module Setup
        # Config for encryption
        class Config
-         extend ::Karafka::Core::Configurable
+         extend Karafka::Core::Configurable

          # Should this feature be in use
          setting(:active, default: false)
@@ -50,7 +50,7 @@ module Karafka
          partition = messages.metadata.partition

          samples = @processing_times[topic][partition]
-         samples << event[:time] / messages.size
+         samples << (event[:time] / messages.size)

          return unless samples.size > SAMPLES_COUNT

@@ -66,8 +66,8 @@ module Karafka
        def percentile(percentile, values)
          values_sorted = values.sort

-         floor = (percentile * (values_sorted.length - 1) + 1).floor - 1
-         mod = (percentile * (values_sorted.length - 1) + 1).modulo(1)
+         floor = ((percentile * (values_sorted.length - 1)) + 1).floor - 1
+         mod = ((percentile * (values_sorted.length - 1)) + 1).modulo(1)

          values_sorted[floor] + (mod * (values_sorted[floor + 1] - values_sorted[floor]))
        end
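The added parentheses only make the operator precedence explicit; the interpolation itself is unchanged. Worked through for the 95th percentile of five sorted samples: the fractional rank is 0.95 * (5 - 1) + 1 = 4.8, so the result sits 80% of the way between the 4th and 5th values:

    values_sorted = [10, 20, 30, 40, 50]
    percentile = 0.95

    rank  = (percentile * (values_sorted.length - 1)) + 1 # => 4.8
    floor = rank.floor - 1                                # => 3 (0-based index)
    mod   = rank.modulo(1)                                # => ~0.8

    values_sorted[floor] + (mod * (values_sorted[floor + 1] - values_sorted[floor]))
    # => ~48.0, between 40 and 50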
@@ -62,7 +62,7 @@ module Karafka

          # If we've got just array with topics, we need to convert that into a representation
          # that we can expand with offsets
-         topics = topics.map { |name| [name, false] }.to_h if topics.is_a?(Array)
+         topics = topics.to_h { |name| [name, false] } if topics.is_a?(Array)
          # We remap by creating new hash, just in case the hash came as the argument for this
          # expanded. We do not want to modify user provided hash
          topics.transform_keys(&:to_s)
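`map { ... }.to_h` materializes an intermediate array of pairs before building the hash, while `to_h` with a block (Ruby 2.6+) builds the hash in one pass; the remaining `to_h { ... }` rewrites in this diff follow the same pattern:

    topics = %w[orders payments]

    # Before: intermediate array of [key, value] pairs
    topics.map { |name| [name, false] }.to_h # => {"orders"=>false, "payments"=>false}

    # After: one pass, no intermediate array
    topics.to_h { |name| [name, false] }     # => {"orders"=>false, "payments"=>false}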
@@ -22,7 +22,7 @@ module Karafka
        # @param consumer [::Rdkafka::Consumer] consumer instance needed to talk with Kafka
        # @param expanded_topics [Hash] hash with expanded and normalized topics data
        def initialize(consumer, expanded_topics)
-         @consumer = ::Karafka::Connection::Proxy.new(consumer)
+         @consumer = Karafka::Connection::Proxy.new(consumer)
          @expanded_topics = expanded_topics
          @mapped_topics = Hash.new { |h, k| h[k] = {} }
        end
@@ -56,7 +56,7 @@ module Karafka
            next unless partitions.is_a?(Array) || partitions.is_a?(Range)

            # When no offsets defined, we just start from zero
-           @mapped_topics[name] = partitions.map { |partition| [partition, 0] }.to_h
+           @mapped_topics[name] = partitions.to_h { |partition| [partition, 0] }
          end
        end

@@ -43,9 +43,9 @@ module Karafka
          )
          @topics_with_partitions = Expander.new.call(topics)

-         @routing_topics = @topics_with_partitions.map do |name, _|
-           [name, ::Karafka::Routing::Router.find_or_initialize_by_name(name)]
-         end.to_h
+         @routing_topics = @topics_with_partitions.to_h do |name, _|
+           [name, Karafka::Routing::Router.find_or_initialize_by_name(name)]
+         end

          @total_partitions = @topics_with_partitions.map(&:last).sum(&:count)

@@ -120,7 +120,7 @@ module Karafka
      # Loads the Pro features of Karafka
      # @note Object space lookup is not the fastest but we do it once during boot, so it's ok
      def load_topic_features
-       ::Karafka::Pro::Routing::Features::Base.load_all
+       Karafka::Pro::Routing::Features::Base.load_all
      end
    end
  end