karafka 2.5.4 → 2.5.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (260)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +3 -0
  3. data/LICENSE-COMM +4 -2
  4. data/lib/karafka/admin/acl.rb +127 -80
  5. data/lib/karafka/admin/configs.rb +84 -70
  6. data/lib/karafka/admin/consumer_groups.rb +377 -330
  7. data/lib/karafka/admin/replication.rb +287 -263
  8. data/lib/karafka/admin/topics.rb +232 -186
  9. data/lib/karafka/admin.rb +277 -117
  10. data/lib/karafka/pro/active_job/consumer.rb +19 -2
  11. data/lib/karafka/pro/active_job/dispatcher.rb +19 -2
  12. data/lib/karafka/pro/active_job/job_options_contract.rb +19 -2
  13. data/lib/karafka/pro/base_consumer.rb +19 -2
  14. data/lib/karafka/pro/cleaner/errors.rb +19 -2
  15. data/lib/karafka/pro/cleaner/messages/message.rb +19 -2
  16. data/lib/karafka/pro/cleaner/messages/messages.rb +19 -2
  17. data/lib/karafka/pro/cleaner/messages/metadata.rb +19 -2
  18. data/lib/karafka/pro/cleaner.rb +19 -2
  19. data/lib/karafka/pro/cli/contracts/server.rb +19 -2
  20. data/lib/karafka/pro/cli/parallel_segments/base.rb +19 -2
  21. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +19 -2
  22. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +19 -2
  23. data/lib/karafka/pro/cli/parallel_segments.rb +19 -2
  24. data/lib/karafka/pro/connection/manager.rb +19 -2
  25. data/lib/karafka/pro/connection/multiplexing/listener.rb +19 -2
  26. data/lib/karafka/pro/contracts/base.rb +19 -2
  27. data/lib/karafka/pro/encryption/cipher.rb +19 -2
  28. data/lib/karafka/pro/encryption/contracts/config.rb +19 -2
  29. data/lib/karafka/pro/encryption/errors.rb +19 -2
  30. data/lib/karafka/pro/encryption/messages/middleware.rb +19 -2
  31. data/lib/karafka/pro/encryption/messages/parser.rb +19 -2
  32. data/lib/karafka/pro/encryption/setup/config.rb +19 -2
  33. data/lib/karafka/pro/encryption.rb +19 -2
  34. data/lib/karafka/pro/instrumentation/performance_tracker.rb +19 -2
  35. data/lib/karafka/pro/iterator/expander.rb +19 -2
  36. data/lib/karafka/pro/iterator/tpl_builder.rb +19 -2
  37. data/lib/karafka/pro/iterator.rb +19 -2
  38. data/lib/karafka/pro/loader.rb +19 -2
  39. data/lib/karafka/pro/processing/adaptive_iterator/consumer.rb +19 -2
  40. data/lib/karafka/pro/processing/adaptive_iterator/tracker.rb +19 -2
  41. data/lib/karafka/pro/processing/collapser.rb +19 -2
  42. data/lib/karafka/pro/processing/coordinator.rb +19 -2
  43. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +19 -2
  44. data/lib/karafka/pro/processing/coordinators/filters_applier.rb +19 -2
  45. data/lib/karafka/pro/processing/coordinators/virtual_offset_manager.rb +19 -2
  46. data/lib/karafka/pro/processing/executor.rb +19 -2
  47. data/lib/karafka/pro/processing/expansions_selector.rb +19 -2
  48. data/lib/karafka/pro/processing/filters/base.rb +19 -2
  49. data/lib/karafka/pro/processing/filters/delayer.rb +19 -2
  50. data/lib/karafka/pro/processing/filters/expirer.rb +19 -2
  51. data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +19 -2
  52. data/lib/karafka/pro/processing/filters/throttler.rb +19 -2
  53. data/lib/karafka/pro/processing/filters/virtual_limiter.rb +19 -2
  54. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +19 -2
  55. data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +19 -2
  56. data/lib/karafka/pro/processing/jobs/periodic.rb +19 -2
  57. data/lib/karafka/pro/processing/jobs/periodic_non_blocking.rb +19 -2
  58. data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +19 -2
  59. data/lib/karafka/pro/processing/jobs_builder.rb +19 -2
  60. data/lib/karafka/pro/processing/jobs_queue.rb +19 -2
  61. data/lib/karafka/pro/processing/offset_metadata/consumer.rb +19 -2
  62. data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +19 -2
  63. data/lib/karafka/pro/processing/offset_metadata/listener.rb +19 -2
  64. data/lib/karafka/pro/processing/parallel_segments/filters/base.rb +19 -2
  65. data/lib/karafka/pro/processing/parallel_segments/filters/default.rb +19 -2
  66. data/lib/karafka/pro/processing/parallel_segments/filters/mom.rb +19 -2
  67. data/lib/karafka/pro/processing/partitioner.rb +19 -2
  68. data/lib/karafka/pro/processing/periodic_job/consumer.rb +19 -2
  69. data/lib/karafka/pro/processing/piping/consumer.rb +19 -2
  70. data/lib/karafka/pro/processing/schedulers/base.rb +19 -2
  71. data/lib/karafka/pro/processing/schedulers/default.rb +19 -2
  72. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +19 -2
  73. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +19 -2
  74. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +19 -2
  75. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +19 -2
  76. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +19 -2
  77. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +19 -2
  78. data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +19 -2
  79. data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +19 -2
  80. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom.rb +19 -2
  81. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +19 -2
  82. data/lib/karafka/pro/processing/strategies/aj/ftr_mom.rb +19 -2
  83. data/lib/karafka/pro/processing/strategies/aj/ftr_mom_vp.rb +19 -2
  84. data/lib/karafka/pro/processing/strategies/aj/lrj_mom.rb +19 -2
  85. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +19 -2
  86. data/lib/karafka/pro/processing/strategies/aj/mom.rb +19 -2
  87. data/lib/karafka/pro/processing/strategies/aj/mom_vp.rb +19 -2
  88. data/lib/karafka/pro/processing/strategies/base.rb +19 -2
  89. data/lib/karafka/pro/processing/strategies/default.rb +19 -2
  90. data/lib/karafka/pro/processing/strategies/dlq/default.rb +19 -2
  91. data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +19 -2
  92. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +19 -2
  93. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +19 -2
  94. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom_vp.rb +19 -2
  95. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_vp.rb +19 -2
  96. data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +19 -2
  97. data/lib/karafka/pro/processing/strategies/dlq/ftr_mom_vp.rb +19 -2
  98. data/lib/karafka/pro/processing/strategies/dlq/ftr_vp.rb +19 -2
  99. data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +19 -2
  100. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +19 -2
  101. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom_vp.rb +19 -2
  102. data/lib/karafka/pro/processing/strategies/dlq/lrj_vp.rb +19 -2
  103. data/lib/karafka/pro/processing/strategies/dlq/mom.rb +19 -2
  104. data/lib/karafka/pro/processing/strategies/dlq/mom_vp.rb +19 -2
  105. data/lib/karafka/pro/processing/strategies/dlq/vp.rb +19 -2
  106. data/lib/karafka/pro/processing/strategies/ftr/default.rb +19 -2
  107. data/lib/karafka/pro/processing/strategies/ftr/vp.rb +19 -2
  108. data/lib/karafka/pro/processing/strategies/lrj/default.rb +19 -2
  109. data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +19 -2
  110. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +19 -2
  111. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom_vp.rb +19 -2
  112. data/lib/karafka/pro/processing/strategies/lrj/ftr_vp.rb +19 -2
  113. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +19 -2
  114. data/lib/karafka/pro/processing/strategies/lrj/mom_vp.rb +19 -2
  115. data/lib/karafka/pro/processing/strategies/lrj/vp.rb +19 -2
  116. data/lib/karafka/pro/processing/strategies/mom/default.rb +19 -2
  117. data/lib/karafka/pro/processing/strategies/mom/ftr.rb +19 -2
  118. data/lib/karafka/pro/processing/strategies/mom/ftr_vp.rb +19 -2
  119. data/lib/karafka/pro/processing/strategies/mom/vp.rb +19 -2
  120. data/lib/karafka/pro/processing/strategies/vp/default.rb +19 -2
  121. data/lib/karafka/pro/processing/strategies.rb +19 -2
  122. data/lib/karafka/pro/processing/strategy_selector.rb +19 -2
  123. data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +19 -2
  124. data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +19 -2
  125. data/lib/karafka/pro/processing/virtual_partitions/distributors/base.rb +19 -2
  126. data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +19 -2
  127. data/lib/karafka/pro/recurring_tasks/consumer.rb +19 -2
  128. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +19 -2
  129. data/lib/karafka/pro/recurring_tasks/contracts/task.rb +19 -2
  130. data/lib/karafka/pro/recurring_tasks/deserializer.rb +19 -2
  131. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +19 -2
  132. data/lib/karafka/pro/recurring_tasks/errors.rb +19 -2
  133. data/lib/karafka/pro/recurring_tasks/executor.rb +19 -2
  134. data/lib/karafka/pro/recurring_tasks/listener.rb +19 -2
  135. data/lib/karafka/pro/recurring_tasks/matcher.rb +19 -2
  136. data/lib/karafka/pro/recurring_tasks/schedule.rb +19 -2
  137. data/lib/karafka/pro/recurring_tasks/serializer.rb +19 -2
  138. data/lib/karafka/pro/recurring_tasks/setup/config.rb +19 -2
  139. data/lib/karafka/pro/recurring_tasks/task.rb +19 -2
  140. data/lib/karafka/pro/recurring_tasks.rb +19 -2
  141. data/lib/karafka/pro/routing/features/active_job/builder.rb +19 -2
  142. data/lib/karafka/pro/routing/features/active_job.rb +19 -2
  143. data/lib/karafka/pro/routing/features/adaptive_iterator/config.rb +19 -2
  144. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +19 -2
  145. data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb +19 -2
  146. data/lib/karafka/pro/routing/features/adaptive_iterator.rb +19 -2
  147. data/lib/karafka/pro/routing/features/base.rb +19 -2
  148. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +19 -2
  149. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +19 -2
  150. data/lib/karafka/pro/routing/features/dead_letter_queue.rb +19 -2
  151. data/lib/karafka/pro/routing/features/delaying/config.rb +19 -2
  152. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +19 -2
  153. data/lib/karafka/pro/routing/features/delaying/topic.rb +19 -2
  154. data/lib/karafka/pro/routing/features/delaying.rb +19 -2
  155. data/lib/karafka/pro/routing/features/direct_assignments/config.rb +19 -2
  156. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +19 -2
  157. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +19 -2
  158. data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +19 -2
  159. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +19 -2
  160. data/lib/karafka/pro/routing/features/direct_assignments.rb +19 -2
  161. data/lib/karafka/pro/routing/features/expiring/config.rb +19 -2
  162. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +19 -2
  163. data/lib/karafka/pro/routing/features/expiring/topic.rb +19 -2
  164. data/lib/karafka/pro/routing/features/expiring.rb +19 -2
  165. data/lib/karafka/pro/routing/features/filtering/config.rb +19 -2
  166. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +19 -2
  167. data/lib/karafka/pro/routing/features/filtering/topic.rb +19 -2
  168. data/lib/karafka/pro/routing/features/filtering.rb +19 -2
  169. data/lib/karafka/pro/routing/features/inline_insights/config.rb +19 -2
  170. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +19 -2
  171. data/lib/karafka/pro/routing/features/inline_insights/topic.rb +19 -2
  172. data/lib/karafka/pro/routing/features/inline_insights.rb +19 -2
  173. data/lib/karafka/pro/routing/features/long_running_job/config.rb +19 -2
  174. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +19 -2
  175. data/lib/karafka/pro/routing/features/long_running_job/topic.rb +19 -2
  176. data/lib/karafka/pro/routing/features/long_running_job.rb +19 -2
  177. data/lib/karafka/pro/routing/features/multiplexing/config.rb +19 -2
  178. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +19 -2
  179. data/lib/karafka/pro/routing/features/multiplexing/patches/contracts/consumer_group.rb +19 -2
  180. data/lib/karafka/pro/routing/features/multiplexing/proxy.rb +19 -2
  181. data/lib/karafka/pro/routing/features/multiplexing/subscription_group.rb +19 -2
  182. data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +19 -2
  183. data/lib/karafka/pro/routing/features/multiplexing.rb +19 -2
  184. data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +19 -2
  185. data/lib/karafka/pro/routing/features/non_blocking_job.rb +19 -2
  186. data/lib/karafka/pro/routing/features/offset_metadata/config.rb +19 -2
  187. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +19 -2
  188. data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +19 -2
  189. data/lib/karafka/pro/routing/features/offset_metadata.rb +19 -2
  190. data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +19 -2
  191. data/lib/karafka/pro/routing/features/parallel_segments/config.rb +19 -2
  192. data/lib/karafka/pro/routing/features/parallel_segments/consumer_group.rb +19 -2
  193. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +19 -2
  194. data/lib/karafka/pro/routing/features/parallel_segments/topic.rb +19 -2
  195. data/lib/karafka/pro/routing/features/parallel_segments.rb +19 -2
  196. data/lib/karafka/pro/routing/features/patterns/builder.rb +19 -2
  197. data/lib/karafka/pro/routing/features/patterns/config.rb +19 -2
  198. data/lib/karafka/pro/routing/features/patterns/consumer_group.rb +19 -2
  199. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +19 -2
  200. data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +19 -2
  201. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +19 -2
  202. data/lib/karafka/pro/routing/features/patterns/detector.rb +19 -2
  203. data/lib/karafka/pro/routing/features/patterns/pattern.rb +19 -2
  204. data/lib/karafka/pro/routing/features/patterns/patterns.rb +19 -2
  205. data/lib/karafka/pro/routing/features/patterns/topic.rb +19 -2
  206. data/lib/karafka/pro/routing/features/patterns/topics.rb +19 -2
  207. data/lib/karafka/pro/routing/features/patterns.rb +19 -2
  208. data/lib/karafka/pro/routing/features/pausing/config.rb +19 -2
  209. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +19 -2
  210. data/lib/karafka/pro/routing/features/pausing/topic.rb +19 -2
  211. data/lib/karafka/pro/routing/features/pausing.rb +19 -2
  212. data/lib/karafka/pro/routing/features/periodic_job/config.rb +19 -2
  213. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +19 -2
  214. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +19 -2
  215. data/lib/karafka/pro/routing/features/periodic_job.rb +19 -2
  216. data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +19 -2
  217. data/lib/karafka/pro/routing/features/recurring_tasks/config.rb +19 -2
  218. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +19 -2
  219. data/lib/karafka/pro/routing/features/recurring_tasks/proxy.rb +19 -2
  220. data/lib/karafka/pro/routing/features/recurring_tasks/topic.rb +19 -2
  221. data/lib/karafka/pro/routing/features/recurring_tasks.rb +19 -2
  222. data/lib/karafka/pro/routing/features/scheduled_messages/builder.rb +19 -2
  223. data/lib/karafka/pro/routing/features/scheduled_messages/config.rb +19 -2
  224. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +19 -2
  225. data/lib/karafka/pro/routing/features/scheduled_messages/proxy.rb +19 -2
  226. data/lib/karafka/pro/routing/features/scheduled_messages/topic.rb +19 -2
  227. data/lib/karafka/pro/routing/features/scheduled_messages.rb +19 -2
  228. data/lib/karafka/pro/routing/features/swarm/config.rb +19 -2
  229. data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +19 -2
  230. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +19 -2
  231. data/lib/karafka/pro/routing/features/swarm/topic.rb +19 -2
  232. data/lib/karafka/pro/routing/features/swarm.rb +19 -2
  233. data/lib/karafka/pro/routing/features/throttling/config.rb +19 -2
  234. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +19 -2
  235. data/lib/karafka/pro/routing/features/throttling/topic.rb +19 -2
  236. data/lib/karafka/pro/routing/features/throttling.rb +19 -2
  237. data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +19 -2
  238. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +19 -2
  239. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +19 -2
  240. data/lib/karafka/pro/routing/features/virtual_partitions.rb +19 -2
  241. data/lib/karafka/pro/scheduled_messages/consumer.rb +19 -2
  242. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +19 -2
  243. data/lib/karafka/pro/scheduled_messages/contracts/message.rb +19 -2
  244. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +19 -2
  245. data/lib/karafka/pro/scheduled_messages/day.rb +19 -2
  246. data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +19 -2
  247. data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +19 -2
  248. data/lib/karafka/pro/scheduled_messages/dispatcher.rb +19 -2
  249. data/lib/karafka/pro/scheduled_messages/errors.rb +19 -2
  250. data/lib/karafka/pro/scheduled_messages/max_epoch.rb +19 -2
  251. data/lib/karafka/pro/scheduled_messages/proxy.rb +19 -2
  252. data/lib/karafka/pro/scheduled_messages/schema_validator.rb +19 -2
  253. data/lib/karafka/pro/scheduled_messages/serializer.rb +19 -2
  254. data/lib/karafka/pro/scheduled_messages/setup/config.rb +19 -2
  255. data/lib/karafka/pro/scheduled_messages/state.rb +19 -2
  256. data/lib/karafka/pro/scheduled_messages/tracker.rb +19 -2
  257. data/lib/karafka/pro/scheduled_messages.rb +19 -2
  258. data/lib/karafka/pro/swarm/liveness_listener.rb +19 -2
  259. data/lib/karafka/version.rb +1 -1
  260. metadata +2 -2
@@ -18,401 +18,448 @@ module Karafka
  private_constant :LONG_TIME_AGO, :DAY_IN_SECONDS

  class << self
- # Moves the offset on a given consumer group and provided topic to the requested location
- #
- # @param consumer_group_id [String] id of the consumer group for which we want to move the
- # existing offset
- # @param topics_with_partitions_and_offsets [Hash] Hash with list of topics and settings to
- # where to move given consumer. It allows us to move particular partitions or whole
- # topics if we want to reset all partitions to for example a point in time.
- #
- # @return [void]
- #
- # @note This method should **not** be executed on a running consumer group as it creates a
- # "fake" consumer and uses it to move offsets.
- #
- # @example Move a single topic partition nr 1 offset to 100
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 100 } })
- #
- # @example Move offsets on all partitions of a topic to 100
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 100 })
- #
- # @example Move offset to 5 seconds ago on partition 2
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 2 => 5.seconds.ago } })
- #
- # @example Move to the earliest offset on all the partitions of a topic
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 'earliest' })
- #
- # @example Move to the latest (high-watermark) offset on all the partitions of a topic
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 'latest' })
- #
- # @example Move offset of a single partition to earliest
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 'earliest' } })
- #
- # @example Move offset of a single partition to latest
- # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 'latest' } })
+ # @param consumer_group_id [String] consumer group for which we want to move offsets
+ # @param topics_with_partitions_and_offsets [Hash] hash with topics and settings
+ # @see #seek
  def seek(consumer_group_id, topics_with_partitions_and_offsets)
- tpl_base = {}
+ new.seek(consumer_group_id, topics_with_partitions_and_offsets)
+ end

- # Normalize the data so we always have all partitions and topics in the same format
- # That is in a format where we have topics and all partitions with their per partition
- # assigned offsets
- topics_with_partitions_and_offsets.each do |topic, partitions_with_offsets|
- tpl_base[topic] = {}
+ # @param previous_name [String] old consumer group name
+ # @param new_name [String] new consumer group name
+ # @param topics [Array<String>] topics for which we want to copy offsets
+ # @see #copy
+ def copy(previous_name, new_name, topics)
+ new.copy(previous_name, new_name, topics)
+ end

- if partitions_with_offsets.is_a?(Hash)
- tpl_base[topic] = partitions_with_offsets
- else
- topic_info = Topics.info(topic)
- topic_info[:partition_count].times do |partition|
- tpl_base[topic][partition] = partitions_with_offsets
- end
- end
- end
+ # @param previous_name [String] old consumer group name
+ # @param new_name [String] new consumer group name
+ # @param topics [Array<String>] topics for which we want to migrate offsets
+ # @param delete_previous [Boolean] should we delete previous consumer group after rename
+ # @see #rename
+ def rename(previous_name, new_name, topics, delete_previous: true)
+ new.rename(previous_name, new_name, topics, delete_previous: delete_previous)
+ end

- tpl_base.each_value do |partitions|
- partitions.transform_values! do |position|
- # Support both symbol and string based references
- casted_position = position.is_a?(Symbol) ? position.to_s : position
-
- # This remap allows us to transform some special cases in a reference that can be
- # understood by Kafka
- case casted_position
- # Earliest is not always 0. When compacting/deleting it can be much later, that's why
- # we fetch the oldest possible offset
- # false is treated the same as 'earliest'
- when 'earliest', false
- LONG_TIME_AGO
- # Latest will always be the high-watermark offset and we can get it just by getting
- # a future position
- when 'latest'
- Time.now + DAY_IN_SECONDS
- # Regular offset case
- else
- position
- end
- end
- end
+ # @param consumer_group_id [String] consumer group name
+ # @see #delete
+ def delete(consumer_group_id)
+ new.delete(consumer_group_id)
+ end

- tpl = Rdkafka::Consumer::TopicPartitionList.new
- # In case of time based location, we need to to a pre-resolution, that's why we keep it
- # separately
- time_tpl = Rdkafka::Consumer::TopicPartitionList.new
-
- # Distribute properly the offset type
- tpl_base.each do |topic, partitions_with_offsets|
- partitions_with_offsets.each do |partition, offset|
- target = offset.is_a?(Time) ? time_tpl : tpl
- # We reverse and uniq to make sure that potentially duplicated references are removed
- # in such a way that the newest stays
- target.to_h[topic] ||= []
- target.to_h[topic] << Rdkafka::Consumer::Partition.new(partition, offset)
- target.to_h[topic].reverse!
- target.to_h[topic].uniq!(&:partition)
- target.to_h[topic].reverse!
+ # @param consumer_group_id [String] consumer group id to trigger rebalance for
+ # @see #trigger_rebalance
+ def trigger_rebalance(consumer_group_id)
+ new.trigger_rebalance(consumer_group_id)
+ end
+
+ # @param consumer_groups_with_topics [Hash{String => Array<String>}] hash with consumer
+ # groups names with array of topics
+ # @param active_topics_only [Boolean] if set to false, will select also inactive topics
+ # @see #read_lags_with_offsets
+ def read_lags_with_offsets(consumer_groups_with_topics = {}, active_topics_only: true)
+ new.read_lags_with_offsets(
+ consumer_groups_with_topics,
+ active_topics_only: active_topics_only
+ )
+ end
+ end
+
+ # Moves the offset on a given consumer group and provided topic to the requested location
+ #
+ # @param consumer_group_id [String] id of the consumer group for which we want to move the
+ # existing offset
+ # @param topics_with_partitions_and_offsets [Hash] Hash with list of topics and settings to
+ # where to move given consumer. It allows us to move particular partitions or whole
+ # topics if we want to reset all partitions to for example a point in time.
+ #
+ # @return [void]
+ #
+ # @note This method should **not** be executed on a running consumer group as it creates a
+ # "fake" consumer and uses it to move offsets.
+ #
+ # @example Move a single topic partition nr 1 offset to 100
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 100 } })
+ #
+ # @example Move offsets on all partitions of a topic to 100
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 100 })
+ #
+ # @example Move offset to 5 seconds ago on partition 2
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 2 => 5.seconds.ago } })
+ #
+ # @example Move to the earliest offset on all the partitions of a topic
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 'earliest' })
+ #
+ # @example Move to the latest (high-watermark) offset on all the partitions of a topic
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => 'latest' })
+ #
+ # @example Move offset of a single partition to earliest
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 'earliest' } })
+ #
+ # @example Move offset of a single partition to latest
+ # Karafka::Admin::ConsumerGroups.seek('group-id', { 'topic' => { 1 => 'latest' } })
+ def seek(consumer_group_id, topics_with_partitions_and_offsets)
+ tpl_base = {}
+
+ # Normalize the data so we always have all partitions and topics in the same format
+ # That is in a format where we have topics and all partitions with their per partition
+ # assigned offsets
+ topics_with_partitions_and_offsets.each do |topic, partitions_with_offsets|
+ tpl_base[topic] = {}
+
+ if partitions_with_offsets.is_a?(Hash)
+ tpl_base[topic] = partitions_with_offsets
+ else
+ topic_info = Topics.new(kafka: @custom_kafka).info(topic)
+ topic_info[:partition_count].times do |partition|
+ tpl_base[topic][partition] = partitions_with_offsets
  end
  end
+ end

- settings = { 'group.id': consumer_group_id }
-
- with_consumer(settings) do |consumer|
- # If we have any time based stuff to resolve, we need to do it prior to commits
- unless time_tpl.empty?
- real_offsets = consumer.offsets_for_times(time_tpl)
-
- real_offsets.to_h.each do |name, results|
- results.each do |result|
- raise(Errors::InvalidTimeBasedOffsetError) unless result
-
- partition = result.partition
-
- # Negative offset means we're beyond last message and we need to query for the
- # high watermark offset to get the most recent offset and move there
- if result.offset.negative?
- _, offset = consumer.query_watermark_offsets(name, result.partition)
- else
- # If we get an offset, it means there existed a message close to this time
- # location
- offset = result.offset
- end
-
- # Since now we have proper offsets, we can add this to the final tpl for commit
- tpl.to_h[name] ||= []
- tpl.to_h[name] << Rdkafka::Consumer::Partition.new(partition, offset)
- tpl.to_h[name].reverse!
- tpl.to_h[name].uniq!(&:partition)
- tpl.to_h[name].reverse!
- end
- end
+ tpl_base.each_value do |partitions|
+ partitions.transform_values! do |position|
+ # Support both symbol and string based references
+ casted_position = position.is_a?(Symbol) ? position.to_s : position
+
+ # This remap allows us to transform some special cases in a reference that can be
+ # understood by Kafka
+ case casted_position
+ # Earliest is not always 0. When compacting/deleting it can be much later, that's why
+ # we fetch the oldest possible offset
+ # false is treated the same as 'earliest'
+ when 'earliest', false
+ LONG_TIME_AGO
+ # Latest will always be the high-watermark offset and we can get it just by getting
+ # a future position
+ when 'latest'
+ Time.now + DAY_IN_SECONDS
+ # Regular offset case
+ else
+ position
  end
+ end
+ end

- consumer.commit_offsets(tpl, async: false)
+ tpl = Rdkafka::Consumer::TopicPartitionList.new
+ # In case of time based location, we need to to a pre-resolution, that's why we keep it
+ # separately
+ time_tpl = Rdkafka::Consumer::TopicPartitionList.new
+
+ # Distribute properly the offset type
+ tpl_base.each do |topic, partitions_with_offsets|
+ partitions_with_offsets.each do |partition, offset|
+ target = offset.is_a?(Time) ? time_tpl : tpl
+ # We reverse and uniq to make sure that potentially duplicated references are removed
+ # in such a way that the newest stays
+ target.to_h[topic] ||= []
+ target.to_h[topic] << Rdkafka::Consumer::Partition.new(partition, offset)
+ target.to_h[topic].reverse!
+ target.to_h[topic].uniq!(&:partition)
+ target.to_h[topic].reverse!
  end
  end

- # Takes consumer group and its topics and copies all the offsets to a new named group
- #
- # @param previous_name [String] old consumer group name
- # @param new_name [String] new consumer group name
- # @param topics [Array<String>] topics for which we want to migrate offsets during rename
- #
- # @return [Boolean] true if anything was migrated, otherwise false
- #
- # @note This method should **not** be executed on a running consumer group as it creates a
- # "fake" consumer and uses it to move offsets.
- #
- # @note If new consumer group exists, old offsets will be added to it.
- def copy(previous_name, new_name, topics)
- remap = Hash.new { |h, k| h[k] = {} }
+ settings = { 'group.id': consumer_group_id }

- old_lags = read_lags_with_offsets({ previous_name => topics })
+ with_consumer(settings) do |consumer|
+ # If we have any time based stuff to resolve, we need to do it prior to commits
+ unless time_tpl.empty?
+ real_offsets = consumer.offsets_for_times(time_tpl)

- return false if old_lags.empty?
- return false if old_lags.values.all? { |topic_data| topic_data.values.all?(&:empty?) }
+ real_offsets.to_h.each do |name, results|
+ results.each do |result|
+ raise(Errors::InvalidTimeBasedOffsetError) unless result

- read_lags_with_offsets({ previous_name => topics })
- .fetch(previous_name)
- .each do |topic, partitions|
- partitions.each do |partition_id, details|
- offset = details[:offset]
+ partition = result.partition

- # No offset on this partition
- next if offset.negative?
+ # Negative offset means we're beyond last message and we need to query for the
+ # high watermark offset to get the most recent offset and move there
+ if result.offset.negative?
+ _, offset = consumer.query_watermark_offsets(name, result.partition)
+ else
+ # If we get an offset, it means there existed a message close to this time
+ # location
+ offset = result.offset
+ end

- remap[topic][partition_id] = offset
+ # Since now we have proper offsets, we can add this to the final tpl for commit
+ tpl.to_h[name] ||= []
+ tpl.to_h[name] << Rdkafka::Consumer::Partition.new(partition, offset)
+ tpl.to_h[name].reverse!
+ tpl.to_h[name].uniq!(&:partition)
+ tpl.to_h[name].reverse!
  end
  end
+ end

- seek(new_name, remap)
-
- true
+ consumer.commit_offsets(tpl, async: false)
  end
+ end

- # Takes consumer group and its topics and migrates all the offsets to a new named group
- #
- # @param previous_name [String] old consumer group name
- # @param new_name [String] new consumer group name
- # @param topics [Array<String>] topics for which we want to migrate offsets during rename
- # @param delete_previous [Boolean] should we delete previous consumer group after rename.
- # Defaults to true.
- #
- # @return [Boolean] true if rename (and optionally removal) was ok or false if there was
- # nothing really to rename
- #
- # @note This method should **not** be executed on a running consumer group as it creates a
- # "fake" consumer and uses it to move offsets.
- #
- # @note After migration unless `delete_previous` is set to `false`, old group will be
- # removed.
- #
- # @note If new consumer group exists, old offsets will be added to it.
- def rename(previous_name, new_name, topics, delete_previous: true)
- copy_result = copy(previous_name, new_name, topics)
+ # Takes consumer group and its topics and copies all the offsets to a new named group
+ #
+ # @param previous_name [String] old consumer group name
+ # @param new_name [String] new consumer group name
+ # @param topics [Array<String>] topics for which we want to migrate offsets during rename
+ #
+ # @return [Boolean] true if anything was migrated, otherwise false
+ #
+ # @note This method should **not** be executed on a running consumer group as it creates a
+ # "fake" consumer and uses it to move offsets.
+ #
+ # @note If new consumer group exists, old offsets will be added to it.
+ def copy(previous_name, new_name, topics)
+ remap = Hash.new { |h, k| h[k] = {} }
+
+ old_lags = read_lags_with_offsets({ previous_name => topics })
+
+ return false if old_lags.empty?
+ return false if old_lags.values.all? { |topic_data| topic_data.values.all?(&:empty?) }
+
+ read_lags_with_offsets({ previous_name => topics })
+ .fetch(previous_name)
+ .each do |topic, partitions|
+ partitions.each do |partition_id, details|
+ offset = details[:offset]
+
+ # No offset on this partition
+ next if offset.negative?
+
+ remap[topic][partition_id] = offset
+ end
+ end

- return false unless copy_result
- return copy_result unless delete_previous
+ seek(new_name, remap)

- delete(previous_name)
+ true
+ end

- true
- end
+ # Takes consumer group and its topics and migrates all the offsets to a new named group
+ #
+ # @param previous_name [String] old consumer group name
+ # @param new_name [String] new consumer group name
+ # @param topics [Array<String>] topics for which we want to migrate offsets during rename
+ # @param delete_previous [Boolean] should we delete previous consumer group after rename.
+ # Defaults to true.
+ #
+ # @return [Boolean] true if rename (and optionally removal) was ok or false if there was
+ # nothing really to rename
+ #
+ # @note This method should **not** be executed on a running consumer group as it creates a
+ # "fake" consumer and uses it to move offsets.
+ #
+ # @note After migration unless `delete_previous` is set to `false`, old group will be
+ # removed.
+ #
+ # @note If new consumer group exists, old offsets will be added to it.
+ def rename(previous_name, new_name, topics, delete_previous: true)
+ copy_result = copy(previous_name, new_name, topics)
+
+ return false unless copy_result
+ return copy_result unless delete_previous
+
+ delete(previous_name)
+
+ true
+ end

- # Removes given consumer group (if exists)
- #
- # @param consumer_group_id [String] consumer group name
- #
- # @return [void]
- #
- # @note This method should not be used on a running consumer group as it will not yield any
- # results.
- def delete(consumer_group_id)
- with_admin do |admin|
- handler = admin.delete_group(consumer_group_id)
- handler.wait(max_wait_timeout: max_wait_time_seconds)
- end
+ # Removes given consumer group (if exists)
+ #
+ # @param consumer_group_id [String] consumer group name
+ #
+ # @return [void]
+ #
+ # @note This method should not be used on a running consumer group as it will not yield any
+ # results.
+ def delete(consumer_group_id)
+ with_admin do |admin|
+ handler = admin.delete_group(consumer_group_id)
+ handler.wait(max_wait_timeout: max_wait_time_seconds)
  end
+ end

- # Triggers a rebalance for the specified consumer group by briefly joining and leaving
- #
- # @param consumer_group_id [String] consumer group id to trigger rebalance for
- #
- # @return [void]
- #
- # @raise [Karafka::Errors::InvalidConfigurationError] when consumer group is not found in
- # routing or has no topics
- #
- # @note This method creates a temporary "fake" consumer that joins the consumer group,
- # triggering a rebalance when it joins and another when it leaves. This should only be
- # used for operational/testing purposes as it causes two rebalances.
- #
- # @note The consumer group does not need to be running for this to work, but if it is,
- # it will experience rebalances.
- #
- # @note The behavior follows the configured rebalance protocol. For cooperative sticky
- # rebalancing or KIP-848 based protocols, there may be no immediate reaction to the
- # rebalance trigger as these protocols allow incremental partition reassignments without
- # stopping all consumers.
- #
- # @note Topics are always detected from the routing configuration. The consumer settings
- # (kafka config) are taken from the first topic in the consumer group to ensure
- # consistency with the actual consumer configuration.
- #
- # @example Trigger rebalance for a consumer group
- # Karafka::Admin::ConsumerGroups.trigger_rebalance('my-group')
- def trigger_rebalance(consumer_group_id)
- consumer_group = Karafka::App.routes.find { |cg| cg.id == consumer_group_id }
-
- unless consumer_group
- raise(
- Errors::InvalidConfigurationError,
- "Consumer group '#{consumer_group_id}' not found in routing"
- )
- end
+ # Triggers a rebalance for the specified consumer group by briefly joining and leaving
+ #
+ # @param consumer_group_id [String] consumer group id to trigger rebalance for
+ #
+ # @return [void]
+ #
+ # @raise [Karafka::Errors::InvalidConfigurationError] when consumer group is not found in
+ # routing or has no topics
+ #
+ # @note This method creates a temporary "fake" consumer that joins the consumer group,
+ # triggering a rebalance when it joins and another when it leaves. This should only be
+ # used for operational/testing purposes as it causes two rebalances.
+ #
+ # @note The consumer group does not need to be running for this to work, but if it is,
+ # it will experience rebalances.
+ #
+ # @note The behavior follows the configured rebalance protocol. For cooperative sticky
+ # rebalancing or KIP-848 based protocols, there may be no immediate reaction to the
+ # rebalance trigger as these protocols allow incremental partition reassignments without
+ # stopping all consumers.
+ #
+ # @note Topics are always detected from the routing configuration. The consumer settings
+ # (kafka config) are taken from the first topic in the consumer group to ensure
+ # consistency with the actual consumer configuration.
+ #
+ # @example Trigger rebalance for a consumer group
+ # Karafka::Admin::ConsumerGroups.trigger_rebalance('my-group')
+ def trigger_rebalance(consumer_group_id)
+ consumer_group = Karafka::App.routes.find { |cg| cg.id == consumer_group_id }
+
+ unless consumer_group
+ raise(
+ Errors::InvalidConfigurationError,
+ "Consumer group '#{consumer_group_id}' not found in routing"
+ )
+ end

- topics = consumer_group.topics.map(&:name)
+ topics = consumer_group.topics.map(&:name)

- if topics.empty?
- raise(
- Errors::InvalidConfigurationError,
- "Consumer group '#{consumer_group_id}' has no topics"
- )
- end
+ if topics.empty?
+ raise(
+ Errors::InvalidConfigurationError,
+ "Consumer group '#{consumer_group_id}' has no topics"
+ )
+ end

- # Get the first topic to extract kafka settings
- first_topic = consumer_group.topics.first
+ # Get the first topic to extract kafka settings
+ first_topic = consumer_group.topics.first

- # Build consumer settings using the consumer group's kafka config from first topic
- # This ensures we use the same settings as the actual consumers
- # Following the same pattern as in Karafka::Connection::Client#build_kafka
- consumer_settings = Setup::AttributesMap.consumer(first_topic.kafka.dup)
- consumer_settings[:'group.id'] = consumer_group.id
- consumer_settings[:'enable.auto.offset.store'] = false
- consumer_settings[:'auto.offset.reset'] ||= first_topic.initial_offset
+ # Build consumer settings using the consumer group's kafka config from first topic
+ # This ensures we use the same settings as the actual consumers
+ # Following the same pattern as in Karafka::Connection::Client#build_kafka
+ consumer_settings = Setup::AttributesMap.consumer(first_topic.kafka.dup)
+ consumer_settings[:'group.id'] = consumer_group.id
+ consumer_settings[:'enable.auto.offset.store'] = false
+ consumer_settings[:'auto.offset.reset'] ||= first_topic.initial_offset

- with_consumer(consumer_settings) do |consumer|
- # Subscribe to the topics - this triggers the first rebalance
- consumer.subscribe(*topics)
+ with_consumer(consumer_settings) do |consumer|
+ # Subscribe to the topics - this triggers the first rebalance
+ consumer.subscribe(*topics)

- # Wait briefly (100ms) to allow the rebalance to initiate
- # The actual rebalance happens asynchronously, so we just need to give it a moment
- sleep(0.1)
+ # Wait briefly (100ms) to allow the rebalance to initiate
+ # The actual rebalance happens asynchronously, so we just need to give it a moment
+ sleep(0.1)

- # Unsubscribe - this will trigger the second rebalance when the consumer closes
- # The ensure block in with_consumer will handle the unsubscribe and close
- end
+ # Unsubscribe - this will trigger the second rebalance when the consumer closes
+ # The ensure block in with_consumer will handle the unsubscribe and close
  end
+ end

- # Reads lags and offsets for given topics in the context of consumer groups defined in the
- # routing
- #
- # @param consumer_groups_with_topics [Hash{String => Array<String>}] hash with consumer
- # groups names with array of topics to query per consumer group inside
- # @param active_topics_only [Boolean] if set to false, when we use routing topics, will
- # select also topics that are marked as inactive in routing
- #
- # @return [Hash{String => Hash{Integer => Hash{Integer => Object}}}] hash where the top
- # level keys are the consumer groups and values are hashes with topics and inside
- # partitions with lags and offsets
- #
- # @note For topics that do not exist, topic details will be set to an empty hash
- #
- # @note For topics that exist but were never consumed by a given CG we set `-1` as lag and
- # the offset on each of the partitions that were not consumed.
- #
- # @note This lag reporting is for committed lags and is "Kafka-centric", meaning that this
- # represents lags from Kafka perspective and not the consumer. They may differ.
- def read_lags_with_offsets(consumer_groups_with_topics = {}, active_topics_only: true)
- # We first fetch all the topics with partitions count that exist in the cluster so we
- # do not query for topics that do not exist and so we can get partitions count for all
- # the topics we may need. The non-existent and not consumed will be filled at the end
- existing_topics = cluster_info.topics.to_h do |topic|
- [topic[:topic_name], topic[:partition_count]]
- end.freeze
-
- # If no expected CGs, we use all from routing that have active topics
- if consumer_groups_with_topics.empty?
- consumer_groups_with_topics = Karafka::App.routes.to_h do |cg|
- cg_topics = cg.topics.select do |cg_topic|
- active_topics_only ? cg_topic.active? : true
+ # Reads lags and offsets for given topics in the context of consumer groups defined in the
+ # routing
+ #
+ # @param consumer_groups_with_topics [Hash{String => Array<String>}] hash with consumer
+ # groups names with array of topics to query per consumer group inside
+ # @param active_topics_only [Boolean] if set to false, when we use routing topics, will
+ # select also topics that are marked as inactive in routing
+ #
+ # @return [Hash{String => Hash{Integer => Hash{Integer => Object}}}] hash where the top
+ # level keys are the consumer groups and values are hashes with topics and inside
+ # partitions with lags and offsets
+ #
+ # @note For topics that do not exist, topic details will be set to an empty hash
+ #
+ # @note For topics that exist but were never consumed by a given CG we set `-1` as lag and
+ # the offset on each of the partitions that were not consumed.
+ #
+ # @note This lag reporting is for committed lags and is "Kafka-centric", meaning that this
+ # represents lags from Kafka perspective and not the consumer. They may differ.
+ def read_lags_with_offsets(consumer_groups_with_topics = {}, active_topics_only: true)
+ # We first fetch all the topics with partitions count that exist in the cluster so we
+ # do not query for topics that do not exist and so we can get partitions count for all
+ # the topics we may need. The non-existent and not consumed will be filled at the end
+ existing_topics = cluster_info.topics.to_h do |topic|
+ [topic[:topic_name], topic[:partition_count]]
+ end.freeze
+
+ # If no expected CGs, we use all from routing that have active topics
+ if consumer_groups_with_topics.empty?
+ consumer_groups_with_topics = Karafka::App.routes.to_h do |cg|
+ cg_topics = cg.topics.select do |cg_topic|
+ active_topics_only ? cg_topic.active? : true
  end
+
+ [cg.id, cg_topics.map(&:name)]
  end
+ end

- # We make a copy because we will remove once with non-existing topics
- # We keep original requested consumer groups with topics for later backfilling
- cgs_with_topics = consumer_groups_with_topics.dup
- cgs_with_topics.transform_values!(&:dup)
+ # We make a copy because we will remove once with non-existing topics
+ # We keep original requested consumer groups with topics for later backfilling
+ cgs_with_topics = consumer_groups_with_topics.dup
+ cgs_with_topics.transform_values!(&:dup)

- # We can query only topics that do exist, this is why we are cleaning those that do not
- # exist
- cgs_with_topics.each_value do |requested_topics|
- requested_topics.delete_if { |topic| !existing_topics.include?(topic) }
- end
+ # We can query only topics that do exist, this is why we are cleaning those that do not
+ # exist
+ cgs_with_topics.each_value do |requested_topics|
+ requested_topics.delete_if { |topic| !existing_topics.include?(topic) }
+ end

- groups_lags = Hash.new { |h, k| h[k] = {} }
- groups_offs = Hash.new { |h, k| h[k] = {} }
+ groups_lags = Hash.new { |h, k| h[k] = {} }
+ groups_offs = Hash.new { |h, k| h[k] = {} }

- cgs_with_topics.each do |cg, topics|
- # Do not add to tpl topics that do not exist
- next if topics.empty?
+ cgs_with_topics.each do |cg, topics|
+ # Do not add to tpl topics that do not exist
+ next if topics.empty?

- tpl = Rdkafka::Consumer::TopicPartitionList.new
+ tpl = Rdkafka::Consumer::TopicPartitionList.new

- with_consumer('group.id': cg) do |consumer|
- topics.each { |topic| tpl.add_topic(topic, existing_topics[topic]) }
+ with_consumer('group.id': cg) do |consumer|
+ topics.each { |topic| tpl.add_topic(topic, existing_topics[topic]) }

- commit_offsets = consumer.committed(tpl)
+ commit_offsets = consumer.committed(tpl)

- commit_offsets.to_h.each do |topic, partitions|
- groups_offs[cg][topic] = {}
+ commit_offsets.to_h.each do |topic, partitions|
+ groups_offs[cg][topic] = {}

- partitions.each do |partition|
- # -1 when no offset is stored
- groups_offs[cg][topic][partition.partition] = partition.offset || -1
- end
+ partitions.each do |partition|
+ # -1 when no offset is stored
+ groups_offs[cg][topic][partition.partition] = partition.offset || -1
  end
+ end

- consumer.lag(commit_offsets).each do |topic, partitions_lags|
- groups_lags[cg][topic] = partitions_lags
- end
+ consumer.lag(commit_offsets).each do |topic, partitions_lags|
+ groups_lags[cg][topic] = partitions_lags
  end
  end
+ end

- consumer_groups_with_topics.each do |cg, topics|
- groups_lags[cg]
+ consumer_groups_with_topics.each do |cg, topics|
+ groups_lags[cg]

- topics.each do |topic|
- groups_lags[cg][topic] ||= {}
+ topics.each do |topic|
+ groups_lags[cg][topic] ||= {}

- next unless existing_topics.key?(topic)
+ next unless existing_topics.key?(topic)

- # We backfill because there is a case where our consumer group would consume for
- # example only one partition out of 20, rest needs to get -1
- existing_topics[topic].times do |partition_id|
- groups_lags[cg][topic][partition_id] ||= -1
- end
+ # We backfill because there is a case where our consumer group would consume for
+ # example only one partition out of 20, rest needs to get -1
+ existing_topics[topic].times do |partition_id|
+ groups_lags[cg][topic][partition_id] ||= -1
  end
  end
+ end

- merged = Hash.new { |h, k| h[k] = {} }
+ merged = Hash.new { |h, k| h[k] = {} }

- groups_lags.each do |cg, topics|
- topics.each do |topic, partitions|
- merged[cg][topic] = {}
+ groups_lags.each do |cg, topics|
+ topics.each do |topic, partitions|
+ merged[cg][topic] = {}

- partitions.each do |partition, lag|
- merged[cg][topic][partition] = {
- offset: groups_offs.fetch(cg).fetch(topic).fetch(partition),
- lag: lag
- }
- end
+ partitions.each do |partition, lag|
+ merged[cg][topic][partition] = {
+ offset: groups_offs.fetch(cg).fetch(topic).fetch(partition),
+ lag: lag
+ }
  end
  end
-
- merged
  end
+
+ merged
  end
  end
  end
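
The bulk of this hunk is a refactor of `Karafka::Admin::ConsumerGroups`: the implementations of `seek`, `copy`, `rename`, `delete`, `trigger_rebalance`, and `read_lags_with_offsets` move from class methods to instance methods, while the `class << self` block keeps thin delegators (`new.seek(...)` and so on), so the documented class-level calls such as `Karafka::Admin::ConsumerGroups.seek('group-id', ...)` keep working. A minimal sketch of that delegation pattern follows; the class and its behaviour are hypothetical and only illustrate the shape of the change, not code from the gem:

```ruby
# Hypothetical illustration of the class-method-to-instance-method delegation
# visible in this hunk; AdminTask and its behaviour are made up for the example.
class AdminTask
  class << self
    # Class-level entry point preserved for callers; it only builds an instance
    # and forwards the arguments, as the new delegators in ConsumerGroups do.
    def seek(consumer_group_id, offsets)
      new.seek(consumer_group_id, offsets)
    end
  end

  # The actual work now lives on the instance.
  def seek(consumer_group_id, offsets)
    puts "Would move #{consumer_group_id} to #{offsets.inspect}"
  end
end

AdminTask.seek('example-group', { 'example-topic' => { 0 => 'earliest' } })
```

Moving the work onto instances also appears to explain added references such as `Topics.new(kafka: @custom_kafka).info(topic)` in the new `seek` body, which suggests an instance can carry its own Kafka settings rather than relying only on module-level state.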