karafka 2.5.4 → 2.5.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (260)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +3 -0
  3. data/LICENSE-COMM +4 -2
  4. data/lib/karafka/admin/acl.rb +127 -80
  5. data/lib/karafka/admin/configs.rb +84 -70
  6. data/lib/karafka/admin/consumer_groups.rb +377 -330
  7. data/lib/karafka/admin/replication.rb +287 -263
  8. data/lib/karafka/admin/topics.rb +232 -186
  9. data/lib/karafka/admin.rb +277 -117
  10. data/lib/karafka/pro/active_job/consumer.rb +19 -2
  11. data/lib/karafka/pro/active_job/dispatcher.rb +19 -2
  12. data/lib/karafka/pro/active_job/job_options_contract.rb +19 -2
  13. data/lib/karafka/pro/base_consumer.rb +19 -2
  14. data/lib/karafka/pro/cleaner/errors.rb +19 -2
  15. data/lib/karafka/pro/cleaner/messages/message.rb +19 -2
  16. data/lib/karafka/pro/cleaner/messages/messages.rb +19 -2
  17. data/lib/karafka/pro/cleaner/messages/metadata.rb +19 -2
  18. data/lib/karafka/pro/cleaner.rb +19 -2
  19. data/lib/karafka/pro/cli/contracts/server.rb +19 -2
  20. data/lib/karafka/pro/cli/parallel_segments/base.rb +19 -2
  21. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +19 -2
  22. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +19 -2
  23. data/lib/karafka/pro/cli/parallel_segments.rb +19 -2
  24. data/lib/karafka/pro/connection/manager.rb +19 -2
  25. data/lib/karafka/pro/connection/multiplexing/listener.rb +19 -2
  26. data/lib/karafka/pro/contracts/base.rb +19 -2
  27. data/lib/karafka/pro/encryption/cipher.rb +19 -2
  28. data/lib/karafka/pro/encryption/contracts/config.rb +19 -2
  29. data/lib/karafka/pro/encryption/errors.rb +19 -2
  30. data/lib/karafka/pro/encryption/messages/middleware.rb +19 -2
  31. data/lib/karafka/pro/encryption/messages/parser.rb +19 -2
  32. data/lib/karafka/pro/encryption/setup/config.rb +19 -2
  33. data/lib/karafka/pro/encryption.rb +19 -2
  34. data/lib/karafka/pro/instrumentation/performance_tracker.rb +19 -2
  35. data/lib/karafka/pro/iterator/expander.rb +19 -2
  36. data/lib/karafka/pro/iterator/tpl_builder.rb +19 -2
  37. data/lib/karafka/pro/iterator.rb +19 -2
  38. data/lib/karafka/pro/loader.rb +19 -2
  39. data/lib/karafka/pro/processing/adaptive_iterator/consumer.rb +19 -2
  40. data/lib/karafka/pro/processing/adaptive_iterator/tracker.rb +19 -2
  41. data/lib/karafka/pro/processing/collapser.rb +19 -2
  42. data/lib/karafka/pro/processing/coordinator.rb +19 -2
  43. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +19 -2
  44. data/lib/karafka/pro/processing/coordinators/filters_applier.rb +19 -2
  45. data/lib/karafka/pro/processing/coordinators/virtual_offset_manager.rb +19 -2
  46. data/lib/karafka/pro/processing/executor.rb +19 -2
  47. data/lib/karafka/pro/processing/expansions_selector.rb +19 -2
  48. data/lib/karafka/pro/processing/filters/base.rb +19 -2
  49. data/lib/karafka/pro/processing/filters/delayer.rb +19 -2
  50. data/lib/karafka/pro/processing/filters/expirer.rb +19 -2
  51. data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +19 -2
  52. data/lib/karafka/pro/processing/filters/throttler.rb +19 -2
  53. data/lib/karafka/pro/processing/filters/virtual_limiter.rb +19 -2
  54. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +19 -2
  55. data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +19 -2
  56. data/lib/karafka/pro/processing/jobs/periodic.rb +19 -2
  57. data/lib/karafka/pro/processing/jobs/periodic_non_blocking.rb +19 -2
  58. data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +19 -2
  59. data/lib/karafka/pro/processing/jobs_builder.rb +19 -2
  60. data/lib/karafka/pro/processing/jobs_queue.rb +19 -2
  61. data/lib/karafka/pro/processing/offset_metadata/consumer.rb +19 -2
  62. data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +19 -2
  63. data/lib/karafka/pro/processing/offset_metadata/listener.rb +19 -2
  64. data/lib/karafka/pro/processing/parallel_segments/filters/base.rb +19 -2
  65. data/lib/karafka/pro/processing/parallel_segments/filters/default.rb +19 -2
  66. data/lib/karafka/pro/processing/parallel_segments/filters/mom.rb +19 -2
  67. data/lib/karafka/pro/processing/partitioner.rb +19 -2
  68. data/lib/karafka/pro/processing/periodic_job/consumer.rb +19 -2
  69. data/lib/karafka/pro/processing/piping/consumer.rb +19 -2
  70. data/lib/karafka/pro/processing/schedulers/base.rb +19 -2
  71. data/lib/karafka/pro/processing/schedulers/default.rb +19 -2
  72. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +19 -2
  73. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +19 -2
  74. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +19 -2
  75. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +19 -2
  76. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +19 -2
  77. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +19 -2
  78. data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +19 -2
  79. data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +19 -2
  80. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom.rb +19 -2
  81. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +19 -2
  82. data/lib/karafka/pro/processing/strategies/aj/ftr_mom.rb +19 -2
  83. data/lib/karafka/pro/processing/strategies/aj/ftr_mom_vp.rb +19 -2
  84. data/lib/karafka/pro/processing/strategies/aj/lrj_mom.rb +19 -2
  85. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +19 -2
  86. data/lib/karafka/pro/processing/strategies/aj/mom.rb +19 -2
  87. data/lib/karafka/pro/processing/strategies/aj/mom_vp.rb +19 -2
  88. data/lib/karafka/pro/processing/strategies/base.rb +19 -2
  89. data/lib/karafka/pro/processing/strategies/default.rb +19 -2
  90. data/lib/karafka/pro/processing/strategies/dlq/default.rb +19 -2
  91. data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +19 -2
  92. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +19 -2
  93. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +19 -2
  94. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom_vp.rb +19 -2
  95. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_vp.rb +19 -2
  96. data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +19 -2
  97. data/lib/karafka/pro/processing/strategies/dlq/ftr_mom_vp.rb +19 -2
  98. data/lib/karafka/pro/processing/strategies/dlq/ftr_vp.rb +19 -2
  99. data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +19 -2
  100. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +19 -2
  101. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom_vp.rb +19 -2
  102. data/lib/karafka/pro/processing/strategies/dlq/lrj_vp.rb +19 -2
  103. data/lib/karafka/pro/processing/strategies/dlq/mom.rb +19 -2
  104. data/lib/karafka/pro/processing/strategies/dlq/mom_vp.rb +19 -2
  105. data/lib/karafka/pro/processing/strategies/dlq/vp.rb +19 -2
  106. data/lib/karafka/pro/processing/strategies/ftr/default.rb +19 -2
  107. data/lib/karafka/pro/processing/strategies/ftr/vp.rb +19 -2
  108. data/lib/karafka/pro/processing/strategies/lrj/default.rb +19 -2
  109. data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +19 -2
  110. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +19 -2
  111. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom_vp.rb +19 -2
  112. data/lib/karafka/pro/processing/strategies/lrj/ftr_vp.rb +19 -2
  113. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +19 -2
  114. data/lib/karafka/pro/processing/strategies/lrj/mom_vp.rb +19 -2
  115. data/lib/karafka/pro/processing/strategies/lrj/vp.rb +19 -2
  116. data/lib/karafka/pro/processing/strategies/mom/default.rb +19 -2
  117. data/lib/karafka/pro/processing/strategies/mom/ftr.rb +19 -2
  118. data/lib/karafka/pro/processing/strategies/mom/ftr_vp.rb +19 -2
  119. data/lib/karafka/pro/processing/strategies/mom/vp.rb +19 -2
  120. data/lib/karafka/pro/processing/strategies/vp/default.rb +19 -2
  121. data/lib/karafka/pro/processing/strategies.rb +19 -2
  122. data/lib/karafka/pro/processing/strategy_selector.rb +19 -2
  123. data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +19 -2
  124. data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +19 -2
  125. data/lib/karafka/pro/processing/virtual_partitions/distributors/base.rb +19 -2
  126. data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +19 -2
  127. data/lib/karafka/pro/recurring_tasks/consumer.rb +19 -2
  128. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +19 -2
  129. data/lib/karafka/pro/recurring_tasks/contracts/task.rb +19 -2
  130. data/lib/karafka/pro/recurring_tasks/deserializer.rb +19 -2
  131. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +19 -2
  132. data/lib/karafka/pro/recurring_tasks/errors.rb +19 -2
  133. data/lib/karafka/pro/recurring_tasks/executor.rb +19 -2
  134. data/lib/karafka/pro/recurring_tasks/listener.rb +19 -2
  135. data/lib/karafka/pro/recurring_tasks/matcher.rb +19 -2
  136. data/lib/karafka/pro/recurring_tasks/schedule.rb +19 -2
  137. data/lib/karafka/pro/recurring_tasks/serializer.rb +19 -2
  138. data/lib/karafka/pro/recurring_tasks/setup/config.rb +19 -2
  139. data/lib/karafka/pro/recurring_tasks/task.rb +19 -2
  140. data/lib/karafka/pro/recurring_tasks.rb +19 -2
  141. data/lib/karafka/pro/routing/features/active_job/builder.rb +19 -2
  142. data/lib/karafka/pro/routing/features/active_job.rb +19 -2
  143. data/lib/karafka/pro/routing/features/adaptive_iterator/config.rb +19 -2
  144. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +19 -2
  145. data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb +19 -2
  146. data/lib/karafka/pro/routing/features/adaptive_iterator.rb +19 -2
  147. data/lib/karafka/pro/routing/features/base.rb +19 -2
  148. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +19 -2
  149. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +19 -2
  150. data/lib/karafka/pro/routing/features/dead_letter_queue.rb +19 -2
  151. data/lib/karafka/pro/routing/features/delaying/config.rb +19 -2
  152. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +19 -2
  153. data/lib/karafka/pro/routing/features/delaying/topic.rb +19 -2
  154. data/lib/karafka/pro/routing/features/delaying.rb +19 -2
  155. data/lib/karafka/pro/routing/features/direct_assignments/config.rb +19 -2
  156. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +19 -2
  157. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +19 -2
  158. data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +19 -2
  159. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +19 -2
  160. data/lib/karafka/pro/routing/features/direct_assignments.rb +19 -2
  161. data/lib/karafka/pro/routing/features/expiring/config.rb +19 -2
  162. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +19 -2
  163. data/lib/karafka/pro/routing/features/expiring/topic.rb +19 -2
  164. data/lib/karafka/pro/routing/features/expiring.rb +19 -2
  165. data/lib/karafka/pro/routing/features/filtering/config.rb +19 -2
  166. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +19 -2
  167. data/lib/karafka/pro/routing/features/filtering/topic.rb +19 -2
  168. data/lib/karafka/pro/routing/features/filtering.rb +19 -2
  169. data/lib/karafka/pro/routing/features/inline_insights/config.rb +19 -2
  170. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +19 -2
  171. data/lib/karafka/pro/routing/features/inline_insights/topic.rb +19 -2
  172. data/lib/karafka/pro/routing/features/inline_insights.rb +19 -2
  173. data/lib/karafka/pro/routing/features/long_running_job/config.rb +19 -2
  174. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +19 -2
  175. data/lib/karafka/pro/routing/features/long_running_job/topic.rb +19 -2
  176. data/lib/karafka/pro/routing/features/long_running_job.rb +19 -2
  177. data/lib/karafka/pro/routing/features/multiplexing/config.rb +19 -2
  178. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +19 -2
  179. data/lib/karafka/pro/routing/features/multiplexing/patches/contracts/consumer_group.rb +19 -2
  180. data/lib/karafka/pro/routing/features/multiplexing/proxy.rb +19 -2
  181. data/lib/karafka/pro/routing/features/multiplexing/subscription_group.rb +19 -2
  182. data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +19 -2
  183. data/lib/karafka/pro/routing/features/multiplexing.rb +19 -2
  184. data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +19 -2
  185. data/lib/karafka/pro/routing/features/non_blocking_job.rb +19 -2
  186. data/lib/karafka/pro/routing/features/offset_metadata/config.rb +19 -2
  187. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +19 -2
  188. data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +19 -2
  189. data/lib/karafka/pro/routing/features/offset_metadata.rb +19 -2
  190. data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +19 -2
  191. data/lib/karafka/pro/routing/features/parallel_segments/config.rb +19 -2
  192. data/lib/karafka/pro/routing/features/parallel_segments/consumer_group.rb +19 -2
  193. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +19 -2
  194. data/lib/karafka/pro/routing/features/parallel_segments/topic.rb +19 -2
  195. data/lib/karafka/pro/routing/features/parallel_segments.rb +19 -2
  196. data/lib/karafka/pro/routing/features/patterns/builder.rb +19 -2
  197. data/lib/karafka/pro/routing/features/patterns/config.rb +19 -2
  198. data/lib/karafka/pro/routing/features/patterns/consumer_group.rb +19 -2
  199. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +19 -2
  200. data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +19 -2
  201. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +19 -2
  202. data/lib/karafka/pro/routing/features/patterns/detector.rb +19 -2
  203. data/lib/karafka/pro/routing/features/patterns/pattern.rb +19 -2
  204. data/lib/karafka/pro/routing/features/patterns/patterns.rb +19 -2
  205. data/lib/karafka/pro/routing/features/patterns/topic.rb +19 -2
  206. data/lib/karafka/pro/routing/features/patterns/topics.rb +19 -2
  207. data/lib/karafka/pro/routing/features/patterns.rb +19 -2
  208. data/lib/karafka/pro/routing/features/pausing/config.rb +19 -2
  209. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +19 -2
  210. data/lib/karafka/pro/routing/features/pausing/topic.rb +19 -2
  211. data/lib/karafka/pro/routing/features/pausing.rb +19 -2
  212. data/lib/karafka/pro/routing/features/periodic_job/config.rb +19 -2
  213. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +19 -2
  214. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +19 -2
  215. data/lib/karafka/pro/routing/features/periodic_job.rb +19 -2
  216. data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +19 -2
  217. data/lib/karafka/pro/routing/features/recurring_tasks/config.rb +19 -2
  218. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +19 -2
  219. data/lib/karafka/pro/routing/features/recurring_tasks/proxy.rb +19 -2
  220. data/lib/karafka/pro/routing/features/recurring_tasks/topic.rb +19 -2
  221. data/lib/karafka/pro/routing/features/recurring_tasks.rb +19 -2
  222. data/lib/karafka/pro/routing/features/scheduled_messages/builder.rb +19 -2
  223. data/lib/karafka/pro/routing/features/scheduled_messages/config.rb +19 -2
  224. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +19 -2
  225. data/lib/karafka/pro/routing/features/scheduled_messages/proxy.rb +19 -2
  226. data/lib/karafka/pro/routing/features/scheduled_messages/topic.rb +19 -2
  227. data/lib/karafka/pro/routing/features/scheduled_messages.rb +19 -2
  228. data/lib/karafka/pro/routing/features/swarm/config.rb +19 -2
  229. data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +19 -2
  230. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +19 -2
  231. data/lib/karafka/pro/routing/features/swarm/topic.rb +19 -2
  232. data/lib/karafka/pro/routing/features/swarm.rb +19 -2
  233. data/lib/karafka/pro/routing/features/throttling/config.rb +19 -2
  234. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +19 -2
  235. data/lib/karafka/pro/routing/features/throttling/topic.rb +19 -2
  236. data/lib/karafka/pro/routing/features/throttling.rb +19 -2
  237. data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +19 -2
  238. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +19 -2
  239. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +19 -2
  240. data/lib/karafka/pro/routing/features/virtual_partitions.rb +19 -2
  241. data/lib/karafka/pro/scheduled_messages/consumer.rb +19 -2
  242. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +19 -2
  243. data/lib/karafka/pro/scheduled_messages/contracts/message.rb +19 -2
  244. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +19 -2
  245. data/lib/karafka/pro/scheduled_messages/day.rb +19 -2
  246. data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +19 -2
  247. data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +19 -2
  248. data/lib/karafka/pro/scheduled_messages/dispatcher.rb +19 -2
  249. data/lib/karafka/pro/scheduled_messages/errors.rb +19 -2
  250. data/lib/karafka/pro/scheduled_messages/max_epoch.rb +19 -2
  251. data/lib/karafka/pro/scheduled_messages/proxy.rb +19 -2
  252. data/lib/karafka/pro/scheduled_messages/schema_validator.rb +19 -2
  253. data/lib/karafka/pro/scheduled_messages/serializer.rb +19 -2
  254. data/lib/karafka/pro/scheduled_messages/setup/config.rb +19 -2
  255. data/lib/karafka/pro/scheduled_messages/state.rb +19 -2
  256. data/lib/karafka/pro/scheduled_messages/tracker.rb +19 -2
  257. data/lib/karafka/pro/scheduled_messages.rb +19 -2
  258. data/lib/karafka/pro/swarm/liveness_listener.rb +19 -2
  259. data/lib/karafka/version.rb +1 -1
  260. metadata +2 -2
@@ -59,21 +59,47 @@ module Karafka
         :steps
       )
 
-      # Builds the replication plan
+      # Class level APIs for convenience
+      class << self
+        # @param topic [String] topic name to plan replication for
+        # @param to [Integer] target replication factor
+        # @param brokers [Hash, nil] optional manual broker assignments per partition
+        # @see #plan
+        def plan(topic:, to:, brokers: nil)
+          new.plan(topic: topic, to: to, brokers: brokers)
+        end
+
+        # @param topic [String] topic name to rebalance
+        # @see #rebalance
+        def rebalance(topic:)
+          new.rebalance(topic: topic)
+        end
+      end
+
+      # Builds the replication plan or creates an admin operations instance
       #
-      # @param topic [String] topic name
-      # @param current_replication_factor [Integer] current replication factor
-      # @param target_replication_factor [Integer] target replication factor
-      # @param partitions_assignment [Hash] partition to brokers assignment
-      # @param cluster_info [Hash] broker information
+      # This class serves dual purposes:
+      # 1. As an admin operations instance when called with only kafka: parameter
+      # 2. As a plan result object when called with topic and plan parameters
+      #
+      # @param kafka [Hash] custom kafka configuration for admin operations (optional)
+      # @param topic [String] topic name (for plan result)
+      # @param current_replication_factor [Integer] current replication factor (for plan result)
+      # @param target_replication_factor [Integer] target replication factor (for plan result)
+      # @param partitions_assignment [Hash] partition to brokers assignment (for plan result)
+      # @param cluster_info [Hash] broker information (for plan result)
       def initialize(
-        topic:,
-        current_replication_factor:,
-        target_replication_factor:,
-        partitions_assignment:,
-        cluster_info:
+        kafka: nil,
+        topic: nil,
+        current_replication_factor: nil,
+        target_replication_factor: nil,
+        partitions_assignment: nil,
+        cluster_info: nil
       )
-        super()
+        super(kafka: kafka || {})
+
+        # If topic is provided, this is a plan result object
+        return unless topic
 
         @topic = topic
         @current_replication_factor = current_replication_factor
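The hunk above introduces class-level convenience wrappers and a dual-purpose constructor: with only the new kafka: keyword the object acts as an admin operations instance, while topic and plan arguments turn it into a frozen plan result. A minimal usage sketch of both entry points; the 'bootstrap.servers' value is illustrative and not part of this diff:

# Class-level call: builds a throwaway instance and delegates to it
plan = Karafka::Admin::Replication.plan(topic: 'events', to: 3)

# Instance-level call against a non-default cluster via the new kafka: keyword
replication = Karafka::Admin::Replication.new(
  kafka: { 'bootstrap.servers': 'broker-1:9092' }
)
plan = replication.plan(topic: 'events', to: 3)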
@@ -88,6 +114,141 @@ module Karafka
         freeze
       end
 
+      # Plans replication factor increase for a given topic
+      #
+      # Generates a detailed reassignment plan that preserves existing replica assignments
+      # while adding new replicas to meet the target replication factor. The plan uses
+      # round-robin distribution to balance new replicas across available brokers.
+      #
+      # @param topic [String] name of the topic
+      # @param to [Integer] target replication factor (must be higher than current)
+      # @param brokers [Hash{Integer => Array<Integer>}] optional manual broker assignments
+      #   per partition. Keys are partition IDs, values are arrays of broker IDs. If not provided
+      #   automatic distribution (usually fine) will be used
+      # @return [Replication] plan object containing JSON, commands, and instructions
+      #
+      # @raise [ArgumentError] if target replication factor is not higher than current
+      # @raise [ArgumentError] if target replication factor exceeds available broker count
+      # @raise [Rdkafka::RdkafkaError] if topic metadata cannot be fetched
+      #
+      # @example Increase replication from 1 to 3 with automatic distribution
+      #   plan = Replication.plan(topic: 'events', to: 3)
+      #
+      #   # Inspect the plan
+      #   puts plan.summary
+      #   puts plan.reassignment_json
+      #
+      #   # Check which brokers will get new replicas
+      #   plan.partitions_assignment.each do |partition_id, broker_ids|
+      #     puts "Partition #{partition_id}: #{broker_ids.join(', ')}"
+      #   end
+      #
+      #   # Save and execute
+      #   plan.export_to_file('increase_rf.json')
+      #
+      # @example Increase replication with manual broker placement
+      #   # Specify exactly which brokers should host each partition
+      #   plan = Replication.plan(
+      #     topic: 'events',
+      #     to: 3,
+      #     brokers: {
+      #       0 => [1, 2, 4], # Partition 0 on brokers 1, 2, 4
+      #       1 => [2, 3, 4], # Partition 1 on brokers 2, 3, 4
+      #       2 => [1, 3, 5]  # Partition 2 on brokers 1, 3, 5
+      #     }
+      #   )
+      #
+      #   # The plan will use your exact broker specifications
+      #   puts plan.partitions_assignment
+      #   # => {0=>[1, 2, 4], 1=>[2, 3, 4], 2=>[1, 3, 5]}
+      #
+      # @note When using manual placement, ensure all partitions are specified
+      # @note Manual placement overrides automatic distribution entirely
+      def plan(topic:, to:, brokers: nil)
+        topic_info = fetch_topic_info(topic)
+        first_partition = topic_info[:partitions].first
+        current_rf = first_partition[:replica_count] || first_partition[:replicas]&.size
+        fetched_cluster_info = fetch_cluster_info
+
+        # Use contract for validation
+        validation_data = {
+          topic: topic,
+          to: to,
+          brokers: brokers,
+          current_rf: current_rf,
+          broker_count: fetched_cluster_info[:brokers].size,
+          topic_info: topic_info,
+          cluster_info: fetched_cluster_info
+        }
+
+        Contracts::Replication.new.validate!(validation_data)
+
+        partitions_assignment = brokers || generate_partitions_assignment(
+          topic_info: topic_info,
+          target_replication_factor: to,
+          cluster_info: fetched_cluster_info
+        )
+
+        self.class.new(
+          kafka: @custom_kafka,
+          topic: topic,
+          current_replication_factor: current_rf,
+          target_replication_factor: to,
+          partitions_assignment: partitions_assignment,
+          cluster_info: fetched_cluster_info
+        )
+      end
+
+      # Plans rebalancing of existing replicas across brokers
+      #
+      # Generates a reassignment plan that redistributes existing replicas more evenly
+      # across the cluster without changing the replication factor. Useful for:
+      #
+      # - Balancing load after adding new brokers to the cluster
+      # - Redistributing replicas after broker failures and recovery
+      # - Optimizing replica placement for better resource utilization
+      # - Moving replicas away from overloaded brokers
+      #
+      # @param topic [String] name of the topic to rebalance
+      # @return [Replication] rebalancing plan
+      #
+      # @example Rebalance after adding new brokers
+      #   # After adding brokers 4 and 5 to a 3-broker cluster
+      #   plan = Replication.rebalance(topic: 'events')
+      #
+      #   # Review how replicas will be redistributed
+      #   puts plan.summary
+      #
+      #   # Execute if distribution looks good
+      #   plan.export_to_file('rebalance.json')
+      #   # Then run: kafka-reassign-partitions.sh --execute ...
+      #
+      # @note This maintains the same replication factor
+      # @note All data will be copied to new locations during rebalancing
+      # @note Consider impact on cluster resources during rebalancing
+      def rebalance(topic:)
+        topic_info = fetch_topic_info(topic)
+        first_partition = topic_info[:partitions].first
+        current_rf = first_partition[:replica_count] || first_partition[:replicas]&.size
+        fetched_cluster_info = fetch_cluster_info
+
+        partitions_assignment = generate_partitions_assignment(
+          topic_info: topic_info,
+          target_replication_factor: current_rf,
+          cluster_info: fetched_cluster_info,
+          rebalance_only: true
+        )
+
+        self.class.new(
+          kafka: @custom_kafka,
+          topic: topic,
+          current_replication_factor: current_rf,
+          target_replication_factor: current_rf,
+          partitions_assignment: partitions_assignment,
+          cluster_info: fetched_cluster_info
+        )
+      end
+
       # Export the reassignment JSON to a file
       # @param file_path [String] path where to save the JSON file
       def export_to_file(file_path)
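Both instance methods now return a fresh plan-result object via self.class.new, threading @custom_kafka through so the result stays bound to the same cluster. The typical flow ends with exporting the plan and applying it with Kafka's own tooling. A sketch assuming a reachable cluster; the shell flags follow the standard kafka-reassign-partitions.sh interface already referenced in the docs above:

plan = Karafka::Admin::Replication.plan(topic: 'events', to: 3)
puts plan.summary

plan.export_to_file('increase_rf.json')

# Outside Ruby, apply and then verify the reassignment:
#   kafka-reassign-partitions.sh --bootstrap-server broker-1:9092 \
#     --reassignment-json-file increase_rf.json --execute
#   kafka-reassign-partitions.sh --bootstrap-server broker-1:9092 \
#     --reassignment-json-file increase_rf.json --verify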
@@ -115,274 +276,137 @@ module Karafka
         SUMMARY
       end
 
-      class << self
-        # Plans replication factor increase for a given topic
-        #
-        # Generates a detailed reassignment plan that preserves existing replica assignments
-        # while adding new replicas to meet the target replication factor. The plan uses
-        # round-robin distribution to balance new replicas across available brokers.
-        #
-        # @param topic [String] name of the topic
-        # @param to [Integer] target replication factor (must be higher than current)
-        # @param brokers [Hash{Integer => Array<Integer>}] optional manual broker assignments
-        #   per partition. Keys are partition IDs, values are arrays of broker IDs. If not provided
-        #   automatic distribution (usually fine) will be used
-        # @return [Replication] plan object containing JSON, commands, and instructions
-        #
-        # @raise [ArgumentError] if target replication factor is not higher than current
-        # @raise [ArgumentError] if target replication factor exceeds available broker count
-        # @raise [Rdkafka::RdkafkaError] if topic metadata cannot be fetched
-        #
-        # @example Increase replication from 1 to 3 with automatic distribution
-        #   plan = Replication.plan(topic: 'events', to: 3)
-        #
-        #   # Inspect the plan
-        #   puts plan.summary
-        #   puts plan.reassignment_json
-        #
-        #   # Check which brokers will get new replicas
-        #   plan.partitions_assignment.each do |partition_id, broker_ids|
-        #     puts "Partition #{partition_id}: #{broker_ids.join(', ')}"
-        #   end
-        #
-        #   # Save and execute
-        #   plan.export_to_file('increase_rf.json')
-        #
-        # @example Increase replication with manual broker placement
-        #   # Specify exactly which brokers should host each partition
-        #   plan = Replication.plan(
-        #     topic: 'events',
-        #     to: 3,
-        #     brokers: {
-        #       0 => [1, 2, 4], # Partition 0 on brokers 1, 2, 4
-        #       1 => [2, 3, 4], # Partition 1 on brokers 2, 3, 4
-        #       2 => [1, 3, 5]  # Partition 2 on brokers 1, 3, 5
-        #     }
-        #   )
-        #
-        #   # The plan will use your exact broker specifications
-        #   puts plan.partitions_assignment
-        #   # => {0=>[1, 2, 4], 1=>[2, 3, 4], 2=>[1, 3, 5]}
-        #
-        # @note When using manual placement, ensure all partitions are specified
-        # @note Manual placement overrides automatic distribution entirely
-        def plan(topic:, to:, brokers: nil)
-          topic_info = fetch_topic_info(topic)
-          first_partition = topic_info[:partitions].first
-          current_rf = first_partition[:replica_count] || first_partition[:replicas]&.size
-          cluster_info = fetch_cluster_info
-
-          # Use contract for validation
-          validation_data = {
-            topic: topic,
-            to: to,
-            brokers: brokers,
-            current_rf: current_rf,
-            broker_count: cluster_info[:brokers].size,
-            topic_info: topic_info,
-            cluster_info: cluster_info
-          }
-
-          Contracts::Replication.new.validate!(validation_data)
-
-          partitions_assignment = brokers || generate_partitions_assignment(
-            topic_info: topic_info,
-            target_replication_factor: to,
-            cluster_info: cluster_info
-          )
-
-          new(
-            topic: topic,
-            current_replication_factor: current_rf,
-            target_replication_factor: to,
-            partitions_assignment: partitions_assignment,
-            cluster_info: cluster_info
-          )
-        end
-
-        # Plans rebalancing of existing replicas across brokers
-        #
-        # Generates a reassignment plan that redistributes existing replicas more evenly
-        # across the cluster without changing the replication factor. Useful for:
-        #
-        # - Balancing load after adding new brokers to the cluster
-        # - Redistributing replicas after broker failures and recovery
-        # - Optimizing replica placement for better resource utilization
-        # - Moving replicas away from overloaded brokers
-        #
-        # @param topic [String] name of the topic to rebalance
-        # @return [Replication] rebalancing plan
-        #
-        # @example Rebalance after adding new brokers
-        #   # After adding brokers 4 and 5 to a 3-broker cluster
-        #   plan = Replication.rebalance(topic: 'events')
-        #
-        #   # Review how replicas will be redistributed
-        #   puts plan.summary
-        #
-        #   # Execute if distribution looks good
-        #   plan.export_to_file('rebalance.json')
-        #   # Then run: kafka-reassign-partitions.sh --execute ...
-        #
-        # @note This maintains the same replication factor
-        # @note All data will be copied to new locations during rebalancing
-        # @note Consider impact on cluster resources during rebalancing
-        def rebalance(topic:)
-          topic_info = fetch_topic_info(topic)
-          first_partition = topic_info[:partitions].first
-          current_rf = first_partition[:replica_count] || first_partition[:replicas]&.size
-          cluster_info = fetch_cluster_info
-
-          partitions_assignment = generate_partitions_assignment(
-            topic_info: topic_info,
-            target_replication_factor: current_rf,
-            cluster_info: cluster_info,
-            rebalance_only: true
-          )
-
-          new(
-            topic: topic,
-            current_replication_factor: current_rf,
-            target_replication_factor: current_rf,
-            partitions_assignment: partitions_assignment,
-            cluster_info: cluster_info
-          )
-        end
-
-        private
-
-        # Fetches topic metadata including partitions and replica information
-        # @param topic [String] name of the topic
-        # @return [Hash] topic information with partitions metadata
-        def fetch_topic_info(topic)
-          Topics.info(topic)
-        end
+      private
 
-        # Fetches cluster broker information from Kafka metadata
-        # @return [Hash] cluster information with broker details (node_id, host:port)
-        def fetch_cluster_info
-          cluster_metadata = cluster_info
-          {
-            brokers: cluster_metadata.brokers.map do |broker|
-              # Handle both hash and object formats from metadata
-              # rdkafka returns hashes with broker_id, broker_name, broker_port
-              if broker.is_a?(Hash)
-                node_id = broker[:broker_id] || broker[:node_id]
-                host = broker[:broker_name] || broker[:host]
-                port = broker[:broker_port] || broker[:port]
-                { node_id: node_id, host: "#{host}:#{port}" }
-              else
-                { node_id: broker.node_id, host: "#{broker.host}:#{broker.port}" }
-              end
-            end
-          }
-        end
+      # Fetches topic metadata including partitions and replica information
+      # @param topic [String] name of the topic
+      # @return [Hash] topic information with partitions metadata
+      def fetch_topic_info(topic)
+        Topics.new(kafka: @custom_kafka).info(topic)
+      end
 
-        # Generates partition-to-broker assignments for replication changes
-        # Handles both replication factor increases and rebalancing scenarios
-        # @param topic_info [Hash] topic metadata with partition information
-        # @param target_replication_factor [Integer] desired replication factor
-        # @param cluster_info [Hash] cluster metadata with broker information
-        # @param rebalance_only [Boolean] true for rebalancing, false for increase
-        # @return [Hash{Integer => Array<Integer>}] assignments (partition_id => broker_ids)
-        def generate_partitions_assignment(
-          topic_info:,
-          target_replication_factor:,
-          cluster_info:,
-          rebalance_only: false
-        )
-          partitions = topic_info[:partitions]
-          brokers = cluster_info[:brokers].map { |broker_info| broker_info[:node_id] }.sort
-          assignments = {}
-
-          partitions.each do |partition_info|
-            partition_id = partition_info[:partition_id]
-
-            # Handle both :replicas (array of objects) and :replica_brokers (array of IDs)
-            replicas = partition_info[:replicas] || partition_info[:replica_brokers] || []
-            current_replicas = if replicas.first.respond_to?(:node_id)
-              replicas.map(&:node_id).sort
-            else
-              replicas.sort
-            end
-
-            if rebalance_only
-              # For rebalancing, redistribute current replicas optimally
-              new_replicas = select_brokers_for_partition(
-                partition_id: partition_id,
-                brokers: brokers,
-                replica_count: target_replication_factor,
-                avoid_brokers: []
-              )
+      # Fetches cluster broker information from Kafka metadata
+      # @return [Hash] cluster information with broker details (node_id, host:port)
+      def fetch_cluster_info
+        cluster_metadata = cluster_info
+        {
+          brokers: cluster_metadata.brokers.map do |broker|
+            # Handle both hash and object formats from metadata
+            # rdkafka returns hashes with broker_id, broker_name, broker_port
+            if broker.is_a?(Hash)
+              node_id = broker[:broker_id] || broker[:node_id]
+              host = broker[:broker_name] || broker[:host]
+              port = broker[:broker_port] || broker[:port]
+              { node_id: node_id, host: "#{host}:#{port}" }
             else
-              # For replication increase, keep existing replicas and add new ones
-              additional_needed = target_replication_factor - current_replicas.size
-              available_brokers = brokers - current_replicas
-
-              additional_replicas = select_additional_brokers(
-                available_brokers: available_brokers,
-                needed_count: additional_needed,
-                partition_id: partition_id
-              )
-
-              new_replicas = (current_replicas + additional_replicas).sort
+              { node_id: broker.node_id, host: "#{broker.host}:#{broker.port}" }
             end
+          end
+        }
+      end
 
-            assignments[partition_id] = new_replicas
+      # Generates partition-to-broker assignments for replication changes
+      # Handles both replication factor increases and rebalancing scenarios
+      # @param topic_info [Hash] topic metadata with partition information
+      # @param target_replication_factor [Integer] desired replication factor
+      # @param cluster_info [Hash] cluster metadata with broker information
+      # @param rebalance_only [Boolean] true for rebalancing, false for increase
+      # @return [Hash{Integer => Array<Integer>}] assignments (partition_id => broker_ids)
+      def generate_partitions_assignment(
+        topic_info:,
+        target_replication_factor:,
+        cluster_info:,
+        rebalance_only: false
+      )
+        partitions = topic_info[:partitions]
+        brokers = cluster_info[:brokers].map { |broker_info| broker_info[:node_id] }.sort
+        assignments = {}
+
+        partitions.each do |partition_info|
+          partition_id = partition_info[:partition_id]
+
+          # Handle both :replicas (array of objects) and :replica_brokers (array of IDs)
+          replicas = partition_info[:replicas] || partition_info[:replica_brokers] || []
+          current_replicas = if replicas.first.respond_to?(:node_id)
+            replicas.map(&:node_id).sort
+          else
+            replicas.sort
+          end
+
+          if rebalance_only
+            # For rebalancing, redistribute current replicas optimally
+            new_replicas = select_brokers_for_partition(
+              partition_id: partition_id,
+              brokers: brokers,
+              replica_count: target_replication_factor,
+              avoid_brokers: []
+            )
+          else
+            # For replication increase, keep existing replicas and add new ones
+            additional_needed = target_replication_factor - current_replicas.size
+            available_brokers = brokers - current_replicas
+
+            additional_replicas = select_additional_brokers(
+              available_brokers: available_brokers,
+              needed_count: additional_needed,
+              partition_id: partition_id
+            )
+
+            new_replicas = (current_replicas + additional_replicas).sort
           end
 
-          assignments
+          assignments[partition_id] = new_replicas
         end
 
-        # Selects brokers for a partition using round-robin distribution
-        # Distributes replicas evenly across available brokers
-        # @param partition_id [Integer] partition identifier for offset calculation
-        # @param brokers [Array<Integer>] available broker node IDs
-        # @param replica_count [Integer] number of replicas needed
-        # @param avoid_brokers [Array<Integer>] broker IDs to exclude from selection
-        # @return [Array<Integer>] sorted array of selected broker node IDs
-        def select_brokers_for_partition(
-          partition_id:,
-          brokers:,
-          replica_count:,
-          avoid_brokers: []
-        )
-          available_brokers = brokers - avoid_brokers
+        assignments
+      end
 
-          # Simple round-robin selection starting from a different offset per partition
-          # This helps distribute replicas more evenly across brokers
-          start_index = partition_id % available_brokers.size
-          selected = []
+      # Selects brokers for a partition using round-robin distribution
+      # Distributes replicas evenly across available brokers
+      # @param partition_id [Integer] partition identifier for offset calculation
+      # @param brokers [Array<Integer>] available broker node IDs
+      # @param replica_count [Integer] number of replicas needed
+      # @param avoid_brokers [Array<Integer>] broker IDs to exclude from selection
+      # @return [Array<Integer>] sorted array of selected broker node IDs
+      def select_brokers_for_partition(
+        partition_id:,
+        brokers:,
+        replica_count:,
+        avoid_brokers: []
+      )
+        available_brokers = brokers - avoid_brokers
 
-          replica_count.times do |replica_index|
-            broker_index = (start_index + replica_index) % available_brokers.size
-            selected << available_brokers[broker_index]
-          end
+        # Simple round-robin selection starting from a different offset per partition
+        # This helps distribute replicas more evenly across brokers
+        start_index = partition_id % available_brokers.size
+        selected = []
 
-          selected.sort
+        replica_count.times do |replica_index|
+          broker_index = (start_index + replica_index) % available_brokers.size
+          selected << available_brokers[broker_index]
         end
 
-        # Selects additional brokers for increasing replication factor
-        # Uses round-robin selection for even distribution across available brokers
-        # @param available_brokers [Array<Integer>] broker IDs available for new replicas
-        # @param needed_count [Integer] number of additional brokers needed
-        # @param partition_id [Integer] partition identifier for offset calculation
-        # @return [Array<Integer>] sorted array of selected broker node IDs
-        def select_additional_brokers(available_brokers:, needed_count:, partition_id:)
-          # Use round-robin starting from partition-specific offset
-          start_index = partition_id % available_brokers.size
-          selected = []
-
-          needed_count.times do |additional_replica_index|
-            broker_index = (start_index + additional_replica_index) % available_brokers.size
-            selected << available_brokers[broker_index]
-          end
+        selected.sort
+      end
 
-          selected.sort
+      # Selects additional brokers for increasing replication factor
+      # Uses round-robin selection for even distribution across available brokers
+      # @param available_brokers [Array<Integer>] broker IDs available for new replicas
+      # @param needed_count [Integer] number of additional brokers needed
+      # @param partition_id [Integer] partition identifier for offset calculation
+      # @return [Array<Integer>] sorted array of selected broker node IDs
+      def select_additional_brokers(available_brokers:, needed_count:, partition_id:)
+        # Use round-robin starting from partition-specific offset
+        start_index = partition_id % available_brokers.size
+        selected = []
+
+        needed_count.times do |additional_replica_index|
+          broker_index = (start_index + additional_replica_index) % available_brokers.size
+          selected << available_brokers[broker_index]
         end
-      end
 
-      private
+        selected.sort
+      end
 
       # Generates the JSON structure required by kafka-reassign-partitions.sh
       # Creates Kafka-compatible reassignment plan with version and partitions data
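The round-robin helpers moved above are easy to check in isolation: each partition starts selecting at partition_id % available_brokers.size, so consecutive partitions draw their new replicas from different brokers. A standalone re-implementation for illustration only (pick_brokers is hypothetical, not part of the gem):

# Mirrors the selection loop of select_additional_brokers
def pick_brokers(available_brokers, needed_count, partition_id)
  start_index = partition_id % available_brokers.size
  needed_count.times.map do |i|
    available_brokers[(start_index + i) % available_brokers.size]
  end.sort
end

# Raising RF from 1 to 3 on a 5-broker cluster where partition N already
# lives on broker N + 1, so that broker is excluded from the candidates:
pick_brokers([2, 3, 4, 5], 2, 0) # => [2, 3]
pick_brokers([1, 3, 4, 5], 2, 1) # => [3, 4]
pick_brokers([1, 2, 4, 5], 2, 2) # => [4, 5]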