karafka 2.5.7 → 2.5.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (263)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +9 -0
  3. data/LICENSE-COMM +4 -2
  4. data/config/locales/pro_errors.yml +1 -0
  5. data/lib/karafka/admin/replication.rb +36 -4
  6. data/lib/karafka/pro/active_job/consumer.rb +8 -0
  7. data/lib/karafka/pro/active_job/dispatcher.rb +8 -0
  8. data/lib/karafka/pro/active_job/job_options_contract.rb +8 -0
  9. data/lib/karafka/pro/admin/recovery/errors.rb +51 -0
  10. data/lib/karafka/pro/admin/recovery.rb +486 -0
  11. data/lib/karafka/pro/base_consumer.rb +8 -0
  12. data/lib/karafka/pro/cleaner/errors.rb +8 -0
  13. data/lib/karafka/pro/cleaner/messages/message.rb +8 -0
  14. data/lib/karafka/pro/cleaner/messages/messages.rb +8 -0
  15. data/lib/karafka/pro/cleaner/messages/metadata.rb +8 -0
  16. data/lib/karafka/pro/cleaner.rb +8 -0
  17. data/lib/karafka/pro/cli/contracts/server.rb +8 -0
  18. data/lib/karafka/pro/cli/parallel_segments/base.rb +8 -0
  19. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +8 -0
  20. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +8 -0
  21. data/lib/karafka/pro/cli/parallel_segments.rb +8 -0
  22. data/lib/karafka/pro/cli/topics/health.rb +10 -2
  23. data/lib/karafka/pro/cli/topics.rb +8 -0
  24. data/lib/karafka/pro/connection/manager.rb +8 -0
  25. data/lib/karafka/pro/connection/multiplexing/listener.rb +8 -0
  26. data/lib/karafka/pro/contracts/base.rb +8 -0
  27. data/lib/karafka/pro/encryption/cipher.rb +8 -0
  28. data/lib/karafka/pro/encryption/contracts/config.rb +8 -0
  29. data/lib/karafka/pro/encryption/errors.rb +8 -0
  30. data/lib/karafka/pro/encryption/messages/middleware.rb +8 -0
  31. data/lib/karafka/pro/encryption/messages/parser.rb +8 -0
  32. data/lib/karafka/pro/encryption/setup/config.rb +8 -0
  33. data/lib/karafka/pro/encryption.rb +8 -0
  34. data/lib/karafka/pro/instrumentation/performance_tracker.rb +8 -0
  35. data/lib/karafka/pro/iterator/expander.rb +9 -1
  36. data/lib/karafka/pro/iterator/tpl_builder.rb +8 -0
  37. data/lib/karafka/pro/iterator.rb +9 -1
  38. data/lib/karafka/pro/loader.rb +8 -0
  39. data/lib/karafka/pro/processing/adaptive_iterator/consumer.rb +8 -0
  40. data/lib/karafka/pro/processing/adaptive_iterator/tracker.rb +8 -0
  41. data/lib/karafka/pro/processing/collapser.rb +8 -0
  42. data/lib/karafka/pro/processing/coordinator.rb +8 -0
  43. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +8 -0
  44. data/lib/karafka/pro/processing/coordinators/filters_applier.rb +8 -0
  45. data/lib/karafka/pro/processing/coordinators/virtual_offset_manager.rb +8 -0
  46. data/lib/karafka/pro/processing/executor.rb +8 -0
  47. data/lib/karafka/pro/processing/expansions_selector.rb +8 -0
  48. data/lib/karafka/pro/processing/filters/base.rb +8 -0
  49. data/lib/karafka/pro/processing/filters/delayer.rb +8 -0
  50. data/lib/karafka/pro/processing/filters/expirer.rb +8 -0
  51. data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +8 -0
  52. data/lib/karafka/pro/processing/filters/throttler.rb +8 -0
  53. data/lib/karafka/pro/processing/filters/virtual_limiter.rb +8 -0
  54. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +8 -0
  55. data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +8 -0
  56. data/lib/karafka/pro/processing/jobs/periodic.rb +8 -0
  57. data/lib/karafka/pro/processing/jobs/periodic_non_blocking.rb +8 -0
  58. data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +8 -0
  59. data/lib/karafka/pro/processing/jobs_builder.rb +8 -0
  60. data/lib/karafka/pro/processing/jobs_queue.rb +8 -0
  61. data/lib/karafka/pro/processing/offset_metadata/consumer.rb +8 -0
  62. data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +8 -0
  63. data/lib/karafka/pro/processing/offset_metadata/listener.rb +8 -0
  64. data/lib/karafka/pro/processing/parallel_segments/filters/base.rb +8 -0
  65. data/lib/karafka/pro/processing/parallel_segments/filters/default.rb +8 -0
  66. data/lib/karafka/pro/processing/parallel_segments/filters/mom.rb +8 -0
  67. data/lib/karafka/pro/processing/partitioner.rb +8 -0
  68. data/lib/karafka/pro/processing/periodic_job/consumer.rb +8 -0
  69. data/lib/karafka/pro/processing/piping/consumer.rb +8 -0
  70. data/lib/karafka/pro/processing/schedulers/base.rb +8 -0
  71. data/lib/karafka/pro/processing/schedulers/default.rb +8 -0
  72. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +8 -0
  73. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +8 -0
  74. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +8 -0
  75. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +8 -0
  76. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +8 -0
  77. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +8 -0
  78. data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +8 -0
  79. data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +8 -0
  80. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom.rb +8 -0
  81. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +8 -0
  82. data/lib/karafka/pro/processing/strategies/aj/ftr_mom.rb +8 -0
  83. data/lib/karafka/pro/processing/strategies/aj/ftr_mom_vp.rb +8 -0
  84. data/lib/karafka/pro/processing/strategies/aj/lrj_mom.rb +8 -0
  85. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +8 -0
  86. data/lib/karafka/pro/processing/strategies/aj/mom.rb +8 -0
  87. data/lib/karafka/pro/processing/strategies/aj/mom_vp.rb +8 -0
  88. data/lib/karafka/pro/processing/strategies/base.rb +8 -0
  89. data/lib/karafka/pro/processing/strategies/default.rb +8 -0
  90. data/lib/karafka/pro/processing/strategies/dlq/default.rb +8 -0
  91. data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +8 -0
  92. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +8 -0
  93. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +8 -0
  94. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom_vp.rb +8 -0
  95. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_vp.rb +8 -0
  96. data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +8 -0
  97. data/lib/karafka/pro/processing/strategies/dlq/ftr_mom_vp.rb +8 -0
  98. data/lib/karafka/pro/processing/strategies/dlq/ftr_vp.rb +8 -0
  99. data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +8 -0
  100. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +8 -0
  101. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom_vp.rb +8 -0
  102. data/lib/karafka/pro/processing/strategies/dlq/lrj_vp.rb +8 -0
  103. data/lib/karafka/pro/processing/strategies/dlq/mom.rb +8 -0
  104. data/lib/karafka/pro/processing/strategies/dlq/mom_vp.rb +8 -0
  105. data/lib/karafka/pro/processing/strategies/dlq/vp.rb +8 -0
  106. data/lib/karafka/pro/processing/strategies/ftr/default.rb +8 -0
  107. data/lib/karafka/pro/processing/strategies/ftr/vp.rb +8 -0
  108. data/lib/karafka/pro/processing/strategies/lrj/default.rb +8 -0
  109. data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +8 -0
  110. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +8 -0
  111. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom_vp.rb +8 -0
  112. data/lib/karafka/pro/processing/strategies/lrj/ftr_vp.rb +8 -0
  113. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +8 -0
  114. data/lib/karafka/pro/processing/strategies/lrj/mom_vp.rb +8 -0
  115. data/lib/karafka/pro/processing/strategies/lrj/vp.rb +8 -0
  116. data/lib/karafka/pro/processing/strategies/mom/default.rb +8 -0
  117. data/lib/karafka/pro/processing/strategies/mom/ftr.rb +8 -0
  118. data/lib/karafka/pro/processing/strategies/mom/ftr_vp.rb +8 -0
  119. data/lib/karafka/pro/processing/strategies/mom/vp.rb +8 -0
  120. data/lib/karafka/pro/processing/strategies/vp/default.rb +8 -0
  121. data/lib/karafka/pro/processing/strategies.rb +8 -0
  122. data/lib/karafka/pro/processing/strategy_selector.rb +8 -0
  123. data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +8 -0
  124. data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +8 -0
  125. data/lib/karafka/pro/processing/virtual_partitions/distributors/base.rb +8 -0
  126. data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +8 -0
  127. data/lib/karafka/pro/recurring_tasks/consumer.rb +8 -0
  128. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +8 -0
  129. data/lib/karafka/pro/recurring_tasks/contracts/task.rb +8 -0
  130. data/lib/karafka/pro/recurring_tasks/deserializer.rb +8 -0
  131. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +8 -0
  132. data/lib/karafka/pro/recurring_tasks/errors.rb +8 -0
  133. data/lib/karafka/pro/recurring_tasks/executor.rb +8 -0
  134. data/lib/karafka/pro/recurring_tasks/listener.rb +8 -0
  135. data/lib/karafka/pro/recurring_tasks/matcher.rb +8 -0
  136. data/lib/karafka/pro/recurring_tasks/schedule.rb +8 -0
  137. data/lib/karafka/pro/recurring_tasks/serializer.rb +8 -0
  138. data/lib/karafka/pro/recurring_tasks/setup/config.rb +8 -0
  139. data/lib/karafka/pro/recurring_tasks/task.rb +8 -0
  140. data/lib/karafka/pro/recurring_tasks.rb +8 -0
  141. data/lib/karafka/pro/routing/features/active_job/builder.rb +8 -0
  142. data/lib/karafka/pro/routing/features/active_job.rb +8 -0
  143. data/lib/karafka/pro/routing/features/adaptive_iterator/config.rb +8 -0
  144. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +8 -0
  145. data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb +8 -0
  146. data/lib/karafka/pro/routing/features/adaptive_iterator.rb +8 -0
  147. data/lib/karafka/pro/routing/features/base.rb +8 -0
  148. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +8 -0
  149. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +8 -0
  150. data/lib/karafka/pro/routing/features/dead_letter_queue.rb +8 -0
  151. data/lib/karafka/pro/routing/features/delaying/config.rb +8 -0
  152. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +8 -0
  153. data/lib/karafka/pro/routing/features/delaying/topic.rb +8 -0
  154. data/lib/karafka/pro/routing/features/delaying.rb +8 -0
  155. data/lib/karafka/pro/routing/features/direct_assignments/config.rb +8 -0
  156. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +8 -0
  157. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +8 -0
  158. data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +8 -0
  159. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +8 -0
  160. data/lib/karafka/pro/routing/features/direct_assignments.rb +8 -0
  161. data/lib/karafka/pro/routing/features/expiring/config.rb +8 -0
  162. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +8 -0
  163. data/lib/karafka/pro/routing/features/expiring/topic.rb +8 -0
  164. data/lib/karafka/pro/routing/features/expiring.rb +8 -0
  165. data/lib/karafka/pro/routing/features/filtering/config.rb +8 -0
  166. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +8 -0
  167. data/lib/karafka/pro/routing/features/filtering/topic.rb +8 -0
  168. data/lib/karafka/pro/routing/features/filtering.rb +8 -0
  169. data/lib/karafka/pro/routing/features/inline_insights/config.rb +8 -0
  170. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +8 -0
  171. data/lib/karafka/pro/routing/features/inline_insights/topic.rb +8 -0
  172. data/lib/karafka/pro/routing/features/inline_insights.rb +8 -0
  173. data/lib/karafka/pro/routing/features/long_running_job/config.rb +8 -0
  174. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +8 -0
  175. data/lib/karafka/pro/routing/features/long_running_job/topic.rb +8 -0
  176. data/lib/karafka/pro/routing/features/long_running_job.rb +8 -0
  177. data/lib/karafka/pro/routing/features/multiplexing/config.rb +8 -0
  178. data/lib/karafka/pro/routing/features/multiplexing/contracts/routing.rb +81 -0
  179. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +8 -0
  180. data/lib/karafka/pro/routing/features/multiplexing/patches/contracts/consumer_group.rb +8 -0
  181. data/lib/karafka/pro/routing/features/multiplexing/proxy.rb +8 -0
  182. data/lib/karafka/pro/routing/features/multiplexing/subscription_group.rb +8 -0
  183. data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +8 -0
  184. data/lib/karafka/pro/routing/features/multiplexing.rb +17 -2
  185. data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +8 -0
  186. data/lib/karafka/pro/routing/features/non_blocking_job.rb +8 -0
  187. data/lib/karafka/pro/routing/features/offset_metadata/config.rb +8 -0
  188. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +8 -0
  189. data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +8 -0
  190. data/lib/karafka/pro/routing/features/offset_metadata.rb +8 -0
  191. data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +8 -0
  192. data/lib/karafka/pro/routing/features/parallel_segments/config.rb +8 -0
  193. data/lib/karafka/pro/routing/features/parallel_segments/consumer_group.rb +8 -0
  194. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +8 -0
  195. data/lib/karafka/pro/routing/features/parallel_segments/topic.rb +8 -0
  196. data/lib/karafka/pro/routing/features/parallel_segments.rb +8 -0
  197. data/lib/karafka/pro/routing/features/patterns/builder.rb +8 -0
  198. data/lib/karafka/pro/routing/features/patterns/config.rb +8 -0
  199. data/lib/karafka/pro/routing/features/patterns/consumer_group.rb +8 -0
  200. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +8 -0
  201. data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +8 -0
  202. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +8 -0
  203. data/lib/karafka/pro/routing/features/patterns/detector.rb +8 -0
  204. data/lib/karafka/pro/routing/features/patterns/pattern.rb +8 -0
  205. data/lib/karafka/pro/routing/features/patterns/patterns.rb +8 -0
  206. data/lib/karafka/pro/routing/features/patterns/topic.rb +8 -0
  207. data/lib/karafka/pro/routing/features/patterns/topics.rb +8 -0
  208. data/lib/karafka/pro/routing/features/patterns.rb +8 -0
  209. data/lib/karafka/pro/routing/features/pausing/config.rb +8 -0
  210. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +8 -0
  211. data/lib/karafka/pro/routing/features/pausing/topic.rb +8 -0
  212. data/lib/karafka/pro/routing/features/pausing.rb +8 -0
  213. data/lib/karafka/pro/routing/features/periodic_job/config.rb +8 -0
  214. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +8 -0
  215. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +8 -0
  216. data/lib/karafka/pro/routing/features/periodic_job.rb +8 -0
  217. data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +8 -0
  218. data/lib/karafka/pro/routing/features/recurring_tasks/config.rb +8 -0
  219. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +8 -0
  220. data/lib/karafka/pro/routing/features/recurring_tasks/proxy.rb +8 -0
  221. data/lib/karafka/pro/routing/features/recurring_tasks/topic.rb +8 -0
  222. data/lib/karafka/pro/routing/features/recurring_tasks.rb +8 -0
  223. data/lib/karafka/pro/routing/features/scheduled_messages/builder.rb +8 -0
  224. data/lib/karafka/pro/routing/features/scheduled_messages/config.rb +8 -0
  225. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +8 -0
  226. data/lib/karafka/pro/routing/features/scheduled_messages/proxy.rb +8 -0
  227. data/lib/karafka/pro/routing/features/scheduled_messages/topic.rb +8 -0
  228. data/lib/karafka/pro/routing/features/scheduled_messages.rb +8 -0
  229. data/lib/karafka/pro/routing/features/swarm/config.rb +8 -0
  230. data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +8 -0
  231. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +8 -0
  232. data/lib/karafka/pro/routing/features/swarm/topic.rb +8 -0
  233. data/lib/karafka/pro/routing/features/swarm.rb +8 -0
  234. data/lib/karafka/pro/routing/features/throttling/config.rb +8 -0
  235. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +8 -0
  236. data/lib/karafka/pro/routing/features/throttling/topic.rb +8 -0
  237. data/lib/karafka/pro/routing/features/throttling.rb +8 -0
  238. data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +8 -0
  239. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +8 -0
  240. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +8 -0
  241. data/lib/karafka/pro/routing/features/virtual_partitions.rb +8 -0
  242. data/lib/karafka/pro/scheduled_messages/consumer.rb +8 -0
  243. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +8 -0
  244. data/lib/karafka/pro/scheduled_messages/contracts/message.rb +8 -0
  245. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +8 -0
  246. data/lib/karafka/pro/scheduled_messages/day.rb +8 -0
  247. data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +8 -0
  248. data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +8 -0
  249. data/lib/karafka/pro/scheduled_messages/dispatcher.rb +8 -0
  250. data/lib/karafka/pro/scheduled_messages/errors.rb +8 -0
  251. data/lib/karafka/pro/scheduled_messages/max_epoch.rb +8 -0
  252. data/lib/karafka/pro/scheduled_messages/proxy.rb +8 -0
  253. data/lib/karafka/pro/scheduled_messages/schema_validator.rb +8 -0
  254. data/lib/karafka/pro/scheduled_messages/serializer.rb +8 -0
  255. data/lib/karafka/pro/scheduled_messages/setup/config.rb +8 -0
  256. data/lib/karafka/pro/scheduled_messages/state.rb +8 -0
  257. data/lib/karafka/pro/scheduled_messages/tracker.rb +8 -0
  258. data/lib/karafka/pro/scheduled_messages.rb +8 -0
  259. data/lib/karafka/pro/swarm/liveness_listener.rb +29 -14
  260. data/lib/karafka/setup/config_proxy.rb +9 -1
  261. data/lib/karafka/swarm/liveness_listener.rb +17 -8
  262. data/lib/karafka/version.rb +1 -1
  263. metadata +5 -2
@@ -0,0 +1,486 @@
1
+ # frozen_string_literal: true
2
+
3
+ # Karafka Pro - Source Available Commercial Software
4
+ # Copyright (c) 2017-present Maciej Mensfeld. All rights reserved.
5
+ #
6
+ # This software is NOT open source. It is source-available commercial software
7
+ # requiring a paid license for use. It is NOT covered by LGPL.
8
+ #
9
+ # The author retains all right, title, and interest in this software,
10
+ # including all copyrights, patents, and other intellectual property rights.
11
+ # No patent rights are granted under this license.
12
+ #
13
+ # PROHIBITED:
14
+ # - Use without a valid commercial license
15
+ # - Redistribution, modification, or derivative works without authorization
16
+ # - Reverse engineering, decompilation, or disassembly of this software
17
+ # - Use as training data for AI/ML models or inclusion in datasets
18
+ # - Scraping, crawling, or automated collection for any purpose
19
+ #
20
+ # PERMITTED:
21
+ # - Reading, referencing, and linking for personal or commercial use
22
+ # - Runtime retrieval by AI assistants, coding agents, and RAG systems
23
+ # for the purpose of providing contextual help to Karafka users
24
+ #
25
+ # Receipt, viewing, or possession of this software does not convey or
26
+ # imply any license or right beyond those expressly stated above.
27
+ #
28
+ # License: https://karafka.io/docs/Pro-License-Comm/
29
+ # Contact: contact@karafka.io
30
+
31
+ module Karafka
32
+ module Pro
33
+ # Pro Admin utilities
34
+ module Admin
35
+ # Consumer group recovery toolkit.
36
+ #
37
+ # Provides coordinator-bypass offset reading and blast-radius assessment for scenarios
38
+ # where the Kafka group coordinator is in a FAILED state and normal admin APIs return
39
+ # NOT_COORDINATOR or time out.
40
+ #
41
+ # Works for any coordinator failure scenario:
42
+ # - KAFKA-19862 (compaction race during coordinator load)
43
+ # - Broker OOM / GC pause making coordinator unreachable
44
+ # - Network partition isolating the coordinator broker
45
+ # - Any future bug that transitions a coordinator shard to FAILED
46
+ #
47
+ # Each consumer group is assigned to a specific __consumer_offsets partition (and therefore
48
+ # a specific coordinator broker) based on its name. When that coordinator enters a FAILED
49
+ # state, all operations for the group - joins, heartbeats, offset commits, and offset
50
+ # fetches - are stuck until the coordinator recovers.
51
+ #
52
+ # A common recovery strategy is migrating to a new consumer group with a different name,
53
+ # which causes Kafka to hash it to a (likely) different __consumer_offsets partition served
54
+ # by a healthy coordinator. This class provides the tools to:
55
+ # 1. Read committed offsets directly from the raw __consumer_offsets log (bypassing the
56
+ # broken coordinator) via {#read_committed_offsets}
57
+ # 2. Assess blast radius: which broker coordinates a group ({#coordinator_for}), which
58
+ # partitions a broker leads ({#affected_partitions}), and which groups are affected
59
+ # ({#affected_groups})
60
+ #
61
+ # To complete the migration, use {Karafka::Admin::ConsumerGroups.seek} to write the
62
+ # recovered offsets to the new group.
63
+ #
64
+ # All reads go through the fetch API and never touch the group coordinator.
65
+ #
66
+ # @note These methods should NOT be used unless you are experiencing issues that require
67
+ # manual intervention. Misuse can lead to data loss or other problems.
68
+ class Recovery < Karafka::Admin
69
+ # Internal topic where Kafka stores committed offsets and group metadata
70
+ OFFSETS_TOPIC = "__consumer_offsets"
71
+
72
+ # Default lookback window for offset scanning (1 hour). Covers any normal commit interval.
73
+ # Provide an earlier Time if your group commits infrequently or the incident has been
74
+ # ongoing for longer than 1 hour.
75
+ DEFAULT_LAST_COMMITTED_AT_OFFSET = 3_600
76
+
77
+ private_constant :OFFSETS_TOPIC, :DEFAULT_LAST_COMMITTED_AT_OFFSET
78
+
79
+ class << self
80
+ # @param consumer_group_id [String] consumer group to read offsets for
81
+ # @param last_committed_at [Time] approximate time of last successful offset commit
82
+ # (default: 1 hour ago). A good rule of thumb is the crash time minus 10 minutes
83
+ # @return [Hash{String => Hash{Integer => Integer}}]
84
+ # @see #read_committed_offsets
85
+ def read_committed_offsets(
86
+ consumer_group_id,
87
+ last_committed_at: Time.now - DEFAULT_LAST_COMMITTED_AT_OFFSET
88
+ )
89
+ new.read_committed_offsets(consumer_group_id, last_committed_at: last_committed_at)
90
+ end
91
+
92
+ # @param consumer_group_id [String] consumer group id
93
+ # @return [Integer] __consumer_offsets partition number
94
+ # @see #offsets_partition_for
95
+ def offsets_partition_for(consumer_group_id)
96
+ new.offsets_partition_for(consumer_group_id)
97
+ end
98
+
99
+ # @param consumer_group_id [String] consumer group to look up
100
+ # @return [Hash] coordinator broker info
101
+ # @see #coordinator_for
102
+ def coordinator_for(consumer_group_id)
103
+ new.coordinator_for(consumer_group_id)
104
+ end
105
+
106
+ # @param partition [Integer] __consumer_offsets partition to scan
107
+ # @param last_committed_at [Time] approximate time of last successful offset commit
108
+ # (default: 1 hour ago). A good rule of thumb is the crash time minus 10 minutes
109
+ # @return [Array<String>] sorted consumer group names
110
+ # @see #affected_groups
111
+ def affected_groups(partition, last_committed_at: Time.now - DEFAULT_LAST_COMMITTED_AT_OFFSET)
112
+ new.affected_groups(partition, last_committed_at: last_committed_at)
113
+ end
114
+
115
+ # @param broker_id [Integer] broker node id
116
+ # @return [Array<Integer>] sorted partition numbers
117
+ # @see #affected_partitions
118
+ def affected_partitions(broker_id)
119
+ new.affected_partitions(broker_id)
120
+ end
121
+ end
122
+
123
+ # Reads committed offsets for a consumer group directly from the __consumer_offsets internal
124
+ # topic, bypassing the group coordinator. Only scans the single __consumer_offsets partition
125
+ # that holds data for the given group (determined by Java's String#hashCode mod partition
126
+ # count), starting from last_committed_at and reading forward to EOF. Later records
127
+ # overwrite earlier ones so the result always reflects the most recent committed offset per
128
+ # partition.
129
+ #
130
+ # @note All consumers in this group should be fully stopped before calling this method.
131
+ # While normally they would already be stopped due to a coordinator failure, if the
132
+ # cluster recovers concurrently, active consumers may commit newer offsets that this scan
133
+ # will not capture, resulting in stale data.
134
+ #
135
+ # @note This method may take a noticeable amount of time to complete because it scans
136
+ # the raw __consumer_offsets log from last_committed_at forward to the end. The duration
137
+ # depends on the volume of offset commits in the scan window across all consumer groups
138
+ # that hash to the same __consumer_offsets partition.
139
+ #
140
+ # @note The result only contains topic-partitions that had offsets committed after
141
+ # last_committed_at. If a partition never had an offset committed, or if the commit
142
+ # happened before last_committed_at, it will be absent from the result. It is the
143
+ # caller's responsibility to verify that all expected topic-partitions are present before
144
+ # using the result for migration or other operations.
145
+ #
146
+ # @param consumer_group_id [String] consumer group to read offsets for
147
+ # @param last_committed_at [Time] approximate time of last successful offset commit
148
+ # (default: 1 hour ago). A good rule of thumb is the crash time minus 10 minutes
149
+ # @return [Hash{String => Hash{Integer => Integer}}]
150
+ # { topic => { partition => committed_offset } }
151
+ #
152
+ # @example Read offsets for the last hour (default)
153
+ # Karafka::Admin::Recovery.read_committed_offsets('sync')
154
+ # #=> { 'events' => { 0 => 1400, 1 => 1402, ... } }
155
+ #
156
+ # @example Read offsets for the last 6 hours
157
+ # Karafka::Admin::Recovery.read_committed_offsets(
158
+ # 'sync', last_committed_at: Time.now - 6 * 3600
159
+ # )
160
+ #
161
+ # @example Read offsets from a specific point in time
162
+ # Karafka::Admin::Recovery.read_committed_offsets('sync', last_committed_at: Time.new(2025, 3, 1))
163
+ #
164
+ # @example Migrate a stuck consumer group to a new name (two-step workflow)
165
+ # # Step 1: Read committed offsets from the broken group (bypasses coordinator)
166
+ # offsets = Karafka::Admin::Recovery.read_committed_offsets('sync')
167
+ # #=> { 'events' => { 0 => 1400, 1 => 1402 }, 'orders' => { 0 => 890 } }
168
+ #
169
+ # # Step 2: Inspect the recovered offsets — verify all expected topics and partitions
170
+ # # are present and the offset values look reasonable before committing them
171
+ #
172
+ # # Step 3: Write the offsets to the target group using standard Admin APIs
173
+ # Karafka::Admin::ConsumerGroups.seek('sync_v2', offsets)
174
+ #
175
+ # # Now reconfigure your consumers to use 'sync_v2' and restart them
176
+ def read_committed_offsets(
177
+ consumer_group_id,
178
+ last_committed_at: Time.now - DEFAULT_LAST_COMMITTED_AT_OFFSET
179
+ )
180
+ committed = Hash.new { |h, k| h[k] = {} }
181
+ target_partition = offsets_partition_for(consumer_group_id)
182
+
183
+ iterator = Pro::Iterator.new(
184
+ { OFFSETS_TOPIC => { target_partition => last_committed_at } },
185
+ settings: @custom_kafka
186
+ )
187
+
188
+ iterator.each do |message|
189
+ next unless message.raw_key
190
+
191
+ parsed = parse_offset_commit(message)
192
+ next unless parsed
193
+ next unless parsed[:group] == consumer_group_id
194
+
195
+ if parsed[:offset].nil?
196
+ # Tombstone — offset was deleted, remove from results
197
+ committed[parsed[:topic]].delete(parsed[:partition])
198
+ committed.delete(parsed[:topic]) if committed[parsed[:topic]].empty?
199
+ else
200
+ # Last write wins — scanning forward means we naturally end up with the most
201
+ # recent commit per partition
202
+ committed[parsed[:topic]][parsed[:partition]] = parsed[:offset]
203
+ end
204
+ end
205
+
206
+ committed.sort.to_h.transform_values { |parts| parts.sort.to_h }
207
+ end
208
+
209
+ # Determines which __consumer_offsets partition holds data for a given consumer group. Kafka
210
+ # uses Utils.abs(String#hashCode) % numPartitions where hashCode is Java's 32-bit signed
211
+ # hash: s[0]*31^(n-1) + s[1]*31^(n-2) + ... + s[n-1], computed with int32 overflow
212
+ # semantics. Utils.abs maps Integer.MIN_VALUE to 0.
213
+ #
214
+ # @param consumer_group_id [String] consumer group id
215
+ # @return [Integer] __consumer_offsets partition number
216
+ #
217
+ # @example Check which partition stores offsets for a group
218
+ # Karafka::Admin::Recovery.offsets_partition_for('my-group')
219
+ # #=> 17
220
+ def offsets_partition_for(consumer_group_id)
221
+ h = java_hash_code(consumer_group_id)
222
+ # Kafka's Utils.abs: Integer.MIN_VALUE maps to 0
223
+ h = (h == -2_147_483_648) ? 0 : h.abs
224
+ h % offsets_partition_count
225
+ end
226
+
227
+ # Returns which broker is the coordinator for a consumer group. The coordinator is the
228
+ # leader of the __consumer_offsets partition assigned to this group. Pure metadata lookup
229
+ # that does not scan any topic data.
230
+ #
231
# Use this to quickly identify which broker is responsible for a consumer group. During an
# incident, this tells you whether a specific group is affected by a broker outage. If the
# returned broker is the one that is down or in a FAILED state, the group is stuck and
# needs migration.
#
# @param consumer_group_id [String] consumer group to look up
# @return [Hash{Symbol => Object}] coordinator info with :partition, :broker_id,
#   and :broker_host keys
# @raise [Errors::MetadataError] when the offsets topic, its partition, or the leader
#   broker cannot be resolved from cluster metadata
#
# @example Find coordinator for a group
#   Karafka::Admin::Recovery.coordinator_for('my-group')
#   #=> { partition: 17, broker_id: 2, broker_host: "broker2:9092" }
#
# @example Check if a group is affected by a broker outage
#   info = Karafka::Admin::Recovery.coordinator_for('my-group')
#   if info[:broker_id] == failed_broker_id
#     puts "Group 'my-group' is stuck on failed broker #{info[:broker_host]}"
#   end
def coordinator_for(consumer_group_id)
  coordinator_partition = offsets_partition_for(consumer_group_id)
  cluster_meta = cluster_info

  topic_meta = cluster_meta.topics.detect { |topic| topic[:topic_name] == OFFSETS_TOPIC }

  unless topic_meta
    raise(
      Errors::MetadataError,
      "Could not retrieve metadata for '#{OFFSETS_TOPIC}'"
    )
  end

  partition_meta = topic_meta[:partitions].detect do |candidate|
    candidate[:partition_id] == coordinator_partition
  end

  unless partition_meta
    raise(
      Errors::MetadataError,
      "Could not find partition #{coordinator_partition} in '#{OFFSETS_TOPIC}'"
    )
  end

  leader_id = partition_meta[:leader]

  # Broker entries may surface either as hashes or as rdkafka broker objects depending
  # on the metadata source, so both shapes are handled when matching the leader
  leader = cluster_meta.brokers.detect do |candidate|
    if candidate.is_a?(Hash)
      (candidate[:broker_id] || candidate[:node_id]) == leader_id
    else
      candidate.node_id == leader_id
    end
  end

  unless leader
    raise(
      Errors::MetadataError,
      "Could not find broker #{leader_id} in cluster metadata"
    )
  end

  if leader.is_a?(Hash)
    broker_id = leader[:broker_id] || leader[:node_id]
    broker_host = "#{leader[:broker_name] || leader[:host]}:#{leader[:broker_port] || leader[:port]}"
  else
    broker_id = leader.node_id
    broker_host = "#{leader.host}:#{leader.port}"
  end

  { partition: coordinator_partition, broker_id: broker_id, broker_host: broker_host }
end
301
+
302
# Scans a __consumer_offsets partition and returns consumer group names that have active
# committed offsets. Groups where all offsets have been tombstoned (deleted) within the
# scan window are excluded.
#
# Use this to discover which consumer groups are affected when a coordinator broker fails.
# Combined with {#affected_partitions}, this gives the full blast radius of a broker
# outage: first find which __consumer_offsets partitions the failed broker leads, then
# scan each partition to discover all affected consumer groups.
#
# @param partition [Integer] __consumer_offsets partition to scan
# @param last_committed_at [Time] approximate time of last successful offset commit
#   (default: 1 hour ago). A good rule of thumb is the crash time minus 10 minutes
# @return [Array<String>] sorted list of consumer group names with active offsets
# @raise [Errors::PartitionOutOfRangeError] when the requested partition does not exist
#
# @example Find all groups on partition 17
#   Karafka::Admin::Recovery.affected_groups(17)
#   #=> ["group-a", "group-b", "group-c"]
#
# @example Full blast radius of a broker outage
#   partitions = Karafka::Admin::Recovery.affected_partitions(failed_broker_id)
#   all_affected = partitions.flat_map do |p|
#     Karafka::Admin::Recovery.affected_groups(p)
#   end.uniq
def affected_groups(partition, last_committed_at: Time.now - DEFAULT_LAST_COMMITTED_AT_OFFSET)
  total = offsets_partition_count

  unless partition >= 0 && partition < total
    raise(
      Errors::PartitionOutOfRangeError,
      "Partition #{partition} is out of range (0...#{total})"
    )
  end

  # group name => topic name => partition => offset. Replayed with last-write-wins
  # semantics so groups whose offsets were all tombstoned end up empty and get dropped
  live_offsets = Hash.new do |groups, group|
    groups[group] = Hash.new { |topics, topic| topics[topic] = {} }
  end

  scanner = Pro::Iterator.new(
    { OFFSETS_TOPIC => { partition => last_committed_at } },
    settings: @custom_kafka
  )

  scanner.each do |message|
    next unless message.raw_key

    record = parse_offset_commit(message)
    next unless record

    group_offsets = live_offsets[record[:group]]

    if record[:offset].nil?
      # Tombstone: drop the partition entry and prune the topic once it is empty
      group_offsets[record[:topic]].delete(record[:partition])
      group_offsets.delete(record[:topic]) if group_offsets[record[:topic]].empty?
    else
      group_offsets[record[:topic]][record[:partition]] = record[:offset]
    end
  end

  live_offsets.reject { |_, topics| topics.empty? }.keys.sort
end
362
+
363
# Returns all __consumer_offsets partitions led by a given broker. Pure metadata lookup
# that does not scan any topic data.
#
# Use this as the first step in assessing the blast radius of a broker outage. The
# returned partition numbers can be passed to {#affected_groups} to discover all consumer
# groups that need recovery or migration.
#
# @param broker_id [Integer] broker node id
# @return [Array<Integer>] sorted list of __consumer_offsets partition numbers
# @raise [Errors::MetadataError] when the offsets topic metadata cannot be retrieved
#
# @example Find partitions led by broker 2
#   Karafka::Admin::Recovery.affected_partitions(2)
#   #=> [3, 17, 28, 42]
def affected_partitions(broker_id)
  topic_meta = cluster_info.topics.detect { |topic| topic[:topic_name] == OFFSETS_TOPIC }

  unless topic_meta
    raise(
      Errors::MetadataError,
      "Could not retrieve metadata for '#{OFFSETS_TOPIC}'"
    )
  end

  led = topic_meta[:partitions].each_with_object([]) do |partition_meta, acc|
    acc << partition_meta[:partition_id] if partition_meta[:leader] == broker_id
  end

  led.sort
end
393
+
394
+ private
395
+
396
# Parses a raw __consumer_offsets message into structured offset commit data.
# Handles both v0 and v1 offset commit key formats (both use the same layout for
# group/topic/partition). Tombstone records (nil payload) indicate offset deletion and
# are returned with offset: nil so callers can remove stale entries.
#
# Malformed records (keys or payloads shorter than their declared layout) are skipped by
# returning nil instead of raising, so a single corrupt record cannot abort a whole
# partition scan. In particular, a truncated payload must NOT be reported as a tombstone
# (offset: nil), since callers treat tombstones as offset deletions.
#
# @param message [Karafka::Messages::Message] raw message from __consumer_offsets
# @return [Hash, nil] parsed offset commit or nil if not a parseable offset commit
#   record. When the record is a tombstone (deletion), the :offset value will be nil.
def parse_offset_commit(message)
  return nil unless message.raw_key

  key = message.raw_key.b
  # Guard: keys shorter than the 2-byte version prefix are not parseable records
  return nil if key.bytesize < 2

  key_version = key.unpack1("n")

  # Versions 0 and 1 are offset commit records with identical key layout. Higher
  # versions (group metadata, etc.) are not offset commits
  return nil unless key_version <= 1

  group, pos = read_prefixed_string(key, 2)
  return nil unless group

  topic, pos = read_prefixed_string(key, pos)
  return nil unless topic

  partition_bytes = key[pos, 4]
  # Guard: partition field must be exactly 4 bytes
  return nil unless partition_bytes && partition_bytes.bytesize == 4

  partition = partition_bytes.unpack1("N")

  # Tombstone (nil payload) means the offset was deleted
  unless message.raw_payload
    return { group: group, topic: topic, partition: partition, offset: nil }
  end

  val = message.raw_payload.b

  # value layout: int16 version | int64 offset | ...
  # Guard: a truncated value cannot yield a valid offset; skip rather than pretend
  # it is a tombstone
  return nil if val.bytesize < 10

  { group: group, topic: topic, partition: partition, offset: val[2, 8].unpack1("q>") }
end

# Reads a 16-bit big-endian length-prefixed UTF-8 string from binary data.
#
# @param data [String] binary string being parsed
# @param pos [Integer] byte offset of the 2-byte length prefix
# @return [Array(String, Integer), Array(nil, Integer)] the decoded string and the
#   position just past it, or [nil, pos] when the data is truncated
def read_prefixed_string(data, pos)
  len_bytes = data[pos, 2]
  return [nil, pos] unless len_bytes && len_bytes.bytesize == 2

  len = len_bytes.unpack1("n")
  str = data[pos + 2, len]
  # Declared length must be fully present, otherwise the record is truncated
  return [nil, pos] unless str && str.bytesize == len

  [str.force_encoding("UTF-8"), pos + 2 + len]
end
436
+
437
# Computes Java's String#hashCode for a given string. Java hashes UTF-16 code units
# (char values), not raw bytes. For ASCII-only strings this is identical to byte-level
# hashing, but non-ASCII characters (accented letters, CJK, emoji) require encoding to
# UTF-16 and hashing each 16-bit code unit (including surrogate pairs for characters
# above U+FFFF).
#
# @param str [String] input string
# @return [Integer] signed 32-bit hash value matching Java's String#hashCode
def java_hash_code(str)
  # UTF-16BE gives Java's char sequence; "n*" yields each 16-bit code unit as an
  # unsigned integer (surrogate halves included)
  code_units = str.encode(Encoding::UTF_16BE).unpack("n*")

  # h = h * 31 + c per code unit, wrapped to 32 bits like Java int arithmetic
  wrapped = code_units.reduce(0) do |acc, unit|
    ((acc * 31) + unit) & 0xFFFFFFFF
  end

  # Reinterpret the unsigned 32-bit result as a signed Java int
  wrapped >= 0x80000000 ? wrapped - 0x100000000 : wrapped
end
457
+
458
# Returns the partition count of the __consumer_offsets topic. Memoized per instance since
# this value never changes at runtime.
#
# @return [Integer] number of partitions
# @raise [Errors::MetadataError] when topic metadata cannot be retrieved
def offsets_partition_count
  return @offsets_partition_count if @offsets_partition_count

  topic_meta = cluster_info.topics.detect do |topic|
    topic[:topic_name] == OFFSETS_TOPIC
  end

  unless topic_meta
    raise(
      Errors::MetadataError,
      "Could not retrieve partition count for '#{OFFSETS_TOPIC}'"
    )
  end

  @offsets_partition_count = topic_meta[:partition_count]
end
479
+ end
480
+ end
481
+ end
482
+ end
483
+
484
+ # We alias this for Pro users so we don't end up having two Admin namespaces from the end
485
+ # user perspective. This enhances the UX.
486
+ Karafka::Admin::Recovery = Karafka::Pro::Admin::Recovery
@@ -6,9 +6,14 @@
6
6
  # This software is NOT open source. It is source-available commercial software
7
7
  # requiring a paid license for use. It is NOT covered by LGPL.
8
8
  #
9
+ # The author retains all right, title, and interest in this software,
10
+ # including all copyrights, patents, and other intellectual property rights.
11
+ # No patent rights are granted under this license.
12
+ #
9
13
  # PROHIBITED:
10
14
  # - Use without a valid commercial license
11
15
  # - Redistribution, modification, or derivative works without authorization
16
+ # - Reverse engineering, decompilation, or disassembly of this software
12
17
  # - Use as training data for AI/ML models or inclusion in datasets
13
18
  # - Scraping, crawling, or automated collection for any purpose
14
19
  #
@@ -17,6 +22,9 @@
17
22
  # - Runtime retrieval by AI assistants, coding agents, and RAG systems
18
23
  # for the purpose of providing contextual help to Karafka users
19
24
  #
25
+ # Receipt, viewing, or possession of this software does not convey or
26
+ # imply any license or right beyond those expressly stated above.
27
+ #
20
28
  # License: https://karafka.io/docs/Pro-License-Comm/
21
29
  # Contact: contact@karafka.io
22
30
 
@@ -6,9 +6,14 @@
6
6
  # This software is NOT open source. It is source-available commercial software
7
7
  # requiring a paid license for use. It is NOT covered by LGPL.
8
8
  #
9
+ # The author retains all right, title, and interest in this software,
10
+ # including all copyrights, patents, and other intellectual property rights.
11
+ # No patent rights are granted under this license.
12
+ #
9
13
  # PROHIBITED:
10
14
  # - Use without a valid commercial license
11
15
  # - Redistribution, modification, or derivative works without authorization
16
+ # - Reverse engineering, decompilation, or disassembly of this software
12
17
  # - Use as training data for AI/ML models or inclusion in datasets
13
18
  # - Scraping, crawling, or automated collection for any purpose
14
19
  #
@@ -17,6 +22,9 @@
17
22
  # - Runtime retrieval by AI assistants, coding agents, and RAG systems
18
23
  # for the purpose of providing contextual help to Karafka users
19
24
  #
25
+ # Receipt, viewing, or possession of this software does not convey or
26
+ # imply any license or right beyond those expressly stated above.
27
+ #
20
28
  # License: https://karafka.io/docs/Pro-License-Comm/
21
29
  # Contact: contact@karafka.io
22
30
 
@@ -6,9 +6,14 @@
6
6
  # This software is NOT open source. It is source-available commercial software
7
7
  # requiring a paid license for use. It is NOT covered by LGPL.
8
8
  #
9
+ # The author retains all right, title, and interest in this software,
10
+ # including all copyrights, patents, and other intellectual property rights.
11
+ # No patent rights are granted under this license.
12
+ #
9
13
  # PROHIBITED:
10
14
  # - Use without a valid commercial license
11
15
  # - Redistribution, modification, or derivative works without authorization
16
+ # - Reverse engineering, decompilation, or disassembly of this software
12
17
  # - Use as training data for AI/ML models or inclusion in datasets
13
18
  # - Scraping, crawling, or automated collection for any purpose
14
19
  #
@@ -17,6 +22,9 @@
17
22
  # - Runtime retrieval by AI assistants, coding agents, and RAG systems
18
23
  # for the purpose of providing contextual help to Karafka users
19
24
  #
25
+ # Receipt, viewing, or possession of this software does not convey or
26
+ # imply any license or right beyond those expressly stated above.
27
+ #
20
28
  # License: https://karafka.io/docs/Pro-License-Comm/
21
29
  # Contact: contact@karafka.io
22
30
 
@@ -6,9 +6,14 @@
6
6
  # This software is NOT open source. It is source-available commercial software
7
7
  # requiring a paid license for use. It is NOT covered by LGPL.
8
8
  #
9
+ # The author retains all right, title, and interest in this software,
10
+ # including all copyrights, patents, and other intellectual property rights.
11
+ # No patent rights are granted under this license.
12
+ #
9
13
  # PROHIBITED:
10
14
  # - Use without a valid commercial license
11
15
  # - Redistribution, modification, or derivative works without authorization
16
+ # - Reverse engineering, decompilation, or disassembly of this software
12
17
  # - Use as training data for AI/ML models or inclusion in datasets
13
18
  # - Scraping, crawling, or automated collection for any purpose
14
19
  #
@@ -17,6 +22,9 @@
17
22
  # - Runtime retrieval by AI assistants, coding agents, and RAG systems
18
23
  # for the purpose of providing contextual help to Karafka users
19
24
  #
25
+ # Receipt, viewing, or possession of this software does not convey or
26
+ # imply any license or right beyond those expressly stated above.
27
+ #
20
28
  # License: https://karafka.io/docs/Pro-License-Comm/
21
29
  # Contact: contact@karafka.io
22
30
 
@@ -6,9 +6,14 @@
6
6
  # This software is NOT open source. It is source-available commercial software
7
7
  # requiring a paid license for use. It is NOT covered by LGPL.
8
8
  #
9
+ # The author retains all right, title, and interest in this software,
10
+ # including all copyrights, patents, and other intellectual property rights.
11
+ # No patent rights are granted under this license.
12
+ #
9
13
  # PROHIBITED:
10
14
  # - Use without a valid commercial license
11
15
  # - Redistribution, modification, or derivative works without authorization
16
+ # - Reverse engineering, decompilation, or disassembly of this software
12
17
  # - Use as training data for AI/ML models or inclusion in datasets
13
18
  # - Scraping, crawling, or automated collection for any purpose
14
19
  #
@@ -17,6 +22,9 @@
17
22
  # - Runtime retrieval by AI assistants, coding agents, and RAG systems
18
23
  # for the purpose of providing contextual help to Karafka users
19
24
  #
25
+ # Receipt, viewing, or possession of this software does not convey or
26
+ # imply any license or right beyond those expressly stated above.
27
+ #
20
28
  # License: https://karafka.io/docs/Pro-License-Comm/
21
29
  # Contact: contact@karafka.io
22
30
 
@@ -6,9 +6,14 @@
6
6
  # This software is NOT open source. It is source-available commercial software
7
7
  # requiring a paid license for use. It is NOT covered by LGPL.
8
8
  #
9
+ # The author retains all right, title, and interest in this software,
10
+ # including all copyrights, patents, and other intellectual property rights.
11
+ # No patent rights are granted under this license.
12
+ #
9
13
  # PROHIBITED:
10
14
  # - Use without a valid commercial license
11
15
  # - Redistribution, modification, or derivative works without authorization
16
+ # - Reverse engineering, decompilation, or disassembly of this software
12
17
  # - Use as training data for AI/ML models or inclusion in datasets
13
18
  # - Scraping, crawling, or automated collection for any purpose
14
19
  #
@@ -17,6 +22,9 @@
17
22
  # - Runtime retrieval by AI assistants, coding agents, and RAG systems
18
23
  # for the purpose of providing contextual help to Karafka users
19
24
  #
25
+ # Receipt, viewing, or possession of this software does not convey or
26
+ # imply any license or right beyond those expressly stated above.
27
+ #
20
28
  # License: https://karafka.io/docs/Pro-License-Comm/
21
29
  # Contact: contact@karafka.io
22
30
 
@@ -6,9 +6,14 @@
6
6
  # This software is NOT open source. It is source-available commercial software
7
7
  # requiring a paid license for use. It is NOT covered by LGPL.
8
8
  #
9
+ # The author retains all right, title, and interest in this software,
10
+ # including all copyrights, patents, and other intellectual property rights.
11
+ # No patent rights are granted under this license.
12
+ #
9
13
  # PROHIBITED:
10
14
  # - Use without a valid commercial license
11
15
  # - Redistribution, modification, or derivative works without authorization
16
+ # - Reverse engineering, decompilation, or disassembly of this software
12
17
  # - Use as training data for AI/ML models or inclusion in datasets
13
18
  # - Scraping, crawling, or automated collection for any purpose
14
19
  #
@@ -17,6 +22,9 @@
17
22
  # - Runtime retrieval by AI assistants, coding agents, and RAG systems
18
23
  # for the purpose of providing contextual help to Karafka users
19
24
  #
25
+ # Receipt, viewing, or possession of this software does not convey or
26
+ # imply any license or right beyond those expressly stated above.
27
+ #
20
28
  # License: https://karafka.io/docs/Pro-License-Comm/
21
29
  # Contact: contact@karafka.io
22
30