karafka 2.5.4 → 2.5.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (260)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +3 -0
  3. data/LICENSE-COMM +4 -2
  4. data/lib/karafka/admin/acl.rb +127 -80
  5. data/lib/karafka/admin/configs.rb +84 -70
  6. data/lib/karafka/admin/consumer_groups.rb +377 -330
  7. data/lib/karafka/admin/replication.rb +287 -263
  8. data/lib/karafka/admin/topics.rb +232 -186
  9. data/lib/karafka/admin.rb +277 -117
  10. data/lib/karafka/pro/active_job/consumer.rb +19 -2
  11. data/lib/karafka/pro/active_job/dispatcher.rb +19 -2
  12. data/lib/karafka/pro/active_job/job_options_contract.rb +19 -2
  13. data/lib/karafka/pro/base_consumer.rb +19 -2
  14. data/lib/karafka/pro/cleaner/errors.rb +19 -2
  15. data/lib/karafka/pro/cleaner/messages/message.rb +19 -2
  16. data/lib/karafka/pro/cleaner/messages/messages.rb +19 -2
  17. data/lib/karafka/pro/cleaner/messages/metadata.rb +19 -2
  18. data/lib/karafka/pro/cleaner.rb +19 -2
  19. data/lib/karafka/pro/cli/contracts/server.rb +19 -2
  20. data/lib/karafka/pro/cli/parallel_segments/base.rb +19 -2
  21. data/lib/karafka/pro/cli/parallel_segments/collapse.rb +19 -2
  22. data/lib/karafka/pro/cli/parallel_segments/distribute.rb +19 -2
  23. data/lib/karafka/pro/cli/parallel_segments.rb +19 -2
  24. data/lib/karafka/pro/connection/manager.rb +19 -2
  25. data/lib/karafka/pro/connection/multiplexing/listener.rb +19 -2
  26. data/lib/karafka/pro/contracts/base.rb +19 -2
  27. data/lib/karafka/pro/encryption/cipher.rb +19 -2
  28. data/lib/karafka/pro/encryption/contracts/config.rb +19 -2
  29. data/lib/karafka/pro/encryption/errors.rb +19 -2
  30. data/lib/karafka/pro/encryption/messages/middleware.rb +19 -2
  31. data/lib/karafka/pro/encryption/messages/parser.rb +19 -2
  32. data/lib/karafka/pro/encryption/setup/config.rb +19 -2
  33. data/lib/karafka/pro/encryption.rb +19 -2
  34. data/lib/karafka/pro/instrumentation/performance_tracker.rb +19 -2
  35. data/lib/karafka/pro/iterator/expander.rb +19 -2
  36. data/lib/karafka/pro/iterator/tpl_builder.rb +19 -2
  37. data/lib/karafka/pro/iterator.rb +19 -2
  38. data/lib/karafka/pro/loader.rb +19 -2
  39. data/lib/karafka/pro/processing/adaptive_iterator/consumer.rb +19 -2
  40. data/lib/karafka/pro/processing/adaptive_iterator/tracker.rb +19 -2
  41. data/lib/karafka/pro/processing/collapser.rb +19 -2
  42. data/lib/karafka/pro/processing/coordinator.rb +19 -2
  43. data/lib/karafka/pro/processing/coordinators/errors_tracker.rb +19 -2
  44. data/lib/karafka/pro/processing/coordinators/filters_applier.rb +19 -2
  45. data/lib/karafka/pro/processing/coordinators/virtual_offset_manager.rb +19 -2
  46. data/lib/karafka/pro/processing/executor.rb +19 -2
  47. data/lib/karafka/pro/processing/expansions_selector.rb +19 -2
  48. data/lib/karafka/pro/processing/filters/base.rb +19 -2
  49. data/lib/karafka/pro/processing/filters/delayer.rb +19 -2
  50. data/lib/karafka/pro/processing/filters/expirer.rb +19 -2
  51. data/lib/karafka/pro/processing/filters/inline_insights_delayer.rb +19 -2
  52. data/lib/karafka/pro/processing/filters/throttler.rb +19 -2
  53. data/lib/karafka/pro/processing/filters/virtual_limiter.rb +19 -2
  54. data/lib/karafka/pro/processing/jobs/consume_non_blocking.rb +19 -2
  55. data/lib/karafka/pro/processing/jobs/eofed_non_blocking.rb +19 -2
  56. data/lib/karafka/pro/processing/jobs/periodic.rb +19 -2
  57. data/lib/karafka/pro/processing/jobs/periodic_non_blocking.rb +19 -2
  58. data/lib/karafka/pro/processing/jobs/revoked_non_blocking.rb +19 -2
  59. data/lib/karafka/pro/processing/jobs_builder.rb +19 -2
  60. data/lib/karafka/pro/processing/jobs_queue.rb +19 -2
  61. data/lib/karafka/pro/processing/offset_metadata/consumer.rb +19 -2
  62. data/lib/karafka/pro/processing/offset_metadata/fetcher.rb +19 -2
  63. data/lib/karafka/pro/processing/offset_metadata/listener.rb +19 -2
  64. data/lib/karafka/pro/processing/parallel_segments/filters/base.rb +19 -2
  65. data/lib/karafka/pro/processing/parallel_segments/filters/default.rb +19 -2
  66. data/lib/karafka/pro/processing/parallel_segments/filters/mom.rb +19 -2
  67. data/lib/karafka/pro/processing/partitioner.rb +19 -2
  68. data/lib/karafka/pro/processing/periodic_job/consumer.rb +19 -2
  69. data/lib/karafka/pro/processing/piping/consumer.rb +19 -2
  70. data/lib/karafka/pro/processing/schedulers/base.rb +19 -2
  71. data/lib/karafka/pro/processing/schedulers/default.rb +19 -2
  72. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +19 -2
  73. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +19 -2
  74. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom.rb +19 -2
  75. data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_mom_vp.rb +19 -2
  76. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +19 -2
  77. data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +19 -2
  78. data/lib/karafka/pro/processing/strategies/aj/dlq_mom.rb +19 -2
  79. data/lib/karafka/pro/processing/strategies/aj/dlq_mom_vp.rb +19 -2
  80. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom.rb +19 -2
  81. data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +19 -2
  82. data/lib/karafka/pro/processing/strategies/aj/ftr_mom.rb +19 -2
  83. data/lib/karafka/pro/processing/strategies/aj/ftr_mom_vp.rb +19 -2
  84. data/lib/karafka/pro/processing/strategies/aj/lrj_mom.rb +19 -2
  85. data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +19 -2
  86. data/lib/karafka/pro/processing/strategies/aj/mom.rb +19 -2
  87. data/lib/karafka/pro/processing/strategies/aj/mom_vp.rb +19 -2
  88. data/lib/karafka/pro/processing/strategies/base.rb +19 -2
  89. data/lib/karafka/pro/processing/strategies/default.rb +19 -2
  90. data/lib/karafka/pro/processing/strategies/dlq/default.rb +19 -2
  91. data/lib/karafka/pro/processing/strategies/dlq/ftr.rb +19 -2
  92. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +19 -2
  93. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +19 -2
  94. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom_vp.rb +19 -2
  95. data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_vp.rb +19 -2
  96. data/lib/karafka/pro/processing/strategies/dlq/ftr_mom.rb +19 -2
  97. data/lib/karafka/pro/processing/strategies/dlq/ftr_mom_vp.rb +19 -2
  98. data/lib/karafka/pro/processing/strategies/dlq/ftr_vp.rb +19 -2
  99. data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +19 -2
  100. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +19 -2
  101. data/lib/karafka/pro/processing/strategies/dlq/lrj_mom_vp.rb +19 -2
  102. data/lib/karafka/pro/processing/strategies/dlq/lrj_vp.rb +19 -2
  103. data/lib/karafka/pro/processing/strategies/dlq/mom.rb +19 -2
  104. data/lib/karafka/pro/processing/strategies/dlq/mom_vp.rb +19 -2
  105. data/lib/karafka/pro/processing/strategies/dlq/vp.rb +19 -2
  106. data/lib/karafka/pro/processing/strategies/ftr/default.rb +19 -2
  107. data/lib/karafka/pro/processing/strategies/ftr/vp.rb +19 -2
  108. data/lib/karafka/pro/processing/strategies/lrj/default.rb +19 -2
  109. data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +19 -2
  110. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +19 -2
  111. data/lib/karafka/pro/processing/strategies/lrj/ftr_mom_vp.rb +19 -2
  112. data/lib/karafka/pro/processing/strategies/lrj/ftr_vp.rb +19 -2
  113. data/lib/karafka/pro/processing/strategies/lrj/mom.rb +19 -2
  114. data/lib/karafka/pro/processing/strategies/lrj/mom_vp.rb +19 -2
  115. data/lib/karafka/pro/processing/strategies/lrj/vp.rb +19 -2
  116. data/lib/karafka/pro/processing/strategies/mom/default.rb +19 -2
  117. data/lib/karafka/pro/processing/strategies/mom/ftr.rb +19 -2
  118. data/lib/karafka/pro/processing/strategies/mom/ftr_vp.rb +19 -2
  119. data/lib/karafka/pro/processing/strategies/mom/vp.rb +19 -2
  120. data/lib/karafka/pro/processing/strategies/vp/default.rb +19 -2
  121. data/lib/karafka/pro/processing/strategies.rb +19 -2
  122. data/lib/karafka/pro/processing/strategy_selector.rb +19 -2
  123. data/lib/karafka/pro/processing/subscription_groups_coordinator.rb +19 -2
  124. data/lib/karafka/pro/processing/virtual_partitions/distributors/balanced.rb +19 -2
  125. data/lib/karafka/pro/processing/virtual_partitions/distributors/base.rb +19 -2
  126. data/lib/karafka/pro/processing/virtual_partitions/distributors/consistent.rb +19 -2
  127. data/lib/karafka/pro/recurring_tasks/consumer.rb +19 -2
  128. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +19 -2
  129. data/lib/karafka/pro/recurring_tasks/contracts/task.rb +19 -2
  130. data/lib/karafka/pro/recurring_tasks/deserializer.rb +19 -2
  131. data/lib/karafka/pro/recurring_tasks/dispatcher.rb +19 -2
  132. data/lib/karafka/pro/recurring_tasks/errors.rb +19 -2
  133. data/lib/karafka/pro/recurring_tasks/executor.rb +19 -2
  134. data/lib/karafka/pro/recurring_tasks/listener.rb +19 -2
  135. data/lib/karafka/pro/recurring_tasks/matcher.rb +19 -2
  136. data/lib/karafka/pro/recurring_tasks/schedule.rb +19 -2
  137. data/lib/karafka/pro/recurring_tasks/serializer.rb +19 -2
  138. data/lib/karafka/pro/recurring_tasks/setup/config.rb +19 -2
  139. data/lib/karafka/pro/recurring_tasks/task.rb +19 -2
  140. data/lib/karafka/pro/recurring_tasks.rb +19 -2
  141. data/lib/karafka/pro/routing/features/active_job/builder.rb +19 -2
  142. data/lib/karafka/pro/routing/features/active_job.rb +19 -2
  143. data/lib/karafka/pro/routing/features/adaptive_iterator/config.rb +19 -2
  144. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +19 -2
  145. data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb +19 -2
  146. data/lib/karafka/pro/routing/features/adaptive_iterator.rb +19 -2
  147. data/lib/karafka/pro/routing/features/base.rb +19 -2
  148. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +19 -2
  149. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +19 -2
  150. data/lib/karafka/pro/routing/features/dead_letter_queue.rb +19 -2
  151. data/lib/karafka/pro/routing/features/delaying/config.rb +19 -2
  152. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +19 -2
  153. data/lib/karafka/pro/routing/features/delaying/topic.rb +19 -2
  154. data/lib/karafka/pro/routing/features/delaying.rb +19 -2
  155. data/lib/karafka/pro/routing/features/direct_assignments/config.rb +19 -2
  156. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +19 -2
  157. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +19 -2
  158. data/lib/karafka/pro/routing/features/direct_assignments/subscription_group.rb +19 -2
  159. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +19 -2
  160. data/lib/karafka/pro/routing/features/direct_assignments.rb +19 -2
  161. data/lib/karafka/pro/routing/features/expiring/config.rb +19 -2
  162. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +19 -2
  163. data/lib/karafka/pro/routing/features/expiring/topic.rb +19 -2
  164. data/lib/karafka/pro/routing/features/expiring.rb +19 -2
  165. data/lib/karafka/pro/routing/features/filtering/config.rb +19 -2
  166. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +19 -2
  167. data/lib/karafka/pro/routing/features/filtering/topic.rb +19 -2
  168. data/lib/karafka/pro/routing/features/filtering.rb +19 -2
  169. data/lib/karafka/pro/routing/features/inline_insights/config.rb +19 -2
  170. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +19 -2
  171. data/lib/karafka/pro/routing/features/inline_insights/topic.rb +19 -2
  172. data/lib/karafka/pro/routing/features/inline_insights.rb +19 -2
  173. data/lib/karafka/pro/routing/features/long_running_job/config.rb +19 -2
  174. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +19 -2
  175. data/lib/karafka/pro/routing/features/long_running_job/topic.rb +19 -2
  176. data/lib/karafka/pro/routing/features/long_running_job.rb +19 -2
  177. data/lib/karafka/pro/routing/features/multiplexing/config.rb +19 -2
  178. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +19 -2
  179. data/lib/karafka/pro/routing/features/multiplexing/patches/contracts/consumer_group.rb +19 -2
  180. data/lib/karafka/pro/routing/features/multiplexing/proxy.rb +19 -2
  181. data/lib/karafka/pro/routing/features/multiplexing/subscription_group.rb +19 -2
  182. data/lib/karafka/pro/routing/features/multiplexing/subscription_groups_builder.rb +19 -2
  183. data/lib/karafka/pro/routing/features/multiplexing.rb +19 -2
  184. data/lib/karafka/pro/routing/features/non_blocking_job/topic.rb +19 -2
  185. data/lib/karafka/pro/routing/features/non_blocking_job.rb +19 -2
  186. data/lib/karafka/pro/routing/features/offset_metadata/config.rb +19 -2
  187. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +19 -2
  188. data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +19 -2
  189. data/lib/karafka/pro/routing/features/offset_metadata.rb +19 -2
  190. data/lib/karafka/pro/routing/features/parallel_segments/builder.rb +19 -2
  191. data/lib/karafka/pro/routing/features/parallel_segments/config.rb +19 -2
  192. data/lib/karafka/pro/routing/features/parallel_segments/consumer_group.rb +19 -2
  193. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +19 -2
  194. data/lib/karafka/pro/routing/features/parallel_segments/topic.rb +19 -2
  195. data/lib/karafka/pro/routing/features/parallel_segments.rb +19 -2
  196. data/lib/karafka/pro/routing/features/patterns/builder.rb +19 -2
  197. data/lib/karafka/pro/routing/features/patterns/config.rb +19 -2
  198. data/lib/karafka/pro/routing/features/patterns/consumer_group.rb +19 -2
  199. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +19 -2
  200. data/lib/karafka/pro/routing/features/patterns/contracts/pattern.rb +19 -2
  201. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +19 -2
  202. data/lib/karafka/pro/routing/features/patterns/detector.rb +19 -2
  203. data/lib/karafka/pro/routing/features/patterns/pattern.rb +19 -2
  204. data/lib/karafka/pro/routing/features/patterns/patterns.rb +19 -2
  205. data/lib/karafka/pro/routing/features/patterns/topic.rb +19 -2
  206. data/lib/karafka/pro/routing/features/patterns/topics.rb +19 -2
  207. data/lib/karafka/pro/routing/features/patterns.rb +19 -2
  208. data/lib/karafka/pro/routing/features/pausing/config.rb +19 -2
  209. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +19 -2
  210. data/lib/karafka/pro/routing/features/pausing/topic.rb +19 -2
  211. data/lib/karafka/pro/routing/features/pausing.rb +19 -2
  212. data/lib/karafka/pro/routing/features/periodic_job/config.rb +19 -2
  213. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +19 -2
  214. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +19 -2
  215. data/lib/karafka/pro/routing/features/periodic_job.rb +19 -2
  216. data/lib/karafka/pro/routing/features/recurring_tasks/builder.rb +19 -2
  217. data/lib/karafka/pro/routing/features/recurring_tasks/config.rb +19 -2
  218. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +19 -2
  219. data/lib/karafka/pro/routing/features/recurring_tasks/proxy.rb +19 -2
  220. data/lib/karafka/pro/routing/features/recurring_tasks/topic.rb +19 -2
  221. data/lib/karafka/pro/routing/features/recurring_tasks.rb +19 -2
  222. data/lib/karafka/pro/routing/features/scheduled_messages/builder.rb +19 -2
  223. data/lib/karafka/pro/routing/features/scheduled_messages/config.rb +19 -2
  224. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +19 -2
  225. data/lib/karafka/pro/routing/features/scheduled_messages/proxy.rb +19 -2
  226. data/lib/karafka/pro/routing/features/scheduled_messages/topic.rb +19 -2
  227. data/lib/karafka/pro/routing/features/scheduled_messages.rb +19 -2
  228. data/lib/karafka/pro/routing/features/swarm/config.rb +19 -2
  229. data/lib/karafka/pro/routing/features/swarm/contracts/routing.rb +19 -2
  230. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +19 -2
  231. data/lib/karafka/pro/routing/features/swarm/topic.rb +19 -2
  232. data/lib/karafka/pro/routing/features/swarm.rb +19 -2
  233. data/lib/karafka/pro/routing/features/throttling/config.rb +19 -2
  234. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +19 -2
  235. data/lib/karafka/pro/routing/features/throttling/topic.rb +19 -2
  236. data/lib/karafka/pro/routing/features/throttling.rb +19 -2
  237. data/lib/karafka/pro/routing/features/virtual_partitions/config.rb +19 -2
  238. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +19 -2
  239. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +19 -2
  240. data/lib/karafka/pro/routing/features/virtual_partitions.rb +19 -2
  241. data/lib/karafka/pro/scheduled_messages/consumer.rb +19 -2
  242. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +19 -2
  243. data/lib/karafka/pro/scheduled_messages/contracts/message.rb +19 -2
  244. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +19 -2
  245. data/lib/karafka/pro/scheduled_messages/day.rb +19 -2
  246. data/lib/karafka/pro/scheduled_messages/deserializers/headers.rb +19 -2
  247. data/lib/karafka/pro/scheduled_messages/deserializers/payload.rb +19 -2
  248. data/lib/karafka/pro/scheduled_messages/dispatcher.rb +19 -2
  249. data/lib/karafka/pro/scheduled_messages/errors.rb +19 -2
  250. data/lib/karafka/pro/scheduled_messages/max_epoch.rb +19 -2
  251. data/lib/karafka/pro/scheduled_messages/proxy.rb +19 -2
  252. data/lib/karafka/pro/scheduled_messages/schema_validator.rb +19 -2
  253. data/lib/karafka/pro/scheduled_messages/serializer.rb +19 -2
  254. data/lib/karafka/pro/scheduled_messages/setup/config.rb +19 -2
  255. data/lib/karafka/pro/scheduled_messages/state.rb +19 -2
  256. data/lib/karafka/pro/scheduled_messages/tracker.rb +19 -2
  257. data/lib/karafka/pro/scheduled_messages.rb +19 -2
  258. data/lib/karafka/pro/swarm/liveness_listener.rb +19 -2
  259. data/lib/karafka/version.rb +1 -1
  260. metadata +2 -2
@@ -7,232 +7,278 @@ module Karafka
     # introspection
     class Topics < Admin
       class << self
-        # Allows us to read messages from the topic
-        #
         # @param name [String, Symbol] topic name
         # @param partition [Integer] partition
         # @param count [Integer] how many messages we want to get at most
-        # @param start_offset [Integer, Time] offset from which we should start. If -1 is provided
-        #   (default) we will start from the latest offset. If time is provided, the appropriate
-        #   offset will be resolved. If negative beyond -1 is provided, we move backwards more.
+        # @param start_offset [Integer, Time] offset from which we should start
         # @param settings [Hash] kafka extra settings (optional)
-        #
-        # @return [Array<Karafka::Messages::Message>] array with messages
+        # @see #read
         def read(name, partition, count, start_offset = -1, settings = {})
-          messages = []
-          tpl = Rdkafka::Consumer::TopicPartitionList.new
-          low_offset, high_offset = nil
+          new.read(name, partition, count, start_offset, settings)
+        end
+
+        # @param name [String] topic name
+        # @param partitions [Integer] number of partitions for this topic
+        # @param replication_factor [Integer] number of replicas
+        # @param topic_config [Hash] topic config details as described in the
+        #   `base topic configuration`
+        # @see #create
+        def create(name, partitions, replication_factor, topic_config = {})
+          new.create(name, partitions, replication_factor, topic_config)
+        end
+
+        # @param name [String] topic name
+        # @see #delete
+        def delete(name)
+          new.delete(name)
+        end
+
+        # @param name [String] topic name
+        # @param partitions [Integer] total number of partitions we expect to end up with
+        # @see #create_partitions
+        def create_partitions(name, partitions)
+          new.create_partitions(name, partitions)
+        end

-          with_consumer(settings) do |consumer|
-            # Convert the time offset (if needed)
-            start_offset = resolve_offset(consumer, name.to_s, partition, start_offset)
+        # @param name_or_hash [String, Symbol, Hash] topic name or hash with topics and partitions
+        # @param partition [Integer, nil] partition (nil when using hash format)
+        # @see #read_watermark_offsets
+        def read_watermark_offsets(name_or_hash, partition = nil)
+          new.read_watermark_offsets(name_or_hash, partition)
+        end

-            low_offset, high_offset = consumer.query_watermark_offsets(name, partition)
+        # @param topic_name [String] name of the topic we're interested in
+        # @see #info
+        def info(topic_name)
+          new.info(topic_name)
+        end
+      end

-            # Select offset dynamically if -1 or less and move backwards with the negative
-            # offset, allowing to start from N messages back from high-watermark
-            start_offset = high_offset - count - start_offset.abs + 1 if start_offset.negative?
-            start_offset = low_offset if start_offset.negative?
+      # Allows us to read messages from the topic
+      #
+      # @param name [String, Symbol] topic name
+      # @param partition [Integer] partition
+      # @param count [Integer] how many messages we want to get at most
+      # @param start_offset [Integer, Time] offset from which we should start. If -1 is provided
+      #   (default) we will start from the latest offset. If time is provided, the appropriate
+      #   offset will be resolved. If negative beyond -1 is provided, we move backwards more.
+      # @param settings [Hash] kafka extra settings (optional)
+      #
+      # @return [Array<Karafka::Messages::Message>] array with messages
+      def read(name, partition, count, start_offset = -1, settings = {})
+        messages = []
+        tpl = Rdkafka::Consumer::TopicPartitionList.new
+        low_offset, high_offset = nil

-            # Build the requested range - since first element is on the start offset we need to
-            # subtract one from requested count to end up with expected number of elements
-            requested_range = (start_offset..(start_offset + count - 1))
-            # Establish theoretical available range. Note, that this does not handle cases related
-            # to log retention or compaction
-            available_range = (low_offset..(high_offset - 1))
-            # Select only offset that we can select. This will remove all the potential offsets
-            # that are below the low watermark offset
-            possible_range = requested_range.select { |offset| available_range.include?(offset) }
+        with_consumer(settings) do |consumer|
+          # Convert the time offset (if needed)
+          start_offset = resolve_offset(consumer, name.to_s, partition, start_offset)

-            start_offset = possible_range.first
-            count = possible_range.size
+          low_offset, high_offset = consumer.query_watermark_offsets(name, partition)

-            tpl.add_topic_and_partitions_with_offsets(name, partition => start_offset)
-            consumer.assign(tpl)
+          # Select offset dynamically if -1 or less and move backwards with the negative
+          # offset, allowing to start from N messages back from high-watermark
+          start_offset = high_offset - count - start_offset.abs + 1 if start_offset.negative?
+          start_offset = low_offset if start_offset.negative?

-            # We should poll as long as we don't have all the messages that we need or as long as
-            # we do not read all the messages from the topic
-            loop do
-              # If we've got as many messages as we've wanted stop
-              break if messages.size >= count
+          # Build the requested range - since first element is on the start offset we need to
+          # subtract one from requested count to end up with expected number of elements
+          requested_range = (start_offset..(start_offset + count - 1))
+          # Establish theoretical available range. Note, that this does not handle cases related
+          # to log retention or compaction
+          available_range = (low_offset..(high_offset - 1))
+          # Select only offset that we can select. This will remove all the potential offsets
+          # that are below the low watermark offset
+          possible_range = requested_range.select { |offset| available_range.include?(offset) }

-              message = consumer.poll(200)
+          start_offset = possible_range.first
+          count = possible_range.size

-              next unless message
+          tpl.add_topic_and_partitions_with_offsets(name, partition => start_offset)
+          consumer.assign(tpl)

-              # If the message we've got is beyond the requested range, stop
-              break unless possible_range.include?(message.offset)
+          # We should poll as long as we don't have all the messages that we need or as long as
+          # we do not read all the messages from the topic
+          loop do
+            # If we've got as many messages as we've wanted stop
+            break if messages.size >= count

-              messages << message
-            rescue Rdkafka::RdkafkaError => e
-              # End of partition
-              break if e.code == :partition_eof
+            message = consumer.poll(200)

-              raise e
-            end
-          end
+            next unless message

-          # Use topic from routes if we can match it or create a dummy one
-          # Dummy one is used in case we cannot match the topic with routes. This can happen
-          # when admin API is used to read topics that are not part of the routing
-          topic = Karafka::Routing::Router.find_or_initialize_by_name(name)
-
-          messages.map! do |message|
-            Messages::Builders::Message.call(
-              message,
-              topic,
-              Time.now
-            )
-          end
-        end
+            # If the message we've got is beyond the requested range, stop
+            break unless possible_range.include?(message.offset)

-        # Creates Kafka topic with given settings
-        #
-        # @param name [String] topic name
-        # @param partitions [Integer] number of partitions we expect
-        # @param replication_factor [Integer] number of replicas
-        # @param topic_config [Hash] topic config details as described here:
-        #   https://kafka.apache.org/documentation/#topicconfigs
-        #
-        # @return [void]
-        def create(name, partitions, replication_factor, topic_config = {})
-          with_admin do |admin|
-            handler = admin.create_topic(name, partitions, replication_factor, topic_config)
+            messages << message
+          rescue Rdkafka::RdkafkaError => e
+            # End of partition
+            break if e.code == :partition_eof

-            with_re_wait(
-              -> { handler.wait(max_wait_timeout: max_wait_time_seconds) },
-              -> { names.include?(name) }
-            )
+            raise e
           end
         end

-        # Deleted a given topic
-        #
-        # @param name [String] topic name
-        #
-        # @return [void]
-        def delete(name)
-          with_admin do |admin|
-            handler = admin.delete_topic(name)
+        # Use topic from routes if we can match it or create a dummy one
+        # Dummy one is used in case we cannot match the topic with routes. This can happen
+        # when admin API is used to read topics that are not part of the routing
+        topic = Karafka::Routing::Router.find_or_initialize_by_name(name)

-            with_re_wait(
-              -> { handler.wait(max_wait_timeout: max_wait_time_seconds) },
-              -> { !names.include?(name) }
-            )
-          end
+        messages.map! do |message|
+          Messages::Builders::Message.call(
+            message,
+            topic,
+            Time.now
+          )
         end
+      end

-        # Creates more partitions for a given topic
-        #
-        # @param name [String] topic name
-        # @param partitions [Integer] total number of partitions we expect to end up with
-        #
-        # @return [void]
-        def create_partitions(name, partitions)
-          with_admin do |admin|
-            handler = admin.create_partitions(name, partitions)
+      # Creates Kafka topic with given settings
+      #
+      # @param name [String] topic name
+      # @param partitions [Integer] number of partitions we expect
+      # @param replication_factor [Integer] number of replicas
+      # @param topic_config [Hash] topic config details as described here:
+      #   https://kafka.apache.org/documentation/#topicconfigs
+      #
+      # @return [void]
+      def create(name, partitions, replication_factor, topic_config = {})
+        with_admin do |admin|
+          handler = admin.create_topic(name, partitions, replication_factor, topic_config)

-            with_re_wait(
-              -> { handler.wait(max_wait_timeout: max_wait_time_seconds) },
-              -> { info(name).fetch(:partition_count) >= partitions }
-            )
-          end
+          with_re_wait(
+            -> { handler.wait(max_wait_timeout: max_wait_time_seconds) },
+            -> { names.include?(name) }
+          )
         end
+      end

-        # Fetches the watermark offsets for a given topic partition or multiple topics and
-        # partitions
-        #
-        # @param name_or_hash [String, Symbol, Hash] topic name or hash with topics and partitions
-        # @param partition [Integer, nil] partition number
-        #   (required when first param is topic name)
-        #
-        # @return [Array<Integer, Integer>, Hash] when querying single partition returns array with
-        #   low and high watermark offsets, when querying multiple returns nested hash
-        #
-        # @example Query single partition
-        #   Karafka::Admin::Topics.read_watermark_offsets('events', 0)
-        #   # => [0, 100]
-        #
-        # @example Query specific partitions across multiple topics
-        #   Karafka::Admin::Topics.read_watermark_offsets(
-        #     { 'events' => [0, 1], 'logs' => [0] }
-        #   )
-        #   # => {
-        #   #   'events' => {
-        #   #     0 => [0, 100],
-        #   #     1 => [0, 150]
-        #   #   },
-        #   #   'logs' => {
-        #   #     0 => [0, 50]
-        #   #   }
-        #   # }
-        def read_watermark_offsets(name_or_hash, partition = nil)
-          # Normalize input to hash format
-          topics_with_partitions = partition ? { name_or_hash => [partition] } : name_or_hash
+      # Deleted a given topic
+      #
+      # @param name [String] topic name
+      #
+      # @return [void]
+      def delete(name)
+        with_admin do |admin|
+          handler = admin.delete_topic(name)

-          result = Hash.new { |h, k| h[k] = {} }
+          with_re_wait(
+            -> { handler.wait(max_wait_timeout: max_wait_time_seconds) },
+            -> { !names.include?(name) }
+          )
+        end
+      end

-          with_consumer do |consumer|
-            topics_with_partitions.each do |topic, partitions|
-              partitions.each do |partition_id|
-                result[topic][partition_id] = consumer.query_watermark_offsets(topic, partition_id)
-              end
-            end
-          end
+      # Creates more partitions for a given topic
+      #
+      # @param name [String] topic name
+      # @param partitions [Integer] total number of partitions we expect to end up with
+      #
+      # @return [void]
+      def create_partitions(name, partitions)
+        with_admin do |admin|
+          handler = admin.create_partitions(name, partitions)

-          # Return single array for single partition query, hash for multiple
-          partition ? result.dig(name_or_hash, partition) : result
+          with_re_wait(
+            -> { handler.wait(max_wait_timeout: max_wait_time_seconds) },
+            -> { info(name).fetch(:partition_count) >= partitions }
+          )
         end
+      end

-        # Returns basic topic metadata
-        #
-        # @param topic_name [String] name of the topic we're interested in
-        # @return [Hash] topic metadata info hash
-        # @raise [Rdkafka::RdkafkaError] `unknown_topic_or_part` if requested topic is not found
-        #
-        # @note This query is much more efficient than doing a full `#cluster_info` + topic lookup
-        #   because it does not have to query for all the topics data but just the topic we're
-        #   interested in
-        def info(topic_name)
-          with_admin do |admin|
-            admin
-              .metadata(topic_name)
-              .topics
-              .find { |topic| topic[:topic_name] == topic_name }
+      # Fetches the watermark offsets for a given topic partition or multiple topics and
+      # partitions
+      #
+      # @param name_or_hash [String, Symbol, Hash] topic name or hash with topics and partitions
+      # @param partition [Integer, nil] partition number
+      #   (required when first param is topic name)
+      #
+      # @return [Array<Integer, Integer>, Hash] when querying single partition returns array with
+      #   low and high watermark offsets, when querying multiple returns nested hash
+      #
+      # @example Query single partition
+      #   Karafka::Admin::Topics.read_watermark_offsets('events', 0)
+      #   # => [0, 100]
+      #
+      # @example Query specific partitions across multiple topics
+      #   Karafka::Admin::Topics.read_watermark_offsets(
+      #     { 'events' => [0, 1], 'logs' => [0] }
+      #   )
+      #   # => {
+      #   #   'events' => {
+      #   #     0 => [0, 100],
+      #   #     1 => [0, 150]
+      #   #   },
+      #   #   'logs' => {
+      #   #     0 => [0, 50]
+      #   #   }
+      #   # }
+      def read_watermark_offsets(name_or_hash, partition = nil)
+        # Normalize input to hash format
+        topics_with_partitions = partition ? { name_or_hash => [partition] } : name_or_hash
+
+        result = Hash.new { |h, k| h[k] = {} }
+
+        with_consumer do |consumer|
+          topics_with_partitions.each do |topic, partitions|
+            partitions.each do |partition_id|
+              result[topic][partition_id] = consumer.query_watermark_offsets(topic, partition_id)
+            end
           end
         end

-        private
+        # Return single array for single partition query, hash for multiple
+        partition ? result.dig(name_or_hash, partition) : result
+      end

-        # @return [Array<String>] topics names
-        def names
-          cluster_info.topics.map { |topic| topic.fetch(:topic_name) }
+      # Returns basic topic metadata
+      #
+      # @param topic_name [String] name of the topic we're interested in
+      # @return [Hash] topic metadata info hash
+      # @raise [Rdkafka::RdkafkaError] `unknown_topic_or_part` if requested topic is not found
+      #
+      # @note This query is much more efficient than doing a full `#cluster_info` + topic lookup
+      #   because it does not have to query for all the topics data but just the topic we're
+      #   interested in
+      def info(topic_name)
+        with_admin do |admin|
+          admin
+            .metadata(topic_name)
+            .topics
+            .find { |topic| topic[:topic_name] == topic_name }
         end
+      end

-        # Resolves the offset if offset is in a time format. Otherwise returns the offset without
-        # resolving.
-        # @param consumer [::Rdkafka::Consumer]
-        # @param name [String, Symbol] expected topic name
-        # @param partition [Integer]
-        # @param offset [Integer, Time]
-        # @return [Integer] expected offset
-        def resolve_offset(consumer, name, partition, offset)
-          if offset.is_a?(Time)
-            tpl = Rdkafka::Consumer::TopicPartitionList.new
-            tpl.add_topic_and_partitions_with_offsets(
-              name, partition => offset
-            )
-
-            real_offsets = consumer.offsets_for_times(tpl)
-            detected_offset = real_offsets
-              .to_h
-              .fetch(name)
-              .find { |p_data| p_data.partition == partition }
-
-            detected_offset&.offset || raise(Errors::InvalidTimeBasedOffsetError)
-          else
-            offset
-          end
+      private
+
+      # @return [Array<String>] topics names
+      def names
+        cluster_info.topics.map { |topic| topic.fetch(:topic_name) }
+      end
+
+      # Resolves the offset if offset is in a time format. Otherwise returns the offset without
+      # resolving.
+      # @param consumer [::Rdkafka::Consumer]
+      # @param name [String, Symbol] expected topic name
+      # @param partition [Integer]
+      # @param offset [Integer, Time]
+      # @return [Integer] expected offset
+      def resolve_offset(consumer, name, partition, offset)
+        if offset.is_a?(Time)
+          tpl = Rdkafka::Consumer::TopicPartitionList.new
+          tpl.add_topic_and_partitions_with_offsets(
+            name, partition => offset
+          )
+
+          real_offsets = consumer.offsets_for_times(tpl)
+          detected_offset = real_offsets
+            .to_h
+            .fetch(name)
+            .find { |p_data| p_data.partition == partition }
+
+          detected_offset&.offset || raise(Errors::InvalidTimeBasedOffsetError)
+        else
+          offset
         end
       end
     end
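
For orientation, a short usage sketch of the refactored admin API follows. It relies only on the method signatures visible in the diff above (each class method now delegates to a fresh instance via `new.read(...)` and friends); the topic names, partition numbers and counts are hypothetical examples, not values taken from this release.

# Hypothetical usage of Karafka::Admin::Topics, based on the signatures above
require 'karafka'

# Create a topic with 3 partitions and a replication factor of 2
Karafka::Admin::Topics.create('events', 3, 2)

# Read at most 10 messages from partition 0. The default start_offset of -1
# resolves to high_watermark - count, i.e. the latest 10 messages
messages = Karafka::Admin::Topics.read('events', 0, 10)

# A single-partition watermark query returns a [low, high] array
low, high = Karafka::Admin::Topics.read_watermark_offsets('events', 0)

# The hash form queries several topic/partition pairs in one consumer
# session and returns a nested hash, as shown in the @example docs above
offsets = Karafka::Admin::Topics.read_watermark_offsets(
  { 'events' => [0, 1], 'logs' => [0] }
)

# Grow 'events' to 5 partitions in total
Karafka::Admin::Topics.create_partitions('events', 5)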