karafka-web 0.6.3 → 0.7.0

Files changed (214)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +13 -4
  4. data/CHANGELOG.md +119 -5
  5. data/Gemfile +1 -0
  6. data/Gemfile.lock +27 -24
  7. data/README.md +2 -0
  8. data/bin/rspecs +6 -0
  9. data/certs/cert_chain.pem +21 -21
  10. data/docker-compose.yml +22 -0
  11. data/karafka-web.gemspec +3 -3
  12. data/lib/karafka/web/app.rb +6 -2
  13. data/lib/karafka/web/cli.rb +51 -47
  14. data/lib/karafka/web/config.rb +33 -9
  15. data/lib/karafka/web/contracts/base.rb +32 -0
  16. data/lib/karafka/web/contracts/config.rb +63 -0
  17. data/lib/karafka/web/deserializer.rb +10 -1
  18. data/lib/karafka/web/errors.rb +29 -7
  19. data/lib/karafka/web/installer.rb +58 -148
  20. data/lib/karafka/web/management/base.rb +34 -0
  21. data/lib/karafka/web/management/clean_boot_file.rb +31 -0
  22. data/lib/karafka/web/management/create_initial_states.rb +101 -0
  23. data/lib/karafka/web/management/create_topics.rb +127 -0
  24. data/lib/karafka/web/management/delete_topics.rb +28 -0
  25. data/lib/karafka/web/management/enable.rb +82 -0
  26. data/lib/karafka/web/management/extend_boot_file.rb +37 -0
  27. data/lib/karafka/web/processing/consumer.rb +73 -17
  28. data/lib/karafka/web/processing/consumers/aggregators/base.rb +56 -0
  29. data/lib/karafka/web/processing/consumers/aggregators/metrics.rb +154 -0
  30. data/lib/karafka/web/processing/consumers/aggregators/state.rb +180 -0
  31. data/lib/karafka/web/processing/consumers/contracts/aggregated_stats.rb +32 -0
  32. data/lib/karafka/web/processing/consumers/contracts/metrics.rb +53 -0
  33. data/lib/karafka/web/processing/consumers/contracts/process.rb +19 -0
  34. data/lib/karafka/web/processing/consumers/contracts/state.rb +49 -0
  35. data/lib/karafka/web/processing/consumers/contracts/topic_stats.rb +21 -0
  36. data/lib/karafka/web/processing/consumers/metrics.rb +29 -0
  37. data/lib/karafka/web/processing/consumers/schema_manager.rb +56 -0
  38. data/lib/karafka/web/processing/consumers/state.rb +6 -9
  39. data/lib/karafka/web/processing/time_series_tracker.rb +130 -0
  40. data/lib/karafka/web/tracking/consumers/contracts/consumer_group.rb +2 -2
  41. data/lib/karafka/web/tracking/consumers/contracts/job.rb +2 -1
  42. data/lib/karafka/web/tracking/consumers/contracts/partition.rb +14 -1
  43. data/lib/karafka/web/tracking/consumers/contracts/report.rb +10 -8
  44. data/lib/karafka/web/tracking/consumers/contracts/subscription_group.rb +2 -2
  45. data/lib/karafka/web/tracking/consumers/contracts/topic.rb +2 -2
  46. data/lib/karafka/web/tracking/consumers/listeners/processing.rb +6 -2
  47. data/lib/karafka/web/tracking/consumers/listeners/statistics.rb +15 -1
  48. data/lib/karafka/web/tracking/consumers/reporter.rb +14 -6
  49. data/lib/karafka/web/tracking/consumers/sampler.rb +80 -39
  50. data/lib/karafka/web/tracking/contracts/error.rb +2 -1
  51. data/lib/karafka/web/ui/app.rb +20 -10
  52. data/lib/karafka/web/ui/base.rb +56 -6
  53. data/lib/karafka/web/ui/controllers/base.rb +28 -0
  54. data/lib/karafka/web/ui/controllers/become_pro.rb +1 -1
  55. data/lib/karafka/web/ui/controllers/cluster.rb +12 -6
  56. data/lib/karafka/web/ui/controllers/consumers.rb +4 -2
  57. data/lib/karafka/web/ui/controllers/dashboard.rb +32 -0
  58. data/lib/karafka/web/ui/controllers/errors.rb +19 -6
  59. data/lib/karafka/web/ui/controllers/jobs.rb +4 -2
  60. data/lib/karafka/web/ui/controllers/requests/params.rb +28 -0
  61. data/lib/karafka/web/ui/controllers/responses/redirect.rb +29 -0
  62. data/lib/karafka/web/ui/helpers/application_helper.rb +57 -14
  63. data/lib/karafka/web/ui/helpers/paths_helper.rb +48 -0
  64. data/lib/karafka/web/ui/lib/hash_proxy.rb +18 -6
  65. data/lib/karafka/web/ui/lib/paginations/base.rb +61 -0
  66. data/lib/karafka/web/ui/lib/paginations/offset_based.rb +96 -0
  67. data/lib/karafka/web/ui/lib/paginations/page_based.rb +70 -0
  68. data/lib/karafka/web/ui/lib/paginations/paginators/arrays.rb +33 -0
  69. data/lib/karafka/web/ui/lib/paginations/paginators/base.rb +23 -0
  70. data/lib/karafka/web/ui/lib/paginations/paginators/partitions.rb +52 -0
  71. data/lib/karafka/web/ui/lib/paginations/paginators/sets.rb +85 -0
  72. data/lib/karafka/web/ui/lib/paginations/watermark_offsets_based.rb +75 -0
  73. data/lib/karafka/web/ui/lib/ttl_cache.rb +82 -0
  74. data/lib/karafka/web/ui/models/cluster_info.rb +59 -0
  75. data/lib/karafka/web/ui/models/consumers_metrics.rb +46 -0
  76. data/lib/karafka/web/ui/models/{state.rb → consumers_state.rb} +6 -2
  77. data/lib/karafka/web/ui/models/health.rb +37 -7
  78. data/lib/karafka/web/ui/models/message.rb +123 -39
  79. data/lib/karafka/web/ui/models/metrics/aggregated.rb +196 -0
  80. data/lib/karafka/web/ui/models/metrics/charts/aggregated.rb +50 -0
  81. data/lib/karafka/web/ui/models/metrics/charts/topics.rb +109 -0
  82. data/lib/karafka/web/ui/models/metrics/topics.rb +101 -0
  83. data/lib/karafka/web/ui/models/partition.rb +27 -0
  84. data/lib/karafka/web/ui/models/process.rb +12 -1
  85. data/lib/karafka/web/ui/models/status.rb +110 -22
  86. data/lib/karafka/web/ui/models/visibility_filter.rb +33 -0
  87. data/lib/karafka/web/ui/pro/app.rb +87 -19
  88. data/lib/karafka/web/ui/pro/controllers/cluster.rb +11 -0
  89. data/lib/karafka/web/ui/pro/controllers/consumers.rb +13 -7
  90. data/lib/karafka/web/ui/pro/controllers/dashboard.rb +54 -0
  91. data/lib/karafka/web/ui/pro/controllers/dlq.rb +1 -2
  92. data/lib/karafka/web/ui/pro/controllers/errors.rb +46 -10
  93. data/lib/karafka/web/ui/pro/controllers/explorer.rb +145 -15
  94. data/lib/karafka/web/ui/pro/controllers/health.rb +10 -2
  95. data/lib/karafka/web/ui/pro/controllers/messages.rb +62 -0
  96. data/lib/karafka/web/ui/pro/controllers/routing.rb +44 -0
  97. data/lib/karafka/web/ui/pro/views/consumers/_breadcrumbs.erb +7 -1
  98. data/lib/karafka/web/ui/pro/views/consumers/_consumer.erb +1 -1
  99. data/lib/karafka/web/ui/pro/views/consumers/_counters.erb +7 -5
  100. data/lib/karafka/web/ui/pro/views/consumers/consumer/_job.erb +3 -3
  101. data/lib/karafka/web/ui/pro/views/consumers/consumer/_metrics.erb +5 -4
  102. data/lib/karafka/web/ui/pro/views/consumers/consumer/_partition.erb +13 -4
  103. data/lib/karafka/web/ui/pro/views/consumers/consumer/_subscription_group.erb +3 -2
  104. data/lib/karafka/web/ui/pro/views/consumers/consumer/_tabs.erb +7 -0
  105. data/lib/karafka/web/ui/pro/views/consumers/details.erb +21 -0
  106. data/lib/karafka/web/ui/pro/views/consumers/index.erb +4 -2
  107. data/lib/karafka/web/ui/pro/views/dashboard/_ranges_selector.erb +39 -0
  108. data/lib/karafka/web/ui/pro/views/dashboard/index.erb +82 -0
  109. data/lib/karafka/web/ui/pro/views/dlq/_topic.erb +1 -1
  110. data/lib/karafka/web/ui/pro/views/errors/_breadcrumbs.erb +8 -6
  111. data/lib/karafka/web/ui/pro/views/errors/_error.erb +2 -2
  112. data/lib/karafka/web/ui/pro/views/errors/_partition_option.erb +1 -1
  113. data/lib/karafka/web/ui/pro/views/errors/_table.erb +21 -0
  114. data/lib/karafka/web/ui/pro/views/errors/_title_with_select.erb +31 -0
  115. data/lib/karafka/web/ui/pro/views/errors/index.erb +9 -56
  116. data/lib/karafka/web/ui/pro/views/errors/partition.erb +17 -0
  117. data/lib/karafka/web/ui/pro/views/errors/show.erb +1 -1
  118. data/lib/karafka/web/ui/pro/views/explorer/_breadcrumbs.erb +6 -4
  119. data/lib/karafka/web/ui/pro/views/explorer/_filtered.erb +16 -0
  120. data/lib/karafka/web/ui/pro/views/explorer/_message.erb +14 -4
  121. data/lib/karafka/web/ui/pro/views/explorer/_no_topics.erb +7 -0
  122. data/lib/karafka/web/ui/pro/views/explorer/_partition_option.erb +3 -3
  123. data/lib/karafka/web/ui/pro/views/explorer/_topic.erb +1 -1
  124. data/lib/karafka/web/ui/pro/views/explorer/index.erb +12 -8
  125. data/lib/karafka/web/ui/pro/views/explorer/messages/_headers.erb +15 -0
  126. data/lib/karafka/web/ui/pro/views/explorer/messages/_key.erb +12 -0
  127. data/lib/karafka/web/ui/pro/views/explorer/partition/_details.erb +35 -0
  128. data/lib/karafka/web/ui/pro/views/explorer/partition/_messages.erb +1 -0
  129. data/lib/karafka/web/ui/pro/views/explorer/partition.erb +6 -4
  130. data/lib/karafka/web/ui/pro/views/explorer/show.erb +48 -5
  131. data/lib/karafka/web/ui/pro/views/explorer/topic/_details.erb +23 -0
  132. data/lib/karafka/web/ui/pro/views/explorer/topic/_empty.erb +3 -0
  133. data/lib/karafka/web/ui/pro/views/explorer/topic/_limited.erb +4 -0
  134. data/lib/karafka/web/ui/pro/views/explorer/topic.erb +51 -0
  135. data/lib/karafka/web/ui/pro/views/health/_breadcrumbs.erb +16 -0
  136. data/lib/karafka/web/ui/pro/views/health/_no_data.erb +9 -0
  137. data/lib/karafka/web/ui/pro/views/health/_partition.erb +17 -15
  138. data/lib/karafka/web/ui/pro/views/health/_partition_offset.erb +40 -0
  139. data/lib/karafka/web/ui/pro/views/health/_tabs.erb +27 -0
  140. data/lib/karafka/web/ui/pro/views/health/offsets.erb +71 -0
  141. data/lib/karafka/web/ui/pro/views/health/overview.erb +68 -0
  142. data/lib/karafka/web/ui/pro/views/jobs/_job.erb +6 -3
  143. data/lib/karafka/web/ui/pro/views/jobs/index.erb +4 -1
  144. data/lib/karafka/web/ui/pro/views/routing/_consumer_group.erb +37 -0
  145. data/lib/karafka/web/ui/pro/views/routing/_detail.erb +25 -0
  146. data/lib/karafka/web/ui/pro/views/routing/_topic.erb +23 -0
  147. data/lib/karafka/web/ui/pro/views/routing/index.erb +10 -0
  148. data/lib/karafka/web/ui/pro/views/routing/show.erb +26 -0
  149. data/lib/karafka/web/ui/pro/views/shared/_navigation.erb +7 -10
  150. data/lib/karafka/web/ui/public/images/logo-gray.svg +28 -0
  151. data/lib/karafka/web/ui/public/javascripts/application.js +30 -0
  152. data/lib/karafka/web/ui/public/javascripts/chart.min.js +14 -0
  153. data/lib/karafka/web/ui/public/javascripts/charts.js +330 -0
  154. data/lib/karafka/web/ui/public/javascripts/datepicker.js +6 -0
  155. data/lib/karafka/web/ui/public/javascripts/live_poll.js +39 -12
  156. data/lib/karafka/web/ui/public/javascripts/offset_datetime.js +74 -0
  157. data/lib/karafka/web/ui/public/javascripts/tabs.js +59 -0
  158. data/lib/karafka/web/ui/public/stylesheets/application.css +11 -0
  159. data/lib/karafka/web/ui/public/stylesheets/datepicker.min.css +12 -0
  160. data/lib/karafka/web/ui/views/cluster/_no_partitions.erb +3 -0
  161. data/lib/karafka/web/ui/views/cluster/_partition.erb +20 -22
  162. data/lib/karafka/web/ui/views/cluster/index.erb +6 -1
  163. data/lib/karafka/web/ui/views/consumers/_consumer.erb +1 -1
  164. data/lib/karafka/web/ui/views/consumers/_counters.erb +6 -4
  165. data/lib/karafka/web/ui/views/consumers/_summary.erb +3 -3
  166. data/lib/karafka/web/ui/views/consumers/index.erb +3 -1
  167. data/lib/karafka/web/ui/views/dashboard/_feature_pro.erb +3 -0
  168. data/lib/karafka/web/ui/views/dashboard/_not_enough_data.erb +15 -0
  169. data/lib/karafka/web/ui/views/dashboard/_ranges_selector.erb +23 -0
  170. data/lib/karafka/web/ui/views/dashboard/index.erb +95 -0
  171. data/lib/karafka/web/ui/views/errors/_detail.erb +12 -0
  172. data/lib/karafka/web/ui/views/errors/_error.erb +2 -2
  173. data/lib/karafka/web/ui/views/errors/show.erb +1 -1
  174. data/lib/karafka/web/ui/views/jobs/index.erb +3 -1
  175. data/lib/karafka/web/ui/views/layout.erb +10 -3
  176. data/lib/karafka/web/ui/views/routing/_consumer_group.erb +8 -6
  177. data/lib/karafka/web/ui/views/routing/_detail.erb +2 -2
  178. data/lib/karafka/web/ui/views/routing/_topic.erb +1 -1
  179. data/lib/karafka/web/ui/views/routing/show.erb +1 -1
  180. data/lib/karafka/web/ui/views/shared/_brand.erb +2 -2
  181. data/lib/karafka/web/ui/views/shared/_chart.erb +14 -0
  182. data/lib/karafka/web/ui/views/shared/_content.erb +2 -2
  183. data/lib/karafka/web/ui/views/shared/_feature_pro.erb +1 -1
  184. data/lib/karafka/web/ui/views/shared/_flashes.erb +9 -0
  185. data/lib/karafka/web/ui/views/shared/_footer.erb +22 -0
  186. data/lib/karafka/web/ui/views/shared/_header.erb +15 -9
  187. data/lib/karafka/web/ui/views/shared/_live_poll.erb +7 -0
  188. data/lib/karafka/web/ui/views/shared/_navigation.erb +5 -8
  189. data/lib/karafka/web/ui/views/shared/_no_paginated_data.erb +9 -0
  190. data/lib/karafka/web/ui/views/shared/_pagination.erb +17 -13
  191. data/lib/karafka/web/ui/views/shared/_tab_nav.erb +7 -0
  192. data/lib/karafka/web/ui/views/shared/exceptions/not_found.erb +34 -32
  193. data/lib/karafka/web/ui/views/shared/exceptions/pro_only.erb +45 -43
  194. data/lib/karafka/web/ui/views/status/failures/_consumers_reports_schema_state.erb +15 -0
  195. data/lib/karafka/web/ui/views/status/failures/_enabled.erb +8 -0
  196. data/lib/karafka/web/ui/views/status/failures/_initial_consumers_metrics.erb +11 -0
  197. data/lib/karafka/web/ui/views/status/failures/{_initial_state.erb → _initial_consumers_state.erb} +3 -3
  198. data/lib/karafka/web/ui/views/status/failures/_partitions.erb +14 -6
  199. data/lib/karafka/web/ui/views/status/info/_components.erb +21 -1
  200. data/lib/karafka/web/ui/views/status/show.erb +62 -5
  201. data/lib/karafka/web/ui/views/status/successes/_enabled.erb +1 -0
  202. data/lib/karafka/web/ui/views/status/warnings/_replication.erb +19 -0
  203. data/lib/karafka/web/version.rb +1 -1
  204. data/lib/karafka/web.rb +11 -0
  205. data.tar.gz.sig +0 -0
  206. metadata +124 -39
  207. metadata.gz.sig +0 -0
  208. data/lib/karafka/web/processing/consumers/aggregator.rb +0 -130
  209. data/lib/karafka/web/tracking/contracts/base.rb +0 -34
  210. data/lib/karafka/web/ui/lib/paginate_array.rb +0 -38
  211. data/lib/karafka/web/ui/pro/views/explorer/_encryption_enabled.erb +0 -18
  212. data/lib/karafka/web/ui/pro/views/explorer/partition/_watermark_offsets.erb +0 -10
  213. data/lib/karafka/web/ui/pro/views/health/index.erb +0 -60
  214. /data/lib/karafka/web/ui/pro/views/explorer/{_detail.erb → messages/_detail.erb} +0 -0
data/lib/karafka/web/management/create_topics.rb
@@ -0,0 +1,127 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Web
+     module Management
+       # Creates all the needed topics (if they don't exist).
+       # It does **not** populate data.
+       class CreateTopics < Base
+         # Runs the creation process
+         #
+         # @param replication_factor [Integer] replication factor for Web-UI topics
+         def call(replication_factor)
+           consumers_states_topic = ::Karafka::Web.config.topics.consumers.states
+           consumers_metrics_topic = ::Karafka::Web.config.topics.consumers.metrics
+           consumers_reports_topic = ::Karafka::Web.config.topics.consumers.reports
+           errors_topic = ::Karafka::Web.config.topics.errors
+
+           # Create only if needed
+           if existing_topics_names.include?(consumers_states_topic)
+             exists(consumers_states_topic)
+           else
+             creating(consumers_states_topic)
+             # This topic needs to have one partition
+             ::Karafka::Admin.create_topic(
+               consumers_states_topic,
+               1,
+               replication_factor,
+               # We care only about the most recent state, previous are irrelevant. So we can easily
+               # compact after one minute. We do not use this beyond the most recent collective
+               # state, hence it all can easily go away. We also limit the segment size to at most
+               # 100MB not to use more space ever.
+               {
+                 'cleanup.policy': 'compact',
+                 'retention.ms': 60 * 60 * 1_000,
+                 'segment.ms': 24 * 60 * 60 * 1_000, # 1 day
+                 'segment.bytes': 104_857_600 # 100MB
+               }
+             )
+             created(consumers_states_topic)
+           end
+
+           if existing_topics_names.include?(consumers_metrics_topic)
+             exists(consumers_metrics_topic)
+           else
+             creating(consumers_metrics_topic)
+             # This topic needs to have one partition
+             # Same as states - only most recent is relevant as it is a materialized state
+             ::Karafka::Admin.create_topic(
+               consumers_metrics_topic,
+               1,
+               replication_factor,
+               {
+                 'cleanup.policy': 'compact',
+                 'retention.ms': 60 * 60 * 1_000, # 1h
+                 'segment.ms': 24 * 60 * 60 * 1_000, # 1 day
+                 'segment.bytes': 104_857_600 # 100MB
+               }
+             )
+             created(consumers_metrics_topic)
+           end
+
+           if existing_topics_names.include?(consumers_reports_topic)
+             exists(consumers_reports_topic)
+           else
+             creating(consumers_reports_topic)
+             # This topic needs to have one partition
+             ::Karafka::Admin.create_topic(
+               consumers_reports_topic,
+               1,
+               replication_factor,
+               # We do not need to to store this data for longer than 1 day as this data is only
+               # used to materialize the end states
+               # On the other hand we do not want to have it really short-living because in case of
+               # a consumer crash, we may want to use this info to catch up and backfill the state.
+               # In case its not consumed because no processes are running, it also usually means
+               # there's no data to consume because no karafka servers report
+               {
+                 'retention.ms': 24 * 60 * 60 * 1_000 # 1 day
+               }
+             )
+             created(consumers_reports_topic)
+           end
+
+           if existing_topics_names.include?(errors_topic)
+             exists(errors_topic)
+           else
+             creating(errors_topic)
+             # All the errors will be dispatched here
+             # This topic can have multiple partitions but we go with one by default. A single Ruby
+             # process should not crash that often and if there is an expectation of a higher volume
+             # of errors, this can be changed by the end user
+             ::Karafka::Admin.create_topic(
+               errors_topic,
+               1,
+               replication_factor,
+               # Remove really old errors (older than 3 months just to preserve space)
+               {
+                 'retention.ms': 3 * 31 * 24 * 60 * 60 * 1_000 # 3 months
+               }
+             )
+             created(errors_topic)
+           end
+         end
+
+         private
+
+         # @param topic_name [String] name of the topic that exists
+         # @return [String] formatted message
+         def exists(topic_name)
+           puts("Topic #{topic_name} #{already} exists.")
+         end
+
+         # @param topic_name [String] name of the topic that we are creating
+         # @return [String] formatted message
+         def creating(topic_name)
+           puts("Creating topic #{topic_name}...")
+         end
+
+         # @param topic_name [String] name of the topic that we created
+         # @return [String] formatted message
+         def created(topic_name)
+           puts("Topic #{topic_name} #{successfully} created.")
+         end
+       end
+     end
+   end
+ end
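The retention and segment settings above are written as arithmetic rather than as opaque numbers. As a quick sanity check, here is what they evaluate to (plain Ruby, all values in milliseconds except the byte cap):

  one_hour     = 60 * 60 * 1_000                # => 3_600_000      (states/metrics 'retention.ms')
  one_day      = 24 * 60 * 60 * 1_000           # => 86_400_000     (reports 'retention.ms', 'segment.ms')
  three_months = 3 * 31 * 24 * 60 * 60 * 1_000  # => 8_035_200_000  (errors 'retention.ms')
  segment_max  = 104_857_600                    # => 100 MiB        ('segment.bytes' cap on compacted topics)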
data/lib/karafka/web/management/delete_topics.rb
@@ -0,0 +1,28 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Web
+     module Management
+       # Removes the Web-UI topics from Kafka
+       class DeleteTopics < Base
+         # Removes the Web-UI topics
+         def call
+           [
+             ::Karafka::Web.config.topics.consumers.states,
+             ::Karafka::Web.config.topics.consumers.reports,
+             ::Karafka::Web.config.topics.consumers.metrics,
+             ::Karafka::Web.config.topics.errors
+           ].each do |topic_name|
+             if existing_topics_names.include?(topic_name.to_s)
+               puts "Removing #{topic_name}..."
+               ::Karafka::Admin.delete_topic(topic_name)
+               puts "Topic #{topic_name} #{successfully} deleted."
+             else
+               puts "Topic #{topic_name} not found."
+             end
+           end
+         end
+       end
+     end
+   end
+ end
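For orientation, a full reset of the Web-UI data boils down to deleting and recreating these topics. A minimal sketch of such a flow follows; the no-argument constructors and the direct use of these management classes are assumptions (their shared base lives in management/base.rb, which is not shown in this excerpt), and the replication factor of 2 is only an example value:

  # Hypothetical reset flow composed from the management commands above
  Karafka::Web::Management::DeleteTopics.new.call
  Karafka::Web::Management::CreateTopics.new.call(2)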
data/lib/karafka/web/management/enable.rb
@@ -0,0 +1,82 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Web
+     module Management
+       # @note This runs on each process start that has `karafka.rb`. It needs to be executed
+       #   also in the context of other processes types and not only karafka server, because it
+       #   installs producers instrumentation and routing as well.
+       class Enable < Base
+         # Enables routing consumer group and subscribes Web-UI listeners
+         def call
+           extend_routing
+           subscribe_to_monitor
+         end
+
+         private
+
+         # Enables all the needed routes
+         def extend_routing
+           ::Karafka::App.routes.draw do
+             web_deserializer = ::Karafka::Web::Deserializer.new
+
+             consumer_group ::Karafka::Web.config.processing.consumer_group do
+               # Topic we listen on to materialize the states
+               topic ::Karafka::Web.config.topics.consumers.reports do
+                 config(active: false)
+                 active ::Karafka::Web.config.processing.active
+                 # Since we materialize state in intervals, we can poll for half of this time without
+                 # impacting the reporting responsiveness
+                 max_wait_time ::Karafka::Web.config.processing.interval / 2
+                 max_messages 1_000
+                 consumer ::Karafka::Web::Processing::Consumer
+                 # This needs to be true in order not to reload the consumer in dev. This consumer
+                 # should not be affected by the end user development process
+                 consumer_persistence true
+                 deserializer web_deserializer
+                 manual_offset_management true
+                 # Start from the most recent data, do not materialize historical states
+                 # This prevents us from dealing with cases, where client id would be changed and
+                 # consumer group name would be renamed and we would start consuming all historical
+                 initial_offset 'latest'
+               end
+
+               # We define those three here without consumption, so Web understands how to deserialize
+               # them when used / viewed
+               topic ::Karafka::Web.config.topics.consumers.states do
+                 config(active: false)
+                 active false
+                 deserializer web_deserializer
+               end
+
+               topic ::Karafka::Web.config.topics.consumers.metrics do
+                 config(active: false)
+                 active false
+                 deserializer web_deserializer
+               end
+
+               topic ::Karafka::Web.config.topics.errors do
+                 config(active: false)
+                 active false
+                 deserializer web_deserializer
+               end
+             end
+           end
+         end
+
+         # Subscribes with all needed listeners
+         def subscribe_to_monitor
+           # Installs all the consumer related listeners
+           ::Karafka::Web.config.tracking.consumers.listeners.each do |listener|
+             ::Karafka.monitor.subscribe(listener)
+           end
+
+           # Installs all the producer related listeners
+           ::Karafka::Web.config.tracking.producers.listeners.each do |listener|
+             ::Karafka.producer.monitor.subscribe(listener)
+           end
+         end
+       end
+     end
+   end
+ end
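This class is not called directly by applications; the boot file invokes `Karafka::Web.enable!` (see the ExtendBootFile template below), which is what wires in this routing and instrumentation. Serving the UI itself is a separate step. A minimal sketch for a Rails app, assuming the Rack app exposed as `Karafka::Web::App` (present in this release's file list) is mounted the way the Karafka Web UI documentation describes:

  # config/routes.rb
  require 'karafka/web'

  Rails.application.routes.draw do
    mount Karafka::Web::App, at: '/karafka'
  end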
data/lib/karafka/web/management/extend_boot_file.rb
@@ -0,0 +1,37 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Web
+     module Management
+       # Extends the boot file with Web components
+       class ExtendBootFile < Base
+         # Code that is needed in the `karafka.rb` to connect Web UI to Karafka
+         ENABLER_CODE = 'Karafka::Web.enable!'
+
+         # Template with initial Web UI configuration
+         # Session secret needs to be set per user and per env
+         SETUP_TEMPLATE = <<~CONFIG.freeze
+           Karafka::Web.setup do |config|
+             # You may want to set it per ENV. This value was randomly generated.
+             config.ui.sessions.secret = '#{SecureRandom.hex(32)}'
+           end
+
+           #{ENABLER_CODE}
+         CONFIG
+
+         # Adds needed code
+         def call
+           if File.read(Karafka.boot_file).include?(ENABLER_CODE)
+             puts "Web UI #{already} installed."
+           else
+             puts 'Updating the Karafka boot file...'
+             File.open(Karafka.boot_file, 'a') do |f|
+               f << "\n#{SETUP_TEMPLATE}\n"
+             end
+             puts "Karafka boot file #{successfully} updated."
+           end
+         end
+       end
+     end
+   end
+ end
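After installation, the tail of `karafka.rb` ends up looking like the template above, with a secret generated by `SecureRandom.hex(32)` (a 64-character hex string; the value below is only a placeholder):

  Karafka::Web.setup do |config|
    # You may want to set it per ENV. This value was randomly generated.
    config.ui.sessions.secret = 'replace-with-the-generated-64-char-hex-value'
  end

  Karafka::Web.enable!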
data/lib/karafka/web/processing/consumer.rb
@@ -15,50 +15,106 @@ module Karafka
          def initialize(*args)
            super
 
-           @flush_interval = ::Karafka::Web.config.processing.interval / 1_000
-           @consumers_aggregator = ::Karafka::Web.config.processing.consumers.aggregator
-           # We set this that way so we report with first batch and so we report in the development
-           # mode. In the development mode, there is a new instance per each invocation, thus we need
-           # to always initially report, so the web UI works well in the dev mode where consumer
-           # instances are not long-living.
+           @flush_interval = ::Karafka::Web.config.processing.interval
+
+           @schema_manager = Consumers::SchemaManager.new
+           @state_aggregator = Consumers::Aggregators::State.new(@schema_manager)
+           @state_contract = Consumers::Contracts::State.new
+
+           @metrics_aggregator = Consumers::Aggregators::Metrics.new
+           @metrics_contract = Consumers::Contracts::Metrics.new
+
+           # We set this that way so we report with first batch and so we report as fast as possible
            @flushed_at = monotonic_now - @flush_interval
          end
 
          # Aggregates consumers state into a single current state representation
          def consume
-           messages
-             .select { |message| message.payload[:type] == 'consumer' }
-             .each { |message| @consumers_aggregator.add(message.payload, message.offset) }
+           consumers_messages = messages.select { |message| message.payload[:type] == 'consumer' }
+
+           # If there is even one incompatible message, we need to stop
+           consumers_messages.each do |message|
+             unless @schema_manager.compatible?(message)
+               dispatch
+
+               raise ::Karafka::Web::Errors::Processing::IncompatibleSchemaError
+             end
+
+             # We need to run the aggregations on each message in order to compensate for
+             # potential lags.
+             @state_aggregator.add(message.payload, message.offset)
+             @metrics_aggregator.add_report(message.payload)
+             @metrics_aggregator.add_stats(@state_aggregator.stats)
+
+             # Optimize memory usage in pro
+             message.clean! if Karafka.pro?
+           end
 
            return unless periodic_flush?
 
-           flush
+           dispatch
 
            mark_as_consumed(messages.last)
          end
 
          # Flush final state on shutdown
          def shutdown
-           flush if @consumers_aggregator
+           return unless @state_aggregator
+
+           materialize
+           validate!
+           flush
          end
 
          private
 
+         # Flushes the state of the Web-UI to the DB
+         def dispatch
+           materialize
+           validate!
+           flush
+         end
+
          # @return [Boolean] is it time to persist the new current state
          def periodic_flush?
            (monotonic_now - @flushed_at) > @flush_interval
          end
 
+         # Materializes the current state and metrics for flushing
+         def materialize
+           @state = @state_aggregator.to_h
+           @metrics = @metrics_aggregator.to_h
+         end
+
+         # Ensures that the aggregated data complies with our schema expectation.
+         # If you ever get to this place, this is probably a bug and you should report it.
+         def validate!
+           @state_contract.validate!(@state)
+           @metrics_contract.validate!(@metrics)
+         end
+
          # Persists the new current state by flushing it to Kafka
          def flush
            @flushed_at = monotonic_now
 
-           producer.produce_async(
-             topic: Karafka::Web.config.topics.consumers.states,
-             payload: @consumers_aggregator.to_json,
-             # This will ensure that the consumer states are compacted
-             key: Karafka::Web.config.topics.consumers.states,
-             partition: 0
+           producer.produce_many_async(
+             [
+               {
+                 topic: Karafka::Web.config.topics.consumers.states,
+                 payload: Zlib::Deflate.deflate(@state.to_json),
+                 # This will ensure that the consumer states are compacted
+                 key: Karafka::Web.config.topics.consumers.states,
+                 partition: 0,
+                 headers: { 'zlib' => 'true' }
+               },
+               {
+                 topic: Karafka::Web.config.topics.consumers.metrics,
+                 payload: Zlib::Deflate.deflate(@metrics.to_json),
+                 key: Karafka::Web.config.topics.consumers.metrics,
+                 partition: 0,
+                 headers: { 'zlib' => 'true' }
+               }
+             ]
+           )
          end
        end
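The notable change in `flush` is that both the state and the new metrics documents are now Zlib-deflated, flagged with a `zlib` header, and published together via `produce_many_async`. The Web deserializer (also touched in this release) is expected to inflate these transparently; for reference only, hand-decoding such a message would look roughly like this sketch (the `decode_state` helper is hypothetical, not part of the library):

  require 'json'
  require 'zlib'

  # message is a Karafka message fetched from the consumers states or metrics topic
  def decode_state(message)
    raw = message.raw_payload
    # Payloads produced by the consumer above carry a 'zlib' => 'true' header
    raw = Zlib::Inflate.inflate(raw) if message.headers['zlib'] == 'true'
    JSON.parse(raw, symbolize_names: true)
  end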
data/lib/karafka/web/processing/consumers/aggregators/base.rb
@@ -0,0 +1,56 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Web
+     module Processing
+       module Consumers
+         # Namespace for data aggregators that track changes based on the incoming reports and
+         # aggregate metrics over time
+         module Aggregators
+           # Base for all the consumer related aggregators that operate on processes reports
+           #
+           # @note It is important to understand, that we operate here on a moment in time and this
+           #   moment may not mean "current" now. There might have been a lag and we may be catching
+           #   up on older states. This is why we use `@aggregated_from` time instead of the real
+           #   now. In case of a lag, we want to aggregate and catch up with data, without
+           #   assigning it to the time of processing but aligning it with the time from which the
+           #   given reports came. This allows us to compensate for the potential lag related to
+           #   rebalances, downtimes, failures, etc.
+           class Base
+             include ::Karafka::Core::Helpers::Time
+
+             def initialize
+               @active_reports = {}
+             end
+
+             # Adds report to the internal active reports hash and updates the aggregation time
+             # for internal time reference usage
+             # @param report [Hash] incoming process state report
+             def add(report)
+               memoize_process_report(report)
+               update_aggregated_from
+             end
+
+             private
+
+             # Updates the report for given process in memory
+             # @param report [Hash]
+             def memoize_process_report(report)
+               @active_reports[report[:process][:name]] = report
+             end
+
+             # Updates the time of the aggregation
+             #
+             # @return [Float] time of the aggregation
+             #
+             # @note Since this runs before eviction because of age, we always assume there is at
+             #   least one report from which we can take the dispatch time
+             def update_aggregated_from
+               @aggregated_from = @active_reports.values.map { |report| report[:dispatched_at] }.max
+             end
+           end
+         end
+       end
+     end
+   end
+ end
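The `@aggregated_from` bookkeeping is the key idea here: aggregation is anchored to the newest report dispatch time, not to wall-clock "now". A tiny illustration with hypothetical, trimmed-down reports (real reports are full process documents validated by the tracking contracts):

  aggregator = Karafka::Web::Processing::Consumers::Aggregators::Base.new

  aggregator.add({ process: { name: 'worker-1' }, dispatched_at: 1_000.0 })
  aggregator.add({ process: { name: 'worker-2' }, dispatched_at: 990.5 })

  # The internal @aggregated_from is now 1_000.0 - the max dispatched_at among active
  # reports - so catching up on a lagged backlog stays aligned with when the data was
  # actually produced rather than when it happens to be processed.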
data/lib/karafka/web/processing/consumers/aggregators/metrics.rb
@@ -0,0 +1,154 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Web
+     module Processing
+       module Consumers
+         module Aggregators
+           # Aggregates metrics for metrics topic. Tracks consumers data and converts it into a
+           # state that can then be used to enrich previous time based states to get a time-series
+           # values for charts and metrics
+           class Metrics < Base
+             # Current schema version
+             # This can be used in the future for detecting incompatible changes and writing
+             # migrations
+             SCHEMA_VERSION = '1.0.0'
+
+             def initialize
+               super
+               @aggregated_tracker = TimeSeriesTracker.new(metrics.fetch(:aggregated))
+               @consumer_groups_tracker = TimeSeriesTracker.new(metrics.fetch(:consumer_groups))
+             end
+
+             # Adds the current report to active reports and removes old once
+             #
+             # @param report [Hash] single process full report
+             def add_report(report)
+               add(report)
+               evict_expired_processes
+               add_consumers_groups_metrics
+             end
+
+             # Updates the aggregated stats metrics
+             #
+             # @param stats [Hash] aggregated statistics
+             def add_stats(stats)
+               metrics[:aggregated] = @aggregated_tracker.add(
+                 stats,
+                 @aggregated_from
+               )
+             end
+
+             # Converts our current knowledge into a report hash.
+             #
+             # @return [Hash] Statistics hash
+             #
+             # @note We materialize the consumers groups time series only here and not in real time,
+             #   because we materialize it based on the tracked active collective state. Materializing
+             #   on each update that would not be dispatched would be pointless.
+             def to_h
+               metrics[:schema_version] = SCHEMA_VERSION
+               metrics[:dispatched_at] = float_now
+               metrics[:aggregated] = @aggregated_tracker.to_h
+               metrics[:consumer_groups] = @consumer_groups_tracker.to_h
+
+               metrics
+             end
+
+             private
+
+             # @return [Hash] the initial metric taken from Kafka
+             def metrics
+               @metrics ||= Consumers::Metrics.current!
+             end
+
+             # Evicts outdated reports.
+             #
+             # @note This eviction differs from the one that we have for the states. For states we
+             #   do not evict stopped because we want to report them for a moment. Here we do not
+             #   care about what a stopped process was doing and we can also remove it from active
+             #   reports.
+             def evict_expired_processes
+               max_ttl = @aggregated_from - ::Karafka::Web.config.ttl / 1_000
+
+               @active_reports.delete_if do |_name, report|
+                 report[:dispatched_at] < max_ttl || report[:process][:status] == 'stopped'
+               end
+             end
+
+             # Materialize and add consumers groups states into the tracker
+             def add_consumers_groups_metrics
+               @consumer_groups_tracker.add(
+                 materialize_consumers_groups_current_state,
+                 @aggregated_from
+               )
+             end
+
+             # Materializes the current state of consumers group data
+             #
+             # At the moment we report only topics lags but the format we are using supports
+             # extending this information in the future if it would be needed.
+             #
+             # @return [Hash] hash with nested consumers and their topics details structure
+             # @note We do **not** report on a per partition basis because it would significantly
+             #   increase needed storage.
+             def materialize_consumers_groups_current_state
+               cgs = {}
+
+               @active_reports.each do |_, details|
+                 details.fetch(:consumer_groups).each do |group_name, group_details|
+                   group_details.fetch(:subscription_groups).each do |_sg_name, sg_details|
+                     sg_details.fetch(:topics).each do |topic_name, topic_details|
+                       partitions_data = topic_details.fetch(:partitions).values
+
+                       lags = partitions_data
+                              .map { |p_details| p_details[:lag] || 0 }
+                              .reject(&:negative?)
+
+                       lags_stored = partitions_data
+                                     .map { |p_details| p_details.fetch(:lag_stored) }
+                                     .reject(&:negative?)
+
+                       offsets_hi = partitions_data
+                                    .map { |p_details| p_details.fetch(:hi_offset) }
+                                    .reject(&:negative?)
+
+                       # Last stable offsets freeze durations - we pick the max freeze to indicate
+                       # the longest open transaction that potentially may be hanging
+                       ls_offsets_fd = partitions_data
+                                       .map { |p_details| p_details.fetch(:ls_offset_fd) }
+                                       .reject(&:negative?)
+
+                       # If there is no lag that would not be negative, it means we did not mark
+                       # any messages as consumed on this topic in any partitions, hence we cannot
+                       # compute lag easily
+                       # We do not want to initialize any data for this topic, when there is nothing
+                       # useful we could present
+                       #
+                       # In theory lag stored must mean that lag must exist but just to be sure we
+                       # check both here
+                       next if lags.empty? || lags_stored.empty?
+
+                       cgs[group_name] ||= {}
+                       cgs[group_name][topic_name] = {
+                         lag_stored: lags_stored.sum,
+                         lag: lags.sum,
+                         pace: offsets_hi.sum,
+                         # Take max last stable offset duration without any change. This can
+                         # indicate a hanging transaction, because the offset will not move forward
+                         # and will stay with a growing freeze duration when stuck
+                         ls_offset_fd: ls_offsets_fd.max
+                       }
+                     end
+                   end
+                 end
+               end
+
+               cgs
+             end
+           end
+         end
+       end
+     end
+   end
+ end
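For reference, `materialize_consumers_groups_current_state` yields a hash keyed by consumer group and then topic, with per-topic totals only (no per-partition breakdown, to keep storage small). An illustrative shape with made-up values:

  {
    'example_app_group' => {
      'orders_states' => {
        lag_stored: 12,     # sum of non-negative stored lags across partitions
        lag: 14,            # sum of non-negative lags across partitions
        pace: 1_000_000,    # sum of high watermark offsets
        ls_offset_fd: 0     # max last stable offset freeze duration (hanging transaction hint)
      }
    }
  }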