karafka-web 0.6.1 → 0.6.2

Files changed (55)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +3 -5
  3. data/CHANGELOG.md +12 -0
  4. data/Gemfile.lock +1 -1
  5. data/lib/karafka/web/config.rb +2 -0
  6. data/lib/karafka/web/tracking/consumers/reporter.rb +5 -3
  7. data/lib/karafka/web/ui/base.rb +6 -2
  8. data/lib/karafka/web/ui/controllers/base.rb +17 -0
  9. data/lib/karafka/web/ui/controllers/cluster.rb +5 -2
  10. data/lib/karafka/web/ui/controllers/consumers.rb +3 -1
  11. data/lib/karafka/web/ui/controllers/errors.rb +19 -6
  12. data/lib/karafka/web/ui/controllers/jobs.rb +3 -1
  13. data/lib/karafka/web/ui/controllers/requests/params.rb +10 -0
  14. data/lib/karafka/web/ui/lib/paginations/base.rb +61 -0
  15. data/lib/karafka/web/ui/lib/paginations/offset_based.rb +96 -0
  16. data/lib/karafka/web/ui/lib/paginations/page_based.rb +70 -0
  17. data/lib/karafka/web/ui/lib/paginations/paginators/arrays.rb +33 -0
  18. data/lib/karafka/web/ui/lib/paginations/paginators/base.rb +23 -0
  19. data/lib/karafka/web/ui/lib/paginations/paginators/partitions.rb +52 -0
  20. data/lib/karafka/web/ui/lib/paginations/paginators/sets.rb +85 -0
  21. data/lib/karafka/web/ui/lib/ttl_cache.rb +74 -0
  22. data/lib/karafka/web/ui/models/cluster_info.rb +59 -0
  23. data/lib/karafka/web/ui/models/message.rb +114 -38
  24. data/lib/karafka/web/ui/models/status.rb +3 -1
  25. data/lib/karafka/web/ui/pro/app.rb +11 -3
  26. data/lib/karafka/web/ui/pro/controllers/consumers.rb +3 -1
  27. data/lib/karafka/web/ui/pro/controllers/dlq.rb +1 -2
  28. data/lib/karafka/web/ui/pro/controllers/errors.rb +43 -10
  29. data/lib/karafka/web/ui/pro/controllers/explorer.rb +52 -7
  30. data/lib/karafka/web/ui/pro/views/errors/_breadcrumbs.erb +8 -6
  31. data/lib/karafka/web/ui/pro/views/errors/_error.erb +1 -1
  32. data/lib/karafka/web/ui/pro/views/errors/_partition_option.erb +1 -1
  33. data/lib/karafka/web/ui/pro/views/errors/_table.erb +21 -0
  34. data/lib/karafka/web/ui/pro/views/errors/_title_with_select.erb +31 -0
  35. data/lib/karafka/web/ui/pro/views/errors/index.erb +9 -56
  36. data/lib/karafka/web/ui/pro/views/errors/partition.erb +17 -0
  37. data/lib/karafka/web/ui/pro/views/explorer/_breadcrumbs.erb +1 -1
  38. data/lib/karafka/web/ui/pro/views/explorer/_message.erb +8 -2
  39. data/lib/karafka/web/ui/pro/views/explorer/_partition_option.erb +1 -1
  40. data/lib/karafka/web/ui/pro/views/explorer/_topic.erb +1 -1
  41. data/lib/karafka/web/ui/pro/views/explorer/partition/_messages.erb +1 -0
  42. data/lib/karafka/web/ui/pro/views/explorer/partition.erb +1 -1
  43. data/lib/karafka/web/ui/pro/views/explorer/topic/_empty.erb +3 -0
  44. data/lib/karafka/web/ui/pro/views/explorer/topic/_limited.erb +4 -0
  45. data/lib/karafka/web/ui/pro/views/explorer/topic/_partitions.erb +11 -0
  46. data/lib/karafka/web/ui/pro/views/explorer/topic.erb +49 -0
  47. data/lib/karafka/web/ui/pro/views/shared/_navigation.erb +1 -1
  48. data/lib/karafka/web/ui/views/cluster/_partition.erb +1 -1
  49. data/lib/karafka/web/ui/views/errors/_error.erb +1 -1
  50. data/lib/karafka/web/ui/views/shared/_pagination.erb +16 -12
  51. data/lib/karafka/web/version.rb +1 -1
  52. data.tar.gz.sig +0 -0
  53. metadata +18 -3
  54. metadata.gz.sig +0 -0
  55. data/lib/karafka/web/ui/lib/paginate_array.rb +0 -38
data/lib/karafka/web/ui/lib/ttl_cache.rb (new file)
@@ -0,0 +1,74 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Web
+     module Ui
+       # Non info related extra components used in the UI
+       module Lib
+         # Ttl Cache for caching things in-memory
+         # @note It **is** thread-safe
+         class TtlCache
+           include ::Karafka::Core::Helpers::Time
+
+           # @param ttl [Integer] time in ms how long should this cache keep data
+           def initialize(ttl)
+             @ttl = ttl
+             @times = {}
+             @values = {}
+             @mutex = Mutex.new
+           end
+
+           # Reads data from the cache
+           #
+           # @param key [String, Symbol] key for the cache read
+           # @return [Object] anything that was cached
+           def read(key)
+             @mutex.synchronize do
+               evict
+               @values[key]
+             end
+           end
+
+           # Writes to the cache
+           #
+           # @param key [String, Symbol] key for the cache
+           # @param value [Object] value we want to cache
+           # @return [Object] value we have written
+           def write(key, value)
+             @mutex.synchronize do
+               @times[key] = monotonic_now + @ttl
+               @values[key] = value
+             end
+           end
+
+           # Reads from the cache and if value not present, will run the block and store its result
+           # in the cache
+           #
+           # @param key [String, Symbol] key for the cache read
+           # @return [Object] anything that was cached or yielded
+           def fetch(key)
+             @mutex.synchronize do
+               evict
+
+               return @values[key] if @values.key?(key)
+
+               @values[key] = yield
+             end
+           end
+
+           private
+
+           # Removes expired elements from the cache
+           def evict
+             @times.each do |key, time|
+               next if time >= monotonic_now
+
+               @times.delete(key)
+               @values.delete(key)
+             end
+           end
+         end
+       end
+     end
+   end
+ end
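
For orientation, a minimal usage sketch of the cache above (hypothetical values; the 60_000 ms TTL is an arbitrary example, not a gem default):

    cache = Karafka::Web::Ui::Lib::TtlCache.new(60_000)

    cache.write(:cluster_info, { brokers: 3 })
    cache.read(:cluster_info) # => { brokers: 3 } until the TTL passes, then nil

    # fetch only runs the block (and stores its result) on a cache miss
    cache.fetch(:topics) { %w[topic_a topic_b] }
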
data/lib/karafka/web/ui/models/cluster_info.rb (new file)
@@ -0,0 +1,59 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Web
+     module Ui
+       module Models
+         # Wraps around the `Karafka::Admin#cluster_info` with caching and some additional aliases
+         # so we can reference relevant information easily
+         class ClusterInfo
+           class << self
+             # Gets us all the cluster metadata info
+             #
+             # @param cached [Boolean] should we use cached data (true by default)
+             # @return [Rdkafka::Metadata] cluster metadata info
+             def fetch(cached: true)
+               cache = ::Karafka::Web.config.ui.cache
+
+               cluster_info = cache.read(:cluster_info)
+
+               if cluster_info.nil? || !cached
+                 cluster_info = cache.write(:cluster_info, Karafka::Admin.cluster_info)
+               end
+
+               cluster_info
+             end
+
+             # Returns us all the info about available topics from the cluster
+             #
+             # @param cached [Boolean] should we use cached data (true by default)
+             # @return [Array<Ui::Models::Topic>] topics details
+             def topics(cached: true)
+               fetch(cached: cached)
+                 .topics
+                 .map { |topic| Topic.new(topic) }
+             end
+
+             # Fetches us details about particular topic
+             #
+             # @param topic_name [String] name of the topic we are looking for
+             # @param cached [Boolean] should we use cached data (true by default)
+             # @return [Ui::Models::Topic] topic details
+             def topic(topic_name, cached: true)
+               topics(cached: cached)
+                 .find { |topic_data| topic_data.topic_name == topic_name }
+                 .tap { |topic| topic || raise(Web::Errors::Ui::NotFoundError, topic_name) }
+             end
+
+             # @param topic_name [String] name of the topic we are looking for
+             # @param cached [Boolean] should we use cached data (true by default)
+             # @return [Integer] number of partitions in a given topic
+             def partitions_count(topic_name, cached: true)
+               topic(topic_name, cached: cached).partition_count
+             end
+           end
+         end
+       end
+     end
+   end
+ end
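
A short sketch of how this model is meant to be used (the topic name is made up; reads go through the TTL cache configured under `Karafka::Web.config.ui.cache`):

    info = Karafka::Web::Ui::Models::ClusterInfo

    info.topics.map(&:topic_name)         # all topics, served from the cache
    info.partitions_count('payments')     # partition count for one topic
    info.topic('payments', cached: false) # bypasses the cache; raises NotFoundError if missing
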
data/lib/karafka/web/ui/models/message.rb
@@ -7,6 +7,8 @@ module Karafka
   # A proxy between `::Karafka::Messages::Message` and web UI
   # We work with the Karafka messages but use this model to wrap the work needed.
   class Message
+    extend Lib::Paginations::Paginators
+
     class << self
       # Looks for a message from a given topic partition
       #
@@ -30,41 +32,44 @@ module Karafka
          )
       end
 
-      # Fetches requested page of Kafka messages.
+      # Fetches requested `page_count` number of Kafka messages starting from the oldest
+      # requested `start_offset`. If `start_offset` is `-1`, will fetch the most recent
+      # results
       #
       # @param topic_id [String]
       # @param partition_id [Integer]
-      # @param page [Integer]
-      # @return [Array] We return both page data as well as all the details needed to build
+      # @param start_offset [Integer] oldest offset from which we want to get the data
+      # @param watermark_offsets [Ui::Models::WatermarkOffsets] watermark offsets
+      # @return [Array] We return page data as well as all the details needed to build
      #   the pagination details.
-      def page(topic_id, partition_id, page)
-        low_offset, high_offset = Karafka::Admin.read_watermark_offsets(
-          topic_id,
-          partition_id
-        )
+      def offset_page(topic_id, partition_id, start_offset, watermark_offsets)
+        low_offset = watermark_offsets.low
+        high_offset = watermark_offsets.high
 
-        partitions_count = fetch_partition_count(topic_id)
+        # If we start from offset -1, it means we want first page with the most recent
+        # results. We obtain this page by using the offset based on the high watermark
+        # offset
+        start_offset = high_offset - per_page if start_offset == -1
 
-        no_data_result = [false, [], false, partitions_count]
+        # No previous pages, no data, and no more offsets
+        no_data_result = [false, [], false]
 
-        # If there is not even one message, we need to early exit
-        # If low and high watermark offsets are of the same value, it means no data in the
-        # topic is present
+        # If there is no data, we return the no results result
        return no_data_result if low_offset == high_offset
 
-        # We add plus one because we compute previous offset from which we want to start and
-        # not previous page leading offset
-        start_offset = high_offset - (per_page * page)
-
        if start_offset <= low_offset
+          # If this page does not contain max per page, compute how many messages we can
+          # fetch before stopping
          count = per_page - (low_offset - start_offset)
-          previous_page = page < 2 ? false : page - 1
-          next_page = false
+          next_offset = false
          start_offset = low_offset
        else
-          previous_page = page < 2 ? false : page - 1
-          next_page = page + 1
-          count = per_page
+          next_offset = start_offset - per_page
+          # Do not go below the lowest possible offset
+          next_offset = low_offset if next_offset < low_offset
+          count = high_offset - start_offset
+          # If there would be more messages that we want to get, force max
+          count = per_page if count > per_page
        end
 
        # This code is a bit tricky. Since topics can be compacted and certain offsets may
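
The windowing arithmetic above is easier to follow with concrete numbers. A worked example, assuming `per_page` is 25 and a partition with watermarks low: 40 and high: 100 (so offsets 40..99 exist):

    # First page (start_offset == -1): the newest messages
    start_offset = 100 - 25        # => 75, page covers offsets 75..99
    next_offset  = 75 - 25         # => 50, the older page that follows
    count        = 25              # 100 - 75, capped at per_page

    # Last page (start_offset == 30, already below the low watermark 40)
    count        = 25 - (40 - 30)  # => 15, only offsets 40..54 can exist
    next_offset  = false           # nothing older to show
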
@@ -93,17 +98,97 @@ module Karafka
 
          next unless messages
 
+          previous_offset = start_offset + count
+
          return [
-            previous_page,
-            fill_compacted(messages, context_offset, context_count).reverse,
-            next_page,
-            partitions_count
+            # If there is a potential previous page with more recent data, compute its
+            # offset
+            previous_offset >= high_offset ? false : previous_offset,
+            fill_compacted(messages, partition_id, context_offset, context_count).reverse,
+            next_offset
          ]
        end
 
        no_data_result
      end
 
+      # Fetches requested `page_count` number of Kafka messages from the topic partitions
+      # and merges the results. Ensures that pagination works as expected.
+      #
+      # @param topic_id [String]
+      # @param partitions_ids [Array<Integer>] for which of the partitions we want to
+      #   get the data. This is a limiting factor because of the fact that we have to
+      #   query the watermark offsets independently
+      # @param page [Integer] which page we want to get
+      def topic_page(topic_id, partitions_ids, page)
+        # This is the bottleneck, for each partition we make one request :(
+        offsets = partitions_ids.map do |partition_id|
+          [partition_id, Models::WatermarkOffsets.find(topic_id, partition_id)]
+        end.to_h
+
+        # Count number of elements we have in each partition
+        # This assumes linear presence until low. If not, gaps will be filled like we fill
+        # for per partition view
+        counts = offsets.values.map { |offset| offset[:high] - offset[:low] }
+
+        # Establish initial offsets for the iterator (where to start) per partition
+        # We do not use the negative lookup iterator because we already can compute starting
+        # offsets. This saves a lot of calls to Kafka
+        ranges = Sets.call(counts, page).map do |partition_position, partition_range|
+          partition_id = partitions_ids.to_a[partition_position]
+          watermarks = offsets[partition_id]
+
+          lowest = watermarks[:high] - partition_range.last - 1
+          # We -1 because high watermark offset is the next incoming offset and not the last
+          # one in the topic partition
+          highest = watermarks[:high] - partition_range.first - 1
+
+          # This range represents offsets we want to fetch
+          [partition_id, lowest..highest]
+        end.to_h
+
+        # We start on our topic from the lowest offset for each expected partition
+        iterator = Karafka::Pro::Iterator.new(
+          { topic_id => ranges.transform_values(&:first) }
+        )
+
+        # Build the aggregated representation for each partition messages, so we can start
+        # with assumption that all the topics are fully compacted. Then we can nicely replace
+        # compacted `false` data with real messages, effectively ensuring that the gaps are
+        # filled with `false` out-of-the-box
+        aggregated = Hash.new { |h, k| h[k] = {} }
+
+        # We initialize the hash so we have a constant ascending order based on the partition
+        # number
+        partitions_ids.each { |i| aggregated[i] }
+
+        # We prefill all the potential offsets for each partition, so in case they were
+        # compacted, we get a continuous flow
+        ranges.each do |partition, range|
+          partition_aggr = aggregated[partition]
+          range.each { |i| partition_aggr[i] = [partition, i] }
+        end
+
+        # Iterate over all partitions and collect data
+        iterator.each do |message|
+          range = ranges[message.partition]
+
+          # Do not fetch more data from a partition for which we got last message from the
+          # expected offsets
+          # When all partitions are stopped, we will stop operations. This drastically
+          # improves performance because we no longer have to poll nils
+          iterator.stop_current_partition if message.offset >= range.last
+
+          partition = aggregated[message.partition]
+          partition[message.offset] = message
+        end
+
+        [
+          aggregated.values.map(&:values).map(&:reverse).reduce(:+),
+          !Sets.call(counts, page + 1).empty?
+        ]
+      end
+
      private
 
      # @param args [Object] anything required by the admin `#read_topic`
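
The `-1` adjustments in `topic_page` translate the per-partition positions handed out by `Sets` into concrete offsets, counting back from the high watermark. A worked example for one partition (assuming `Sets.call` assigned it positions 0..9 and its watermarks are low: 0, high: 100, so the newest existing offset is 99):

    lowest  = 100 - 9 - 1 # => 90
    highest = 100 - 0 - 1 # => 99

    # The iterator therefore reads offsets 90..99, the ten newest messages,
    # and each partition's batch is reversed so the newest renders first.
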
@@ -117,16 +202,6 @@ module Karafka
        raise
      end
 
-      # @param topic_id [String] id of the topic
-      # @return [Integer] number of partitions this topic has
-      def fetch_partition_count(topic_id)
-        ::Karafka::Admin
-          .cluster_info
-          .topics
-          .find { |topic| topic[:topic_name] == topic_id }
-          .fetch(:partition_count)
-      end
-
      # @return [Integer] elements per page
      def per_page
        ::Karafka::Web.config.ui.per_page
@@ -136,16 +211,17 @@ module Karafka
      # we need to fill those with just the missing offset and handle this on the UI.
      #
      # @param messages [Array<Karafka::Messages::Message>] selected messages
+      # @param partition_id [Integer] number of partition for which we fill message gap
      # @param start_offset [Integer] offset of the first message (lowest) that we received
      # @param count [Integer] how many messages we wanted - we need that to fill spots to
      #   have exactly the number that was requested and not more
      # @return [Array<Karafka::Messages::Message, Integer>] array with gaps filled with the
      #   missing offset
-      def fill_compacted(messages, start_offset, count)
+      def fill_compacted(messages, partition_id, start_offset, count)
        Array.new(count) do |index|
          messages.find do |message|
            (message.offset - start_offset) == index
-          end || start_offset + index
+          end || [partition_id, start_offset + index]
        end
      end
    end
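
With the extra `partition_id` argument, gaps left by compaction now materialize as `[partition_id, offset]` pairs instead of bare integers, which is what the updated `_error.erb` partial further down checks via `error_msg.is_a?(Array)`. A sketch with hypothetical messages (`msg_10` and `msg_12` stand for messages at offsets 10 and 12; offset 11 was compacted away):

    fill_compacted([msg_10, msg_12], 2, 10, 3)
    # => [msg_10, [2, 11], msg_12]
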
data/lib/karafka/web/ui/models/status.rb
@@ -197,7 +197,9 @@ module Karafka
      # @note If fails, `connection_time` will be 1_000_000
      def connect
        started = Time.now.to_f
-        @cluster_info = ::Karafka::Admin.cluster_info
+        # For status we always need uncached data, otherwise status could cache outdated
+        # info
+        @cluster_info = Models::ClusterInfo.fetch(cached: false)
        @connection_time = (Time.now.to_f - started) * 1_000
      rescue ::Rdkafka::RdkafkaError
        @connection_time = 1_000_000
data/lib/karafka/web/ui/pro/app.rb
@@ -81,6 +81,10 @@ module Karafka
        render_response controller.partition(topic_id, partition_id)
      end
 
+      r.get String do |topic_id|
+        render_response controller.topic(topic_id)
+      end
+
      r.get do
        render_response controller.index
      end
@@ -99,12 +103,16 @@ module Karafka
    r.on 'errors' do
      controller = Controllers::Errors.new(params)
 
+      r.get Integer, Integer do |partition_id, offset|
+        render_response controller.show(partition_id, offset)
+      end
+
      r.get Integer do |partition_id|
-        render_response controller.index(partition_id)
+        render_response controller.partition(partition_id)
      end
 
-      r.get Integer, Integer do |partition_id, offset|
-        render_response controller.show(partition_id, offset)
+      r.get do
+        render_response controller.index
      end
    end
 
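The reordering here matters: Roda tries routes top to bottom, so the two-segment matcher has to be declared before the single-segment one, otherwise `errors/0/123` would be captured by `r.get Integer` and the offset segment would never match. A minimal standalone sketch of that precedence (an illustration, not the actual Web UI app class):

    require 'roda'

    class RoutesDemo < Roda
      route do |r|
        r.on 'errors' do
          # Declared first so errors/0/123 reaches the show action
          r.get Integer, Integer do |partition_id, offset|
            "show #{partition_id}/#{offset}"
          end

          # errors/0 - a single partition
          r.get Integer do |partition_id|
            "partition #{partition_id}"
          end

          # Bare errors - the aggregated index
          r.get do
            'index'
          end
        end
      end
    end
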
data/lib/karafka/web/ui/controllers/consumers.rb
@@ -22,11 +22,13 @@ module Karafka
      def index
        @current_state = Models::State.current!
        @counters = Models::Counters.new(@current_state)
-        @processes, @next_page = Lib::PaginateArray.new.call(
+        @processes, last_page = Lib::Paginations::Paginators::Arrays.call(
          Models::Processes.active(@current_state),
          @params.current_page
        )
 
+        paginate(@params.current_page, !last_page)
+
        respond
      end
 
data/lib/karafka/web/ui/pro/controllers/dlq.rb
@@ -28,8 +28,7 @@ module Karafka
          .compact
          .select(&:itself)
 
-        @dlq_topics = Karafka::Admin
-          .cluster_info
+        @dlq_topics = Models::ClusterInfo
          .topics
          .select { |topic| dlq_topic_names.include?(topic[:topic_name]) }
          .sort_by { |topic| topic[:topic_name] }
data/lib/karafka/web/ui/pro/controllers/errors.rb
@@ -18,18 +18,45 @@ module Karafka
    module Controllers
      # Errors details controller
      class Errors < Ui::Controllers::Base
+        include Ui::Lib::Paginations
+
+        # Lists all the errors from all the partitions
+        def index
+          @topic_id = errors_topic
+          @partitions_count = Models::ClusterInfo.partitions_count(errors_topic)
+
+          @active_partitions, materialized_page, @limited = Paginators::Partitions.call(
+            @partitions_count, @params.current_page
+          )
+
+          @error_messages, next_page = Models::Message.topic_page(
+            errors_topic, @active_partitions, materialized_page
+          )
+
+          paginate(@params.current_page, next_page)
+
+          respond
+        end
+
        # @param partition_id [Integer] id of the partition of errors we are interested in
-        def index(partition_id)
-          errors_topic = ::Karafka::Web.config.topics.errors
+        def partition(partition_id)
          @partition_id = partition_id
-          @previous_page, @error_messages, @next_page, @partitions_count = \
-            Models::Message.page(
-              errors_topic,
-              @partition_id,
-              @params.current_page
-            )
-
          @watermark_offsets = Ui::Models::WatermarkOffsets.find(errors_topic, @partition_id)
+          @partitions_count = Models::ClusterInfo.partitions_count(errors_topic)
+
+          previous_offset, @error_messages, next_offset = Models::Message.offset_page(
+            errors_topic,
+            @partition_id,
+            @params.current_offset,
+            @watermark_offsets
+          )
+
+          paginate(
+            previous_offset,
+            @params.current_offset,
+            next_offset,
+            @error_messages.map(&:offset)
+          )
 
          respond
        end
@@ -39,7 +66,6 @@ module Karafka
        # @param partition_id [Integer]
        # @param offset [Integer]
        def show(partition_id, offset)
-          errors_topic = ::Karafka::Web.config.topics.errors
          @partition_id = partition_id
          @offset = offset
          @error_message = Models::Message.find(
@@ -50,6 +76,13 @@ module Karafka
 
          respond
        end
+
+        private
+
+        # @return [String] errors topic
+        def errors_topic
+          ::Karafka::Web.config.topics.errors
+        end
      end
    end
  end
data/lib/karafka/web/ui/pro/controllers/explorer.rb
@@ -18,10 +18,11 @@ module Karafka
    module Controllers
      # Data explorer controller
      class Explorer < Ui::Controllers::Base
+        include Ui::Lib::Paginations
+
        # Lists all the topics we can explore
        def index
-          @topics = Karafka::Admin
-            .cluster_info
+          @topics = Models::ClusterInfo
            .topics
            .reject { |topic| topic[:topic_name] == '__consumer_offsets' }
            .sort_by { |topic| topic[:topic_name] }
@@ -29,6 +30,34 @@
          respond
        end
 
+        # Displays aggregated messages from (potentially) all partitions of a topic
+        #
+        # @param topic_id [String]
+        #
+        # @note This view may not be 100% accurate because we merge multiple partitions data
+        #   into a single view and this is never accurate. It can be used however to quickly
+        #   look at most recent data flowing, etc, hence it is still useful for aggregated
+        #   metrics information
+        #
+        # @note We cannot use offset references here because each of the partitions may have
+        #   completely different values
+        def topic(topic_id)
+          @topic_id = topic_id
+          @partitions_count = Models::ClusterInfo.partitions_count(topic_id)
+
+          @active_partitions, materialized_page, @limited = Paginators::Partitions.call(
+            @partitions_count, @params.current_page
+          )
+
+          @messages, next_page = Models::Message.topic_page(
+            topic_id, @active_partitions, materialized_page
+          )
+
+          paginate(@params.current_page, next_page)
+
+          respond
+        end
+
        # Shows messages available in a given partition
        #
        # @param topic_id [String]
@@ -36,13 +65,16 @@
        def partition(topic_id, partition_id)
          @topic_id = topic_id
          @partition_id = partition_id
-
          @watermark_offsets = Ui::Models::WatermarkOffsets.find(topic_id, partition_id)
+          @partitions_count = Models::ClusterInfo.partitions_count(topic_id)
 
-          @previous_page, @messages, @next_page, @partitions_count = Ui::Models::Message.page(
-            @topic_id,
-            @partition_id,
-            @params.current_page
+          previous_offset, @messages, next_offset = current_partition_data
+
+          paginate(
+            previous_offset,
+            @params.current_offset,
+            next_offset,
+            @messages.map(&:offset)
          )
 
          respond
@@ -74,6 +106,19 @@
 
          respond
        end
+
+        private
+
+        # Fetches current page data
+        # @return [Array] fetched data with pagination information for the requested partition
+        def current_partition_data
+          Ui::Models::Message.offset_page(
+            @topic_id,
+            @partition_id,
+            @params.current_offset,
+            @watermark_offsets
+          )
+        end
      end
    end
  end
data/lib/karafka/web/ui/pro/views/errors/_breadcrumbs.erb
@@ -1,14 +1,16 @@
 <li class="breadcrumb-item">
-  <a href="<%= root_path('errors', 0) %>">
+  <a href="<%= root_path('errors') %>">
    Errors
  </a>
 </li>
 
-<li class="breadcrumb-item">
-  <a href="<%= root_path('errors', @partition_id) %>">
-    Partition <%= @partition_id %>
-  </a>
-</li>
+<% if @partition_id %>
+  <li class="breadcrumb-item">
+    <a href="<%= root_path('errors', @partition_id) %>">
+      Partition <%= @partition_id %>
+    </a>
+  </li>
+<% end %>
 
 <% if @offset %>
   <li class="breadcrumb-item">
data/lib/karafka/web/ui/pro/views/errors/_error.erb
@@ -1,4 +1,4 @@
-<% if error_msg.is_a?(Integer) %>
+<% if error_msg.is_a?(Array) %>
  <tr>
    <td colspan="5" class="text-center text-muted">
      This error has either been removed or compacted and is no longer available.
data/lib/karafka/web/ui/pro/views/errors/_partition_option.erb
@@ -3,5 +3,5 @@
 <% else %>
  <option value="<%= root_path('errors', partition) %>">
 <% end %>
-  <%= partition %>
+  <%= partition || 'All' %>
 </option>
data/lib/karafka/web/ui/pro/views/errors/_table.erb (new file)
@@ -0,0 +1,21 @@
+ <table class="processes bg-white table table-hover table-bordered table-striped mb-0 align-middle">
+   <thead>
+     <tr class="align-middle">
+       <th>Origin</th>
+       <th>Process name</th>
+       <th>Error</th>
+       <th>Occurred at</th>
+       <th></th>
+     </tr>
+   </thead>
+
+   <tbody>
+     <%==
+       each_partial(
+         @error_messages,
+         'errors/error',
+         local: :error_msg
+       )
+     %>
+   </tbody>
+ </table>
data/lib/karafka/web/ui/pro/views/errors/_title_with_select.erb (new file)
@@ -0,0 +1,31 @@
+ <div class="container mb-4">
+   <div class="row">
+     <div class="col">
+       <h3>
+         Errors
+       </h3>
+     </div>
+
+     <div class="col">
+       <div class="col-auto text-end">
+         <label class="col-form-label">Partition</label>
+       </div>
+     </div>
+
+     <div class="col pt-1 mb-0 pb-0">
+       <div class="col-auto">
+         <select class="form-select form-select-sm mb-0 form-control" id="current-partition">
+           <%==
+             each_partial(
+               [nil] + @partitions_count.times.to_a,
+               'errors/partition_option',
+               local: :partition
+             )
+           %>
+         </select>
+       </div>
+     </div>
+   </div>
+
+   <hr>
+ </div>