karafka-web 0.6.0 → 0.6.2

Files changed (64)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/CHANGELOG.md +22 -1
  4. data/Gemfile.lock +1 -1
  5. data/lib/karafka/web/config.rb +2 -0
  6. data/lib/karafka/web/tracking/consumers/contracts/report.rb +7 -3
  7. data/lib/karafka/web/tracking/consumers/reporter.rb +5 -3
  8. data/lib/karafka/web/tracking/consumers/sampler.rb +2 -1
  9. data/lib/karafka/web/tracking/sampler.rb +5 -0
  10. data/lib/karafka/web/ui/base.rb +6 -2
  11. data/lib/karafka/web/ui/controllers/base.rb +17 -0
  12. data/lib/karafka/web/ui/controllers/cluster.rb +5 -2
  13. data/lib/karafka/web/ui/controllers/consumers.rb +3 -1
  14. data/lib/karafka/web/ui/controllers/errors.rb +19 -6
  15. data/lib/karafka/web/ui/controllers/jobs.rb +3 -1
  16. data/lib/karafka/web/ui/controllers/requests/params.rb +10 -0
  17. data/lib/karafka/web/ui/lib/paginations/base.rb +61 -0
  18. data/lib/karafka/web/ui/lib/paginations/offset_based.rb +96 -0
  19. data/lib/karafka/web/ui/lib/paginations/page_based.rb +70 -0
  20. data/lib/karafka/web/ui/lib/paginations/paginators/arrays.rb +33 -0
  21. data/lib/karafka/web/ui/lib/paginations/paginators/base.rb +23 -0
  22. data/lib/karafka/web/ui/lib/paginations/paginators/partitions.rb +52 -0
  23. data/lib/karafka/web/ui/lib/paginations/paginators/sets.rb +85 -0
  24. data/lib/karafka/web/ui/lib/ttl_cache.rb +74 -0
  25. data/lib/karafka/web/ui/models/cluster_info.rb +59 -0
  26. data/lib/karafka/web/ui/models/message.rb +114 -38
  27. data/lib/karafka/web/ui/models/status.rb +34 -8
  28. data/lib/karafka/web/ui/pro/app.rb +11 -3
  29. data/lib/karafka/web/ui/pro/controllers/consumers.rb +3 -1
  30. data/lib/karafka/web/ui/pro/controllers/dlq.rb +1 -2
  31. data/lib/karafka/web/ui/pro/controllers/errors.rb +43 -10
  32. data/lib/karafka/web/ui/pro/controllers/explorer.rb +52 -7
  33. data/lib/karafka/web/ui/pro/views/consumers/consumer/_metrics.erb +6 -1
  34. data/lib/karafka/web/ui/pro/views/errors/_breadcrumbs.erb +8 -6
  35. data/lib/karafka/web/ui/pro/views/errors/_error.erb +1 -1
  36. data/lib/karafka/web/ui/pro/views/errors/_partition_option.erb +1 -1
  37. data/lib/karafka/web/ui/pro/views/errors/_table.erb +21 -0
  38. data/lib/karafka/web/ui/pro/views/errors/_title_with_select.erb +31 -0
  39. data/lib/karafka/web/ui/pro/views/errors/index.erb +9 -56
  40. data/lib/karafka/web/ui/pro/views/errors/partition.erb +17 -0
  41. data/lib/karafka/web/ui/pro/views/explorer/_breadcrumbs.erb +1 -1
  42. data/lib/karafka/web/ui/pro/views/explorer/_message.erb +8 -2
  43. data/lib/karafka/web/ui/pro/views/explorer/_partition_option.erb +1 -1
  44. data/lib/karafka/web/ui/pro/views/explorer/_topic.erb +1 -1
  45. data/lib/karafka/web/ui/pro/views/explorer/partition/_messages.erb +1 -0
  46. data/lib/karafka/web/ui/pro/views/explorer/partition.erb +1 -1
  47. data/lib/karafka/web/ui/pro/views/explorer/topic/_empty.erb +3 -0
  48. data/lib/karafka/web/ui/pro/views/explorer/topic/_limited.erb +4 -0
  49. data/lib/karafka/web/ui/pro/views/explorer/topic/_partitions.erb +11 -0
  50. data/lib/karafka/web/ui/pro/views/explorer/topic.erb +49 -0
  51. data/lib/karafka/web/ui/pro/views/shared/_navigation.erb +1 -1
  52. data/lib/karafka/web/ui/views/cluster/_partition.erb +1 -1
  53. data/lib/karafka/web/ui/views/errors/_error.erb +1 -1
  54. data/lib/karafka/web/ui/views/shared/_pagination.erb +16 -12
  55. data/lib/karafka/web/ui/views/status/failures/_initial_state.erb +1 -10
  56. data/lib/karafka/web/ui/views/status/info/_components.erb +6 -1
  57. data/lib/karafka/web/ui/views/status/show.erb +6 -1
  58. data/lib/karafka/web/ui/views/status/successes/_connection.erb +1 -0
  59. data/lib/karafka/web/ui/views/status/warnings/_connection.erb +11 -0
  60. data/lib/karafka/web/version.rb +1 -1
  61. data.tar.gz.sig +0 -0
  62. metadata +20 -3
  63. metadata.gz.sig +0 -0
  64. data/lib/karafka/web/ui/lib/paginate_array.rb +0 -38
@@ -0,0 +1,85 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Web
+    module Ui
+      module Lib
+        module Paginations
+          module Paginators
+            # Paginator that allows us to take several lists/sets and iterate over them in a
+            # round-robin fashion.
+            #
+            # It does not have to iterate over all the elements from each set for higher pages
+            # making it much more effective than the naive implementation.
+            class Sets < Base
+              class << self
+                # @param counts [Array<Integer>] sets elements counts
+                # @param current_page [Integer] page number
+                # @return [Hash<Integer, Range>] hash with integer keys indicating the count
+                #   location and the range needed to be taken of elements (counting backwards) for
+                #   each partition
+                def call(counts, current_page)
+                  return {} if current_page < 1
+
+                  lists = counts.dup.map.with_index { |el, i| [i, el] }
+
+                  curr_item_index = 0
+                  curr_list_index = 0
+                  items_to_skip_count = per_page * (current_page - 1)
+
+                  loop do
+                    lists_count = lists.length
+                    return {} if lists_count.zero?
+
+                    shortest_list_count = lists.map(&:last).min
+                    mover = (shortest_list_count - curr_item_index)
+                    items_we_are_considering_count = lists_count * mover
+
+                    if items_we_are_considering_count >= items_to_skip_count
+                      curr_item_index += items_to_skip_count / lists_count
+                      curr_list_index = items_to_skip_count % lists_count
+                      break
+                    else
+                      curr_item_index = shortest_list_count
+                      lists.delete_if { |x| x.last == shortest_list_count }
+                      items_to_skip_count -= items_we_are_considering_count
+                    end
+                  end
+
+                  page_items = []
+                  largest_list_count = lists.map(&:last).max
+
+                  while page_items.length < per_page && curr_item_index < largest_list_count
+                    curr_list = lists[curr_list_index]
+
+                    if curr_item_index < curr_list.last
+                      page_items << [curr_list.first, curr_item_index]
+                    end
+
+                    curr_list_index += 1
+                    if curr_list_index == lists.length
+                      curr_list_index = 0
+                      curr_item_index += 1
+                    end
+                  end
+
+                  hashed = Hash.new { |h, k| h[k] = [] }
+
+                  page_items.each do |el|
+                    hashed[el.first] << el.last
+                  end
+
+                  hashed.each do |key, value|
+                    hashed[key] = (value.first..value.last)
+                  end
+
+                  hashed
+                end
+              end
+            end
+          end
+        end
+      end
+    end
+  end
+end
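For orientation, a sketch of how this round-robin allocation behaves, assuming per_page is 3 for readability (in the Web UI it comes from ::Karafka::Web.config.ui.per_page, so real results will differ):

  # Two sets: the first with 4 elements, the second with 2 (e.g. two partitions
  # with 4 and 2 errors). Page 1 interleaves them round-robin.
  Karafka::Web::Ui::Lib::Paginations::Paginators::Sets.call([4, 2], 1)
  # => { 0 => 0..1, 1 => 0..0 } with per_page of 3

  # Page 2 skips the first per_page elements and continues where page 1 stopped
  Karafka::Web::Ui::Lib::Paginations::Paginators::Sets.call([4, 2], 2)
  # => { 1 => 1..1, 0 => 2..3 } with per_page of 3

The returned ranges count backwards from the newest element of each set, which `Message.topic_page` later translates into per-partition offset ranges.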
@@ -0,0 +1,74 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Web
+    module Ui
+      # Non info related extra components used in the UI
+      module Lib
+        # Ttl Cache for caching things in-memory
+        # @note It **is** thread-safe
+        class TtlCache
+          include ::Karafka::Core::Helpers::Time
+
+          # @param ttl [Integer] time in ms how long should this cache keep data
+          def initialize(ttl)
+            @ttl = ttl
+            @times = {}
+            @values = {}
+            @mutex = Mutex.new
+          end
+
+          # Reads data from the cache
+          #
+          # @param key [String, Symbol] key for the cache read
+          # @return [Object] anything that was cached
+          def read(key)
+            @mutex.synchronize do
+              evict
+              @values[key]
+            end
+          end
+
+          # Writes to the cache
+          #
+          # @param key [String, Symbol] key for the cache
+          # @param value [Object] value we want to cache
+          # @return [Object] value we have written
+          def write(key, value)
+            @mutex.synchronize do
+              @times[key] = monotonic_now + @ttl
+              @values[key] = value
+            end
+          end
+
+          # Reads from the cache and if value not present, will run the block and store its result
+          # in the cache
+          #
+          # @param key [String, Symbol] key for the cache read
+          # @return [Object] anything that was cached or yielded
+          def fetch(key)
+            @mutex.synchronize do
+              evict
+
+              return @values[key] if @values.key?(key)
+
+              @values[key] = yield
+            end
+          end
+
+          private
+
+          # Removes expired elements from the cache
+          def evict
+            @times.each do |key, time|
+              next if time >= monotonic_now
+
+              @times.delete(key)
+              @values.delete(key)
+            end
+          end
+        end
+      end
+    end
+  end
+end
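A minimal usage sketch of this cache (the key, the 60 second TTL and the expensive_metadata_call helper below are illustrative, not part of the gem):

  # TTL is given in milliseconds
  cache = Karafka::Web::Ui::Lib::TtlCache.new(60_000)

  cache.write(:cluster_info, { brokers: 3 })
  cache.read(:cluster_info)   #=> { brokers: 3 } until 60 seconds pass

  # Runs the block only when the key is missing or has expired
  cache.fetch(:cluster_info) { expensive_metadata_call }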
@@ -0,0 +1,59 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Web
+    module Ui
+      module Models
+        # Wraps around the `Karafka::Admin#cluster_info` with caching and some additional aliases
+        # so we can reference relevant information easily
+        class ClusterInfo
+          class << self
+            # Gets us all the cluster metadata info
+            #
+            # @param cached [Boolean] should we use cached data (true by default)
+            # @return [Rdkafka::Metadata] cluster metadata info
+            def fetch(cached: true)
+              cache = ::Karafka::Web.config.ui.cache
+
+              cluster_info = cache.read(:cluster_info)
+
+              if cluster_info.nil? || !cached
+                cluster_info = cache.write(:cluster_info, Karafka::Admin.cluster_info)
+              end
+
+              cluster_info
+            end
+
+            # Returns us all the info about available topics from the cluster
+            #
+            # @param cached [Boolean] should we use cached data (true by default)
+            # @return [Array<Ui::Models::Topic>] topics details
+            def topics(cached: true)
+              fetch(cached: cached)
+                .topics
+                .map { |topic| Topic.new(topic) }
+            end
+
+            # Fetches us details about particular topic
+            #
+            # @param topic_name [String] name of the topic we are looking for
+            # @param cached [Boolean] should we use cached data (true by default)
+            # @return [Ui::Models::Topic] topic details
+            def topic(topic_name, cached: true)
+              topics(cached: cached)
+                .find { |topic_data| topic_data.topic_name == topic_name }
+                .tap { |topic| topic || raise(Web::Errors::Ui::NotFoundError, topic_name) }
+            end
+
+            # @param topic_name [String] name of the topic we are looking for
+            # @param cached [Boolean] should we use cached data (true by default)
+            # @return [Integer] number of partitions in a given topic
+            def partitions_count(topic_name, cached: true)
+              topic(topic_name, cached: cached).partition_count
+            end
+          end
+        end
+      end
+    end
+  end
+end
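A hedged sketch of how this model is meant to be queried (the 'payments' topic name is only an example):

  # Cached cluster metadata (stored in the TtlCache configured under ui.cache)
  Karafka::Web::Ui::Models::ClusterInfo.fetch

  # Per-topic details, bypassing the cache when fresh data is required
  Karafka::Web::Ui::Models::ClusterInfo.topic('payments', cached: false)
  Karafka::Web::Ui::Models::ClusterInfo.partitions_count('payments')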
@@ -7,6 +7,8 @@ module Karafka
         # A proxy between `::Karafka::Messages::Message` and web UI
         # We work with the Karafka messages but use this model to wrap the work needed.
         class Message
+          extend Lib::Paginations::Paginators
+
           class << self
             # Looks for a message from a given topic partition
             #
@@ -30,41 +32,44 @@ module Karafka
               )
             end
 
-            # Fetches requested page of Kafka messages.
+            # Fetches requested `page_count` number of Kafka messages starting from the oldest
+            # requested `start_offset`. If `start_offset` is `-1`, will fetch the most recent
+            # results
             #
             # @param topic_id [String]
             # @param partition_id [Integer]
-            # @param page [Integer]
-            # @return [Array] We return both page data as well as all the details needed to build
+            # @param start_offset [Integer] oldest offset from which we want to get the data
+            # @param watermark_offsets [Ui::Models::WatermarkOffsets] watermark offsets
+            # @return [Array] We return page data as well as all the details needed to build
             #   the pagination details.
-            def page(topic_id, partition_id, page)
-              low_offset, high_offset = Karafka::Admin.read_watermark_offsets(
-                topic_id,
-                partition_id
-              )
+            def offset_page(topic_id, partition_id, start_offset, watermark_offsets)
+              low_offset = watermark_offsets.low
+              high_offset = watermark_offsets.high
 
-              partitions_count = fetch_partition_count(topic_id)
+              # If we start from offset -1, it means we want first page with the most recent
+              # results. We obtain this page by using the offset based on the high watermark
+              # off
+              start_offset = high_offset - per_page if start_offset == -1
 
-              no_data_result = [false, [], false, partitions_count]
+              # No previous pages, no data, and no more offsets
+              no_data_result = [false, [], false]
 
-              # If there is not even one message, we need to early exit
-              # If low and high watermark offsets are of the same value, it means no data in the
-              # topic is present
+              # If there is no data, we return the no results result
               return no_data_result if low_offset == high_offset
 
-              # We add plus one because we compute previous offset from which we want to start and
-              # not previous page leading offset
-              start_offset = high_offset - (per_page * page)
-
               if start_offset <= low_offset
+                # If this page does not contain max per page, compute how many messages we can
+                # fetch before stopping
                 count = per_page - (low_offset - start_offset)
-                previous_page = page < 2 ? false : page - 1
-                next_page = false
+                next_offset = false
                 start_offset = low_offset
               else
-                previous_page = page < 2 ? false : page - 1
-                next_page = page + 1
-                count = per_page
+                next_offset = start_offset - per_page
+                # Do not go below the lowest possible offset
+                next_offset = low_offset if next_offset < low_offset
+                count = high_offset - start_offset
+                # If there would be more messages that we want to get, force max
+                count = per_page if count > per_page
               end
 
               # This code is a bit tricky. Since topics can be compacted and certain offsets may
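To make the offset arithmetic above concrete, a worked example assuming per_page of 25 and a partition whose watermark offsets are low = 0 and high = 100 (topic name and variable names are illustrative):

  # start_offset of -1 means "newest page", so it becomes 100 - 25 = 75
  # 75 > low (0), so the else branch applies:
  #   next_offset = 75 - 25 = 50     # the older page starts here
  #   count       = 100 - 75 = 25    # capped at per_page
  previous, page, next_offset = Models::Message.offset_page(
    'payments', 0, -1, watermark_offsets
  )
  # previous    #=> false (75 + 25 >= high, so no newer page exists)
  # page        #=> messages for offsets 75..99, newest first (gaps filled if compacted)
  # next_offset #=> 50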
@@ -93,17 +98,97 @@ module Karafka
 
                 next unless messages
 
+                previous_offset = start_offset + count
+
                 return [
-                  previous_page,
-                  fill_compacted(messages, context_offset, context_count).reverse,
-                  next_page,
-                  partitions_count
+                  # If there is a potential previous page with more recent data, compute its
+                  # offset
+                  previous_offset >= high_offset ? false : previous_offset,
+                  fill_compacted(messages, partition_id, context_offset, context_count).reverse,
+                  next_offset
                 ]
               end
 
              no_data_result
            end
 
+            # Fetches requested `page_count` number of Kafka messages from the topic partitions
+            # and merges the results. Ensures, that pagination works as expected.
+            #
+            # @param topic_id [String]
+            # @param partitions_ids [Array<Integer>] for which of the partitions we want to
+            #   get the data. This is a limiting factor because of the fact that we have to
+            #   query the watermark offsets independently
+            # @param page [Integer] which page we want to get
+            def topic_page(topic_id, partitions_ids, page)
+              # This is the bottleneck, for each partition we make one request :(
+              offsets = partitions_ids.map do |partition_id|
+                [partition_id, Models::WatermarkOffsets.find(topic_id, partition_id)]
+              end.to_h
+
+              # Count number of elements we have in each partition
+              # This assumes linear presence until low. If not, gaps will be filled like we fill
+              # for per partition view
+              counts = offsets.values.map { |offset| offset[:high] - offset[:low] }
+
+              # Establish initial offsets for the iterator (where to start) per partition
+              # We do not use the negative lookup iterator because we already can compute starting
+              # offsets. This saves a lot of calls to Kafka
+              ranges = Sets.call(counts, page).map do |partition_position, partition_range|
+                partition_id = partitions_ids.to_a[partition_position]
+                watermarks = offsets[partition_id]
+
+                lowest = watermarks[:high] - partition_range.last - 1
+                # We -1 because high watermark offset is the next incoming offset and not the last
+                # one in the topic partition
+                highest = watermarks[:high] - partition_range.first - 1
+
+                # This range represents offsets we want to fetch
+                [partition_id, lowest..highest]
+              end.to_h
+
+              # We start on our topic from the lowest offset for each expected partition
+              iterator = Karafka::Pro::Iterator.new(
+                { topic_id => ranges.transform_values(&:first) }
+              )
+
+              # Build the aggregated representation for each partition messages, so we can start
+              # with assumption that all the topics are fully compacted. Then we can nicely replace
+              # compacted `false` data with real messages, effectively ensuring that the gaps are
+              # filled with `false` out-of-the-box
+              aggregated = Hash.new { |h, k| h[k] = {} }
+
+              # We initialize the hash so we have a constant ascending order based on the partition
+              # number
+              partitions_ids.each { |i| aggregated[i] }
+
+              # We prefill all the potential offsets for each partition, so in case they were
+              # compacted, we get a continuous flow
+              ranges.each do |partition, range|
+                partition_aggr = aggregated[partition]
+                range.each { |i| partition_aggr[i] = [partition, i] }
+              end
+
+              # Iterate over all partitions and collect data
+              iterator.each do |message|
+                range = ranges[message.partition]
+
+                # Do not fetch more data from a partition for which we got last message from the
+                # expected offsets
+                # When all partitions are stopped, we will stop operations. This drastically
+                # improves performance because we no longer have to poll nils
+                iterator.stop_current_partition if message.offset >= range.last
+
+                partition = aggregated[message.partition]
+                partition[message.offset] = message
+              end
+
+              [
+                aggregated.values.map(&:values).map(&:reverse).reduce(:+),
+                !Sets.call(counts, page + 1).empty?
+              ]
+            end
+
            private
 
            # @param args [Object] anything required by the admin `#read_topic`
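A sketch of the merged, multi-partition view that topic_page enables (topic name and partition ids are illustrative):

  # Page 1 of the newest messages across partitions 0, 1 and 2,
  # allocated round-robin via the Sets paginator
  messages, more_pages = Models::Message.topic_page('errors_topic', [0, 1, 2], 1)

  # messages is a flat array ordered newest-first within each partition;
  # compacted or missing offsets appear as [partition_id, offset] placeholders
  # more_pages tells whether requesting page 2 would return anything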
@@ -117,16 +202,6 @@ module Karafka
              raise
            end
 
-            # @param topic_id [String] id of the topic
-            # @return [Integer] number of partitions this topic has
-            def fetch_partition_count(topic_id)
-              ::Karafka::Admin
-                .cluster_info
-                .topics
-                .find { |topic| topic[:topic_name] == topic_id }
-                .fetch(:partition_count)
-            end
-
            # @return [Integer] elements per page
            def per_page
              ::Karafka::Web.config.ui.per_page
@@ -136,16 +211,17 @@ module Karafka
            # we need to fill those with just the missing offset and handle this on the UI.
            #
            # @param messages [Array<Karafka::Messages::Message>] selected messages
+            # @param partition_id [Integer] number of partition for which we fill message gap
            # @param start_offset [Integer] offset of the first message (lowest) that we received
            # @param count [Integer] how many messages we wanted - we need that to fill spots to
            #   have exactly the number that was requested and not more
            # @return [Array<Karafka::Messages::Message, Integer>] array with gaps filled with the
            #   missing offset
-            def fill_compacted(messages, start_offset, count)
+            def fill_compacted(messages, partition_id, start_offset, count)
              Array.new(count) do |index|
                messages.find do |message|
                  (message.offset - start_offset) == index
-                end || start_offset + index
+                end || [partition_id, start_offset + index]
              end
            end
          end
@@ -5,6 +5,7 @@ module Karafka
     module Ui
       module Models
         # Model that represents the general status of the Web UI.
+        #
         # We use this data to display a status page that helps with debugging on what is missing
         # in the overall setup of the Web UI.
         #
@@ -15,7 +16,18 @@ module Karafka
           Step = Struct.new(:status, :details) do
             # @return [Boolean] is the given step successfully configured and working
             def success?
-              status == :success
+              status == :success || status == :warning
+            end
+
+            # @return [String] local namespace for partial of a given type
+            def partial_namespace
+              case status
+              when :success then 'successes'
+              when :warning then 'warnings'
+              when :failure then 'failures'
+              else
+                raise ::Karafka::Errors::UnsupportedCaseError, status
+              end
             end
 
             # @return [String] stringified status
@@ -29,11 +41,21 @@ module Karafka
            connect
          end
 
-          # @return [Status::Step] were we able to connect to Kafka or not
+          # @return [Status::Step] were we able to connect to Kafka or not and how fast.
+          #   Some people try to work with Kafka over the internet with really high latency and this
+          #   should be highlighted in the UI as often the connection just becomes unstable
          def connection
+            level = if @connection_time < 1_000
+                      :success
+                    elsif @connection_time < 1_000_000
+                      :warning
+                    else
+                      :failure
+                    end
+
            Step.new(
-              @connected ? :success : :failure,
-              nil
+              level,
+              { time: @connection_time }
            )
          end
 
@@ -171,12 +193,16 @@ module Karafka
            topics
          end
 
-          # Tries connecting with the cluster and sets the connection state
+          # Tries connecting with the cluster and saves the cluster info and the connection time
+          # @note If fails, `connection_time` will be 1_000_000
          def connect
-            @cluster_info = ::Karafka::Admin.cluster_info
-            @connected = true
+            started = Time.now.to_f
+            # For status we always need uncached data, otherwise status could cache outdated
+            # info
+            @cluster_info = Models::ClusterInfo.fetch(cached: false)
+            @connection_time = (Time.now.to_f - started) * 1_000
          rescue ::Rdkafka::RdkafkaError
-            @connected = false
+            @connection_time = 1_000_000
          end
        end
      end
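Putting the status pieces together: a slow but working connection yields a warning step that still counts as success and picks its partial from the warnings directory. A minimal sketch (the 2_500 ms timing is illustrative):

  step = Karafka::Web::Ui::Models::Status::Step.new(:warning, { time: 2_500.0 })

  step.success?           #=> true, warnings do not fail the status checks
  step.partial_namespace  #=> 'warnings'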
@@ -81,6 +81,10 @@ module Karafka
            render_response controller.partition(topic_id, partition_id)
          end
 
+          r.get String do |topic_id|
+            render_response controller.topic(topic_id)
+          end
+
          r.get do
            render_response controller.index
          end
@@ -99,12 +103,16 @@ module Karafka
        r.on 'errors' do
          controller = Controllers::Errors.new(params)
 
+          r.get Integer, Integer do |partition_id, offset|
+            render_response controller.show(partition_id, offset)
+          end
+
          r.get Integer do |partition_id|
-            render_response controller.index(partition_id)
+            render_response controller.partition(partition_id)
          end
 
-          r.get Integer, Integer do |partition_id, offset|
-            render_response controller.show(partition_id, offset)
+          r.get do
+            render_response controller.index
          end
        end
 
@@ -22,11 +22,13 @@ module Karafka
        def index
          @current_state = Models::State.current!
          @counters = Models::Counters.new(@current_state)
-          @processes, @next_page = Lib::PaginateArray.new.call(
+          @processes, last_page = Lib::Paginations::Paginators::Arrays.call(
            Models::Processes.active(@current_state),
            @params.current_page
          )
 
+          paginate(@params.current_page, !last_page)
+
          respond
        end
 
@@ -28,8 +28,7 @@ module Karafka
            .compact
            .select(&:itself)
 
-          @dlq_topics = Karafka::Admin
-            .cluster_info
+          @dlq_topics = Models::ClusterInfo
            .topics
            .select { |topic| dlq_topic_names.include?(topic[:topic_name]) }
            .sort_by { |topic| topic[:topic_name] }
@@ -18,18 +18,45 @@ module Karafka
      module Controllers
        # Errors details controller
        class Errors < Ui::Controllers::Base
+          include Ui::Lib::Paginations
+
+          # Lists all the errors from all the partitions
+          def index
+            @topic_id = errors_topic
+            @partitions_count = Models::ClusterInfo.partitions_count(errors_topic)
+
+            @active_partitions, materialized_page, @limited = Paginators::Partitions.call(
+              @partitions_count, @params.current_page
+            )
+
+            @error_messages, next_page = Models::Message.topic_page(
+              errors_topic, @active_partitions, materialized_page
+            )
+
+            paginate(@params.current_page, next_page)
+
+            respond
+          end
+
          # @param partition_id [Integer] id of the partition of errors we are interested in
-          def index(partition_id)
-            errors_topic = ::Karafka::Web.config.topics.errors
+          def partition(partition_id)
            @partition_id = partition_id
-            @previous_page, @error_messages, @next_page, @partitions_count = \
-              Models::Message.page(
-                errors_topic,
-                @partition_id,
-                @params.current_page
-              )
-
            @watermark_offsets = Ui::Models::WatermarkOffsets.find(errors_topic, @partition_id)
+            @partitions_count = Models::ClusterInfo.partitions_count(errors_topic)
+
+            previous_offset, @error_messages, next_offset = Models::Message.offset_page(
+              errors_topic,
+              @partition_id,
+              @params.current_offset,
+              @watermark_offsets
+            )
+
+            paginate(
+              previous_offset,
+              @params.current_offset,
+              next_offset,
+              @error_messages.map(&:offset)
+            )
 
            respond
          end
@@ -39,7 +66,6 @@ module Karafka
          # @param partition_id [Integer]
          # @param offset [Integer]
          def show(partition_id, offset)
-            errors_topic = ::Karafka::Web.config.topics.errors
            @partition_id = partition_id
            @offset = offset
            @error_message = Models::Message.find(
@@ -50,6 +76,13 @@ module Karafka
 
            respond
          end
+
+          private
+
+          # @return [String] errors topic
+          def errors_topic
+            ::Karafka::Web.config.topics.errors
+          end
        end
      end
    end