karafka-web 0.6.2 → 0.6.3

Files changed (54)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/CHANGELOG.md +2 -8
  4. data/Gemfile.lock +1 -1
  5. data/lib/karafka/web/config.rb +0 -2
  6. data/lib/karafka/web/ui/base.rb +2 -6
  7. data/lib/karafka/web/ui/controllers/base.rb +0 -17
  8. data/lib/karafka/web/ui/controllers/cluster.rb +2 -5
  9. data/lib/karafka/web/ui/controllers/consumers.rb +1 -3
  10. data/lib/karafka/web/ui/controllers/errors.rb +6 -19
  11. data/lib/karafka/web/ui/controllers/jobs.rb +1 -3
  12. data/lib/karafka/web/ui/controllers/requests/params.rb +0 -10
  13. data/lib/karafka/web/ui/lib/paginate_array.rb +38 -0
  14. data/lib/karafka/web/ui/models/message.rb +38 -114
  15. data/lib/karafka/web/ui/models/status.rb +1 -3
  16. data/lib/karafka/web/ui/pro/app.rb +3 -11
  17. data/lib/karafka/web/ui/pro/controllers/consumers.rb +1 -3
  18. data/lib/karafka/web/ui/pro/controllers/dlq.rb +2 -1
  19. data/lib/karafka/web/ui/pro/controllers/errors.rb +10 -43
  20. data/lib/karafka/web/ui/pro/controllers/explorer.rb +7 -52
  21. data/lib/karafka/web/ui/pro/views/errors/_breadcrumbs.erb +6 -8
  22. data/lib/karafka/web/ui/pro/views/errors/_error.erb +1 -1
  23. data/lib/karafka/web/ui/pro/views/errors/_partition_option.erb +1 -1
  24. data/lib/karafka/web/ui/pro/views/errors/index.erb +56 -9
  25. data/lib/karafka/web/ui/pro/views/explorer/_breadcrumbs.erb +1 -1
  26. data/lib/karafka/web/ui/pro/views/explorer/_message.erb +2 -8
  27. data/lib/karafka/web/ui/pro/views/explorer/_partition_option.erb +1 -1
  28. data/lib/karafka/web/ui/pro/views/explorer/_topic.erb +1 -1
  29. data/lib/karafka/web/ui/pro/views/explorer/partition/_messages.erb +0 -1
  30. data/lib/karafka/web/ui/pro/views/explorer/partition.erb +1 -1
  31. data/lib/karafka/web/ui/pro/views/shared/_navigation.erb +1 -1
  32. data/lib/karafka/web/ui/views/cluster/_partition.erb +1 -1
  33. data/lib/karafka/web/ui/views/errors/_error.erb +1 -1
  34. data/lib/karafka/web/ui/views/shared/_pagination.erb +12 -16
  35. data/lib/karafka/web/version.rb +1 -1
  36. data.tar.gz.sig +0 -0
  37. metadata +2 -17
  38. metadata.gz.sig +0 -0
  39. data/lib/karafka/web/ui/lib/paginations/base.rb +0 -61
  40. data/lib/karafka/web/ui/lib/paginations/offset_based.rb +0 -96
  41. data/lib/karafka/web/ui/lib/paginations/page_based.rb +0 -70
  42. data/lib/karafka/web/ui/lib/paginations/paginators/arrays.rb +0 -33
  43. data/lib/karafka/web/ui/lib/paginations/paginators/base.rb +0 -23
  44. data/lib/karafka/web/ui/lib/paginations/paginators/partitions.rb +0 -52
  45. data/lib/karafka/web/ui/lib/paginations/paginators/sets.rb +0 -85
  46. data/lib/karafka/web/ui/lib/ttl_cache.rb +0 -74
  47. data/lib/karafka/web/ui/models/cluster_info.rb +0 -59
  48. data/lib/karafka/web/ui/pro/views/errors/_table.erb +0 -21
  49. data/lib/karafka/web/ui/pro/views/errors/_title_with_select.erb +0 -31
  50. data/lib/karafka/web/ui/pro/views/errors/partition.erb +0 -17
  51. data/lib/karafka/web/ui/pro/views/explorer/topic/_empty.erb +0 -3
  52. data/lib/karafka/web/ui/pro/views/explorer/topic/_limited.erb +0 -4
  53. data/lib/karafka/web/ui/pro/views/explorer/topic/_partitions.erb +0 -11
  54. data/lib/karafka/web/ui/pro/views/explorer/topic.erb +0 -49
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: d52e64643f448374a2f4efcb5e27089d5f36c8b8cde0eac5e2434c59b07d371c
-  data.tar.gz: '068a2ee3c3d82eeccdde1e001e6788c71980769c58013dd5290ad47002eac47e'
+  metadata.gz: 12e92e2fc2ceb4c30ab825881074948d32c6041d02b882f074a8d2e94ccae7a4
+  data.tar.gz: 3cc07b90ac8037f9509cf7c42f12fa1ce22e1fdca8d67109ed2aef19faa6c4c4
 SHA512:
-  metadata.gz: 0ef49085501fafc176d09c6813d3bafc7cb5d56231e2b655462ed12da02effe623a46c1934a8f17e9913dccf8da6405877ddede9a0f519b350613d0fbf4a66a1
-  data.tar.gz: 8251fe9ac27bab1b0992cb8f9baae68fe15100b48814e245589c6a38cc608ee5fa2d65d62faff0fdc8712dcff7685d82a0e87677d56bcc73fd35b07766f02af8
+  metadata.gz: 11baf2c9e72a216c8cec688922c76dc5d66eee5ac52e0225b0379b490bc5f5b556c9545482d51b7c88eb8ba9a85eec8b74642e98ce8c4c836d44828412638944
+  data.tar.gz: '010109c8fe106e8ecfa27e7ab75e657868ef01e6c9d5677074d70017a193a22475a8ab2c0938ba545d76602d03564f697b99d0802ae02ff75e7e5aed940a0d54'
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,13 +1,7 @@
 # Karafka Web changelog
 
-## 0.7.0 (Unreleased)
-- **[Feature]** Introduce per-topic data exploration in the Explorer.
-- [Improvement] Introduce in-memory cluster state cached to improve performance.
-- [Improvement] Switch to offset based pagination instead of per-page pagination.
-- [Improvement] Avoid double-reading of watermark offsets for explorer and errors display.
-- [Improvement] When no params needed for a page, do not include empty params.
-- [Improvement] Do not include page when page is 1 in the url.
-- [Refactor] Reorganize pagination engine to support offset based pagination.
+## 0.6.3 (2023-07-22)
+- [Fix] Remove files from 0.7.0 accidentally added to the release.
 
 ## 0.6.2 (2023-07-22)
 - [Fix] Fix extensive CPU usage when using HPET clock instead of TSC due to interrupt frequency.
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    karafka-web (0.6.2)
+    karafka-web (0.6.3)
       erubi (~> 1.4)
       karafka (>= 2.1.4, < 3.0.0)
      karafka-core (>= 2.0.13, < 3.0.0)
data/lib/karafka/web/config.rb CHANGED
@@ -80,8 +80,6 @@ module Karafka
       end
 
       setting :ui do
-        setting :cache, default: Ui::Lib::TtlCache.new(60_000 * 5)
-
         # Should the payload be decrypted for the Pro Web UI. Default to `false` due to security
         # reasons
         setting :decrypt, default: false
data/lib/karafka/web/ui/base.rb CHANGED
@@ -68,12 +68,8 @@ module Karafka
         # Allows us to build current path with additional params
         # @param query_data [Hash] query params we want to add to the current path
         path :current do |query_data = {}|
-          q = query_data
-            .select { |_, v| v }
-            .map { |k, v| "#{k}=#{CGI.escape(v.to_s)}" }
-            .join('&')
-
-          [request.path, q].compact.delete_if(&:empty?).join('?')
+          q = query_data.map { |k, v| "#{k}=#{CGI.escape(v.to_s)}" }.join('&')
+          "#{request.path}?#{q}"
         end
 
         # Sets appropriate template variables based on the response object and renders the
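
Side note on the hunk above: the restored 0.6.x helper always appends `?`, even when no query params are given, while the removed 0.7.0 variant filtered out nil values and dropped an empty query string entirely. A minimal standalone sketch of the two behaviours, assuming only the Ruby stdlib (both method names are hypothetical):

require 'cgi'

# Restored 0.6.x behaviour: the "?" is always appended
def current_path_simple(path, query_data = {})
  q = query_data.map { |k, v| "#{k}=#{CGI.escape(v.to_s)}" }.join('&')
  "#{path}?#{q}"
end

# Removed 0.7.0 behaviour: nil values are skipped, an empty query is dropped
def current_path_clean(path, query_data = {})
  q = query_data
      .select { |_, v| v }
      .map { |k, v| "#{k}=#{CGI.escape(v.to_s)}" }
      .join('&')

  [path, q].compact.delete_if(&:empty?).join('?')
end

current_path_simple('/consumers')          # => "/consumers?"
current_path_clean('/consumers')           # => "/consumers"
current_path_clean('/consumers', page: 2)  # => "/consumers?page=2"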
data/lib/karafka/web/ui/controllers/base.rb CHANGED
@@ -12,8 +12,6 @@ module Karafka
           @params = params
         end
 
-        private
-
         # Builds the respond data object with assigned attributes based on instance variables.
         #
         # @return [Responses::Data] data that should be used to render appropriate view
@@ -35,21 +33,6 @@ module Karafka
             attributes
           )
         end
-
-        # Initializes the expected pagination engine and assigns expected arguments
-        # @param args Any arguments accepted by the selected pagination engine
-        def paginate(*args)
-          engine = case args.count
-                   when 2
-                     Ui::Lib::Paginations::PageBased
-                   when 4
-                     Ui::Lib::Paginations::OffsetBased
-                   else
-                     raise ::Karafka::Errors::UnsupportedCaseError, args.count
-                   end
-
-          @pagination = engine.new(*args)
-        end
       end
     end
   end
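
The removed `#paginate` above dispatched on argument count: two arguments selected the page-based engine, four the offset-based one. A rough standalone illustration of that dispatch, with `Struct` stand-ins for the removed engine classes and `ArgumentError` in place of `::Karafka::Errors::UnsupportedCaseError`:

# Hypothetical stand-ins for the two pagination engines
PageBased   = Struct.new(:current_page, :show_next)
OffsetBased = Struct.new(:previous_offset, :current_offset, :next_offset, :offsets)

# Mirrors the removed #paginate: arity selects the engine
def build_pagination(*args)
  engine = case args.count
           when 2 then PageBased
           when 4 then OffsetBased
           else raise ArgumentError, "unsupported args count: #{args.count}"
           end

  engine.new(*args)
end

build_pagination(1, true)               # => page-based pagination state
build_pagination(75, 50, 25, [50, 49])  # => offset-based pagination state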
data/lib/karafka/web/ui/controllers/cluster.rb CHANGED
@@ -8,8 +8,7 @@ module Karafka
       class Cluster < Base
         # List cluster info data
         def index
-          # Make sure, that for the cluster view we always get the most recent cluster state
-          @cluster_info = Models::ClusterInfo.fetch(cached: false)
+          @cluster_info = Karafka::Admin.cluster_info
 
           partitions_total = []
 
@@ -19,13 +18,11 @@ module Karafka
             end
           end
 
-          @partitions, last_page = Ui::Lib::Paginations::Paginators::Arrays.call(
+          @partitions, @next_page = Ui::Lib::PaginateArray.new.call(
             partitions_total,
             @params.current_page
           )
 
-          paginate(@params.current_page, !last_page)
-
           respond
         end
 
data/lib/karafka/web/ui/controllers/consumers.rb CHANGED
@@ -11,13 +11,11 @@ module Karafka
         def index
           @current_state = Models::State.current!
           @counters = Models::Counters.new(@current_state)
-          @processes, last_page = Ui::Lib::Paginations::Paginators::Arrays.call(
+          @processes, @next_page = Lib::PaginateArray.new.call(
            Models::Processes.active(@current_state),
            @params.current_page
           )
 
-          paginate(@params.current_page, !last_page)
-
           respond
         end
       end
data/lib/karafka/web/ui/controllers/errors.rb CHANGED
@@ -10,16 +10,14 @@ module Karafka
       class Errors < Base
         # Lists first page of the errors
         def index
-          @watermark_offsets = Ui::Models::WatermarkOffsets.find(errors_topic, 0)
-          previous_offset, @error_messages, next_offset, = current_page_data
-
-          paginate(
-            previous_offset,
-            @params.current_offset,
-            next_offset,
-            @error_messages.map(&:offset)
+          @previous_page, @error_messages, @next_page, = Models::Message.page(
+            errors_topic,
+            0,
+            @params.current_page
           )
 
+          @watermark_offsets = Ui::Models::WatermarkOffsets.find(errors_topic, 0)
+
           respond
         end
 
@@ -36,17 +34,6 @@ module Karafka
 
         private
 
-        # @return [Array] Array with requested messages as well as pagination details and other
-        #   obtained metadata
-        def current_page_data
-          Models::Message.offset_page(
-            errors_topic,
-            0,
-            @params.current_offset,
-            @watermark_offsets
-          )
-        end
-
         # @return [String] errors topic
         def errors_topic
           ::Karafka::Web.config.topics.errors
data/lib/karafka/web/ui/controllers/jobs.rb CHANGED
@@ -19,13 +19,11 @@ module Karafka
            end
          end
 
-          @jobs, last_page = Ui::Lib::Paginations::Paginators::Arrays.call(
+          @jobs, @next_page = Ui::Lib::PaginateArray.new.call(
            jobs_total,
            @params.current_page
          )
 
-          paginate(@params.current_page, !last_page)
-
          respond
        end
      end
data/lib/karafka/web/ui/controllers/requests/params.rb CHANGED
@@ -22,16 +22,6 @@ module Karafka
             page.positive? ? page : 1
           end
         end
-
-        # @return [Integer] offset from which we want to start. `-1` indicates, that we want
-        #   to show the first page discovered based on the high watermark offset. If no offset
-        #   is provided, we go with the high offset first page approach
-        def current_offset
-          @current_offset ||= begin
-            offset = @request_params.fetch('offset', -1).to_i
-            offset < -1 ? -1 : offset
-          end
-        end
       end
     end
   end
data/lib/karafka/web/ui/lib/paginate_array.rb ADDED
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Web
+    module Ui
+      module Lib
+        # A simple wrapper for paginating array related data structures
+        class PaginateArray
+          # @param array [Array] array we want to paginate
+          # @param current_page [Integer] page we want to be on
+          # @return [Array<Array, <Integer, nil>>] Array with two elements: first is the array with
+          #   data of the given page and second is the next page number or nil in case there is
+          #   no next page (end of data)
+          def call(array, current_page)
+            slices = array.each_slice(per_page).to_a
+
+            current_data = slices[current_page - 1] || []
+
+            if slices.count >= current_page - 1 && current_data.size >= per_page
+              next_page = current_page + 1
+            else
+              next_page = nil
+            end
+
+            [current_data, next_page]
+          end
+
+          private
+
+          # @return [Integer] how many elements should we display in the UI
+          def per_page
+            ::Karafka::Web.config.ui.per_page
+          end
+        end
+      end
+    end
+  end
+end
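
For illustration, the slicing logic of the added `PaginateArray` can be exercised standalone; in this sketch `per_page` is passed in explicitly instead of being read from `::Karafka::Web.config.ui.per_page`, and the next-page condition is simplified to its behaviour-equivalent core:

# Standalone sketch of the PaginateArray slicing logic
def paginate_array(array, current_page, per_page: 25)
  slices = array.each_slice(per_page).to_a
  current_data = slices[current_page - 1] || []

  # A full page implies there may be a next one (which can turn out empty
  # when the total count is an exact multiple of per_page, as in the class above)
  next_page = current_data.size >= per_page ? current_page + 1 : nil

  [current_data, next_page]
end

data, next_page = paginate_array((1..60).to_a, 1)
data.size  # => 25
next_page  # => 2

data, next_page = paginate_array((1..60).to_a, 3)
data.size  # => 10
next_page  # => nil (end of data)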
data/lib/karafka/web/ui/models/message.rb CHANGED
@@ -7,8 +7,6 @@ module Karafka
       # A proxy between `::Karafka::Messages::Message` and web UI
       # We work with the Karafka messages but use this model to wrap the work needed.
       class Message
-        extend Lib::Paginations::Paginators
-
         class << self
           # Looks for a message from a given topic partition
           #
@@ -32,44 +30,41 @@ module Karafka
             )
           end
 
-          # Fetches requested `page_count` number of Kafka messages starting from the oldest
-          # requested `start_offset`. If `start_offset` is `-1`, will fetch the most recent
-          # results
+          # Fetches requested page of Kafka messages.
           #
           # @param topic_id [String]
           # @param partition_id [Integer]
-          # @param start_offset [Integer] oldest offset from which we want to get the data
-          # @param watermark_offsets [Ui::Models::WatermarkOffsets] watermark offsets
-          # @return [Array] We return page data as well as all the details needed to build
+          # @param page [Integer]
+          # @return [Array] We return both page data as well as all the details needed to build
           #   the pagination details.
-          def offset_page(topic_id, partition_id, start_offset, watermark_offsets)
-            low_offset = watermark_offsets.low
-            high_offset = watermark_offsets.high
+          def page(topic_id, partition_id, page)
+            low_offset, high_offset = Karafka::Admin.read_watermark_offsets(
+              topic_id,
+              partition_id
+            )
 
-            # If we start from offset -1, it means we want first page with the most recent
-            # results. We obtain this page by using the offset based on the high watermark
-            # off
-            start_offset = high_offset - per_page if start_offset == -1
+            partitions_count = fetch_partition_count(topic_id)
 
-            # No previous pages, no data, and no more offsets
-            no_data_result = [false, [], false]
+            no_data_result = [false, [], false, partitions_count]
 
-            # If there is no data, we return the no results result
+            # If there is not even one message, we need to early exit
+            # If low and high watermark offsets are of the same value, it means no data in the
+            # topic is present
            return no_data_result if low_offset == high_offset
 
+            # We add plus one because we compute previous offset from which we want to start and
+            # not previous page leading offset
+            start_offset = high_offset - (per_page * page)
+
            if start_offset <= low_offset
-              # If this page does not contain max per page, compute how many messages we can
-              # fetch before stopping
              count = per_page - (low_offset - start_offset)
-              next_offset = false
+              previous_page = page < 2 ? false : page - 1
+              next_page = false
              start_offset = low_offset
            else
-              next_offset = start_offset - per_page
-              # Do not go below the lowest possible offset
-              next_offset = low_offset if next_offset < low_offset
-              count = high_offset - start_offset
-              # If there would be more messages that we want to get, force max
-              count = per_page if count > per_page
+              previous_page = page < 2 ? false : page - 1
+              next_page = page + 1
+              count = per_page
            end
 
            # This code is a bit tricky. Since topics can be compacted and certain offsets may
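
The restored `page` method above maps a page number onto watermark offsets: each page starts `per_page * page` below the high watermark and is clamped at the low watermark. A worked example with hypothetical watermark values standing in for `Karafka::Admin.read_watermark_offsets`:

# Worked example of the page -> offset arithmetic from the hunk above
per_page    = 25
low_offset  = 100    # first offset still retained in the partition
high_offset = 160    # next offset to be written

(1..3).each do |page|
  start_offset = high_offset - (per_page * page)

  if start_offset <= low_offset
    count        = per_page - (low_offset - start_offset)
    next_page    = false
    start_offset = low_offset
  else
    count     = per_page
    next_page = page + 1
  end

  previous_page = page < 2 ? false : page - 1
  puts "page=#{page} start=#{start_offset} count=#{count} " \
       "prev=#{previous_page} next=#{next_page}"
end
# page=1 start=135 count=25 prev=false next=2
# page=2 start=110 count=25 prev=1 next=3
# page=3 start=100 count=10 prev=2 next=false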
@@ -98,97 +93,17 @@ module Karafka
 
            next unless messages
 
-            previous_offset = start_offset + count
-
            return [
-              # If there is a potential previous page with more recent data, compute its
-              # offset
-              previous_offset >= high_offset ? false : previous_offset,
-              fill_compacted(messages, partition_id, context_offset, context_count).reverse,
-              next_offset
+              previous_page,
+              fill_compacted(messages, context_offset, context_count).reverse,
+              next_page,
+              partitions_count
            ]
          end
 
          no_data_result
        end
 
-          # Fetches requested `page_count` number of Kafka messages from the topic partitions
-          # and merges the results. Ensures, that pagination works as expected.
-          #
-          # @param topic_id [String]
-          # @param partitions_ids [Array<Integer>] for which of the partitions we want to
-          #   get the data. This is a limiting factor because of the fact that we have to
-          #   query the watermark offsets independently
-          # @param page [Integer] which page we want to get
-          def topic_page(topic_id, partitions_ids, page)
-            # This is the bottleneck, for each partition we make one request :(
-            offsets = partitions_ids.map do |partition_id|
-              [partition_id, Models::WatermarkOffsets.find(topic_id, partition_id)]
-            end.to_h
-
-            # Count number of elements we have in each partition
-            # This assumes linear presence until low. If not, gaps will be filled like we fill
-            # for per partition view
-            counts = offsets.values.map { |offset| offset[:high] - offset[:low] }
-
-            # Establish initial offsets for the iterator (where to start) per partition
-            # We do not use the negative lookup iterator because we already can compute starting
-            # offsets. This saves a lot of calls to Kafka
-            ranges = Sets.call(counts, page).map do |partition_position, partition_range|
-              partition_id = partitions_ids.to_a[partition_position]
-              watermarks = offsets[partition_id]
-
-              lowest = watermarks[:high] - partition_range.last - 1
-              # We -1 because high watermark offset is the next incoming offset and not the last
-              # one in the topic partition
-              highest = watermarks[:high] - partition_range.first - 1
-
-              # This range represents offsets we want to fetch
-              [partition_id, lowest..highest]
-            end.to_h
-
-            # We start on our topic from the lowest offset for each expected partition
-            iterator = Karafka::Pro::Iterator.new(
-              { topic_id => ranges.transform_values(&:first) }
-            )
-
-            # Build the aggregated representation for each partition messages, so we can start
-            # with assumption that all the topics are fully compacted. Then we can nicely replace
-            # compacted `false` data with real messages, effectively ensuring that the gaps are
-            # filled with `false` out-of-the-box
-            aggregated = Hash.new { |h, k| h[k] = {} }
-
-            # We initialize the hash so we have a constant ascending order based on the partition
-            # number
-            partitions_ids.each { |i| aggregated[i] }
-
-            # We prefill all the potential offsets for each partition, so in case they were
-            # compacted, we get a continuous flow
-            ranges.each do |partition, range|
-              partition_aggr = aggregated[partition]
-              range.each { |i| partition_aggr[i] = [partition, i] }
-            end
-
-            # Iterate over all partitions and collect data
-            iterator.each do |message|
-              range = ranges[message.partition]
-
-              # Do not fetch more data from a partition for which we got last message from the
-              # expected offsets
-              # When all partitions are stopped, we will stop operations. This drastically
-              # improves performance because we no longer have to poll nils
-              iterator.stop_current_partition if message.offset >= range.last
-
-              partition = aggregated[message.partition]
-              partition[message.offset] = message
-            end
-
-            [
-              aggregated.values.map(&:values).map(&:reverse).reduce(:+),
-              !Sets.call(counts, page + 1).empty?
-            ]
-          end
-
          private
 
          # @param args [Object] anything required by the admin `#read_topic`
@@ -202,6 +117,16 @@ module Karafka
            raise
          end
 
+          # @param topic_id [String] id of the topic
+          # @return [Integer] number of partitions this topic has
+          def fetch_partition_count(topic_id)
+            ::Karafka::Admin
+              .cluster_info
+              .topics
+              .find { |topic| topic[:topic_name] == topic_id }
+              .fetch(:partition_count)
+          end
+
          # @return [Integer] elements per page
          def per_page
            ::Karafka::Web.config.ui.per_page
@@ -211,17 +136,16 @@ module Karafka
          #   we need to fill those with just the missing offset and handle this on the UI.
          #
          # @param messages [Array<Karafka::Messages::Message>] selected messages
-          # @param partition_id [Integer] number of partition for which we fill message gap
          # @param start_offset [Integer] offset of the first message (lowest) that we received
          # @param count [Integer] how many messages we wanted - we need that to fill spots to
          #   have exactly the number that was requested and not more
          # @return [Array<Karafka::Messages::Message, Integer>] array with gaps filled with the
          #   missing offset
-          def fill_compacted(messages, partition_id, start_offset, count)
+          def fill_compacted(messages, start_offset, count)
            Array.new(count) do |index|
              messages.find do |message|
                (message.offset - start_offset) == index
-              end || [partition_id, start_offset + index]
+              end || start_offset + index
            end
          end
        end
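
To see what the narrowed `fill_compacted` above does: offsets missing from a compacted range are filled with the bare offset (previously a `[partition_id, offset]` pair) so the UI can render a placeholder row. A standalone sketch with a hypothetical message struct:

Msg = Struct.new(:offset, :payload)

def fill_compacted(messages, start_offset, count)
  Array.new(count) do |index|
    messages.find { |message| (message.offset - start_offset) == index } ||
      start_offset + index
  end
end

# Offsets 101 and 103 were compacted away
messages = [Msg.new(100, 'a'), Msg.new(102, 'b'), Msg.new(104, 'c')]
fill_compacted(messages, 100, 5).map { |m| m.is_a?(Msg) ? m.payload : m }
# => ["a", 101, "b", 103, "c"]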
data/lib/karafka/web/ui/models/status.rb CHANGED
@@ -197,9 +197,7 @@ module Karafka
       # @note If fails, `connection_time` will be 1_000_000
       def connect
         started = Time.now.to_f
-        # For status we always need uncached data, otherwise status could cache outdated
-        # info
-        @cluster_info = Models::ClusterInfo.fetch(cached: false)
+        @cluster_info = ::Karafka::Admin.cluster_info
         @connection_time = (Time.now.to_f - started) * 1_000
       rescue ::Rdkafka::RdkafkaError
         @connection_time = 1_000_000
data/lib/karafka/web/ui/pro/app.rb CHANGED
@@ -81,10 +81,6 @@ module Karafka
            render_response controller.partition(topic_id, partition_id)
          end
 
-          r.get String do |topic_id|
-            render_response controller.topic(topic_id)
-          end
-
          r.get do
            render_response controller.index
          end
@@ -103,16 +99,12 @@ module Karafka
        r.on 'errors' do
          controller = Controllers::Errors.new(params)
 
-          r.get Integer, Integer do |partition_id, offset|
-            render_response controller.show(partition_id, offset)
-          end
-
          r.get Integer do |partition_id|
-            render_response controller.partition(partition_id)
+            render_response controller.index(partition_id)
          end
 
-          r.get do
-            render_response controller.index
+          r.get Integer, Integer do |partition_id, offset|
+            render_response controller.show(partition_id, offset)
          end
        end
 
data/lib/karafka/web/ui/pro/controllers/consumers.rb CHANGED
@@ -22,13 +22,11 @@ module Karafka
          def index
            @current_state = Models::State.current!
            @counters = Models::Counters.new(@current_state)
-            @processes, last_page = Lib::Paginations::Paginators::Arrays.call(
+            @processes, @next_page = Lib::PaginateArray.new.call(
              Models::Processes.active(@current_state),
              @params.current_page
            )
 
-            paginate(@params.current_page, !last_page)
-
            respond
          end
 
data/lib/karafka/web/ui/pro/controllers/dlq.rb CHANGED
@@ -28,7 +28,8 @@ module Karafka
            .compact
            .select(&:itself)
 
-          @dlq_topics = Models::ClusterInfo
+          @dlq_topics = Karafka::Admin
+            .cluster_info
            .topics
            .select { |topic| dlq_topic_names.include?(topic[:topic_name]) }
            .sort_by { |topic| topic[:topic_name] }
data/lib/karafka/web/ui/pro/controllers/errors.rb CHANGED
@@ -18,45 +18,18 @@ module Karafka
        module Controllers
          # Errors details controller
          class Errors < Ui::Controllers::Base
-            include Ui::Lib::Paginations
-
-            # Lists all the errors from all the partitions
-            def index
-              @topic_id = errors_topic
-              @partitions_count = Models::ClusterInfo.partitions_count(errors_topic)
-
-              @active_partitions, materialized_page, @limited = Paginators::Partitions.call(
-                @partitions_count, @params.current_page
-              )
-
-              @error_messages, next_page = Models::Message.topic_page(
-                errors_topic, @active_partitions, materialized_page
-              )
-
-              paginate(@params.current_page, next_page)
-
-              respond
-            end
-
            # @param partition_id [Integer] id of the partition of errors we are interested in
-            def partition(partition_id)
+            def index(partition_id)
+              errors_topic = ::Karafka::Web.config.topics.errors
              @partition_id = partition_id
-              @watermark_offsets = Ui::Models::WatermarkOffsets.find(errors_topic, @partition_id)
-              @partitions_count = Models::ClusterInfo.partitions_count(errors_topic)
-
-              previous_offset, @error_messages, next_offset = Models::Message.offset_page(
-                errors_topic,
-                @partition_id,
-                @params.current_offset,
-                @watermark_offsets
-              )
+              @previous_page, @error_messages, @next_page, @partitions_count = \
+                Models::Message.page(
+                  errors_topic,
+                  @partition_id,
+                  @params.current_page
+                )
 
-              paginate(
-                previous_offset,
-                @params.current_offset,
-                next_offset,
-                @error_messages.map(&:offset)
-              )
+              @watermark_offsets = Ui::Models::WatermarkOffsets.find(errors_topic, @partition_id)
 
              respond
            end
@@ -66,6 +39,7 @@ module Karafka
            # @param partition_id [Integer]
            # @param offset [Integer]
            def show(partition_id, offset)
+              errors_topic = ::Karafka::Web.config.topics.errors
              @partition_id = partition_id
              @offset = offset
              @error_message = Models::Message.find(
@@ -76,13 +50,6 @@ module Karafka
 
              respond
            end
-
-            private
-
-            # @return [String] errors topic
-            def errors_topic
-              ::Karafka::Web.config.topics.errors
-            end
          end
        end
      end