karafka-web 0.7.7 → 0.7.8

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 1e4dd8f19fdad3697e837ed96aa8fafabae6eac4efe8cbf75aaa788eccebc44c
- data.tar.gz: 33814043050afda80109fc14082a09de0f5499041fe2f4a24a90b25f7cd501c8
+ metadata.gz: 2d46244304483184c3be59a0686497a9f948e9b666dccf00961b560e60db4655
+ data.tar.gz: 329686ce548063787fe06290d67650793ebef6c014f43b753780ad120671a8b1
  SHA512:
- metadata.gz: 8c84de9a33977904aadffa3ff51af275f4301e7375f9194343b4980d82b6b4fdf4dc57aeb0f1e0cef8c66499b048fb595ec4cf7d1bdbd3011995a0223c4a70ad
- data.tar.gz: 4c82722b6fab042562ee3f390d10775b1de7e0c77406c623e7e17f36860a9954ef132cbc540d30449c1212a9d507203e8fb9500393c92b2be09ccbc06c281f0d
+ metadata.gz: 5a18a3c23f63382c6c41dc5ac7f82f893bc14a55e7ec68af9cf16da0f47a45aee77ca9626973570cb9d6abe9355e0712f117424e00619b7b0198748c3bfbeb55
+ data.tar.gz: f639bd90ca21c09fcd798fc289079cafeb9c752d261ecf88df9762e347df738bf260c658f0f15b3b08c3708daf091217c0159d85ac975380463a65a45d876d0b
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,5 +1,13 @@
  # Karafka Web changelog
 
+ ## 0.7.8 (2023-10-24)
+ - [Enhancement] Support transactional producer usage with the Web UI.
+ - [Fix] Fix a bug where critical errors (like `IRB::Abort`) would not abort the ongoing transaction.
+ - [Fix] Prevent a dead-lock scenario where an ongoing transactional producer would have stats emitted together with an error that could not be dispatched because of the transaction.
+ - [Fix] Make sure that `recent` displays the most recent non-compacted, non-system message.
+ - [Fix] Improve the `recent` message display to compensate for aborted transactions.
+ - [Fix] Fix `ReferenceError: response is not defined` that occurs when a Web UI refresh request returns a non-200 response.
+
  ## 0.7.7 (2023-10-20)
  - [Fix] Remove `thor` as a CLI engine due to breaking changes.
 
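The headline 0.7.8 change is transactional producer support. As a minimal sketch, not taken from this diff, of the kind of setup the release now tolerates: a WaterDrop producer becomes transactional once `transactional.id` is set (the broker address and id below are illustrative assumptions).

# A minimal sketch (not from this diff): a transactional WaterDrop producer,
# which the 0.7.8 Web UI reporting flow is designed to coexist with.
require 'waterdrop'

producer = WaterDrop::Producer.new do |config|
  config.kafka = {
    'bootstrap.servers': 'localhost:9092',  # assumption: local broker
    'transactional.id': 'karafka-web-demo'  # assumption: any unique id
  }
end

producer.transaction do
  producer.produce_async(topic: 'events', payload: 'within a transaction')
end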
data/Gemfile.lock CHANGED
@@ -1,9 +1,9 @@
  PATH
  remote: .
  specs:
- karafka-web (0.7.7)
+ karafka-web (0.7.8)
  erubi (~> 1.4)
- karafka (>= 2.2.8.beta1, < 3.0.0)
+ karafka (>= 2.2.9, < 3.0.0)
  karafka-core (>= 2.2.2, < 3.0.0)
  roda (~> 3.68, >= 3.69)
  tilt (~> 2.0)
@@ -36,9 +36,9 @@ GEM
  ffi (1.16.3)
  i18n (1.14.1)
  concurrent-ruby (~> 1.0)
- karafka (2.2.8.beta1)
+ karafka (2.2.9)
  karafka-core (>= 2.2.2, < 2.3.0)
- waterdrop (>= 2.6.6, < 3.0.0)
+ waterdrop (>= 2.6.10, < 3.0.0)
  zeitwerk (~> 2.3)
  karafka-core (2.2.3)
  concurrent-ruby (>= 1.1)
@@ -47,7 +47,7 @@ GEM
  ffi (~> 1.15)
  mini_portile2 (~> 2.6)
  rake (> 12)
- mini_portile2 (2.8.4)
+ mini_portile2 (2.8.5)
  minitest (5.20.0)
  mutex_m (0.1.2)
  rack (3.0.8)
@@ -82,8 +82,8 @@ GEM
  tilt (2.3.0)
  tzinfo (2.0.6)
  concurrent-ruby (~> 1.0)
- waterdrop (2.6.7)
- karafka-core (>= 2.1.1, < 3.0.0)
+ waterdrop (2.6.10)
+ karafka-core (>= 2.2.3, < 3.0.0)
  zeitwerk (~> 2.3)
  webrick (1.8.1)
  zeitwerk (2.6.12)
data/karafka-web.gemspec CHANGED
@@ -17,7 +17,7 @@ Gem::Specification.new do |spec|
  spec.licenses = %w[LGPL-3.0 Commercial]
 
  spec.add_dependency 'erubi', '~> 1.4'
- spec.add_dependency 'karafka', '>= 2.2.8.beta1', '< 3.0.0'
+ spec.add_dependency 'karafka', '>= 2.2.9', '< 3.0.0'
  spec.add_dependency 'karafka-core', '>= 2.2.2', '< 3.0.0'
  spec.add_dependency 'roda', '~> 3.68', '>= 3.69'
  spec.add_dependency 'tilt', '~> 2.0'
@@ -36,6 +36,10 @@ module Karafka
  # 5 seconds should be enough
  setting :interval, default: 5_000
 
+ # Main Web UI reporting scheduler that runs a background thread and reports periodically
+ # from the consumer reporter and producer reporter
+ setting :scheduler, default: Tracking::Scheduler.new
+
  setting :consumers do
  # Reports the metrics collected in the sampler
  setting :reporter, default: Tracking::Consumers::Reporter.new
@@ -58,8 +62,7 @@ module Karafka
  setting :sampler, default: Tracking::Producers::Sampler.new
 
  setting :listeners, default: [
- Tracking::Producers::Listeners::Errors.new,
- Tracking::Producers::Listeners::Reporter.new
+ Tracking::Producers::Listeners::Errors.new
  ]
  end
  end
@@ -114,6 +117,37 @@ module Karafka
  # In some cases you may want to limit what is being displayed due to the type of data
  # you are dealing with
  setting :visibility_filter, default: Ui::Models::VisibilityFilter.new
+
+ # Specific kafka settings that are tuned to operate within the Web UI interface.
+ #
+ # Please do not change them unless you know what you are doing, as their misconfiguration
+ # may cause the Web UI to misbehave.
+ #
+ # The settings are inherited as follows:
+ #   1. root routing level `kafka` settings
+ #   2. admin `kafka` settings
+ #   3. web ui `kafka` settings from here
+ #
+ # Those settings impact ONLY the Web UI interface and do not affect other scopes. This is
+ # done on purpose, as we want to improve the responsiveness of the interface by tuning
+ # some of the settings, and this is not that relevant for processing itself.
+ #
+ # option [Hash] extra changes to the default admin kafka settings
+ setting :kafka, default: {
+   # Optimizes the responsiveness of the Web UI in three scenarios:
+   # - topics to which writes happen only in transactions, so EOF is yielded faster
+   # - heavily compacted topics
+   # - Web UI topics read operations when using a transactional producer
+   #
+   # This can be configured to be higher if you do not use a transactional WaterDrop
+   # producer. This value is used when the last message (first from the high watermark
+   # offset) is the transaction commit message. In cases like this the EOF gets propagated
+   # after this time, so we have to wait. A 500ms setting means that, for some views where
+   # we take data that might have been committed via a transactional producer, we would
+   # wait up to 1 second to get the needed data. If you are experiencing timeouts or other
+   # issues with the Web UI interface, you can increase this.
+   'fetch.wait.max.ms': 100
+ }
  end
  end
  end
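If you do not use a transactional producer, this wait can be raised for snappier UI reads. A hedged usage sketch, relying only on the `ui.kafka` setting introduced above and the standard `Karafka::Web.setup` entry point:

# Sketch: raising the Web UI fetch wait; this hash is merged only into
# Web UI admin reads and does not affect tracking or processing.
Karafka::Web.setup do |config|
  config.ui.kafka[:'fetch.wait.max.ms'] = 500
end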
@@ -14,7 +14,10 @@ module Karafka
  metrics_message = ::Karafka::Admin.read_topic(
    Karafka::Web.config.topics.consumers.metrics,
    0,
-   1
+   # We need to take more in case there are transactions running.
+   # In theory we could take two, but this compensates for any involuntary
+   # revocations and cases where two producers would write to the same state
+   5
  ).last
 
  return metrics_message.payload if metrics_message
@@ -14,7 +14,10 @@ module Karafka
  state_message = ::Karafka::Admin.read_topic(
    Karafka::Web.config.topics.consumers.states,
    0,
-   1
+   # We need to take more in case there are transactions running.
+   # In theory we could take two, but this compensates for any involuntary
+   # revocations and cases where two producers would write to the same state
+   5
  ).last
 
  return state_message.payload if state_message
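A transaction commit marker occupies an offset at the tail of the partition, so fetching a single tail message can return nothing usable; over-fetching and taking `.last` compensates. A hypothetical reduction of the pattern above:

# Hypothetical reduction: read a few tail messages so `.last` still lands on
# real data even when commit markers or compacted slots hold the newest offsets.
state_message = ::Karafka::Admin.read_topic(
  Karafka::Web.config.topics.consumers.states, # internal states topic
  0, # single-partition internal topic
  5  # over-fetch to skip transaction markers
).last

puts state_message.payload if state_message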
@@ -5,10 +5,7 @@ module Karafka
  module Tracking
  module Consumers
  # Reports the collected data about the process and sends it, so we can use it in the UI
- class Reporter
-   include ::Karafka::Core::Helpers::Time
-   include ::Karafka::Helpers::Async
-
+ class Reporter < Tracking::Reporter
  # Minimum number of messages to produce before we switch to producing them in sync mode
  # This acts as a small back-off, not to overload the system in case we would have an
  # extremely big number of errors happening
@@ -21,12 +18,31 @@ module Karafka
  MUTEX = Mutex.new
 
  def initialize
+   super
    # Move back so the first report is dispatched fast, to indicate that the process is alive
    @tracked_at = monotonic_now - 10_000
    @report_contract = Consumers::Contracts::Report.new
    @error_contract = Tracking::Contracts::Error.new
  end
 
+ # We never report in the initializing phase because things are not yet fully configured
+ # We never report in the initialized phase because the server is not ready until Karafka
+ # is fully running, and some of the things like listeners are not yet available
+ #
+ # This method will also return `false` in case we are not running in `karafka server` or
+ # in embedding, because in those cases Karafka does not go beyond the `initialized` phase
+ #
+ # @return [Boolean] are we able to report consumer state
+ def active?
+   # If we do not have a producer that we could use to report, or it was closed, we cannot
+   # and should not report
+   return false unless super
+   return false if ::Karafka::App.initializing?
+   return false if ::Karafka::App.initialized?
+
+   true
+ end
+
  # Dispatches the current state from sampler to appropriate topics
  #
  # @param forced [Boolean] should we report bypassing the time frequency or should we
41
57
  sampler.sample
42
58
 
43
59
  MUTEX.synchronize do
44
- # Start background thread only when needed
45
- # This prevents us from starting it too early or for non-consumer processes where
46
- # Karafka is being included
47
- async_call unless @running
48
-
49
60
  return unless report?(forced)
50
61
 
51
62
  @tracked_at = monotonic_now
@@ -97,31 +108,11 @@ module Karafka
 
  private
 
- # Reports the process state once in a while
- def call
-   @running = true
-
-   # We won't track more often anyhow but want to try frequently not to miss a window
-   # We need to convert the sleep interval into seconds for sleep
-   sleep_time = ::Karafka::Web.config.tracking.interval.to_f / 1_000 / 10
-
-   loop do
-     report
-
-     sleep(sleep_time)
-   end
- end
-
  # @param forced [Boolean] is this report forced. Forced means that as long as we can
  # flush, we will flush
  # @return [Boolean] Should we report or is it not yet time to do so
  def report?(forced)
-   # We never report in initializing phase because things are not yet fully configured
-   return false if ::Karafka::App.initializing?
-   # We never report in the initialized because server is not yet ready until Karafka is
-   # fully running and some of the things like listeners are not yet available
-   return false if ::Karafka::App.initialized?
-
+   return false unless active?
    return true if forced
 
    (monotonic_now - @tracked_at) >= ::Karafka::Web.config.tracking.interval
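The interval check compares against a monotonic clock, so wall-clock adjustments (NTP, DST) cannot skew reporting. A self-contained illustration of the same throttle shape; the class and names are ours, not the gem's:

# Illustrative throttle: first call reports immediately, then at most once
# per interval, measured on the monotonic clock.
class Throttle
  def initialize(interval_ms)
    @interval = interval_ms
    @tracked_at = monotonic_now - interval_ms # back-dated: first call passes
  end

  def report?
    return false if (monotonic_now - @tracked_at) < @interval

    @tracked_at = monotonic_now
    true
  end

  private

  # Milliseconds on the monotonic clock, like Karafka::Core::Helpers::Time
  def monotonic_now
    Process.clock_gettime(Process::CLOCK_MONOTONIC) * 1_000
  end
end

throttle = Throttle.new(5_000)
puts throttle.report? # => true, then false for the next ~5 seconds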
@@ -9,9 +9,7 @@ module Karafka
  # @note The producer reporter does not have to operate with the `forced` dispatch, mainly
  # because there is no expectation of immediate status updates for producers, and their
  # dispatch flow is always periodic based.
- class Reporter
-   include ::Karafka::Core::Helpers::Time
-
+ class Reporter < Tracking::Reporter
  # Minimum number of messages to produce before we switch to producing them in sync mode
  # This acts as a small back-off, not to overload the system in case we would have an
  # extremely big number of errors happening
@@ -24,6 +22,7 @@ module Karafka
  MUTEX = Mutex.new
 
  def initialize
+   super
    # If there are any errors right after we started sampling, dispatch them immediately
    @tracked_at = monotonic_now - 10_000
    @error_contract = Tracking::Contracts::Error.new
@@ -62,7 +61,7 @@ module Karafka
 
  # @return [Boolean] Should we report or is it not yet time to do so
  def report?
-   return false unless ::Karafka.producer.status.active?
+   return false unless active?
 
    (monotonic_now - @tracked_at) >= ::Karafka::Web.config.tracking.interval
  end
@@ -93,6 +92,9 @@ module Karafka
  # and we can just safely ignore this
  rescue WaterDrop::Errors::ProducerClosedError
    nil
+ rescue StandardError => e
+   p '------------------------------------------------'
+   p e
  end
  end
  end
@@ -26,6 +26,9 @@ module Karafka
  # We cannot report and track at the same time; that is why we use a mutex here, to make
  # sure that samples aggregation and counting does not interfere with reporter flushing.
  def track
+   # Prevents deadlocks when something producer-related fails in the Web UI reporter
+   return yield(self) if Reporter::MUTEX.owned?
+
    Reporter::MUTEX.synchronize do
      yield(self)
    end
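Ruby mutexes are not reentrant: a thread that already holds `Reporter::MUTEX` (for example while dispatching an error mid-report) would raise `ThreadError` on a nested `synchronize`. A standalone illustration of the `owned?` guard, with our own names:

# Standalone illustration of the owned? guard above.
LOCK = Mutex.new

def track
  return yield if LOCK.owned? # already holding the lock: run directly

  LOCK.synchronize { yield }
end

LOCK.synchronize do
  # Without the owned? check this nested call would raise ThreadError
  track { puts 'nested call runs without re-locking' }
end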
@@ -0,0 +1,25 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Web
+     module Tracking
+       # Base reporter from which all the reporters should inherit
+       class Reporter
+         include ::Karafka::Core::Helpers::Time
+
+         # Can this reporter report. Since some reporters may report only in part of the
+         # processes where Karafka is used (like `karafka server`), each may implement
+         # more complex rules.
+         #
+         # The basic rule is not to report unless we have a producer and this producer
+         # is active
+         #
+         # @return [Boolean]
+         def active?
+           return false unless ::Karafka::App.producer
+           return false unless ::Karafka::App.producer.status.active?
+
+           true
+         end
+       end
+     end
+   end
+ end
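Custom reporters can build on this base. A hypothetical subclass sketch (the class name and the extra rule are ours, not the gem's):

# Hypothetical subclass: report only once the Karafka process is fully running.
class ServerOnlyReporter < Karafka::Web::Tracking::Reporter
  def active?
    return false unless super # producer must exist and be active

    ::Karafka::App.running?
  end
end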
@@ -0,0 +1,46 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Web
+     module Tracking
+       # Triggers reporters to report in an async mode in a separate thread
+       # We report this way to prevent any potential dead-locks in cases where we would
+       # be emitting statistics during transactions.
+       #
+       # We should never use the notifications thread for sensitive IO bound operations.
+       class Scheduler
+         include ::Karafka::Helpers::Async
+
+         # Creates the scheduler and runs its internal reporting
+         def initialize
+           async_call
+         end
+
+         private
+
+         # Reports the process state once in a while
+         def call
+           # We won't track more often anyhow but want to try frequently not to miss a window
+           # We need to convert the sleep interval into seconds for sleep
+           sleep_time = ::Karafka::Web.config.tracking.interval.to_f / 1_000 / 10
+
+           loop do
+             # Not every reporter may be active at a given stage or in the context of a
+             # given process. We select only those that decided that they are active.
+             reporters.select(&:active?).each(&:report)
+
+             sleep(sleep_time)
+           end
+         end
+
+         # @return [Array] consumers and producers reporters
+         def reporters
+           @reporters ||= [
+             ::Karafka::Web.config.tracking.consumers.reporter,
+             ::Karafka::Web.config.tracking.producers.reporter
+           ].freeze
+         end
+       end
+     end
+   end
+ end
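With the default tracking interval of 5_000 ms, the scheduler sleeps 0.5 s per iteration, i.e. it polls ten times per reporting window while each reporter's own throttle decides whether anything is actually dispatched:

# Worked example of the sleep-time math above with the default interval
interval_ms = 5_000
sleep_time  = interval_ms.to_f / 1_000 / 10
puts sleep_time # => 0.5 seconds between wake-ups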
@@ -0,0 +1,56 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Web
+     module Ui
+       module Lib
+         # Wrapper around Karafka Admin that alters its behaviours or injects Web UI
+         # interface specific settings that optimize the responsiveness of the UI when
+         # operating on topics
+         #
+         # @note Not all commands need those optimizations, hence we alter only those that
+         #   need it, and we only expose those admin commands that are used in the Web UI
+         #   interface component.
+         #
+         # @note We expose here only admin methods used in the Web UI interface. Processing
+         #   uses `Karafka::Admin` with the defaults
+         class Admin
+           class << self
+             extend Forwardable
+
+             def_delegators ::Karafka::Admin, :read_watermark_offsets, :cluster_info
+
+             # Allows us to read messages from the topic
+             #
+             # @param name [String, Symbol] topic name
+             # @param partition [Integer] partition
+             # @param count [Integer] how many messages we want to get at most
+             # @param start_offset [Integer, Time] offset from which we should start. If -1
+             #   is provided (default) we will start from the latest offset. If time is
+             #   provided, the appropriate offset will be resolved. If negative beyond -1
+             #   is provided, we move backwards more.
+             # @param settings [Hash] kafka extra settings (optional)
+             #
+             # @return [Array<Karafka::Messages::Message>] array with messages
+             def read_topic(name, partition, count, start_offset = -1, settings = {})
+               ::Karafka::Admin.read_topic(
+                 name,
+                 partition,
+                 count,
+                 start_offset,
+                 # Merge our Web UI specific settings
+                 config.merge(settings)
+               )
+             end
+
+             private
+
+             # @return [Hash] kafka config for the Web UI interface.
+             # @note It does **not** affect tracking or processing
+             def config
+               ::Karafka::Web.config.ui.kafka
+             end
+           end
+         end
+       end
+     end
+   end
+ end
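Usage mirrors `Karafka::Admin`, with the Web UI kafka overrides merged in automatically. An illustrative call; the topic name is hypothetical:

# Illustrative usage: last 10 messages from partition 0 with UI overrides.
messages = Karafka::Web::Ui::Lib::Admin.read_topic('orders_states', 0, 10)
messages.each { |message| puts message.offset }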
@@ -4,7 +4,7 @@ module Karafka
  module Web
  module Ui
  module Models
- # Wraps around the `Karafka::Admin#cluster_info` with caching and some additional aliases
+ # Wraps around the `Lib::Admin#cluster_info` with caching and some additional aliases
  # so we can reference relevant information easily
  class ClusterInfo
  class << self
@@ -18,7 +18,7 @@ module Karafka
  cluster_info = cache.read(:cluster_info)
 
  if cluster_info.nil? || !cached
-   cluster_info = cache.write(:cluster_info, Karafka::Admin.cluster_info)
+   cluster_info = cache.write(:cluster_info, Lib::Admin.cluster_info)
  end
 
  cluster_info
@@ -32,10 +32,12 @@
 
  # @return [::Karafka::Messages::Message, nil] most recent state or nil if none
  def fetch
-   ::Karafka::Admin.read_topic(
+   Lib::Admin.read_topic(
      Karafka::Web.config.topics.consumers.metrics,
      0,
-     1
+     # We need to take the last two and not just the last, because in the case of a
+     # transactional producer the last one will match the transaction commit message
+     2
    ).last
  end
  end
@@ -41,10 +41,12 @@ module Karafka
 
  # @return [::Karafka::Messages::Message, nil] most recent state or nil if none
  def fetch
-   ::Karafka::Admin.read_topic(
+   Lib::Admin.read_topic(
      Karafka::Web.config.topics.consumers.states,
      0,
-     1
+     # We need to take the last two and not just the last, because in the case of a
+     # transactional producer the last one will match the transaction commit message
+     2
    ).last
  end
 
@@ -25,7 +25,7 @@ module Karafka
 
  MAX_ERROR_PARTITIONS.times do |partition|
    begin
-     offsets = Karafka::Admin.read_watermark_offsets(
+     offsets = Lib::Admin.read_watermark_offsets(
        ::Karafka::Web.config.topics.errors,
        partition
      )
@@ -17,7 +17,7 @@ module Karafka
  # @param offset [Integer]
  # @raise [::Karafka::Web::Errors::Ui::NotFoundError] when not found
  def find(topic_id, partition_id, offset)
-   message = Karafka::Admin.read_topic(
+   message = Lib::Admin.read_topic(
      topic_id,
      partition_id,
      1,
@@ -195,7 +195,7 @@ module Karafka
  # @return [Array<Karafka::Messages::Message>, false] topic partition messages or false
  # in case we hit a non-existing offset
  def read_topic(*args)
-   ::Karafka::Admin.read_topic(*args)
+   Lib::Admin.read_topic(*args)
  rescue Rdkafka::RdkafkaError => e
    return false if e.code == :auto_offset_reset
 
@@ -44,7 +44,7 @@ module Karafka
  .map { |process| process[:offset] }
  .sort
 
- ::Karafka::Admin.read_topic(
+ Lib::Admin.read_topic(
    ::Karafka::Web.config.topics.consumers.reports,
    0,
    # We set 10k here because we start from the latest offset of the reports, hence
@@ -13,7 +13,7 @@ module Karafka
  # @param partition_id [Integer]
  # @return [WatermarkOffsets]
  def find(topic_id, partition_id)
-   offsets = ::Karafka::Admin.read_watermark_offsets(topic_id, partition_id)
+   offsets = Lib::Admin.read_watermark_offsets(topic_id, partition_id)
 
    new(
      low: offsets.first,
@@ -132,11 +132,22 @@ module Karafka
  active_partitions, = Paginators::Partitions.call(partitions_count, 1)
  end
 
- # This selects first page with most recent messages
- messages, = Models::Message.topic_page(topic_id, active_partitions, 1)
+ recent = nil
 
- # Selects newest out of all partitions
- recent = messages.max_by(&:timestamp)
+ # This selects the first pages with the most recent messages and moves on to the
+ # next if the first contains only compacted data, etc.
+ #
+ # We do it until we find a message we could refer to (if doable) within the first
+ # ten pages
+ 10.times do |page|
+   messages, = Models::Message.topic_page(topic_id, active_partitions, page + 1)
+
+   # Selects the newest out of all partitions
+   # Rejects compacted messages and transaction-related ones
+   recent = messages.reject { |message| message.is_a?(Array) }.max_by(&:timestamp)
+
+   break if recent
+ end
 
  recent || raise(::Karafka::Web::Errors::Ui::NotFoundError)
 
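In the Web UI message listing, offsets whose payload is unavailable (compacted slots or transaction system entries) come back as plain arrays rather than message objects, which is why rejecting `Array` instances leaves only displayable messages. A toy illustration under that assumption:

# Toy illustration: array entries stand in for compacted/system offsets.
require 'ostruct'

page = [
  OpenStruct.new(offset: 10, timestamp: Time.now - 5),
  [0, 11], # placeholder for a compacted or transaction-system offset
  OpenStruct.new(offset: 12, timestamp: Time.now)
]

recent = page.reject { |message| message.is_a?(Array) }.max_by(&:timestamp)
puts recent.offset # => 12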
@@ -184,7 +195,7 @@ module Karafka
  # @param partition_id [Integer]
  # @param time [Time] time of the message
  def closest(topic_id, partition_id, time)
-   target = ::Karafka::Admin.read_topic(topic_id, partition_id, 1, time).first
+   target = Lib::Admin.read_topic(topic_id, partition_id, 1, time).first
 
  partition_path = "explorer/#{topic_id}/#{partition_id}"
  partition_path += "?offset=#{target.offset}" if target
@@ -1,7 +1,7 @@
  <% if error_msg.is_a?(Array) %>
  <tr>
  <td colspan="5" class="text-center text-muted">
- This error has either been removed or compacted and is no longer available.
+ This offset does not contain error data. The message may have been compacted or is a system entry.
  </td>
  </tr>
  <% else %>
@@ -7,7 +7,7 @@
  <%= message[1] %>
  </td>
  <td colspan="3" class="text-center text-muted">
- This message has either been removed or compacted and is no longer available.
+ This offset does not contain any data. The message may have been compacted or is a system entry.
  </td>
  </tr>
  <% else %>
@@ -73,7 +73,7 @@ function setLivePollButton() {
 
  function checkResponse(resp) {
    if (!resp.ok) {
-     throw response.error();
+     throw resp;
    }
    return resp
  }
@@ -1,7 +1,7 @@
  <% if error_msg.is_a?(Array) %>
  <tr>
  <td colspan="5" class="text-center text-muted">
- This error has either been removed or compacted and is no longer available.
+ This offset does not contain error data. The message may have been compacted or is a system entry.
  </td>
  </tr>
  <% else %>
@@ -3,6 +3,6 @@
  module Karafka
    module Web
      # Current gem version
-     VERSION = '0.7.7'
+     VERSION = '0.7.8'
    end
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: karafka-web
  version: !ruby/object:Gem::Version
-   version: 0.7.7
+   version: 0.7.8
  platform: ruby
  authors:
  - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
  AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
  msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
  -----END CERTIFICATE-----
- date: 2023-10-20 00:00:00.000000000 Z
+ date: 2023-10-24 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: erubi
@@ -57,7 +57,7 @@ dependencies:
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
-     version: 2.2.8.beta1
+     version: 2.2.9
  - - "<"
    - !ruby/object:Gem::Version
      version: 3.0.0
@@ -67,7 +67,7 @@ dependencies:
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
-     version: 2.2.8.beta1
+     version: 2.2.9
  - - "<"
    - !ruby/object:Gem::Version
      version: 3.0.0
@@ -225,10 +225,11 @@ files:
  - lib/karafka/web/tracking/memoized_shell.rb
  - lib/karafka/web/tracking/producers/listeners/base.rb
  - lib/karafka/web/tracking/producers/listeners/errors.rb
- - lib/karafka/web/tracking/producers/listeners/reporter.rb
  - lib/karafka/web/tracking/producers/reporter.rb
  - lib/karafka/web/tracking/producers/sampler.rb
+ - lib/karafka/web/tracking/reporter.rb
  - lib/karafka/web/tracking/sampler.rb
+ - lib/karafka/web/tracking/scheduler.rb
  - lib/karafka/web/tracking/ttl_array.rb
  - lib/karafka/web/tracking/ttl_hash.rb
  - lib/karafka/web/ui/app.rb
@@ -247,6 +248,7 @@ files:
  - lib/karafka/web/ui/controllers/status.rb
  - lib/karafka/web/ui/helpers/application_helper.rb
  - lib/karafka/web/ui/helpers/paths_helper.rb
+ - lib/karafka/web/ui/lib/admin.rb
  - lib/karafka/web/ui/lib/hash_proxy.rb
  - lib/karafka/web/ui/lib/paginations/base.rb
  - lib/karafka/web/ui/lib/paginations/offset_based.rb
metadata.gz.sig CHANGED
Binary file
@@ -1,21 +0,0 @@
- # frozen_string_literal: true
-
- module Karafka
-   module Web
-     module Tracking
-       module Producers
-         module Listeners
-           # Special listener that we use to report data about producers states
-           # We don't have to have a separate thread for reporting, because producers have
-           # their own internal threads for changes polling and we can utilize this thread
-           class Reporter < Base
-             # @param _event [Karafka::Core::Monitoring::Event]
-             def on_statistics_emitted(_event)
-               reporter.report
-             end
-           end
-         end
-       end
-     end
-   end
- end