karafka-web 0.7.7 → 0.7.9

Sign up to get free protection for your applications and to get access to all the features.
Files changed (36) hide show
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/CHANGELOG.md +11 -0
  4. data/Gemfile.lock +9 -9
  5. data/karafka-web.gemspec +2 -2
  6. data/lib/karafka/web/config.rb +53 -3
  7. data/lib/karafka/web/management/create_initial_states.rb +2 -2
  8. data/lib/karafka/web/management/enable.rb +21 -1
  9. data/lib/karafka/web/processing/consumer.rb +1 -1
  10. data/lib/karafka/web/processing/consumers/metrics.rb +4 -1
  11. data/lib/karafka/web/processing/consumers/state.rb +4 -1
  12. data/lib/karafka/web/tracking/consumers/reporter.rb +23 -32
  13. data/lib/karafka/web/tracking/producers/reporter.rb +8 -6
  14. data/lib/karafka/web/tracking/producers/sampler.rb +3 -0
  15. data/lib/karafka/web/tracking/reporter.rb +25 -0
  16. data/lib/karafka/web/tracking/scheduler.rb +46 -0
  17. data/lib/karafka/web/ui/lib/admin.rb +56 -0
  18. data/lib/karafka/web/ui/models/cluster_info.rb +2 -2
  19. data/lib/karafka/web/ui/models/consumers_metrics.rb +4 -2
  20. data/lib/karafka/web/ui/models/consumers_state.rb +4 -2
  21. data/lib/karafka/web/ui/models/counters.rb +1 -1
  22. data/lib/karafka/web/ui/models/message.rb +2 -2
  23. data/lib/karafka/web/ui/models/processes.rb +1 -1
  24. data/lib/karafka/web/ui/models/watermark_offsets.rb +1 -1
  25. data/lib/karafka/web/ui/pro/controllers/explorer.rb +16 -5
  26. data/lib/karafka/web/ui/pro/controllers/messages.rb +1 -1
  27. data/lib/karafka/web/ui/pro/views/errors/_error.erb +1 -1
  28. data/lib/karafka/web/ui/pro/views/explorer/_message.erb +1 -1
  29. data/lib/karafka/web/ui/public/javascripts/live_poll.js +1 -1
  30. data/lib/karafka/web/ui/views/errors/_error.erb +1 -1
  31. data/lib/karafka/web/version.rb +1 -1
  32. data/lib/karafka/web.rb +6 -0
  33. data.tar.gz.sig +0 -0
  34. metadata +9 -7
  35. metadata.gz.sig +0 -0
  36. data/lib/karafka/web/tracking/producers/listeners/reporter.rb +0 -21
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 1e4dd8f19fdad3697e837ed96aa8fafabae6eac4efe8cbf75aaa788eccebc44c
4
- data.tar.gz: 33814043050afda80109fc14082a09de0f5499041fe2f4a24a90b25f7cd501c8
3
+ metadata.gz: 24bc5fe488a1304886f3f7f697a8662463f4c21fae60daf52d317852f9f43669
4
+ data.tar.gz: eaa8e4b7d9b9d70200c8929e8fa65ed7906a1b4085465b15525bfee670ebbcc9
5
5
  SHA512:
6
- metadata.gz: 8c84de9a33977904aadffa3ff51af275f4301e7375f9194343b4980d82b6b4fdf4dc57aeb0f1e0cef8c66499b048fb595ec4cf7d1bdbd3011995a0223c4a70ad
7
- data.tar.gz: 4c82722b6fab042562ee3f390d10775b1de7e0c77406c623e7e17f36860a9954ef132cbc540d30449c1212a9d507203e8fb9500393c92b2be09ccbc06c281f0d
6
+ metadata.gz: 0ba2c668103a3b25169d86f2c0624fae04d144694040694ff79c4932bf81fe648e5ba11223be5a5f9e8d8ba0bbbac9da84f2ec5312e81d07b35873707b7b754c
7
+ data.tar.gz: b0ec51820b50dfde4f88e942e3b0dcc2400170d0fbe256503294f366e2f4fe0057249bb525528b789bed254ba6ec035d5502e515042bd094999663155f8a221d
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
1
1
  # Karafka Web changelog
2
2
 
3
+ ## 0.7.9 (2023-10-25)
4
+ - [Enhancement] Allow for `Karafka::Web.producer` reconfiguration from the default (`Karafka.producer`).
5
+ - [Change] Rely on `karafka-core` `>=` `2.2.4` to support lazy loaded custom web producer.
6
+
7
+ ## 0.7.8 (2023-10-24)
8
+ - [Enhancement] Support transactional producer usage with Web UI.
9
+ - [Fix] Prevent a scenario where an ongoing transactional producer would have stats emitted and an error that could not have been dispatched because of the transaction, creating a dead-lock.
10
+ - [Fix] Make sure that the `recent` displays the most recent non-compacted, non-system message.
11
+ - [Fix] Improve the `recent` message display to compensate for aborted transactions.
12
+ - [Fix] Fix `ReferenceError: response is not defined` that occurs when a Web UI refresh request returns a non-200 response.
13
+
3
14
  ## 0.7.7 (2023-10-20)
4
15
  - [Fix] Remove `thor` as a CLI engine due to breaking changes.
5
16
 
data/Gemfile.lock CHANGED
@@ -1,10 +1,10 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- karafka-web (0.7.7)
4
+ karafka-web (0.7.9)
5
5
  erubi (~> 1.4)
6
- karafka (>= 2.2.8.beta1, < 3.0.0)
7
- karafka-core (>= 2.2.2, < 3.0.0)
6
+ karafka (>= 2.2.9, < 3.0.0)
7
+ karafka-core (>= 2.2.4, < 3.0.0)
8
8
  roda (~> 3.68, >= 3.69)
9
9
  tilt (~> 2.0)
10
10
 
@@ -36,18 +36,18 @@ GEM
36
36
  ffi (1.16.3)
37
37
  i18n (1.14.1)
38
38
  concurrent-ruby (~> 1.0)
39
- karafka (2.2.8.beta1)
39
+ karafka (2.2.9)
40
40
  karafka-core (>= 2.2.2, < 2.3.0)
41
- waterdrop (>= 2.6.6, < 3.0.0)
41
+ waterdrop (>= 2.6.10, < 3.0.0)
42
42
  zeitwerk (~> 2.3)
43
- karafka-core (2.2.3)
43
+ karafka-core (2.2.4)
44
44
  concurrent-ruby (>= 1.1)
45
45
  karafka-rdkafka (>= 0.13.6, < 0.14.0)
46
46
  karafka-rdkafka (0.13.6)
47
47
  ffi (~> 1.15)
48
48
  mini_portile2 (~> 2.6)
49
49
  rake (> 12)
50
- mini_portile2 (2.8.4)
50
+ mini_portile2 (2.8.5)
51
51
  minitest (5.20.0)
52
52
  mutex_m (0.1.2)
53
53
  rack (3.0.8)
@@ -82,8 +82,8 @@ GEM
82
82
  tilt (2.3.0)
83
83
  tzinfo (2.0.6)
84
84
  concurrent-ruby (~> 1.0)
85
- waterdrop (2.6.7)
86
- karafka-core (>= 2.1.1, < 3.0.0)
85
+ waterdrop (2.6.10)
86
+ karafka-core (>= 2.2.3, < 3.0.0)
87
87
  zeitwerk (~> 2.3)
88
88
  webrick (1.8.1)
89
89
  zeitwerk (2.6.12)
data/karafka-web.gemspec CHANGED
@@ -17,8 +17,8 @@ Gem::Specification.new do |spec|
17
17
  spec.licenses = %w[LGPL-3.0 Commercial]
18
18
 
19
19
  spec.add_dependency 'erubi', '~> 1.4'
20
- spec.add_dependency 'karafka', '>= 2.2.8.beta1', '< 3.0.0'
21
- spec.add_dependency 'karafka-core', '>= 2.2.2', '< 3.0.0'
20
+ spec.add_dependency 'karafka', '>= 2.2.9', '< 3.0.0'
21
+ spec.add_dependency 'karafka-core', '>= 2.2.4', '< 3.0.0'
22
22
  spec.add_dependency 'roda', '~> 3.68', '>= 3.69'
23
23
  spec.add_dependency 'tilt', '~> 2.0'
24
24
 
@@ -11,6 +11,17 @@ module Karafka
11
11
  # This is used both in the processing for eviction and in the UI
12
12
  setting :ttl, default: 30_000
13
13
 
14
+ # Producer for the Web UI. By default it is a `Karafka.producer`, however it may be
15
+ # overwritten if we want to use a separate instance in case of heavy usage of the
16
+ # transactional producer as a default. In cases like this, Karafka may not be able to report
17
+ # data because it uses this producer and it may be locked because of the transaction in a
18
+ # user space.
19
+ setting(
20
+ :producer,
21
+ constructor: -> { ::Karafka.producer },
22
+ lazy: true
23
+ )
24
+
14
25
  # Topics naming - used for processing and UI
15
26
  setting :topics do
16
27
  # All the errors encountered will be dispatched to this topic for inspection
@@ -36,12 +47,18 @@ module Karafka
36
47
  # 5 seconds should be enough
37
48
  setting :interval, default: 5_000
38
49
 
50
+ # Main Web UI reporting scheduler that runs a background thread and reports periodically
51
+ # from the consumer reporter and producer reporter
52
+ setting :scheduler, default: Tracking::Scheduler.new
53
+
39
54
  setting :consumers do
40
- # Reports the metrics collected in the sampler
55
+ # Reports the metrics collected in the consumer sampler
41
56
  setting :reporter, default: Tracking::Consumers::Reporter.new
42
57
 
58
+ # Samples for fetching and storing metrics samples about the consumer process
43
59
  setting :sampler, default: Tracking::Consumers::Sampler.new
44
60
 
61
+ # Listeners needed for the Web UI to track consumer related changes
45
62
  setting :listeners, default: [
46
63
  Tracking::Consumers::Listeners::Status.new,
47
64
  Tracking::Consumers::Listeners::Errors.new,
@@ -53,13 +70,15 @@ module Karafka
53
70
  end
54
71
 
55
72
  setting :producers do
73
+ # Reports the metrics collected in the producer sampler
56
74
  setting :reporter, default: Tracking::Producers::Reporter.new
57
75
 
76
+ # Sampler for errors from producers
58
77
  setting :sampler, default: Tracking::Producers::Sampler.new
59
78
 
79
+ # Listeners needed for the Web UI to track producers related stuff
60
80
  setting :listeners, default: [
61
- Tracking::Producers::Listeners::Errors.new,
62
- Tracking::Producers::Listeners::Reporter.new
81
+ Tracking::Producers::Listeners::Errors.new
63
82
  ]
64
83
  end
65
84
  end
@@ -114,6 +133,37 @@ module Karafka
114
133
  # In some cases you may want to limit what is being displayed due to the type of data you
115
134
  # are dealing with
116
135
  setting :visibility_filter, default: Ui::Models::VisibilityFilter.new
136
+
137
+ # Specific kafka settings that are tuned to operate within the Web UI interface.
138
+ #
139
+ # Please do not change them unless you know what you are doing as their misconfiguration
140
+ # may cause Web UI to misbehave
141
+ #
142
+ # The settings are inherited as follows:
143
+ # 1. root routing level `kafka` settings
144
+ # 2. admin `kafka` settings
145
+ # 3. web ui `kafka` settings from here
146
+ #
147
+ # Those settings impact ONLY Web UI interface and do not affect other scopes. This is done
148
+ # on purpose as we want to improve responsiveness of the interface by tuning some of the
149
+ # settings and this is not that relevant for processing itself.
150
+ #
151
+ # option [Hash] extra changes to the default admin kafka settings
152
+ setting :kafka, default: {
153
+ # optimizes the responsiveness of the Web UI in three scenarios:
154
+ # - topics to which writes happen only in transactions so EOF is yield faster
155
+ # - heavily compacted topics
156
+ # - Web UI topics read operations when using transactional producer
157
+ #
158
+ # This can be configured to be higher if you do not use transactional WaterDrop producer.
159
+ # This value is used when last message (first from the high watermark offset) is the
160
+ # transaction commit message. In cases like this the EOF gets propagated after this time
161
+ so we have to wait. Default 500ms means that for some views, where we take our data
162
+ # that might have been committed via transactional producer, we would wait for 1 second
163
+ to get needed data. If you are experiencing timeouts or other issues with the Web UI
164
+ # interface, you can increase this.
165
+ 'fetch.wait.max.ms': 100
166
+ }
117
167
  end
118
168
  end
119
169
  end
@@ -55,7 +55,7 @@ module Karafka
55
55
  exists('consumers state')
56
56
  else
57
57
  creating('consumers state')
58
- ::Karafka.producer.produce_sync(
58
+ ::Karafka::Web.producer.produce_sync(
59
59
  topic: Karafka::Web.config.topics.consumers.states,
60
60
  key: Karafka::Web.config.topics.consumers.states,
61
61
  payload: DEFAULT_STATE.to_json
@@ -67,7 +67,7 @@ module Karafka
67
67
  exists('consumers metrics')
68
68
  else
69
69
  creating('consumers metrics')
70
- ::Karafka.producer.produce_sync(
70
+ ::Karafka::Web.producer.produce_sync(
71
71
  topic: Karafka::Web.config.topics.consumers.metrics,
72
72
  key: Karafka::Web.config.topics.consumers.metrics,
73
73
  payload: DEFAULT_METRICS.merge(dispatched_at: Time.now.to_f).to_json
@@ -11,6 +11,7 @@ module Karafka
11
11
  def call
12
12
  extend_routing
13
13
  subscribe_to_monitor
14
+ subscribe_to_close_web_producer
14
15
  end
15
16
 
16
17
  private
@@ -71,9 +72,28 @@ module Karafka
71
72
  ::Karafka.monitor.subscribe(listener)
72
73
  end
73
74
 
74
- # Installs all the producer related listeners
75
+ # Installs all the producer related listeners into Karafka default listener and
76
+ # into Karafka::Web listener in case it would be different than the Karafka one
75
77
  ::Karafka::Web.config.tracking.producers.listeners.each do |listener|
76
78
  ::Karafka.producer.monitor.subscribe(listener)
79
+
80
+ # Do not instrument twice in case only one default producer is used
81
+ next if ::Karafka.producer == ::Karafka::Web.producer
82
+
83
+ ::Karafka::Web.producer.monitor.subscribe(listener)
84
+ end
85
+ end
86
+
87
+ # In most cases we want to close the producer if possible.
88
+ # While we cannot do it easily in user processes and we should rely on WaterDrop
89
+ # finalization logic, we can do it in `karafka server` on terminate
90
+ #
91
+ # In other places, this producer anyhow should not be used.
92
+ def subscribe_to_close_web_producer
93
+ ::Karafka::App.monitor.subscribe('app.terminated') do
94
+ # If Web producer is the same as `Karafka.producer` it will do nothing as you can
95
+ # call `#close` multiple times without side effects
96
+ ::Karafka::Web.producer.close
77
97
  end
78
98
  end
79
99
  end
@@ -114,7 +114,7 @@ module Karafka
114
114
  def flush
115
115
  @flushed_at = monotonic_now
116
116
 
117
- producer.produce_many_async(
117
+ ::Karafka::Web.producer.produce_many_async(
118
118
  [
119
119
  {
120
120
  topic: Karafka::Web.config.topics.consumers.states,
@@ -14,7 +14,10 @@ module Karafka
14
14
  metrics_message = ::Karafka::Admin.read_topic(
15
15
  Karafka::Web.config.topics.consumers.metrics,
16
16
  0,
17
- 1
17
+ # We need to take more in case there would be transactions running.
18
+ # In theory we could take two but this compensates for any involuntary
19
+ # revocations and cases where two producers would write to the same state
20
+ 5
18
21
  ).last
19
22
 
20
23
  return metrics_message.payload if metrics_message
@@ -14,7 +14,10 @@ module Karafka
14
14
  state_message = ::Karafka::Admin.read_topic(
15
15
  Karafka::Web.config.topics.consumers.states,
16
16
  0,
17
- 1
17
+ # We need to take more in case there would be transactions running.
18
+ # In theory we could take two but this compensates for any involuntary
19
+ # revocations and cases where two producers would write to the same state
20
+ 5
18
21
  ).last
19
22
 
20
23
  return state_message.payload if state_message
@@ -5,10 +5,7 @@ module Karafka
5
5
  module Tracking
6
6
  module Consumers
7
7
  # Reports the collected data about the process and sends it, so we can use it in the UI
8
- class Reporter
9
- include ::Karafka::Core::Helpers::Time
10
- include ::Karafka::Helpers::Async
11
-
8
+ class Reporter < Tracking::Reporter
12
9
  # Minimum number of messages to produce to produce them in sync mode
13
10
  # This acts as a small back-off not to overload the system in case we would have
14
11
  # extremely big number of errors happening
@@ -21,12 +18,31 @@ module Karafka
21
18
  MUTEX = Mutex.new
22
19
 
23
20
  def initialize
21
+ super
24
22
  # Move back so first report is dispatched fast to indicate, that the process is alive
25
23
  @tracked_at = monotonic_now - 10_000
26
24
  @report_contract = Consumers::Contracts::Report.new
27
25
  @error_contract = Tracking::Contracts::Error.new
28
26
  end
29
27
 
28
+ # We never report in initializing phase because things are not yet fully configured
29
+ # We never report in the initialized because server is not yet ready until Karafka is
30
+ # fully running and some of the things like listeners are not yet available
31
+ #
32
+ # This method will also be `false` in case we are not running in `karafka server` or
33
+ # in embedding, because in those cases Karafka does not go beyond the `initialized` phase
34
+ #
35
+ # @return [Boolean] are we able to report consumer state
36
+ def active?
37
+ # If we do not have a producer that we could use to report or it was closed, we cannot
38
+ # and should not report
39
+ return false unless super
40
+ return false if ::Karafka::App.initializing?
41
+ return false if ::Karafka::App.initialized?
42
+
43
+ true
44
+ end
45
+
30
46
  # Dispatches the current state from sampler to appropriate topics
31
47
  #
32
48
  # @param forced [Boolean] should we report bypassing the time frequency or should we
@@ -41,11 +57,6 @@ module Karafka
41
57
  sampler.sample
42
58
 
43
59
  MUTEX.synchronize do
44
- # Start background thread only when needed
45
- # This prevents us from starting it too early or for non-consumer processes where
46
- # Karafka is being included
47
- async_call unless @running
48
-
49
60
  return unless report?(forced)
50
61
 
51
62
  @tracked_at = monotonic_now
@@ -97,31 +108,11 @@ module Karafka
97
108
 
98
109
  private
99
110
 
100
- # Reports the process state once in a while
101
- def call
102
- @running = true
103
-
104
- # We won't track more often anyhow but want to try frequently not to miss a window
105
- # We need to convert the sleep interval into seconds for sleep
106
- sleep_time = ::Karafka::Web.config.tracking.interval.to_f / 1_000 / 10
107
-
108
- loop do
109
- report
110
-
111
- sleep(sleep_time)
112
- end
113
- end
114
-
115
111
  # @param forced [Boolean] is this report forced. Forced means that as long as we can
116
112
  # flush we will flush
117
113
  # @return [Boolean] Should we report or is it not yet time to do so
118
114
  def report?(forced)
119
- # We never report in initializing phase because things are not yet fully configured
120
- return false if ::Karafka::App.initializing?
121
- # We never report in the initialized because server is not yet ready until Karafka is
122
- # fully running and some of the things like listeners are not yet available
123
- return false if ::Karafka::App.initialized?
124
-
115
+ return false unless active?
125
116
  return true if forced
126
117
 
127
118
  (monotonic_now - @tracked_at) >= ::Karafka::Web.config.tracking.interval
@@ -144,9 +135,9 @@ module Karafka
144
135
  # slowdown any processing.
145
136
  def produce(messages)
146
137
  if messages.count >= PRODUCE_SYNC_THRESHOLD
147
- ::Karafka.producer.produce_many_sync(messages)
138
+ ::Karafka::Web.producer.produce_many_sync(messages)
148
139
  else
149
- ::Karafka.producer.produce_many_async(messages)
140
+ ::Karafka::Web.producer.produce_many_async(messages)
150
141
  end
151
142
  # Since we run this in a background thread, there may be a case upon shutdown, where the
152
143
  # producer is closed right before a potential dispatch. It is not worth dealing with this
@@ -9,9 +9,7 @@ module Karafka
9
9
  # @note Producer reported does not have to operate with the `forced` dispatch mainly
10
10
  # because there is no expectation on immediate status updates for producers and their
11
11
  # dispatch flow is always periodic based.
12
- class Reporter
13
- include ::Karafka::Core::Helpers::Time
14
-
12
+ class Reporter < Tracking::Reporter
15
13
  # Minimum number of messages to produce to produce them in sync mode
16
14
  # This acts as a small back-off not to overload the system in case we would have
17
15
  # extremely big number of errors happening
@@ -24,6 +22,7 @@ module Karafka
24
22
  MUTEX = Mutex.new
25
23
 
26
24
  def initialize
25
+ super
27
26
  # If there are any errors right after we started sampling, dispatch them immediately
28
27
  @tracked_at = monotonic_now - 10_000
29
28
  @error_contract = Tracking::Contracts::Error.new
@@ -62,7 +61,7 @@ module Karafka
62
61
 
63
62
  # @return [Boolean] Should we report or is it not yet time to do so
64
63
  def report?
65
- return false unless ::Karafka.producer.status.active?
64
+ return false unless active?
66
65
 
67
66
  (monotonic_now - @tracked_at) >= ::Karafka::Web.config.tracking.interval
68
67
  end
@@ -84,15 +83,18 @@ module Karafka
84
83
  # slowdown any processing.
85
84
  def produce(messages)
86
85
  if messages.count >= PRODUCE_SYNC_THRESHOLD
87
- ::Karafka.producer.produce_many_sync(messages)
86
+ ::Karafka::Web.producer.produce_many_sync(messages)
88
87
  else
89
- ::Karafka.producer.produce_many_async(messages)
88
+ ::Karafka::Web.producer.produce_many_async(messages)
90
89
  end
91
90
  # Since we run this in a background thread, there may be a case upon shutdown, where the
92
91
  # producer is closed right before a potential dispatch. It is not worth dealing with this
93
92
  # and we can just safely ignore this
94
93
  rescue WaterDrop::Errors::ProducerClosedError
95
94
  nil
95
+ rescue StandardError => e
96
+ p '------------------------------------------------'
97
+ p e
96
98
  end
97
99
  end
98
100
  end
@@ -26,6 +26,9 @@ module Karafka
26
26
  # We cannot report and track the same time, that is why we use mutex here. To make sure
27
27
  # that samples aggregations and counting does not interact with reporter flushing.
28
28
  def track
29
+ # Prevents deadlocks when something producer related fails in the Web UI reporter
30
+ return yield(self) if Reporter::MUTEX.owned?
31
+
29
32
  Reporter::MUTEX.synchronize do
30
33
  yield(self)
31
34
  end
@@ -0,0 +1,25 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Karafka
4
+ module Web
5
+ module Tracking
6
+ # Base reporter from which all the reports should inherit
7
+ class Reporter
8
+ include ::Karafka::Core::Helpers::Time
9
+
10
+ # Can this reporter report. Since some reporters may report only in part of the processes
11
+ # where Karafka is used (like `karafka server`) each may implement more complex rules.
12
+ #
13
+ # The basic is not to report unless we have a producer and this producer is active
14
+ #
15
+ # @return [Boolean]
16
+ def active?
17
+ return false unless ::Karafka::Web.producer
18
+ return false unless ::Karafka::Web.producer.status.active?
19
+
20
+ true
21
+ end
22
+ end
23
+ end
24
+ end
25
+ end
@@ -0,0 +1,46 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Karafka
4
+ module Web
5
+ module Tracking
6
+ # Triggers reporters to report in an async mode in a separate thread
7
+ # We report this way to prevent any potential dead-locks in cases we would be emitting
8
+ # statistics during transactions.
9
+ #
10
+ # We should never use the notifications thread for sensitive IO bound operations.
11
+ class Scheduler
12
+ include ::Karafka::Helpers::Async
13
+
14
+ # Creates the scheduler and runs its internal reporting
15
+ def initialize
16
+ async_call
17
+ end
18
+
19
+ private
20
+
21
+ # Reports the process state once in a while
22
+ def call
23
+ # We won't track more often anyhow but want to try frequently not to miss a window
24
+ # We need to convert the sleep interval into seconds for sleep
25
+ sleep_time = ::Karafka::Web.config.tracking.interval.to_f / 1_000 / 10
26
+
27
+ loop do
28
+ # Not every reporter may be active at a given stage or in a context of a given process
29
+ # We select only those that decided that they are active.
30
+ reporters.select(&:active?).each(&:report)
31
+
32
+ sleep(sleep_time)
33
+ end
34
+ end
35
+
36
+ # @return [Array] consumers and producers reporters
37
+ def reporters
38
+ @reporters ||= [
39
+ ::Karafka::Web.config.tracking.consumers.reporter,
40
+ ::Karafka::Web.config.tracking.producers.reporter
41
+ ].freeze
42
+ end
43
+ end
44
+ end
45
+ end
46
+ end
@@ -0,0 +1,56 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Karafka
4
+ module Web
5
+ module Ui
6
+ module Lib
7
+ # Wrapper around Karafka Admin that alters its behaviours or injects Web UI interface
8
+ # specific settings that optimize the responsiveness of the UI when operating on topics
9
+ #
10
+ # @note Not all commands need those optimizations, hence we alter only those that need
11
+ # that and we only expose those admin commands that are used in the Web-UI interface
12
+ # component.
13
+ #
14
+ # @note We expose here only admin methods used in the Web UI interface. Processing uses the
15
+ # `Karafka::Admin` with the defaults
16
+ class Admin
17
+ class << self
18
+ extend Forwardable
19
+
20
+ def_delegators ::Karafka::Admin, :read_watermark_offsets, :cluster_info
21
+
22
+ # Allows us to read messages from the topic
23
+ #
24
+ # @param name [String, Symbol] topic name
25
+ # @param partition [Integer] partition
26
+ # @param count [Integer] how many messages we want to get at most
27
+ # @param start_offset [Integer, Time] offset from which we should start. If -1 is provided
28
+ # (default) we will start from the latest offset. If time is provided, the appropriate
29
+ # offset will be resolved. If negative beyond -1 is provided, we move backwards more.
30
+ # @param settings [Hash] kafka extra settings (optional)
31
+ #
32
+ # @return [Array<Karafka::Messages::Message>] array with messages
33
+ def read_topic(name, partition, count, start_offset = -1, settings = {})
34
+ ::Karafka::Admin.read_topic(
35
+ name,
36
+ partition,
37
+ count,
38
+ start_offset,
39
+ # Merge our Web UI specific settings
40
+ config.merge(settings)
41
+ )
42
+ end
43
+
44
+ private
45
+
46
+ # @return [Hash] kafka config for Web UI interface.
47
+ # @note It does **not** affect tracking or processing
48
+ def config
49
+ ::Karafka::Web.config.ui.kafka
50
+ end
51
+ end
52
+ end
53
+ end
54
+ end
55
+ end
56
+ end
@@ -4,7 +4,7 @@ module Karafka
4
4
  module Web
5
5
  module Ui
6
6
  module Models
7
- # Wraps around the `Karafka::Admin#cluster_info` with caching and some additional aliases
7
+ # Wraps around the `Lib::Admin#cluster_info` with caching and some additional aliases
8
8
  # so we can reference relevant information easily
9
9
  class ClusterInfo
10
10
  class << self
@@ -18,7 +18,7 @@ module Karafka
18
18
  cluster_info = cache.read(:cluster_info)
19
19
 
20
20
  if cluster_info.nil? || !cached
21
- cluster_info = cache.write(:cluster_info, Karafka::Admin.cluster_info)
21
+ cluster_info = cache.write(:cluster_info, Lib::Admin.cluster_info)
22
22
  end
23
23
 
24
24
  cluster_info
@@ -32,10 +32,12 @@ module Karafka
32
32
 
33
33
  # @return [::Karafka::Messages::Message, nil] most recent state or nil if none
34
34
  def fetch
35
- ::Karafka::Admin.read_topic(
35
+ Lib::Admin.read_topic(
36
36
  Karafka::Web.config.topics.consumers.metrics,
37
37
  0,
38
- 1
38
+ # We need to take last two and not the last because in case of a transactional
39
+ # producer, the last one will match the transaction commit message
40
+ 2
39
41
  ).last
40
42
  end
41
43
  end
@@ -41,10 +41,12 @@ module Karafka
41
41
 
42
42
  # @return [::Karafka::Messages::Message, nil] most recent state or nil if none
43
43
  def fetch
44
- ::Karafka::Admin.read_topic(
44
+ Lib::Admin.read_topic(
45
45
  Karafka::Web.config.topics.consumers.states,
46
46
  0,
47
- 1
47
+ # We need to take last two and not the last because in case of a transactional
48
+ # producer, the last one will match the transaction commit message
49
+ 2
48
50
  ).last
49
51
  end
50
52
 
@@ -25,7 +25,7 @@ module Karafka
25
25
 
26
26
  MAX_ERROR_PARTITIONS.times do |partition|
27
27
  begin
28
- offsets = Karafka::Admin.read_watermark_offsets(
28
+ offsets = Lib::Admin.read_watermark_offsets(
29
29
  ::Karafka::Web.config.topics.errors,
30
30
  partition
31
31
  )
@@ -17,7 +17,7 @@ module Karafka
17
17
  # @param offset [Integer]
18
18
  # @raise [::Karafka::Web::Errors::Ui::NotFoundError] when not found
19
19
  def find(topic_id, partition_id, offset)
20
- message = Karafka::Admin.read_topic(
20
+ message = Lib::Admin.read_topic(
21
21
  topic_id,
22
22
  partition_id,
23
23
  1,
@@ -195,7 +195,7 @@ module Karafka
195
195
  # @return [Array<Karafka::Messages::Message>, false] topic partition messages or false
196
196
  # in case we hit a non-existing offset
197
197
  def read_topic(*args)
198
- ::Karafka::Admin.read_topic(*args)
198
+ Lib::Admin.read_topic(*args)
199
199
  rescue Rdkafka::RdkafkaError => e
200
200
  return false if e.code == :auto_offset_reset
201
201
 
@@ -44,7 +44,7 @@ module Karafka
44
44
  .map { |process| process[:offset] }
45
45
  .sort
46
46
 
47
- ::Karafka::Admin.read_topic(
47
+ Lib::Admin.read_topic(
48
48
  ::Karafka::Web.config.topics.consumers.reports,
49
49
  0,
50
50
  # We set 10k here because we start from the latest offset of the reports, hence
@@ -13,7 +13,7 @@ module Karafka
13
13
  # @param partition_id [Integer]
14
14
  # @return [WatermarkOffsets]
15
15
  def find(topic_id, partition_id)
16
- offsets = ::Karafka::Admin.read_watermark_offsets(topic_id, partition_id)
16
+ offsets = Lib::Admin.read_watermark_offsets(topic_id, partition_id)
17
17
 
18
18
  new(
19
19
  low: offsets.first,
@@ -132,11 +132,22 @@ module Karafka
132
132
  active_partitions, = Paginators::Partitions.call(partitions_count, 1)
133
133
  end
134
134
 
135
- # This selects first page with most recent messages
136
- messages, = Models::Message.topic_page(topic_id, active_partitions, 1)
135
+ recent = nil
137
136
 
138
- # Selects newest out of all partitions
139
- recent = messages.max_by(&:timestamp)
137
+ # This selects first pages with most recent messages and moves to next if first
138
+ # contains only compacted data, etc.
139
+ #
140
+ # We do it until we find a message we could refer to (if doable) within first
141
+ # ten pages
142
+ 10.times do |page|
143
+ messages, = Models::Message.topic_page(topic_id, active_partitions, page + 1)
144
+
145
+ # Selects newest out of all partitions
146
+ # Reject compacted messages and transaction-related once
147
+ recent = messages.reject { |message| message.is_a?(Array) }.max_by(&:timestamp)
148
+
149
+ break if recent
150
+ end
140
151
 
141
152
  recent || raise(::Karafka::Web::Errors::Ui::NotFoundError)
142
153
 
@@ -184,7 +195,7 @@ module Karafka
184
195
  # @param partition_id [Integer]
185
196
  # @param time [Time] time of the message
186
197
  def closest(topic_id, partition_id, time)
187
- target = ::Karafka::Admin.read_topic(topic_id, partition_id, 1, time).first
198
+ target = Lib::Admin.read_topic(topic_id, partition_id, 1, time).first
188
199
 
189
200
  partition_path = "explorer/#{topic_id}/#{partition_id}"
190
201
  partition_path += "?offset=#{target.offset}" if target
@@ -28,7 +28,7 @@ module Karafka
28
28
  def republish(topic_id, partition_id, offset)
29
29
  message = Ui::Models::Message.find(topic_id, partition_id, offset)
30
30
 
31
- delivery = ::Karafka.producer.produce_sync(
31
+ delivery = ::Karafka::Web.producer.produce_sync(
32
32
  topic: topic_id,
33
33
  partition: partition_id,
34
34
  payload: message.raw_payload,
@@ -1,7 +1,7 @@
1
1
  <% if error_msg.is_a?(Array) %>
2
2
  <tr>
3
3
  <td colspan="5" class="text-center text-muted">
4
- This error has either been removed or compacted and is no longer available.
4
+ This offset does not contain error data. The message may have been compacted or is a system entry.
5
5
  </td>
6
6
  </tr>
7
7
  <% else %>
@@ -7,7 +7,7 @@
7
7
  <%= message[1] %>
8
8
  </td>
9
9
  <td colspan="3" class="text-center text-muted">
10
- This message has either been removed or compacted and is no longer available.
10
+ This offset does not contain any data. The message may have been compacted or is a system entry.
11
11
  </td>
12
12
  </tr>
13
13
  <% else %>
@@ -73,7 +73,7 @@ function setLivePollButton() {
73
73
 
74
74
  function checkResponse(resp) {
75
75
  if (!resp.ok) {
76
- throw response.error();
76
+ throw resp;
77
77
  }
78
78
  return resp
79
79
  }
@@ -1,7 +1,7 @@
1
1
  <% if error_msg.is_a?(Array) %>
2
2
  <tr>
3
3
  <td colspan="5" class="text-center text-muted">
4
- This error has either been removed or compacted and is no longer available.
4
+ This offset does not contain error data. The message may have been compacted or is a system entry.
5
5
  </td>
6
6
  </tr>
7
7
  <% else %>
@@ -3,6 +3,6 @@
3
3
  module Karafka
4
4
  module Web
5
5
  # Current gem version
6
- VERSION = '0.7.7'
6
+ VERSION = '0.7.9'
7
7
  end
8
8
  end
data/lib/karafka/web.rb CHANGED
@@ -13,6 +13,12 @@ module Karafka
13
13
  # Karafka Web UI + Karafka web monitoring
14
14
  module Web
15
15
  class << self
16
+ # @return [WaterDrop::Producer, nil] waterdrop messages producer or nil if not yet fully
17
+ # initialized. It may not be fully initialized until the configuration is done
18
+ def producer
19
+ @producer ||= Web.config.producer
20
+ end
21
+
16
22
  # @return [String] root path of this gem
17
23
  def gem_root
18
24
  Pathname.new(File.expand_path('../..', __dir__))
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: karafka-web
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.7.7
4
+ version: 0.7.9
5
5
  platform: ruby
6
6
  authors:
7
7
  - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
35
35
  AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
36
36
  msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
37
37
  -----END CERTIFICATE-----
38
- date: 2023-10-20 00:00:00.000000000 Z
38
+ date: 2023-10-25 00:00:00.000000000 Z
39
39
  dependencies:
40
40
  - !ruby/object:Gem::Dependency
41
41
  name: erubi
@@ -57,7 +57,7 @@ dependencies:
57
57
  requirements:
58
58
  - - ">="
59
59
  - !ruby/object:Gem::Version
60
- version: 2.2.8.beta1
60
+ version: 2.2.9
61
61
  - - "<"
62
62
  - !ruby/object:Gem::Version
63
63
  version: 3.0.0
@@ -67,7 +67,7 @@ dependencies:
67
67
  requirements:
68
68
  - - ">="
69
69
  - !ruby/object:Gem::Version
70
- version: 2.2.8.beta1
70
+ version: 2.2.9
71
71
  - - "<"
72
72
  - !ruby/object:Gem::Version
73
73
  version: 3.0.0
@@ -77,7 +77,7 @@ dependencies:
77
77
  requirements:
78
78
  - - ">="
79
79
  - !ruby/object:Gem::Version
80
- version: 2.2.2
80
+ version: 2.2.4
81
81
  - - "<"
82
82
  - !ruby/object:Gem::Version
83
83
  version: 3.0.0
@@ -87,7 +87,7 @@ dependencies:
87
87
  requirements:
88
88
  - - ">="
89
89
  - !ruby/object:Gem::Version
90
- version: 2.2.2
90
+ version: 2.2.4
91
91
  - - "<"
92
92
  - !ruby/object:Gem::Version
93
93
  version: 3.0.0
@@ -225,10 +225,11 @@ files:
225
225
  - lib/karafka/web/tracking/memoized_shell.rb
226
226
  - lib/karafka/web/tracking/producers/listeners/base.rb
227
227
  - lib/karafka/web/tracking/producers/listeners/errors.rb
228
- - lib/karafka/web/tracking/producers/listeners/reporter.rb
229
228
  - lib/karafka/web/tracking/producers/reporter.rb
230
229
  - lib/karafka/web/tracking/producers/sampler.rb
230
+ - lib/karafka/web/tracking/reporter.rb
231
231
  - lib/karafka/web/tracking/sampler.rb
232
+ - lib/karafka/web/tracking/scheduler.rb
232
233
  - lib/karafka/web/tracking/ttl_array.rb
233
234
  - lib/karafka/web/tracking/ttl_hash.rb
234
235
  - lib/karafka/web/ui/app.rb
@@ -247,6 +248,7 @@ files:
247
248
  - lib/karafka/web/ui/controllers/status.rb
248
249
  - lib/karafka/web/ui/helpers/application_helper.rb
249
250
  - lib/karafka/web/ui/helpers/paths_helper.rb
251
+ - lib/karafka/web/ui/lib/admin.rb
250
252
  - lib/karafka/web/ui/lib/hash_proxy.rb
251
253
  - lib/karafka/web/ui/lib/paginations/base.rb
252
254
  - lib/karafka/web/ui/lib/paginations/offset_based.rb
metadata.gz.sig CHANGED
Binary file
@@ -1,21 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module Karafka
4
- module Web
5
- module Tracking
6
- module Producers
7
- module Listeners
8
- # Special listener that we use to report data about producers states
9
- # We don't have to have a separate thread for reporting, because producers have their
10
- # own internal threads for changes polling and we can utilize this thread
11
- class Reporter < Base
12
- # @param _event [Karafka::Core::Monitoring::Event]
13
- def on_statistics_emitted(_event)
14
- reporter.report
15
- end
16
- end
17
- end
18
- end
19
- end
20
- end
21
- end