karafka-web 0.7.2 → 0.7.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/CHANGELOG.md +7 -0
- data/Gemfile.lock +1 -1
- data/lib/karafka/web/processing/consumer.rb +23 -6
- data/lib/karafka/web/processing/consumers/aggregators/metrics.rb +4 -4
- data/lib/karafka/web/processing/consumers/aggregators/state.rb +2 -2
- data/lib/karafka/web/processing/consumers/schema_manager.rb +14 -7
- data/lib/karafka/web/tracking/consumers/sampler.rb +11 -2
- data/lib/karafka/web/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +2 -2
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7a31ebcde158ca18cc353d3fefad976b7dfb8d9fe3b3114ea8f43b533229434a
+  data.tar.gz: f9870e5268ebebb38c838dc8a220030beaebfe30d4d53cd63537c5c2f442e2d4
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: '02152094838c0be1606f49b4963e7d0791f1800bc7b7fbf71240866ed84e73bc264322415b48272faf46c715d70568fc054fc7183406d182d3c95eeb664019db'
+  data.tar.gz: 787a16d7f91f7ccb907a3cbea0599337bd7525b1f2982f023b639227df1b52511afcc763422c71d0e2bf34216d3f3a4fe6794ae8824abeea9302b84071de8ca5
checksums.yaml.gz.sig
CHANGED
Binary file
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,12 @@
 # Karafka Web changelog
 
+## 0.7.4 (2023-09-19)
+- [Improvement] Skip aggregations on older schemas during upgrades. This only skips process-reports (that are going to be rolled) on the 5s window in case of an upgrade that should not be a rolling one anyhow. This simplifies the operations and minimizes the risk on breaking upgrades.
+- [Fix] Fix not working `ps` for macOS.
+
+## 0.7.3 (2023-09-18)
+- [Improvement] Mitigate a case where a race-condition during upgrade would crash data.
+
 ## 0.7.2 (2023-09-18)
 - [Improvement] Display hidden by accident errors for OSS metrics.
 - [Improvement] Use a five second cache for non-production environments to improve dev experience.
data/Gemfile.lock
CHANGED
data/lib/karafka/web/processing/consumer.rb
CHANGED
@@ -26,6 +26,7 @@ module Karafka

           # We set this that way so we report with first batch and so we report as fast as possible
           @flushed_at = monotonic_now - @flush_interval
+          @established = false
         end

         # Aggregates consumers state into a single current state representation
@@ -34,10 +35,24 @@ module Karafka

           # If there is even one incompatible message, we need to stop
           consumers_messages.each do |message|
-
+            case @schema_manager.call(message)
+            when :current
+              true
+            when :newer
+              @schema_manager.invalidate!
+
               dispatch

               raise ::Karafka::Web::Errors::Processing::IncompatibleSchemaError
+            # Older reports mean someone is in the middle of upgrade. Schema change related
+            # upgrades always should happen without a rolling-upgrade, hence we can reject those
+            # requests without significant or any impact on data quality but without having to
+            # worry about backwards compatibility. Errors are tracked independently, so it should
+            # not be a problem.
+            when :older
+              next
+            else
+              raise ::Karafka::Errors::UnsupportedCaseError
             end

             # We need to run the aggregations on each message in order to compensate for
@@ -45,6 +60,10 @@ module Karafka
             @state_aggregator.add(message.payload, message.offset)
             @metrics_aggregator.add_report(message.payload)
             @metrics_aggregator.add_stats(@state_aggregator.stats)
+            # Indicates that we had at least one report we used to enrich data
+            # If there were no state changes, there is no reason to flush data. This can occur
+            # when we had some messages but we skipped them for any reason on a first run
+            @established = true

             # Optimize memory usage in pro
             message.clean! if Karafka.pro?
@@ -59,17 +78,15 @@ module Karafka

         # Flush final state on shutdown
         def shutdown
-
-
-          materialize
-          validate!
-          flush
+          dispatch
         end

         private

         # Flushes the state of the Web-UI to the DB
         def dispatch
+          return unless @established
+
           materialize
           validate!
           flush
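The consumer.rb changes above boil down to two behaviors: reports using an older schema are skipped instead of aborting processing, and flushing is guarded by @established so nothing is written until at least one report has actually been aggregated. Below is a simplified, self-contained Ruby sketch of that flow; the Report struct, the TinyAggregatingConsumer class and the hard-coded schema results are illustrative stand-ins, not the actual Web-UI classes.

# Simplified stand-in for the flow above: reports with an older schema are
# skipped, a newer schema aborts processing, and flushing only happens once at
# least one report has actually been aggregated (@established).
Report = Struct.new(:schema_result, :payload)

class TinyAggregatingConsumer
  def initialize
    @established = false
    @aggregated = []
  end

  def consume(reports)
    reports.each do |report|
      case report.schema_result
      when :older
        next # mid-upgrade leftovers, safe to ignore
      when :newer
        dispatch
        raise 'incompatible schema, stopping processing'
      else
        @aggregated << report.payload
        # At least one report enriched the state, so flushing makes sense now
        @established = true
      end
    end

    dispatch
  end

  def shutdown
    dispatch
  end

  private

  def dispatch
    return unless @established

    puts "flushing #{@aggregated.size} aggregated report(s)"
  end
end

consumer = TinyAggregatingConsumer.new
consumer.consume([Report.new(:older, 'pre-upgrade'), Report.new(:current, 'fresh')])
consumer.shutdown

In the real consumer the schema result comes from the schema manager shown further down, and flushing is additionally paced by the @flush_interval visible in the first hunk.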
data/lib/karafka/web/processing/consumers/aggregators/metrics.rb
CHANGED
@@ -102,21 +102,21 @@ module Karafka
           partitions_data = topic_details.fetch(:partitions).values

           lags = partitions_data
-            .map { |p_details| p_details.fetch(:lag) }
+            .map { |p_details| p_details.fetch(:lag, -1) }
             .reject(&:negative?)

           lags_stored = partitions_data
-            .map { |p_details| p_details.fetch(:lag_stored) }
+            .map { |p_details| p_details.fetch(:lag_stored, -1) }
             .reject(&:negative?)

           offsets_hi = partitions_data
-            .map { |p_details| p_details.fetch(:hi_offset) }
+            .map { |p_details| p_details.fetch(:hi_offset, -1) }
             .reject(&:negative?)

           # Last stable offsets freeze durations - we pick the max freeze to indicate
           # the longest open transaction that potentially may be hanging
           ls_offsets_fd = partitions_data
-            .map { |p_details| p_details.fetch(:ls_offset_fd) }
+            .map { |p_details| p_details.fetch(:ls_offset_fd, 0) }
             .reject(&:negative?)

           # If there is no lag that would not be negative, it means we did not mark
data/lib/karafka/web/processing/consumers/aggregators/state.rb
CHANGED
@@ -152,8 +152,8 @@ module Karafka
             stats[:listeners] += report_process[:listeners] || 0
             stats[:processes] += 1
             stats[:rss] += report_process[:memory_usage]
-            stats[:lag] += lags.reject(&:negative?).sum
-            stats[:lag_stored] += lags_stored.reject(&:negative?).sum
+            stats[:lag] += lags.compact.reject(&:negative?).sum
+            stats[:lag_stored] += lags_stored.compact.reject(&:negative?).sum
             utilization += report_stats[:utilization]
           end

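Both aggregator tweaks make the summations tolerant of partitions that have not reported a given metric yet: missing keys fall back to a negative sentinel, nil values are compacted away, and negative placeholders are rejected before summing. A small illustration with made-up partition hashes (not the actual Web-UI state structures):

# Illustration of the defensive aggregation used above: missing keys fall back
# to a negative sentinel, nils are compacted away and negative placeholders are
# rejected before summing.
partitions = [
  { lag: 5, lag_stored: 4 },
  { lag: -1 },         # negative sentinel: lag not known yet
  { lag_stored: nil }  # metric key present but without a value yet
]

lags = partitions
  .map { |p_details| p_details.fetch(:lag, -1) }
  .reject(&:negative?)

lags_stored = partitions
  .map { |p_details| p_details.fetch(:lag_stored, -1) }
  .compact
  .reject(&:negative?)

puts lags.sum        # => 5
puts lags_stored.sum # => 4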
data/lib/karafka/web/processing/consumers/schema_manager.rb
CHANGED
@@ -27,12 +27,8 @@ module Karafka
         end

         # @param message [Karafka::Messages::Message] consumer report
-        # @return [
-
-        # @note The state switch is one-direction only. If we encounter an incompatible message
-        #   we need to stop processing so further checks even with valid should not switch it
-        #   back to valid
-        def compatible?(message)
+        # @return [Symbol] is the given message using older, newer or current schema
+        def call(message)
           schema_version = message.payload[:schema_version]

           # Save on memory allocation by reusing
@@ -40,8 +36,19 @@ module Karafka
           # an object with each message
           message_version = @cache[schema_version] ||= ::Gem::Version.new(schema_version)

-          return
+          return :older if message_version < CURRENT_VERSION
+          return :newer if message_version > CURRENT_VERSION
+
+          :current
+        end

+        # Moves the schema manager state to incompatible to indicate in the Web-UI that we
+        # cannot move forward because schema is incompatible.
+        #
+        # @note The state switch is one-direction only. If we encounter an incompatible message
+        #   we need to stop processing so further checks even with valid should not switch it
+        #   back to valid
+        def invalidate!
           @valid = false
         end

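SchemaManager#call now classifies each report by comparing its schema_version against the current one with Gem::Version, which compares versions segment by segment rather than as plain strings. A minimal sketch of that classification, with an illustrative CURRENT_VERSION rather than the real Web-UI schema version:

# Minimal sketch of the classification done by SchemaManager#call, with an
# illustrative CURRENT_VERSION (not the real Web-UI schema version).
CURRENT_VERSION = Gem::Version.new('1.2.0')

def classify(schema_version)
  message_version = Gem::Version.new(schema_version)

  return :older if message_version < CURRENT_VERSION
  return :newer if message_version > CURRENT_VERSION

  :current
end

puts classify('1.1.5')  # => older   (report from a process not yet upgraded)
puts classify('1.2.0')  # => current
puts classify('1.10.0') # => newer   (plain string comparison would get this wrong)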
data/lib/karafka/web/tracking/consumers/sampler.rb
CHANGED
@@ -247,11 +247,20 @@ module Karafka
         # Loads our ps results into memory so we can extract from them whatever we need
         def memory_threads_ps
           @memory_threads_ps = case RUBY_PLATFORM
-                               when /
+                               when /linux/
                                  @shell
-                                   .call('ps -A -o rss=,thcount
+                                   .call('ps -A -o rss=,thcount=,pid=')
                                    .split("\n")
                                    .map { |row| row.strip.split(' ').map(&:to_i) }
+                               # thcount is not available on macos ps
+                               # because of that we inject 0 as threads count similar to how
+                               # we do on windows
+                               when /darwin|bsd/
+                                 @shell
+                                   .call('ps -A -o rss=,pid=')
+                                   .split("\n")
+                                   .map { |row| row.strip.split(' ').map(&:to_i) }
+                                   .map { |row| [row.first, 0, row.last] }
                                else
                                  @memory_threads_ps = false
                                end
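The new darwin/bsd branch drops thcount (which macOS ps does not support) and injects 0 as the thread count, similar to how Windows is handled per the inline comment. A sketch of that parsing step, run against a captured sample of `ps -A -o rss=,pid=` output instead of shelling out, so it stays deterministic:

# Sketch of the macOS/BSD parsing above, using sample output of
# `ps -A -o rss=,pid=` so no shelling out is required.
sample_ps_output = <<~PS
   14224   312
   88416  1045
    5120  2301
PS

rows = sample_ps_output
  .split("\n")
  .map { |row| row.strip.split(' ').map(&:to_i) }
  .map { |row| [row.first, 0, row.last] } # [rss, threads (unknown on macOS), pid]

p rows
# => [[14224, 0, 312], [88416, 0, 1045], [5120, 0, 2301]]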
data/lib/karafka/web/version.rb
CHANGED
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: karafka-web
 version: !ruby/object:Gem::Version
-  version: 0.7.2
+  version: 0.7.4
 platform: ruby
 authors:
 - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
   AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
   msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
   -----END CERTIFICATE-----
-date: 2023-09-
+date: 2023-09-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: erubi
metadata.gz.sig
CHANGED
Binary file