karafka-web 0.7.9 → 0.8.0.rc1
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.github/workflows/ci.yml +21 -6
- data/.ruby-version +1 -1
- data/CHANGELOG.md +66 -0
- data/Gemfile.lock +22 -22
- data/docker-compose.yml +3 -1
- data/karafka-web.gemspec +2 -2
- data/lib/karafka/web/config.rb +16 -3
- data/lib/karafka/web/contracts/config.rb +7 -2
- data/lib/karafka/web/errors.rb +12 -0
- data/lib/karafka/web/inflector.rb +33 -0
- data/lib/karafka/web/installer.rb +20 -11
- data/lib/karafka/web/management/actions/base.rb +36 -0
- data/lib/karafka/web/management/actions/clean_boot_file.rb +33 -0
- data/lib/karafka/web/management/actions/create_initial_states.rb +77 -0
- data/lib/karafka/web/management/actions/create_topics.rb +139 -0
- data/lib/karafka/web/management/actions/delete_topics.rb +30 -0
- data/lib/karafka/web/management/actions/enable.rb +117 -0
- data/lib/karafka/web/management/actions/extend_boot_file.rb +39 -0
- data/lib/karafka/web/management/actions/migrate_states_data.rb +18 -0
- data/lib/karafka/web/management/migrations/0_base.rb +58 -0
- data/lib/karafka/web/management/migrations/0_set_initial_consumers_metrics.rb +36 -0
- data/lib/karafka/web/management/migrations/0_set_initial_consumers_state.rb +43 -0
- data/lib/karafka/web/management/migrations/1699543515_fill_missing_received_and_sent_bytes_in_consumers_metrics.rb +26 -0
- data/lib/karafka/web/management/migrations/1699543515_fill_missing_received_and_sent_bytes_in_consumers_state.rb +23 -0
- data/lib/karafka/web/management/migrations/1700234522_introduce_waiting_in_consumers_metrics.rb +24 -0
- data/lib/karafka/web/management/migrations/1700234522_introduce_waiting_in_consumers_state.rb +20 -0
- data/lib/karafka/web/management/migrations/1700234522_remove_processing_from_consumers_metrics.rb +24 -0
- data/lib/karafka/web/management/migrations/1700234522_remove_processing_from_consumers_state.rb +20 -0
- data/lib/karafka/web/management/migrations/1704722380_split_listeners_into_active_and_paused_in_metrics.rb +36 -0
- data/lib/karafka/web/management/migrations/1704722380_split_listeners_into_active_and_paused_in_states.rb +32 -0
- data/lib/karafka/web/management/migrator.rb +117 -0
- data/lib/karafka/web/processing/consumer.rb +39 -38
- data/lib/karafka/web/processing/consumers/aggregators/metrics.rb +15 -7
- data/lib/karafka/web/processing/consumers/aggregators/state.rb +8 -3
- data/lib/karafka/web/processing/consumers/contracts/aggregated_stats.rb +5 -1
- data/lib/karafka/web/processing/publisher.rb +59 -0
- data/lib/karafka/web/tracking/consumers/contracts/job.rb +3 -2
- data/lib/karafka/web/tracking/consumers/contracts/partition.rb +1 -0
- data/lib/karafka/web/tracking/consumers/contracts/report.rb +6 -1
- data/lib/karafka/web/tracking/consumers/contracts/subscription_group.rb +10 -1
- data/lib/karafka/web/tracking/consumers/listeners/connections.rb +49 -0
- data/lib/karafka/web/tracking/consumers/listeners/pausing.rb +7 -4
- data/lib/karafka/web/tracking/consumers/listeners/processing.rb +78 -70
- data/lib/karafka/web/tracking/consumers/listeners/statistics.rb +40 -13
- data/lib/karafka/web/tracking/consumers/sampler.rb +82 -25
- data/lib/karafka/web/tracking/helpers/ttls/array.rb +72 -0
- data/lib/karafka/web/tracking/helpers/ttls/hash.rb +34 -0
- data/lib/karafka/web/tracking/helpers/ttls/stats.rb +49 -0
- data/lib/karafka/web/tracking/helpers/ttls/windows.rb +32 -0
- data/lib/karafka/web/tracking/reporter.rb +1 -0
- data/lib/karafka/web/ui/app.rb +22 -4
- data/lib/karafka/web/ui/base.rb +18 -2
- data/lib/karafka/web/ui/controllers/base.rb +34 -4
- data/lib/karafka/web/ui/controllers/become_pro.rb +1 -1
- data/lib/karafka/web/ui/controllers/cluster.rb +33 -9
- data/lib/karafka/web/ui/controllers/consumers.rb +8 -2
- data/lib/karafka/web/ui/controllers/dashboard.rb +2 -2
- data/lib/karafka/web/ui/controllers/errors.rb +2 -2
- data/lib/karafka/web/ui/controllers/jobs.rb +55 -5
- data/lib/karafka/web/ui/controllers/requests/params.rb +5 -0
- data/lib/karafka/web/ui/controllers/responses/deny.rb +15 -0
- data/lib/karafka/web/ui/controllers/responses/file.rb +23 -0
- data/lib/karafka/web/ui/controllers/responses/{data.rb → render.rb} +3 -3
- data/lib/karafka/web/ui/controllers/routing.rb +11 -2
- data/lib/karafka/web/ui/controllers/status.rb +1 -1
- data/lib/karafka/web/ui/helpers/application_helper.rb +70 -0
- data/lib/karafka/web/ui/lib/hash_proxy.rb +29 -14
- data/lib/karafka/web/ui/lib/sorter.rb +170 -0
- data/lib/karafka/web/ui/models/counters.rb +6 -0
- data/lib/karafka/web/ui/models/health.rb +23 -2
- data/lib/karafka/web/ui/models/jobs.rb +48 -0
- data/lib/karafka/web/ui/models/metrics/charts/aggregated.rb +33 -0
- data/lib/karafka/web/ui/models/metrics/charts/topics.rb +1 -10
- data/lib/karafka/web/ui/models/process.rb +2 -1
- data/lib/karafka/web/ui/models/status.rb +23 -7
- data/lib/karafka/web/ui/models/topic.rb +3 -1
- data/lib/karafka/web/ui/models/visibility_filter.rb +16 -0
- data/lib/karafka/web/ui/pro/app.rb +44 -6
- data/lib/karafka/web/ui/pro/controllers/cluster.rb +1 -0
- data/lib/karafka/web/ui/pro/controllers/consumers.rb +52 -6
- data/lib/karafka/web/ui/pro/controllers/dashboard.rb +1 -1
- data/lib/karafka/web/ui/pro/controllers/dlq.rb +1 -1
- data/lib/karafka/web/ui/pro/controllers/errors.rb +3 -3
- data/lib/karafka/web/ui/pro/controllers/explorer.rb +8 -8
- data/lib/karafka/web/ui/pro/controllers/health.rb +34 -2
- data/lib/karafka/web/ui/pro/controllers/jobs.rb +11 -0
- data/lib/karafka/web/ui/pro/controllers/messages.rb +42 -0
- data/lib/karafka/web/ui/pro/controllers/routing.rb +11 -2
- data/lib/karafka/web/ui/pro/views/consumers/_breadcrumbs.erb +8 -2
- data/lib/karafka/web/ui/pro/views/consumers/_consumer.erb +14 -8
- data/lib/karafka/web/ui/pro/views/consumers/_counters.erb +8 -6
- data/lib/karafka/web/ui/pro/views/consumers/consumer/_job.erb +4 -1
- data/lib/karafka/web/ui/pro/views/consumers/consumer/_no_jobs.erb +1 -1
- data/lib/karafka/web/ui/pro/views/consumers/consumer/_partition.erb +1 -3
- data/lib/karafka/web/ui/pro/views/consumers/consumer/_subscription_group.erb +28 -11
- data/lib/karafka/web/ui/pro/views/consumers/consumer/_tabs.erb +10 -3
- data/lib/karafka/web/ui/pro/views/consumers/index.erb +3 -3
- data/lib/karafka/web/ui/pro/views/consumers/pending_jobs.erb +43 -0
- data/lib/karafka/web/ui/pro/views/consumers/{jobs.erb → running_jobs.erb} +11 -10
- data/lib/karafka/web/ui/pro/views/dashboard/index.erb +7 -1
- data/lib/karafka/web/ui/pro/views/explorer/message/_message_actions.erb +18 -0
- data/lib/karafka/web/ui/pro/views/explorer/message/_metadata.erb +43 -0
- data/lib/karafka/web/ui/pro/views/explorer/message/_payload.erb +21 -0
- data/lib/karafka/web/ui/pro/views/explorer/message/_payload_actions.erb +19 -0
- data/lib/karafka/web/ui/pro/views/explorer/show.erb +9 -84
- data/lib/karafka/web/ui/pro/views/health/_breadcrumbs.erb +8 -0
- data/lib/karafka/web/ui/pro/views/health/_partition.erb +1 -3
- data/lib/karafka/web/ui/pro/views/health/_partition_offset.erb +4 -4
- data/lib/karafka/web/ui/pro/views/health/_partition_times.erb +32 -0
- data/lib/karafka/web/ui/pro/views/health/_tabs.erb +9 -0
- data/lib/karafka/web/ui/pro/views/health/changes.erb +66 -0
- data/lib/karafka/web/ui/pro/views/health/offsets.erb +14 -14
- data/lib/karafka/web/ui/pro/views/health/overview.erb +11 -11
- data/lib/karafka/web/ui/pro/views/jobs/_job.erb +1 -1
- data/lib/karafka/web/ui/pro/views/jobs/_no_jobs.erb +1 -1
- data/lib/karafka/web/ui/pro/views/jobs/pending.erb +39 -0
- data/lib/karafka/web/ui/pro/views/jobs/running.erb +39 -0
- data/lib/karafka/web/ui/pro/views/routing/_consumer_group.erb +2 -2
- data/lib/karafka/web/ui/pro/views/routing/_topic.erb +9 -0
- data/lib/karafka/web/ui/pro/views/routing/show.erb +12 -0
- data/lib/karafka/web/ui/pro/views/shared/_navigation.erb +1 -1
- data/lib/karafka/web/ui/public/javascripts/application.js +10 -0
- data/lib/karafka/web/ui/public/stylesheets/application.css +4 -0
- data/lib/karafka/web/ui/views/cluster/_breadcrumbs.erb +16 -0
- data/lib/karafka/web/ui/views/cluster/_tabs.erb +27 -0
- data/lib/karafka/web/ui/views/cluster/brokers.erb +27 -0
- data/lib/karafka/web/ui/views/cluster/topics.erb +35 -0
- data/lib/karafka/web/ui/views/consumers/_counters.erb +8 -6
- data/lib/karafka/web/ui/views/consumers/_summary.erb +2 -2
- data/lib/karafka/web/ui/views/consumers/index.erb +3 -3
- data/lib/karafka/web/ui/views/dashboard/_ranges_selector.erb +23 -7
- data/lib/karafka/web/ui/views/dashboard/index.erb +19 -8
- data/lib/karafka/web/ui/views/errors/show.erb +2 -23
- data/lib/karafka/web/ui/views/jobs/_breadcrumbs.erb +17 -1
- data/lib/karafka/web/ui/views/jobs/_job.erb +1 -1
- data/lib/karafka/web/ui/views/jobs/_no_jobs.erb +1 -1
- data/lib/karafka/web/ui/views/jobs/_tabs.erb +27 -0
- data/lib/karafka/web/ui/views/jobs/{index.erb → pending.erb} +9 -7
- data/lib/karafka/web/ui/{pro/views/jobs/index.erb → views/jobs/running.erb} +9 -11
- data/lib/karafka/web/ui/views/routing/_consumer_group.erb +14 -12
- data/lib/karafka/web/ui/views/shared/_navigation.erb +1 -1
- data/lib/karafka/web/ui/views/shared/_pagination.erb +1 -1
- data/lib/karafka/web/ui/views/shared/exceptions/not_allowed.erb +37 -0
- data/lib/karafka/web/ui/views/status/show.erb +17 -2
- data/lib/karafka/web/ui/views/status/warnings/_routing_topics_presence.erb +15 -0
- data/lib/karafka/web/version.rb +1 -1
- data/lib/karafka/web.rb +6 -2
- data.tar.gz.sig +0 -0
- metadata +61 -26
- metadata.gz.sig +0 -0
- data/lib/karafka/web/management/base.rb +0 -34
- data/lib/karafka/web/management/clean_boot_file.rb +0 -31
- data/lib/karafka/web/management/create_initial_states.rb +0 -101
- data/lib/karafka/web/management/create_topics.rb +0 -133
- data/lib/karafka/web/management/delete_topics.rb +0 -28
- data/lib/karafka/web/management/enable.rb +0 -102
- data/lib/karafka/web/management/extend_boot_file.rb +0 -37
- data/lib/karafka/web/tracking/ttl_array.rb +0 -59
- data/lib/karafka/web/tracking/ttl_hash.rb +0 -16
- data/lib/karafka/web/ui/pro/views/dashboard/_ranges_selector.erb +0 -39
- data/lib/karafka/web/ui/views/cluster/index.erb +0 -74
@@ -0,0 +1,170 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Web
+    module Ui
+      module Lib
+        # Sorting engine for deep in-memory structures
+        # It supports hashes, arrays and hash proxies.
+        #
+        # @note It handles sorting in place by mutating appropriate resources and sub-components
+        class Sorter
+          # We can support only two order types
+          ALLOWED_ORDERS = %w[asc desc].freeze
+
+          # Max depth for nested sorting
+          MAX_DEPTH = 8
+
+          private_constant :ALLOWED_ORDERS, :MAX_DEPTH
+
+          # @param sort_query [String] query for sorting or empty string if no sorting needed
+          # @param allowed_attributes [Array<String>] attributes on which we allow to sort. Since
+          #   we can sort on method invocations, this needs to be limited and provided on a per
+          #   controller basis.
+          def initialize(sort_query, allowed_attributes:)
+            field, order = sort_query.split(' ')
+
+            @order = order.to_s.downcase
+            @order = ALLOWED_ORDERS.first unless ALLOWED_ORDERS.include?(@order)
+
+            # Normalize the key since we do not operate on capitalized values
+            @field = field.to_s.downcase
+
+            @field = '' unless allowed_attributes.include?(@field)
+
+            # Things we have already seen and sorted. Prevents crashing on the circular
+            # dependencies sorting when same resources are present in different parts of the tree
+            @seen = {}
+          end
+
+          # Sorts the structure and returns it sorted.
+          #
+          # @param resource [Hash, Array, Lib::HashProxy] structure we want to sort
+          # @param current_depth [Integer] current depth of the sorting recursion
+          def call(resource, current_depth = 0)
+            # Skip if there is no sort field at all
+            return resource if @field.empty?
+            # Skip if we've already seen this resource
+            # We use object id instead of full object as the objects can get big
+            return resource if @seen.key?(resource.object_id)
+            # Skip if we are too deep
+            return resource if current_depth > MAX_DEPTH
+
+            @seen[resource.object_id] = nil
+
+            case resource
+            when Array
+              sort_array!(resource, current_depth)
+            when Hash
+              sort_hash!(resource, current_depth)
+            when Lib::HashProxy
+              # We can sort the hash in place here, because it will still be referenced (the same)
+              # in the hash proxy object, so we can do it that way
+              sort_hash!(resource.to_h, current_depth)
+            when Enumerable
+              sort_array!(resource, current_depth)
+            end
+
+            resource
+          end
+
+          private
+
+          # Sorts the hash in place
+          #
+          # @param hash [Hash] hash we want to sort
+          # @param current_depth [Integer] current depth of sorting from root
+          def sort_hash!(hash, current_depth)
+            # Run sorting on each value, since we may have nested hashes and arrays
+            hash.each do |key, value|
+              previous_key = @parent_key
+              @parent_key = key.to_s.downcase
+              call(value, current_depth + 1)
+              @parent_key = previous_key
+            end
+
+            # We cannot sort hashes that are not type aligned. That is, we cannot compare
+            # nested hashes with integers, etc. In some cases we could (Float vs Integer), however
+            # for the sake of simplicity, we do not do that
+            return unless hash.values.map(&:class).uniq.size == 1
+
+            # Allows sorting based on parent key when hash contains another hash where we want to
+            # sort based on the keys and not based on the value
+            if @parent_key == @field
+              # We also should not modify hashes that do not have values that are sortable
+              # false is sortable but nil is not
+              sorted = hash.sort_by { |key, _| key.to_s }
+            else
+              values = hash.values.map { |value| sortable_value(value) }
+
+              return if values.any?(&:nil?)
+              return unless values.map(&:class).uniq.size == 1
+
+              # Generate new hash that will have things in our desired order
+              sorted = hash.sort_by { |_, value| sortable_value(value) }
+            end
+
+            sorted.reverse! if desc?
+
+            # Clear our hash and inject the new values in the order in which we want to have them
+            # Such clear and merge will ensure things are in the order we desired them
+            hash.clear
+            hash.merge!(sorted.to_h)
+          end
+
+          # Sorts an array in-place based on a specified attribute.
+          #
+          # The method iterates over each element in the array and applies the transformation.
+          #
+          # @param array [Array<Object>] The array of elements to be sorted
+          # @param current_depth [Integer] The current depth of the sorting operation,
+          #   used in the `call` method to handle nested structures or recursion.
+          # @note This method modifies the array in place (mutates the caller).
+          def sort_array!(array, current_depth)
+            # Sort arrays containing hashes by a specific attribute
+            array.map! { |element| call(element, current_depth + 1) }
+
+            values = array.map { |element| sortable_value(element) }
+
+            return if values.any?(&:nil?)
+            return unless values.map(&:class).uniq.size == 1
+
+            array.sort_by! { |element| sortable_value(element) }
+            array.reverse! if desc?
+          end
+
+          # @return [Boolean] true if we sort in desc, otherwise false
+          def desc?
+            @order == 'desc'
+          end
+
+          # Extracts the attribute based on which we should sort (if present)
+          #
+          # @param element [Object] takes the element object and depending on its type, tries to
+          #   figure out the value based on which we may sort
+          # @return [Object, nil] sortable value or nil if nothing to sort
+          def sortable_value(element)
+            result = nil
+            result = element[@field] || element[@field.to_sym] if element.is_a?(Hash)
+            result = element.public_send(@field) if element.respond_to?(@field)
+
+            # We cannot sort on some of the types and some require mapping, thus we convert
+            # types here when needed
+            case result
+            when Hash
+              nil
+            when Lib::HashProxy
+              nil
+            when true
+              1
+            when false
+              0
+            else
+              result
+            end
+          end
+        end
+      end
+    end
+  end
+end
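For orientation, a small usage sketch of the sorter added above; the sample data and the lag_stored attribute are hypothetical, while the class name and the "<field> <asc|desc>" query format come from the code itself:

  sorter = Karafka::Web::Ui::Lib::Sorter.new(
    'lag_stored desc',
    allowed_attributes: %w[lag_stored]
  )

  processes = [
    { name: 'worker-1', lag_stored: 10 },
    { name: 'worker-2', lag_stored: 250 }
  ]

  sorter.call(processes)
  # processes is now reordered in place: the element with lag_stored 250 comes first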
@@ -17,6 +17,12 @@ module Karafka
           @hash[:errors] = estimate_errors_count
         end

+        # @return [Integer] number of jobs that are not yet running. This includes jobs on the
+        #   workers queue as well as jobs in the scheduling
+        def pending
+          enqueued + waiting
+        end
+
         private

         # Estimates the number of errors present in the errors topic.
@@ -15,7 +15,7 @@ module Karafka
           fetch_topics_data(state, stats)
           fetch_rebalance_ages(state, stats)

-          stats
+          sort_structure(stats)
         end

         private
@@ -49,7 +49,7 @@ module Karafka
               dispatched_at - rebalance_age_ms / 1_000
             end

-            stats[cg_name][:rebalance_ages] ||=
+            stats[cg_name][:rebalance_ages] ||= Set.new
             stats[cg_name][:rebalance_ages] += ages
           end

@@ -80,6 +80,27 @@ module Karafka
             end
           end
         end
+
+        # Sorts data so we always present it in an alphabetical order
+        #
+        # @param stats [Hash] stats hash
+        # @return [Hash] sorted data
+        def sort_structure(stats)
+          # Ensure that partitions for all topics are in correct order
+          # Ensure topics are in alphabetical order always
+          stats.each_value do |cg_data|
+            topics = cg_data[:topics]
+
+            topics.each do |topic_name, t_data|
+              topics[topic_name] = Hash[t_data.sort_by { |key, _| key }]
+            end
+
+            cg_data[:topics] = Hash[topics.sort_by { |key, _| key }]
+          end
+
+          # Ensure that all consumer groups are always in the same order
+          Hash[stats.sort_by { |key, _| key }]
+        end
       end
     end
   end
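The sort_structure helper above relies on Ruby hashes preserving insertion order, so rebuilding a hash from sorted pairs fixes the display order; a minimal sketch with made-up consumer group names:

  stats = { 'group_b' => {}, 'group_a' => {} }
  Hash[stats.sort_by { |key, _| key }]
  # => { 'group_a' => {}, 'group_b' => {} }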
@@ -0,0 +1,48 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Web
+    module Ui
+      module Models
+        # Model representing group of jobs
+        #
+        # It simplifies filtering on running jobs and others, etc
+        class Jobs
+          include Enumerable
+          extend Forwardable
+
+          # Last three methods are needed to provide sorting
+          def_delegators :@jobs_array, :empty?, :size, :map!, :sort_by!, :reverse!
+
+          # @param jobs_array [Array<Job>] all jobs we want to enclose
+          def initialize(jobs_array)
+            @jobs_array = jobs_array
+          end
+
+          # @return [Jobs] running jobs
+          def running
+            select { |job| job.status == 'running' }
+          end
+
+          # @return [Jobs] pending jobs
+          def pending
+            select { |job| job.status == 'pending' }
+          end
+
+          # Creates a new Jobs object with selected jobs
+          # @param block [Proc] select proc
+          # @return [Jobs] selected jobs enclosed with the Jobs object
+          def select(&block)
+            self.class.new(super(&block))
+          end
+
+          # Allows for iteration over jobs
+          # @param block [Proc] block to call for each job
+          def each(&block)
+            @jobs_array.each(&block)
+          end
+        end
+      end
+    end
+  end
+end
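A hypothetical illustration of the Jobs wrapper above; the Job stand-ins only need to respond to #status the way the Web UI job models do:

  Job = Struct.new(:id, :status)

  jobs = Karafka::Web::Ui::Models::Jobs.new(
    [Job.new(1, 'running'), Job.new(2, 'pending'), Job.new(3, 'running')]
  )

  jobs.running.size      # => 2
  jobs.pending.map(&:id) # => [2]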
@@ -12,9 +12,28 @@ module Karafka
         # @param aggregated [Hash] all aggregated for all periods
         # @param period [Symbol] period that we are interested in
         def initialize(aggregated, period)
+          @period = period
           @data = aggregated.to_h.fetch(period)
         end

+        # @return [String] JSON with bytes sent and bytes received metrics
+        def data_transfers
+          scale_factor = Processing::TimeSeriesTracker::TIME_RANGES
+                         .fetch(@period)
+                         .fetch(:resolution)
+                         .then { |factor| factor / 1_024.to_f }
+
+          received = bytes_received.map do |element|
+            [element[0], element[1] * scale_factor]
+          end
+
+          sent = bytes_sent.map do |element|
+            [element[0], element[1] * scale_factor]
+          end
+
+          { received: received, sent: sent }.to_json
+        end
+
         # @param args [Array<String>] names of aggregated we want to show
         # @return [String] JSON with data about all the charts we were interested in
         def with(*args)
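The scaling in data_transfers is plain per-point arithmetic; a minimal standalone sketch, assuming a hypothetical resolution of 5 for the selected period:

  resolution = 5
  scale_factor = resolution / 1_024.to_f # ~0.00488

  bytes_received = [[1_700_000_000, 2_048], [1_700_000_005, 4_096]]
  bytes_received.map { |time, value| [time, value * scale_factor] }
  # => [[1700000000, 10.0], [1700000005, 20.0]]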
@@ -24,6 +43,20 @@ module Karafka
             .to_json
         end

+        # @return [Array<Array<Symbol, Integer>>] active listeners statistics
+        def active_listeners
+          listeners.map do |listener|
+            [listener[0], listener[1].fetch(:active)]
+          end
+        end
+
+        # @return [Array<Array<Symbol, Integer>>] standby listeners statistics
+        def standby_listeners
+          listeners.map do |listener|
+            [listener[0], listener[1].fetch(:standby)]
+          end
+        end
+
         # @param method_name [String]
         # @param include_private [Boolean]
         def respond_to_missing?(method_name, include_private = false)
@@ -76,20 +76,11 @@ module Karafka
           topic_without_cg = topic.split('[').first

           metrics.each do |current|
-            ls_offset = current.last[:ls_offset] || 0
             ls_offset_fd = current.last[:ls_offset_fd] || 0
-            hi_offset = current.last[:hi_offset] || 0

             # We convert this to seconds from milliseconds due to our Web UI precision
             # Reporting is in ms for consistency
-            normalized_fd = (ls_offset_fd / 1_000).round
-            # In case ls_offset and hi_offset are the same, it means we're reached eof
-            # and we just don't have more data. In cases like this, LSO freeze duration
-            # will grow because LSO will remain unchanged, but it does not mean it is
-            # frozen. It means there is just no more data in the topic partition
-            # This means we need to nullify this case, otherwise it would report, that
-            # lso is hanging.
-            normalized_fd = 0 if ls_offset == hi_offset
+            normalized_fd = (ls_offset_fd / 1_000.0).round

             topics[topic_without_cg][current.first] << normalized_fd
           end
@@ -244,18 +244,34 @@ module Karafka
           )
         end

+        # @return [Status::Step] are there any active topics in the routing that are not present
+        #   in the cluster (does not apply to patterns)
+        def routing_topics_presence
+          if consumers_reports_schema_state.success?
+            existing = @cluster_info.topics.map { |topic| topic[:topic_name] }
+
+            missing = ::Karafka::App
+                      .routes
+                      .flat_map(&:topics)
+                      .flat_map { |topics| topics.map(&:itself) }
+                      .select(&:active?)
+                      .reject { |topic| topic.respond_to?(:patterns?) ? topic.patterns? : false }
+                      .map(&:name)
+                      .uniq
+                      .then { |routed_topics| routed_topics - existing }
+
+            Step.new(missing.empty? ? :success : :warning, missing)
+          else
+            Step.new(:halted, [])
+          end
+        end
+
         # @return [Status::Step] is Pro enabled with all of its features.
         # @note It's not an error not to have it but we want to warn, that some of the features
         #   may not work without Pro.
         def pro_subscription
-          status = if consumers_reports_schema_state.success?
-                     ::Karafka.pro? ? :success : :warning
-                   else
-                     :halted
-                   end
-
           Step.new(
-            status,
+            ::Karafka.pro? ? :success : :warning,
             nil
           )
         end
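A worked illustration of the routing_topics_presence check above, with hypothetical topic names; only actively routed, non-pattern topics missing from the cluster trigger the warning:

  existing = %w[payments_states payments_reports]
  routed   = %w[payments_states payments_reports payments_events]

  missing = routed - existing # => ["payments_events"]
  # missing is not empty, so the step becomes Step.new(:warning, ["payments_events"])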
@@ -8,7 +8,9 @@ module Karafka
         class Topic < Lib::HashProxy
           # @return [Array<Partition>] All topic partitions data
           def partitions
-            super.
+            super.map do |partition_id, partition_hash|
+              partition_hash[:partition_id] = partition_id
+
               Partition.new(partition_hash)
             end
           end
@@ -26,6 +26,22 @@ module Karafka
         def payload?(message)
           !message.headers.key?('encryption')
         end
+
+        # Should it be allowed to download this message raw payload
+        #
+        # @param message [::Karafka::Messages::Message]
+        # @return [Boolean] true if downloads allowed
+        def download?(message)
+          payload?(message)
+        end
+
+        # Should it be allowed to download the deserialized and sanitized payload as JSON
+        #
+        # @param message [::Karafka::Messages::Message]
+        # @return [Boolean] true if exports allowed
+        def export?(message)
+          payload?(message)
+        end
       end
     end
   end
@@ -49,8 +49,16 @@ module Karafka
         r.on 'consumers' do
           controller = Controllers::Consumers.new(params)

-          r.
-
+          r.on String, 'jobs' do |process_id|
+            r.get 'running' do
+              controller.running_jobs(process_id)
+            end
+
+            r.get 'pending' do
+              controller.pending_jobs(process_id)
+            end
+
+            r.redirect root_path("consumers/#{process_id}/jobs/running")
           end

           r.get String, 'subscriptions' do |process_id|
@@ -67,9 +75,18 @@ module Karafka
           end
         end

-        r.
+        r.on 'jobs' do
           controller = Controllers::Jobs.new(params)
-
+
+          r.get 'running' do
+            controller.running
+          end
+
+          r.get 'pending' do
+            controller.pending
+          end
+
+          r.redirect root_path('jobs/running')
         end

         r.on 'routing' do
@@ -133,6 +150,14 @@ module Karafka
             r.post String, Integer, Integer, 'republish' do |topic_id, partition_id, offset|
               controller.republish(topic_id, partition_id, offset)
             end
+
+            r.get String, Integer, Integer, 'download' do |topic_id, partition_id, offset|
+              controller.download(topic_id, partition_id, offset)
+            end
+
+            r.get String, Integer, Integer, 'export' do |topic_id, partition_id, offset|
+              controller.export(topic_id, partition_id, offset)
+            end
           end

           r.on 'health' do
@@ -146,14 +171,27 @@ module Karafka
             controller.overview
           end

+          r.get 'changes' do
+            controller.changes
+          end
+
           r.get do
             r.redirect root_path('health/overview')
           end
         end

-        r.
+        r.on 'cluster' do
           controller = Controllers::Cluster.new(params)
-
+
+          r.get 'brokers' do
+            controller.brokers
+          end
+
+          r.get 'topics' do
+            controller.topics
+          end
+
+          r.redirect root_path('cluster/brokers')
         end

         r.on 'errors' do
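Read together, the routing hunks above wire up the following new Web UI endpoints (paths relative to the mount point; the download/export routes additionally live under their enclosing explorer scope, which is outside these hunks):

  consumers/:process_id/jobs/running
  consumers/:process_id/jobs/pending    (consumers/:process_id/jobs redirects to running)
  jobs/running
  jobs/pending                          (jobs redirects to jobs/running)
  :topic/:partition/:offset/download
  :topic/:partition/:offset/export
  health/changes
  cluster/brokers
  cluster/topics                        (cluster redirects to cluster/brokers)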
@@ -18,18 +18,39 @@ module Karafka
       module Controllers
         # Controller for displaying consumers states and details about them
         class Consumers < Ui::Controllers::Base
+          self.sortable_attributes = %w[
+            name
+            started_at
+            lag_stored
+            id
+            lag_stored_d
+            committed_offset
+            stored_offset
+            fetch_state
+            poll_state
+            lso_risk_state
+            topic
+            consumer
+            type
+            messages
+            first_offset
+            last_offset
+            updated_at
+          ].freeze
+
           # Consumers list
           def index
             @current_state = Models::ConsumersState.current!
             @counters = Models::Counters.new(@current_state)
+
             @processes, last_page = Lib::Paginations::Paginators::Arrays.call(
-              Models::Processes.active(@current_state),
+              refine(Models::Processes.active(@current_state)),
               @params.current_page
             )

             paginate(@params.current_page, !last_page)

-
+            render
           end

           # @param process_id [String] id of the process we're interested in
@@ -37,19 +58,44 @@ module Karafka
             current_state = Models::ConsumersState.current!
             @process = Models::Process.find(current_state, process_id)

-
+            render
+          end
+
+          # Renders details about running jobs
+          #
+          # @param process_id [String] id of the process we're interested in
+          def running_jobs(process_id)
+            details(process_id)
+
+            @running_jobs = @process.jobs.running
+
+            refine(@running_jobs)
+
+            render
           end

+          # Renders details about pending jobs
+          #
           # @param process_id [String] id of the process we're interested in
-          def
+          def pending_jobs(process_id)
             details(process_id)
-
+
+            @pending_jobs = @process.jobs.pending
+
+            refine(@pending_jobs)
+
+            render
           end

           # @param process_id [String] id of the process we're interested in
           def subscriptions(process_id)
             details(process_id)
-
+
+            # We want to have sorting but on a per subscription group basis and not to sort
+            # everything
+            @process.consumer_groups.each { |subscription_group| refine(subscription_group) }
+
+            render
           end
         end
       end
@@ -35,7 +35,7 @@ module Karafka

           paginate(@params.current_page, next_page)

-
+          render
         end

         # @param partition_id [Integer] id of the partition of errors we are interested in
@@ -58,7 +58,7 @@ module Karafka
             @error_messages.map(&:offset)
           )

-
+          render
         end

         # Shows given error details
@@ -77,7 +77,7 @@ module Karafka
           watermark_offsets = Ui::Models::WatermarkOffsets.find(errors_topic, partition_id)
           paginate(offset, watermark_offsets.low, watermark_offsets.high)

-
+          render
         end

         private