karafka-web 0.11.2 → 0.11.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +24 -0
  3. data/Gemfile +0 -2
  4. data/Gemfile.lock +78 -39
  5. data/bin/integrations +44 -0
  6. data/bin/rspecs +6 -2
  7. data/bin/verify_kafka_warnings +1 -1
  8. data/config/locales/errors.yml +1 -0
  9. data/docker-compose.yml +1 -3
  10. data/karafka-web.gemspec +2 -2
  11. data/lib/karafka/web/app.rb +2 -3
  12. data/lib/karafka/web/cli/help.rb +1 -1
  13. data/lib/karafka/web/config.rb +8 -0
  14. data/lib/karafka/web/contracts/base.rb +2 -4
  15. data/lib/karafka/web/contracts/config.rb +5 -5
  16. data/lib/karafka/web/deserializer.rb +6 -1
  17. data/lib/karafka/web/errors.rb +8 -5
  18. data/lib/karafka/web/management/actions/enable.rb +14 -1
  19. data/lib/karafka/web/management/migrations/consumers_reports/1761645571_rename_process_name_to_id.rb +38 -0
  20. data/lib/karafka/web/management/migrator.rb +3 -2
  21. data/lib/karafka/web/pro/commanding/commands/base.rb +1 -1
  22. data/lib/karafka/web/pro/commanding/contracts/config.rb +2 -4
  23. data/lib/karafka/web/pro/commanding/handlers/partitions/tracker.rb +2 -3
  24. data/lib/karafka/web/pro/ui/controllers/scheduled_messages/schedules_controller.rb +1 -2
  25. data/lib/karafka/web/pro/ui/controllers/topics/distributions_controller.rb +1 -3
  26. data/lib/karafka/web/pro/ui/lib/branding/contracts/config.rb +2 -4
  27. data/lib/karafka/web/pro/ui/lib/policies/contracts/config.rb +2 -4
  28. data/lib/karafka/web/pro/ui/lib/search/contracts/config.rb +3 -5
  29. data/lib/karafka/web/pro/ui/lib/search/contracts/form.rb +3 -5
  30. data/lib/karafka/web/pro/ui/lib/search/runner.rb +14 -1
  31. data/lib/karafka/web/pro/ui/routes/errors.rb +3 -3
  32. data/lib/karafka/web/pro/ui/routes/explorer.rb +3 -3
  33. data/lib/karafka/web/pro/ui/views/health/_no_partition_data.erb +9 -0
  34. data/lib/karafka/web/pro/ui/views/health/_partitions_with_fallback.erb +41 -0
  35. data/lib/karafka/web/pro/ui/views/health/changes.erb +12 -13
  36. data/lib/karafka/web/pro/ui/views/health/lags.erb +12 -13
  37. data/lib/karafka/web/pro/ui/views/health/offsets.erb +12 -13
  38. data/lib/karafka/web/pro/ui/views/health/overview.erb +15 -16
  39. data/lib/karafka/web/processing/consumer.rb +8 -3
  40. data/lib/karafka/web/processing/consumers/aggregators/metrics.rb +1 -1
  41. data/lib/karafka/web/processing/consumers/aggregators/state.rb +10 -6
  42. data/lib/karafka/web/processing/consumers/contracts/state.rb +6 -1
  43. data/lib/karafka/web/processing/consumers/reports_migrator.rb +49 -0
  44. data/lib/karafka/web/processing/time_series_tracker.rb +1 -1
  45. data/lib/karafka/web/tracking/consumers/contracts/report.rb +1 -1
  46. data/lib/karafka/web/tracking/consumers/contracts/topic.rb +1 -0
  47. data/lib/karafka/web/tracking/consumers/listeners/errors.rb +2 -1
  48. data/lib/karafka/web/tracking/consumers/listeners/processing.rb +46 -0
  49. data/lib/karafka/web/tracking/consumers/listeners/statistics.rb +1 -0
  50. data/lib/karafka/web/tracking/consumers/sampler/enrichers/base.rb +20 -0
  51. data/lib/karafka/web/tracking/consumers/sampler/enrichers/consumer_groups.rb +116 -0
  52. data/lib/karafka/web/tracking/consumers/sampler/metrics/base.rb +20 -0
  53. data/lib/karafka/web/tracking/consumers/sampler/metrics/container.rb +113 -0
  54. data/lib/karafka/web/tracking/consumers/sampler/metrics/jobs.rb +60 -0
  55. data/lib/karafka/web/tracking/consumers/sampler/metrics/network.rb +48 -0
  56. data/lib/karafka/web/tracking/consumers/sampler/metrics/os.rb +206 -0
  57. data/lib/karafka/web/tracking/consumers/sampler/metrics/server.rb +33 -0
  58. data/lib/karafka/web/tracking/consumers/sampler.rb +34 -215
  59. data/lib/karafka/web/tracking/contracts/error.rb +1 -0
  60. data/lib/karafka/web/tracking/helpers/ttls/hash.rb +2 -3
  61. data/lib/karafka/web/tracking/helpers/ttls/stats.rb +1 -2
  62. data/lib/karafka/web/tracking/producers/listeners/base.rb +1 -1
  63. data/lib/karafka/web/tracking/producers/listeners/errors.rb +2 -1
  64. data/lib/karafka/web/tracking/ui/errors.rb +76 -0
  65. data/lib/karafka/web/ui/base.rb +19 -9
  66. data/lib/karafka/web/ui/controllers/requests/execution_wrapper.rb +2 -4
  67. data/lib/karafka/web/ui/controllers/requests/params.rb +1 -1
  68. data/lib/karafka/web/ui/helpers/application_helper.rb +1 -1
  69. data/lib/karafka/web/ui/helpers/paths_helper.rb +6 -9
  70. data/lib/karafka/web/ui/lib/sorter.rb +1 -1
  71. data/lib/karafka/web/ui/models/health.rb +14 -9
  72. data/lib/karafka/web/ui/models/jobs.rb +4 -6
  73. data/lib/karafka/web/ui/models/message.rb +7 -8
  74. data/lib/karafka/web/ui/models/metrics/aggregated.rb +4 -4
  75. data/lib/karafka/web/ui/models/metrics/charts/aggregated.rb +1 -2
  76. data/lib/karafka/web/ui/models/metrics/charts/topics.rb +2 -2
  77. data/lib/karafka/web/ui/models/metrics/topics.rb +3 -4
  78. data/lib/karafka/web/ui/models/recurring_tasks/schedule.rb +1 -1
  79. data/lib/karafka/web/ui/public/javascripts/application.min.js.gz +0 -0
  80. data/lib/karafka/web/ui/public/stylesheets/application.min.css +199 -105
  81. data/lib/karafka/web/ui/public/stylesheets/application.min.css.br +0 -0
  82. data/lib/karafka/web/ui/public/stylesheets/application.min.css.gz +0 -0
  83. data/lib/karafka/web/ui/public/stylesheets/libs/highlight_dark.min.css.gz +0 -0
  84. data/lib/karafka/web/ui/public/stylesheets/libs/highlight_light.min.css.gz +0 -0
  85. data/lib/karafka/web/ui/routes/errors.rb +3 -3
  86. data/lib/karafka/web/ui/views/shared/exceptions/unhandled_error.erb +42 -0
  87. data/lib/karafka/web/version.rb +1 -1
  88. data/lib/karafka/web.rb +10 -13
  89. data/package-lock.json +184 -240
  90. data/package.json +3 -3
  91. data/renovate.json +13 -0
  92. metadata +19 -4

data/lib/karafka/web/tracking/consumers/sampler.rb
@@ -15,7 +15,7 @@ module Karafka
         # Current schema version
         # This is used for detecting incompatible changes and not using outdated data during
         # upgrades
-        SCHEMA_VERSION = '1.4.1'
+        SCHEMA_VERSION = '1.5.0'

         # Counters that count events occurrences during the given window
         COUNTERS_BASE = {

@@ -72,6 +72,17 @@ module Karafka
           @memory_total_usage = 0
           @memory_usage = 0
           @cpu_usage = [-1, -1, -1]
+
+          # Select and instantiate appropriate system metrics collector based on environment
+          # Use container-aware collector if cgroups are available, otherwise use OS-based
+          metrics_class = if Metrics::Container.active?
+                            Metrics::Container
+                          else
+                            Metrics::Os
+                          end
+          @system_metrics = metrics_class.new(@shell)
+          @network_metrics = Metrics::Network.new(@windows)
+          @server_metrics = Metrics::Server.new
         end

         # We cannot report and track the same time, that is why we use mutex here. To make sure
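
Note: `Metrics::Container.active?` itself is not part of this excerpt; it lives in the new data/lib/karafka/web/tracking/consumers/sampler/metrics/container.rb (+113 lines) from the file index above. As a rough illustration only, cgroup-based detection along the lines the comment describes could look like this; the file paths and class body here are assumptions, not the gem's actual code:

    # Hypothetical sketch of cgroup detection - not the gem's implementation
    module Metrics
      class Container
        # cgroups v2 exposes the memory limit here; cgroups v1 keeps it under memory/
        CGROUP_V2_LIMIT = '/sys/fs/cgroup/memory.max'
        CGROUP_V1_LIMIT = '/sys/fs/cgroup/memory/memory.limit_in_bytes'

        # @return [Boolean] true when a cgroup memory limit file is readable,
        #   which indicates the process runs inside a container-managed cgroup
        def self.active?
          File.readable?(CGROUP_V2_LIMIT) || File.readable?(CGROUP_V1_LIMIT)
        end
      end
    end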

@@ -94,7 +105,7 @@ module Karafka
             started_at: started_at,
             status: ::Karafka::App.config.internal.status.to_s,
             execution_mode: ::Karafka::Server.execution_mode.to_s,
-            listeners: listeners,
+            listeners: @server_metrics.listeners,
             workers: workers,
             memory_usage: @memory_usage,
             memory_total_usage: @memory_total_usage,

@@ -103,8 +114,8 @@ module Karafka
             threads: threads,
             cpu_usage: @cpu_usage,
             tags: Karafka::Process.tags,
-            bytes_received: bytes_received,
-            bytes_sent: bytes_sent
+            bytes_received: @network_metrics.bytes_received,
+            bytes_sent: @network_metrics.bytes_sent
           },

           versions: {

@@ -117,8 +128,8 @@ module Karafka
             librdkafka: librdkafka_version
           },

-          stats: jobs_queue_statistics.merge(
-            utilization: utilization
+          stats: jobs_metrics.jobs_queue_statistics.merge(
+            utilization: jobs_metrics.utilization
           ).merge(total: @counters),

           consumer_groups: enriched_consumer_groups,

@@ -156,133 +167,44 @@ module Karafka
           @started_at ||= float_now
         end

-        # @return [Numeric] % utilization of all the threads. 100% means all the threads are
-        #   utilized all the time within the given time window. 0% means, nothing is happening
-        #   most if not all the time.
-        def utilization
-          totals = windows.m1[:processed_total_time]
-
-          return 0 if totals.empty?
-
-          timefactor = float_now - @started_at
-          timefactor = timefactor > 60 ? 60 : timefactor
-
-          # We divide by 1_000 to convert from milliseconds
-          # We multiply by 100 to have it in % scale
-          (totals.sum / 1_000 / workers / timefactor * 100).round(2)
-        end
-
-        # @return [Hash] number of active and standby listeners
-        def listeners
-          if Karafka::Server.listeners
-            active = Karafka::Server.listeners.count(&:active?)
-            total = Karafka::Server.listeners.count.to_i
-
-            { active: active, standby: total - active }
-          else
-            { active: 0, standby: 0 }
-          end
+        # @return [Metrics::Jobs] jobs metrics instance
+        # @note Lazy initialization since it depends on started_at and workers
+        def jobs_metrics
+          @jobs_metrics ||= Metrics::Jobs.new(@windows, started_at, workers)
         end

         # @return [Integer] memory used by this process in kilobytes
         def memory_usage
-          pid = ::Process.pid
-
-          case RUBY_PLATFORM
-          # Reading this that way is cheaper than running a shell command
-          when /linux/
-            IO.readlines("/proc/#{pid}/status").each do |line|
-              next unless line.start_with?('VmRSS:')
-
-              break line.split[1].to_i
-            end
-          when /darwin|bsd/
-            @shell
-              .call("ps -o pid,rss -p #{pid}")
-              .lines
-              .last
-              .split
-              .last
-              .to_i
-          else
-            0
-          end
-        end
-
-        # @return [Hash] job queue statistics
-        def jobs_queue_statistics
-          # We return empty stats in case jobs queue is not yet initialized
-          base = Karafka::Server.jobs_queue&.statistics || { busy: 0, enqueued: 0 }
-          stats = base.slice(:busy, :enqueued, :waiting)
-          stats[:waiting] ||= 0
-          # busy - represents number of jobs that are being executed currently
-          # enqueued - jobs that are in the queue but not being picked up yet
-          # waiting - jobs that are not scheduled on the queue but will be
-          #   be enqueued in case of advanced schedulers
-          stats
+          @system_metrics.memory_usage
         end

         # Total memory used in the OS
         def memory_total_usage
-          return 0 unless @memory_threads_ps
-
-          @memory_threads_ps.map(&:first).sum
+          @system_metrics.memory_total_usage(@memory_threads_ps)
         end

-        # @return [Integer] total amount of memory
+        # @return [Integer] total amount of memory in kilobytes
+        #   In containerized environments (Docker/Kubernetes), this returns the container's
+        #   memory limit. Otherwise, returns the host's total memory.
         def memory_size
-          @memory_size ||= case RUBY_PLATFORM
-                           when /linux/
-                             mem_info = File.read('/proc/meminfo')
-                             mem_total_line = mem_info.match(/MemTotal:\s*(?<total>\d+)/)
-                             mem_total_line['total'].to_i
-                           when /darwin|bsd/
-                             @shell
-                               .call('sysctl -a')
-                               .split("\n")
-                               .find { |line| line.start_with?('hw.memsize:') }
-                               .to_s
-                               .split(' ')
-                               .last
-                               .to_i
-                           else
-                             0
-                           end
+          @memory_size ||= @system_metrics.memory_size
         end

         # @return [Array<Float>] load averages for last 1, 5 and 15 minutes
         def cpu_usage
-          case RUBY_PLATFORM
-          when /linux/
-            File
-              .read('/proc/loadavg')
-              .split(' ')
-              .first(3)
-              .map(&:to_f)
-          when /darwin|bsd/
-            @shell
-              .call('w | head -1')
-              .strip
-              .split(' ')
-              .map(&:to_f)
-              .last(3)
-          else
-            [-1, -1, -1]
-          end
+          @system_metrics.cpu_usage
         end

         # @return [Integer] number of process threads.
         # @note This returns total number of threads from the OS perspective including native
         #   extensions threads, etc.
         def threads
-          return 0 unless @memory_threads_ps
-
-          @memory_threads_ps.find { |row| row.last == ::Process.pid }[1]
+          @system_metrics.threads(@memory_threads_ps)
         end

         # @return [Integer] CPU count
         def cpus
-          @cpus ||= Etc.nprocessors
+          @cpus ||= @system_metrics.cpus
         end

         # @return [Integer] number of threads that process work
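
For reference, the utilization formula removed above (and presumably carried over into the new Metrics::Jobs object, which now serves jobs_metrics.utilization) expresses busy time as a percentage of total worker capacity. A worked example with assumed numbers:

    # 5 workers observed over a full 60-second window, with 120_000 ms of
    # summed per-job processing time across all worker threads:
    totals_sum = 120_000.0
    workers = 5
    timefactor = 60
    (totals_sum / 1_000 / workers / timefactor * 100).round(2) # => 40.0, i.e. 40% busy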

@@ -292,119 +214,16 @@ module Karafka

         # Loads our ps results into memory so we can extract from them whatever we need
         def memory_threads_ps
-          @memory_threads_ps = case RUBY_PLATFORM
-                               when /linux/
-                                 page_size = Karafka::Web::Tracking::Helpers::Sysconf.page_size
-                                 status_file = "/proc/#{::Process.pid}/status"
-
-                                 pid = status_file.match(%r{/proc/(\d+)/status})[1]
-
-                                 # Extract thread count from /proc/<pid>/status
-                                 thcount = File.read(status_file)[/^Threads:\s+(\d+)/, 1].to_i
-
-                                 # Extract RSS from /proc/<pid>/statm (second field)
-                                 statm_file = "/proc/#{pid}/statm"
-                                 rss_pages = File.read(statm_file).split[1].to_i rescue 0
-                                 # page size is retrieved from Sysconf
-                                 rss_kb = (rss_pages * page_size) / 1024
-
-                                 [[rss_kb, thcount, pid.to_i]]
-                               # thcount is not available on macos ps
-                               # because of that we inject 0 as threads count similar to how
-                               # we do on windows
-                               when /darwin|bsd/
-                                 @shell
-                                   .call('ps -A -o rss=,pid=')
-                                   .split("\n")
-                                   .map { |row| row.strip.split(' ').map(&:to_i) }
-                                   .map { |row| [row.first, 0, row.last] }
-                               else
-                                 @memory_threads_ps = false
-                               end
+          @memory_threads_ps = @system_metrics.memory_threads_ps
         end

         # Consumer group details need to be enriched with details about polling that comes from
         # Karafka level. It is also time based, hence we need to materialize it only at the
         # moment of message dispatch to have it accurate.
         def enriched_consumer_groups
-          @consumer_groups.each_value do |cg_details|
-            cg_details.each do
-              cg_details.fetch(:subscription_groups, {}).each do |sg_id, sg_details|
-                # This should be always available, since the subscription group polled at time
-                # is first initialized before we start polling, there should be no case where
-                # we have statistics about a given subscription group but we do not have the
-                # sg reference
-                sg_tracking = subscription_groups.fetch(sg_id)
-
-                polled_at = sg_tracking.fetch(:polled_at)
-                sg_details[:state][:poll_age] = (monotonic_now - polled_at).round(2)
-
-                sg_details[:topics].each do |topic_name, topic_details|
-                  topic_details[:partitions].each do |partition_id, partition_details|
-                    # Always assume non-transactional as default. Will be overwritten by the
-                    # consumer level details if collected
-                    partition_details[:transactional] ||= false
-
-                    # If we have stored offset or stored lag, it means it's not a transactional
-                    # consumer at all so we can skip enrichment
-                    next if partition_details[:lag_stored].positive?
-                    next if partition_details[:stored_offset].positive?
-                    next unless sg_tracking[:topics].key?(topic_name)
-                    next unless sg_tracking[:topics][topic_name].key?(partition_id)
-
-                    k_partition_details = sg_tracking[:topics][topic_name][partition_id]
-
-                    # If seek offset was not yey set, nothing to enrich
-                    next unless k_partition_details[:seek_offset].positive?
-
-                    partition_details[:transactional] = k_partition_details[:transactional]
-
-                    # Seek offset is always +1 from the last stored in Karafka
-                    seek_offset = k_partition_details[:seek_offset]
-                    stored_offset = seek_offset - 1
-
-                    # In case of transactions we have to compute the lag ourselves
-                    # -1 because ls offset (or high watermark) is last + 1
-                    lag = partition_details[:ls_offset] - seek_offset
-                    # This can happen if ls_offset is refreshed slower than our stored offset
-                    # fetching from Karafka transactional layer
-                    lag = 0 if lag.negative?
-
-                    partition_details[:lag] = lag
-                    partition_details[:lag_d] = 0
-                    partition_details[:lag_stored] = lag
-                    partition_details[:lag_stored_d] = 0
-                    partition_details[:stored_offset] = stored_offset
-                    partition_details[:committed_offset] = stored_offset
-                  end
-                end
-              end
-            end
-          end
-
-          @consumer_groups
-        end
-
-        # @return [Integer] number of bytes received per second out of a one minute time window
-        #   by all the consumers
-        # @note We use one minute window to compensate for cases where metrics would be reported
-        #   or recorded faster or slower. This normalizes data
-        def bytes_received
-          @windows
-            .m1
-            .stats_from { |k, _v| k.end_with?('rxbytes') }
-            .rps
-            .round
-        end
-
-        # @return [Integer] number of bytes sent per second out of a one minute time window by
-        #   all the consumers
-        def bytes_sent
-          @windows
-            .m1
-            .stats_from { |k, _v| k.end_with?('txbytes') }
-            .rps
-            .round
+          Enrichers::ConsumerGroups
+            .new(@consumer_groups, @subscription_groups)
+            .call
         end
       end
     end
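
Given the `Metrics::Network.new(@windows)` construction earlier in this diff and the two methods removed above, the extracted network metrics object plausibly keeps the same one-minute-window logic. A sketch only; the real class is data/lib/karafka/web/tracking/consumers/sampler/metrics/network.rb (+48 lines) from the file index:

    # Sketch, assuming the removed bytes_received/bytes_sent logic moved over unchanged
    class Network
      # @param windows [Ttls::Windows] rolling time windows with tracked samples
      def initialize(windows)
        @windows = windows
      end

      # @return [Integer] bytes received per second over the one minute window
      def bytes_received
        @windows.m1.stats_from { |k, _v| k.end_with?('rxbytes') }.rps.round
      end

      # @return [Integer] bytes sent per second over the one minute window
      def bytes_sent
        @windows.m1.stats_from { |k, _v| k.end_with?('txbytes') }.rps.round
      end
    end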

data/lib/karafka/web/tracking/contracts/error.rb
@@ -12,6 +12,7 @@ module Karafka
         configure

         required(:schema_version) { |val| val.is_a?(String) }
+        required(:id) { |val| val.is_a?(String) && !val.empty? }
         required(:type) { |val| val.is_a?(String) && !val.empty? }
         required(:error_class) { |val| val.is_a?(String) && !val.empty? }
         required(:error_message) { |val| val.is_a?(String) }

data/lib/karafka/web/tracking/helpers/ttls/hash.rb
@@ -18,13 +18,12 @@ module Karafka
         # interested in using for aggregated stats. Once filtered, builds a Stats object out
         # of the candidates
         #
-        # @param block [Proc] block for selection of elements for stats
         # @yieldparam [String] key
         # @yieldparam [Ttls::Array] samples
         # @return [Stats]
-        def stats_from(&block)
+        def stats_from(&)
           Stats.new(
-            select(&block)
+            select(&)
           )
         end

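The `(&)` form used here is Ruby 3.1's anonymous block forwarding: the block is captured and passed along without being named, so behavior is identical to the previous explicit `&block` version. A minimal standalone illustration:

    # Ruby 3.1+: `&` forwards the current method's block without naming it
    def wrapped(&)
      [1, 2, 3].map(&)
    end

    wrapped { |n| n * 10 } # => [10, 20, 30]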

data/lib/karafka/web/tracking/helpers/ttls/stats.rb
@@ -17,8 +17,7 @@ module Karafka
         def initialize(ttls_hash)
           @data = ttls_hash
                   .values
-                  .map(&:samples)
-                  .map(&:to_a)
+                  .map { |value| value.samples.to_a }
                   .delete_if { |samples| samples.size < 2 }
                   .map { |samples| samples.map(&:values) }
         end

data/lib/karafka/web/tracking/producers/listeners/base.rb
@@ -12,7 +12,7 @@ module Karafka
           extend Forwardable

           def_delegators :sampler, :track
-          def_delegators :reporter, :report, :report!
+          def_delegators :reporter, :report

           private


data/lib/karafka/web/tracking/producers/listeners/errors.rb
@@ -10,7 +10,7 @@ module Karafka
           include Tracking::Helpers::ErrorInfo

           # Schema used by producers error reporting
-          SCHEMA_VERSION = '1.1.0'
+          SCHEMA_VERSION = '1.2.0'

           private_constant :SCHEMA_VERSION


@@ -34,6 +34,7 @@ module Karafka

             {
               schema_version: SCHEMA_VERSION,
+              id: SecureRandom.uuid,
               producer_id: event[:producer_id],
               type: type,
               error_class: error_class,

data/lib/karafka/web/tracking/ui/errors.rb (new file)
@@ -0,0 +1,76 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Web
+    module Tracking
+      # Namespace for UI specific tracking
+      module Ui
+        # Listener for tracking and reporting Web UI errors directly to Kafka
+        #
+        # Unlike consumer and producer errors that are collected in samplers and dispatched
+        # periodically, UI errors need to be dispatched immediately and asynchronously from
+        # the web process (Puma/Rack) since there's no background reporter running in the web UI.
+        class Errors
+          include ::Karafka::Core::Helpers::Time
+          include Tracking::Helpers::ErrorInfo
+
+          # Schema used by UI error reporting
+          SCHEMA_VERSION = '1.2.0'
+
+          private_constant :SCHEMA_VERSION
+
+          # Tracks any UI related errors and dispatches them to Kafka
+          #
+          # @param event [Karafka::Core::Monitoring::Event]
+          def on_error_occurred(event)
+            # Only process UI errors, ignore all other error types
+            return unless event[:type] == 'web.ui.error'
+
+            error_class, error_message, backtrace = extract_error_info(event[:error])
+
+            error_data = {
+              schema_version: SCHEMA_VERSION,
+              id: SecureRandom.uuid,
+              type: event[:type],
+              error_class: error_class,
+              error_message: error_message,
+              backtrace: backtrace,
+              details: {},
+              occurred_at: float_now,
+              process: {
+                id: process_id
+              }
+            }
+
+            # Validate the error data
+            Tracking::Contracts::Error.new.validate!(error_data)
+
+            # Dispatch error to Kafka asynchronously
+            dispatch(error_data)
+          rescue StandardError => e
+            # If we fail to report an error, log it but don't raise to avoid error loops
+            ::Karafka.logger.error("Failed to report UI error: #{e.message}")
+          end
+
+          private
+
+          # @return [String] unique process identifier
+          def process_id
+            @process_id ||= Tracking::Sampler.new.process_id
+          end
+
+          # Dispatches error to Kafka
+          # @param error_data [Hash] error data to dispatch
+          def dispatch(error_data)
+            ::Karafka::Web.producer.produce_async(
+              topic: ::Karafka::Web.config.topics.errors.name,
+              payload: Zlib::Deflate.deflate(error_data.to_json),
+              key: process_id,
+              headers: { 'zlib' => 'true' }
+            )
+          end
+        end
+      end
+    end
+  end
+end
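
The payload is deflate-compressed and flagged via the 'zlib' header, so anything consuming the errors topic has to inflate it before parsing. A minimal sketch of reading such a record back, assuming a fetched Karafka message (the Web UI's own deserializer.rb, also touched in this release, handles this internally):

    require 'json'
    require 'zlib'

    # message is a Karafka::Messages::Message fetched from the errors topic
    raw = message.raw_payload
    raw = Zlib::Inflate.inflate(raw) if message.headers['zlib'] == 'true'
    error_data = JSON.parse(raw)
    error_data['error_class'] # => e.g. "NoMethodError"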

data/lib/karafka/web/ui/base.rb
@@ -91,13 +91,8 @@ module Karafka
          response.write result.content
        end

-       # Display appropriate error specific to a given error type
-       plugin :error_handler, classes: [
-         ::Rdkafka::RdkafkaError,
-         Errors::Ui::NotFoundError,
-         Errors::Ui::ProOnlyError,
-         Errors::Ui::ForbiddenError
-       ] do |e|
+       # Catch all unhandled exceptions, report them to Karafka monitoring, and display error page
+       plugin :error_handler do |e|
          @error = true

          case e

@@ -107,9 +102,24 @@ module Karafka
          when Errors::Ui::ForbiddenError
            response.status = 403
            view 'shared/exceptions/not_allowed'
-         else
+         when Errors::Ui::NotFoundError
+           response.status = 404
+           view 'shared/exceptions/not_found'
+         when ::Rdkafka::RdkafkaError
            response.status = 404
            view 'shared/exceptions/not_found'
+         else
+           # Report unhandled errors to Karafka monitoring
+           ::Karafka.monitor.instrument(
+             'error.occurred',
+             error: e,
+             caller: self,
+             type: 'web.ui.error'
+           )
+
+           # For all other unhandled errors, show a generic error page
+           response.status = 500
+           view 'shared/exceptions/unhandled_error'
          end
        end

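For the instrumented 'web.ui.error' event to actually reach Kafka, the new Tracking::Ui::Errors listener has to be subscribed to the Karafka monitor; the wiring lives in management/actions/enable.rb (+14 in the file index, not shown in this excerpt). Done by hand it would look roughly like:

    # Sketch: subscribe the UI errors listener so 'error.occurred' events
    # tagged 'web.ui.error' get dispatched to the errors topic
    ::Karafka.monitor.subscribe(
      ::Karafka::Web::Tracking::Ui::Errors.new
    )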

@@ -183,7 +193,7 @@ module Karafka
        def render_response(response)
          response.attributes.each do |key, value|
            instance_variable_set(
-             "@#{key}", value
+             :"@#{key}", value
            )
          end


data/lib/karafka/web/ui/controllers/requests/execution_wrapper.rb
@@ -26,12 +26,10 @@ module Karafka
          # Delegates any method call to the controller and wraps it with before/after hooks
          #
          # @param method_name [Symbol] the name of the method being called
-         # @param args [Array] arguments passed to the method
-         # @param block [Proc] optional block passed to the method
          # @return [Object] the result of the delegated controller method for Roda to operate on
-         def method_missing(method_name, *args, &block)
+         def method_missing(method_name, *, &)
            @controller.run_before_hooks(method_name)
-           result = @controller.public_send(method_name, *args, &block)
+           result = @controller.public_send(method_name, *, &)
            @controller.run_after_hooks(method_name)
            result
          end
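
Unlike the block-only `(&)` seen earlier, forwarding anonymous positional arguments with a bare `*` requires Ruby 3.2. A standalone illustration of the combined form used here and in paths_helper.rb below:

    # Ruby 3.2+: `*` and `&` re-forward positionals and block without naming them
    def wrapped(*, &)
      inner(*, &)
    end

    def inner(a, b)
      yield(a + b)
    end

    wrapped(1, 2) { |sum| sum * 10 } # => 30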

data/lib/karafka/web/ui/controllers/requests/params.rb
@@ -99,7 +99,7 @@ module Karafka
          def current_offset
            @current_offset ||= begin
              offset = @request_params.fetch('offset', -1).to_i
-             offset < -1 ? -1 : offset
+             [offset, -1].max
            end
          end


data/lib/karafka/web/ui/helpers/application_helper.rb
@@ -247,7 +247,7 @@ module Karafka
          else
            name = attribute.to_s.tr('_', ' ').tr('?', '')
            # Always capitalize the name
-           name = name.split(' ').map(&:capitalize).join(' ')
+           name = name.split.map(&:capitalize).join(' ')
          end
        end


data/lib/karafka/web/ui/helpers/paths_helper.rb
@@ -87,27 +87,24 @@ module Karafka

        # Helps build topics paths
        #
-       # @param args [Array<String>] path params for the topics scope
        # @return [String] topics scope path
-       def topics_path(*args)
-         root_path('topics', *args)
+       def topics_path(*)
+         root_path('topics', *)
        end

        # Helps build consumers paths
        #
-       # @param args [Array<String>] path params for consumers scope
        # @return [String] consumers scope path
-       def consumers_path(*args)
-         root_path('consumers', *args)
+       def consumers_path(*)
+         root_path('consumers', *)
        end

        # Helps build per-consumer scope paths
        #
        # @param consumer_id [String] consumer process id
-       # @param args [Array<String>] other path components
        # @return [String] per consumer specific path
-       def consumer_path(consumer_id, *args)
-         consumers_path(consumer_id, *args)
+       def consumer_path(consumer_id, *)
+         consumers_path(consumer_id, *)
        end

        # Helps build scheduled messages paths.

data/lib/karafka/web/ui/lib/sorter.rb
@@ -22,7 +22,7 @@ module Karafka
        # we can sort on method invocations, this needs to be limited and provided on a per
        # controller basis.
        def initialize(sort_query, allowed_attributes:)
-         field, order = sort_query.split(' ')
+         field, order = sort_query.split

          @order = order.to_s.downcase
          @order = ALLOWED_ORDERS.first unless ALLOWED_ORDERS.include?(@order)
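
This change (and the matching one in application_helper.rb above) is purely stylistic: `split(' ')` is String#split's special awk-style mode, which, exactly like argument-less `split`, splits on runs of whitespace and ignores leading whitespace:

    '  sort  desc '.split(' ') # => ["sort", "desc"]
    '  sort  desc '.split      # => ["sort", "desc"]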

data/lib/karafka/web/ui/models/health.rb
@@ -57,10 +57,15 @@ module Karafka
            pt_id = partition.id

            stats[cg_id] ||= { topics: {} }
-           stats[cg_id][:topics][t_name] ||= {}
-           stats[cg_id][:topics][t_name][pt_id] = partition
-           stats[cg_id][:topics][t_name][pt_id][:process] = process
-           stats[cg_id][:topics][t_name][pt_id][:subscription_group_id] = sg_id
+
+           stats[cg_id][:topics][t_name] ||= {
+             partitions: {},
+             partitions_count: topic.partitions_cnt
+           }
+
+           stats[cg_id][:topics][t_name][:partitions][pt_id] = partition
+           stats[cg_id][:topics][t_name][:partitions][pt_id][:process] = process
+           stats[cg_id][:topics][t_name][:partitions][pt_id][:subscription_group_id] = sg_id
          end
        end

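After this change each topic entry in the health stats carries the topic-wide partition count next to the per-process partition assignments. An illustrative shape with made-up values:

    # Partition details now live under :partitions, beside :partitions_count
    stats = {
      'my-group' => {
        topics: {
          'my-topic' => {
            partitions_count: 12,
            partitions: {
              0 => { process: 'worker-0', subscription_group_id: 'sg-0' }
            }
          }
        }
      }
    }

    stats['my-group'][:topics]['my-topic'][:partitions_count] # => 12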

@@ -74,7 +79,7 @@ module Karafka

          ages = consumer_group[:subscription_groups].values.map do |sub_group_details|
            rebalance_age_ms = sub_group_details[:state][:rebalance_age] || 0
-           dispatched_at - rebalance_age_ms / 1_000
+           dispatched_at - (rebalance_age_ms / 1_000)
          end

          stats[cg_name][:rebalance_ages] ||= Set.new

@@ -119,15 +124,15 @@ module Karafka
          stats.each_value do |cg_data|
            topics = cg_data[:topics]

-           topics.each do |topic_name, t_data|
-             topics[topic_name] = Hash[t_data.sort_by { |key, _| key }]
+           topics.each_value do |t_data|
+             t_data[:partitions] = t_data[:partitions].sort_by { |key, _| key }.to_h
            end

-           cg_data[:topics] = Hash[topics.sort_by { |key, _| key }]
+           cg_data[:topics] = topics.sort_by { |key, _| key }.to_h
          end

          # Ensure that all consumer groups are always in the same order
-         Hash[stats.sort_by { |key, _| key }]
+         stats.sort_by { |key, _| key }.to_h
        end
      end
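
The `Hash[...]` to `.sort_by { ... }.to_h` swaps here are behavior-preserving modernizations; both forms build a hash from the sorted key/value pairs:

    h = { 'b' => 2, 'a' => 1 }
    Hash[h.sort_by { |key, _| key }]  # => {"a"=>1, "b"=>2} (legacy form)
    h.sort_by { |key, _| key }.to_h   # => {"a"=>1, "b"=>2} (modern form)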

data/lib/karafka/web/ui/models/jobs.rb
@@ -30,16 +30,14 @@ module Karafka
        end

        # Creates a new Jobs object with selected jobs
-       # @param block [Proc] select proc
        # @return [Jobs] selected jobs enclosed with the Jobs object
-       def select(&block)
-         self.class.new(super(&block))
+       def select(&)
+         self.class.new(super)
        end

        # Allows for iteration over jobs
-       # @param block [Proc] block to call for each job
-       def each(&block)
-         @jobs_array.each(&block)
+       def each(&)
+         @jobs_array.each(&)
        end
      end
    end