karafka 2.5.0.rc2 → 2.5.1.beta1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/{ci.yml → ci_linux_ubuntu_x86_64_gnu.yml} +54 -30
  3. data/.github/workflows/ci_macos_arm64.yml +148 -0
  4. data/.github/workflows/push.yml +2 -2
  5. data/.github/workflows/trigger-wiki-refresh.yml +30 -0
  6. data/.github/workflows/verify-action-pins.yml +1 -1
  7. data/.ruby-version +1 -1
  8. data/CHANGELOG.md +29 -2
  9. data/Gemfile +2 -1
  10. data/Gemfile.lock +56 -27
  11. data/README.md +2 -2
  12. data/bin/integrations +3 -1
  13. data/bin/verify_kafka_warnings +2 -1
  14. data/config/locales/errors.yml +153 -152
  15. data/config/locales/pro_errors.yml +135 -134
  16. data/karafka.gemspec +3 -3
  17. data/lib/active_job/queue_adapters/karafka_adapter.rb +30 -1
  18. data/lib/karafka/active_job/dispatcher.rb +19 -9
  19. data/lib/karafka/admin/acl.rb +7 -8
  20. data/lib/karafka/admin/configs/config.rb +2 -2
  21. data/lib/karafka/admin/configs/resource.rb +2 -2
  22. data/lib/karafka/admin/configs.rb +3 -7
  23. data/lib/karafka/admin/consumer_groups.rb +351 -0
  24. data/lib/karafka/admin/topics.rb +206 -0
  25. data/lib/karafka/admin.rb +42 -451
  26. data/lib/karafka/base_consumer.rb +22 -0
  27. data/lib/karafka/{pro/contracts/server_cli_options.rb → cli/contracts/server.rb} +4 -12
  28. data/lib/karafka/cli/info.rb +1 -1
  29. data/lib/karafka/cli/install.rb +0 -2
  30. data/lib/karafka/connection/client.rb +8 -0
  31. data/lib/karafka/connection/listener.rb +5 -1
  32. data/lib/karafka/connection/status.rb +12 -9
  33. data/lib/karafka/errors.rb +0 -8
  34. data/lib/karafka/instrumentation/assignments_tracker.rb +16 -0
  35. data/lib/karafka/instrumentation/logger_listener.rb +109 -50
  36. data/lib/karafka/pro/active_job/dispatcher.rb +5 -0
  37. data/lib/karafka/pro/cleaner/messages/messages.rb +18 -8
  38. data/lib/karafka/pro/cli/contracts/server.rb +106 -0
  39. data/lib/karafka/pro/encryption/contracts/config.rb +1 -1
  40. data/lib/karafka/pro/loader.rb +1 -1
  41. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +1 -1
  42. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +1 -1
  43. data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb +9 -0
  44. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
  45. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +9 -0
  46. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +1 -1
  47. data/lib/karafka/pro/routing/features/delaying/topic.rb +9 -0
  48. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +1 -1
  49. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +1 -1
  50. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +9 -0
  51. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +1 -1
  52. data/lib/karafka/pro/routing/features/expiring/topic.rb +9 -0
  53. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +1 -1
  54. data/lib/karafka/pro/routing/features/filtering/topic.rb +9 -0
  55. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +1 -1
  56. data/lib/karafka/pro/routing/features/inline_insights/topic.rb +9 -0
  57. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +1 -1
  58. data/lib/karafka/pro/routing/features/long_running_job/topic.rb +9 -0
  59. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +1 -1
  60. data/lib/karafka/pro/routing/features/multiplexing.rb +1 -1
  61. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +1 -1
  62. data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +9 -0
  63. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +1 -1
  64. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +1 -1
  65. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +1 -1
  66. data/lib/karafka/pro/routing/features/patterns/topic.rb +9 -0
  67. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +1 -1
  68. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +1 -1
  69. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +9 -0
  70. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +1 -1
  71. data/lib/karafka/pro/routing/features/recurring_tasks/topic.rb +9 -0
  72. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +1 -1
  73. data/lib/karafka/pro/routing/features/scheduled_messages/topic.rb +9 -0
  74. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +1 -1
  75. data/lib/karafka/pro/routing/features/swarm/topic.rb +9 -0
  76. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +1 -1
  77. data/lib/karafka/pro/routing/features/throttling/topic.rb +9 -0
  78. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +1 -1
  79. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +9 -0
  80. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +1 -1
  81. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +9 -3
  82. data/lib/karafka/pro/swarm/liveness_listener.rb +17 -2
  83. data/lib/karafka/processing/executor.rb +1 -1
  84. data/lib/karafka/routing/builder.rb +0 -3
  85. data/lib/karafka/routing/consumer_group.rb +1 -4
  86. data/lib/karafka/routing/contracts/consumer_group.rb +84 -0
  87. data/lib/karafka/routing/contracts/routing.rb +61 -0
  88. data/lib/karafka/routing/contracts/topic.rb +83 -0
  89. data/lib/karafka/routing/features/active_job/contracts/topic.rb +1 -1
  90. data/lib/karafka/routing/features/active_job/topic.rb +9 -0
  91. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
  92. data/lib/karafka/routing/features/dead_letter_queue/topic.rb +9 -0
  93. data/lib/karafka/routing/features/declaratives/contracts/topic.rb +1 -1
  94. data/lib/karafka/routing/features/declaratives/topic.rb +9 -0
  95. data/lib/karafka/routing/features/deserializers/contracts/topic.rb +1 -1
  96. data/lib/karafka/routing/features/deserializers/topic.rb +9 -0
  97. data/lib/karafka/routing/features/eofed/contracts/topic.rb +1 -1
  98. data/lib/karafka/routing/features/eofed/topic.rb +9 -0
  99. data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +1 -1
  100. data/lib/karafka/routing/features/inline_insights/topic.rb +9 -0
  101. data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +1 -1
  102. data/lib/karafka/routing/features/manual_offset_management/topic.rb +9 -0
  103. data/lib/karafka/routing/subscription_group.rb +1 -10
  104. data/lib/karafka/routing/topic.rb +9 -1
  105. data/lib/karafka/server.rb +2 -7
  106. data/lib/karafka/setup/attributes_map.rb +36 -0
  107. data/lib/karafka/setup/config.rb +6 -7
  108. data/lib/karafka/setup/contracts/config.rb +217 -0
  109. data/lib/karafka/setup/defaults_injector.rb +3 -1
  110. data/lib/karafka/swarm/node.rb +66 -6
  111. data/lib/karafka/swarm.rb +2 -2
  112. data/lib/karafka/templates/karafka.rb.erb +2 -7
  113. data/lib/karafka/version.rb +1 -1
  114. data/lib/karafka.rb +17 -18
  115. metadata +18 -15
  116. data/lib/karafka/contracts/config.rb +0 -210
  117. data/lib/karafka/contracts/consumer_group.rb +0 -81
  118. data/lib/karafka/contracts/routing.rb +0 -59
  119. data/lib/karafka/contracts/server_cli_options.rb +0 -92
  120. data/lib/karafka/contracts/topic.rb +0 -81
  121. data/lib/karafka/swarm/pidfd.rb +0 -147
data/lib/karafka/connection/client.rb
@@ -49,6 +49,7 @@ module Karafka
   :illegal_generation,
   # this will not recover as fencing is permanent
   :fenced, # -144
+  :auto_offset_reset, # -140
   # This can happen for many reasons, including issues with static membership being fenced
   :fatal # -150
 ].freeze
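
`:auto_offset_reset` (librdkafka error -140) joins the list of errors surfaced immediately rather than retried. A minimal sketch of how such a list is typically consulted; the `EARLY_REPORT_ERRORS` constant name and the polling flow are assumptions for illustration, while rdkafka-ruby does expose the failure as a symbol via `Rdkafka::RdkafkaError#code`:

    begin
      client.poll(250)
    rescue Rdkafka::RdkafkaError => e
      # :auto_offset_reset surfaces when the configured offset reset policy
      # cannot be applied, so retrying would loop forever
      raise if EARLY_REPORT_ERRORS.include?(e.code)

      retry
    end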
data/lib/karafka/connection/client.rb
@@ -436,6 +437,13 @@ module Karafka
   @wrapped_kafka.query_watermark_offsets(topic, partition)
 end
 
+# @return [String] safe inspection string that does not cause circular dependencies and
+#   other issues
+def inspect
+  state = @closed ? 'closed' : 'open'
+  "#<#{self.class.name} id=#{@id.inspect} name=#{@name.inspect} state=#{state}>"
+end
+
 private
 
 # When we cannot store an offset, it means we no longer own the partition
data/lib/karafka/connection/listener.rb
@@ -98,6 +98,9 @@ module Karafka
   @status.public_send("#{state}?")
 end
 
+# Defined below
+next if transition == :start!
+
 # Moves listener to a given state
 define_method transition do
   @status.public_send(transition)
data/lib/karafka/connection/listener.rb
@@ -266,7 +269,8 @@ module Karafka
   reset
 
   # Ruby sleep is in seconds
-  sleep_time = reset_backoff / 10_000.0
+  sleep_time = reset_backoff / 1_000.0
+
   sleep(sleep_time) && retry
 end
 
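The divisor fix above corrects a milliseconds-to-seconds conversion that made the listener sleep a tenth of the configured backoff:

    reset_backoff = 1_000     # backoff value expressed in milliseconds

    reset_backoff / 10_000.0  # => 0.1 - old code slept 10x shorter than intended
    reset_backoff / 1_000.0   # => 1.0 - correct input for Ruby's seconds-based sleep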
data/lib/karafka/connection/status.rb
@@ -23,16 +23,19 @@ module Karafka
 
 STATES.each do |state, transition|
   class_eval <<~RUBY, __FILE__, __LINE__ + 1
-    # Moves status to a different state
-    def #{transition}
-      @mutex.synchronize do
-        # Do not allow reverse state transitions (we always go one way) or transition to the
-        # same state as currently
-        return if @status && STATES.keys.index(:#{state}) <= STATES.keys.index(@status)
+    # Defined below
+    if #{transition != :stop!}
+      # Moves status to a different state
+      def #{transition}
+        @mutex.synchronize do
+          # Do not allow reverse state transitions (we always go one way) or transition to the
+          # same state as currently
+          return if @status && STATES.keys.index(:#{state}) <= STATES.keys.index(@status)
 
-        @status = :#{state}
-        conductor.signal
-        monitor.instrument("connection.listener.#{state}", caller: self)
+          @status = :#{state}
+          conductor.signal
+          monitor.instrument("connection.listener.#{state}", caller: self)
+        end
       end
     end
 
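A self-contained sketch of the one-way transition guard used above, with illustrative state names rather than Karafka's internals; the index comparison is what forbids moving backwards through the ordered states:

    class Status
      STATES = { pending: :pending!, running: :running!, stopped: :stopped! }.freeze

      def initialize
        @mutex = Mutex.new
        @status = nil
      end

      STATES.each do |state, transition|
        define_method(transition) do
          @mutex.synchronize do
            # Reject anything that would move backwards or stay in place
            next if @status && STATES.keys.index(state) <= STATES.keys.index(@status)

            @status = state
          end
        end
      end
    end

    status = Status.new
    status.running!
    status.pending! # ignored: :pending precedes :running in the order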
data/lib/karafka/errors.rb
@@ -133,14 +133,6 @@ module Karafka
 # optional dependencies are needed but not available.
 DependencyConstraintsError = Class.new(BaseError)
 
-# Raised when we were not able to open pidfd for given pid
-# This should not happen. If you see it, please report.
-PidfdOpenFailedError = Class.new(BaseError)
-
-# Failed to send signal to a process via pidfd
-# This should not happen. If you see it, please report.
-PidfdSignalFailedError = Class.new(BaseError)
-
 # Raised when given option/feature is not supported on a given platform or when given option
 # is not supported in a given configuration
 UnsupportedOptionError = Class.new(BaseError)
data/lib/karafka/instrumentation/assignments_tracker.rb
@@ -50,6 +50,22 @@ module Karafka
   end
 end
 
+# @return [String] thread-safe and lock-safe inspect implementation
+def inspect
+  info = if @mutex.try_lock
+           begin
+             assignments = @assignments.dup.transform_keys(&:name).inspect
+             "assignments=#{assignments}"
+           ensure
+             @mutex.unlock
+           end
+         else
+           'busy'
+         end
+
+  "#<#{self.class.name} #{info}>"
+end
+
 # When client is under reset due to critical issues, remove all of its assignments as we will
 # get a new set of assignments
 # @param event [Karafka::Core::Monitoring::Event]
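
The `try_lock` approach matters because loggers and debuggers may call `#inspect` from arbitrary threads while the tracker mutates state under the same mutex; rather than block, the method degrades to a placeholder. A stripped-down sketch of the pattern, with illustrative names:

    class Tracker
      def initialize
        @mutex = Mutex.new
        @data = {}
      end

      def inspect
        info =
          if @mutex.try_lock
            begin
              "data=#{@data.dup.inspect}"
            ensure
              @mutex.unlock
            end
          else
            # Never block the caller just to render debug output
            'busy'
          end

        "#<#{self.class.name} #{info}>"
      end
    end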
data/lib/karafka/instrumentation/logger_listener.rb
@@ -172,9 +172,10 @@ module Karafka
 Thread.list.each do |thread|
   tid = (thread.object_id ^ ::Process.pid).to_s(36)
 
+  warn ''
   warn "Thread TID-#{tid} #{thread.name}"
 
-  if thread.backtrace
+  if thread.backtrace && !thread.backtrace.empty?
     warn thread.backtrace.join("\n")
   else
     warn '<no backtrace available>'
data/lib/karafka/instrumentation/logger_listener.rb
@@ -341,48 +342,50 @@ module Karafka
 def on_error_occurred(event)
   type = event[:type]
   error = event[:error]
-  details = (error.backtrace || []).join("\n")
+  backtrace = (error.backtrace || []).join("\n")
+
+  details = [error.to_s, error_details(event)].compact.join(' ')
 
   case type
   when 'consumer.initialized.error'
-    error "Consumer initialized error: #{error}"
-    error details
+    error "Consumer initialized error: #{details}"
+    error backtrace
   when 'consumer.wrap.error'
-    error "Consumer wrap failed due to an error: #{error}"
-    error details
+    error "Consumer wrap failed due to an error: #{details}"
+    error backtrace
   when 'consumer.consume.error'
-    error "Consumer consuming error: #{error}"
-    error details
+    error "Consumer consuming error: #{details}"
+    error backtrace
   when 'consumer.revoked.error'
-    error "Consumer on revoked failed due to an error: #{error}"
-    error details
+    error "Consumer on revoked failed due to an error: #{details}"
+    error backtrace
   when 'consumer.idle.error'
-    error "Consumer idle failed due to an error: #{error}"
-    error details
+    error "Consumer idle failed due to an error: #{details}"
+    error backtrace
   when 'consumer.shutdown.error'
-    error "Consumer on shutdown failed due to an error: #{error}"
-    error details
+    error "Consumer on shutdown failed due to an error: #{details}"
+    error backtrace
   when 'consumer.tick.error'
-    error "Consumer on tick failed due to an error: #{error}"
-    error details
+    error "Consumer on tick failed due to an error: #{details}"
+    error backtrace
   when 'consumer.eofed.error'
-    error "Consumer on eofed failed due to an error: #{error}"
-    error details
+    error "Consumer on eofed failed due to an error: #{details}"
+    error backtrace
   when 'consumer.after_consume.error'
-    error "Consumer on after_consume failed due to an error: #{error}"
-    error details
+    error "Consumer on after_consume failed due to an error: #{details}"
+    error backtrace
   when 'worker.process.error'
-    fatal "Worker processing failed due to an error: #{error}"
-    fatal details
+    fatal "Worker processing failed due to an error: #{details}"
+    fatal backtrace
   when 'connection.listener.fetch_loop.error'
-    error "Listener fetch loop error: #{error}"
-    error details
+    error "Listener fetch loop error: #{details}"
+    error backtrace
   when 'swarm.supervisor.error'
-    fatal "Swarm supervisor crashed due to an error: #{error}"
-    fatal details
+    fatal "Swarm supervisor crashed due to an error: #{details}"
+    fatal backtrace
   when 'runner.call.error'
-    fatal "Runner crashed due to an error: #{error}"
-    fatal details
+    fatal "Runner crashed due to an error: #{details}"
+    fatal backtrace
   when 'app.stopping.error'
     # Counts number of workers and listeners that were still active when forcing the
     # shutdown. Please note, that unless all listeners are closed, workers will not finalize
data/lib/karafka/instrumentation/logger_listener.rb
@@ -400,44 +403,44 @@ module Karafka
 
     error message
   when 'app.forceful_stopping.error'
-    error "Forceful shutdown error occurred: #{error}"
-    error details
+    error "Forceful shutdown error occurred: #{details}"
+    error backtrace
   when 'librdkafka.error'
-    error "librdkafka internal error occurred: #{error}"
-    error details
+    error "librdkafka internal error occurred: #{details}"
+    error backtrace
   # Those can occur when emitted statistics are consumed by the end user and the processing
   # of statistics fails. The statistics are emitted from librdkafka main loop thread and
   # any errors there crash the whole thread
   when 'callbacks.statistics.error'
-    error "callbacks.statistics processing failed due to an error: #{error}"
-    error details
+    error "callbacks.statistics processing failed due to an error: #{details}"
+    error backtrace
   when 'callbacks.error.error'
-    error "callbacks.error processing failed due to an error: #{error}"
-    error details
+    error "callbacks.error processing failed due to an error: #{details}"
+    error backtrace
   # Those will only occur when retries in the client fail and when they did not stop after
   # back-offs
   when 'connection.client.poll.error'
-    error "Data polling error occurred: #{error}"
-    error details
+    error "Data polling error occurred: #{details}"
+    error backtrace
   when 'connection.client.rebalance_callback.error'
-    error "Rebalance callback error occurred: #{error}"
-    error details
+    error "Rebalance callback error occurred: #{details}"
+    error backtrace
   when 'connection.client.unsubscribe.error'
-    error "Client unsubscribe error occurred: #{error}"
-    error details
+    error "Client unsubscribe error occurred: #{details}"
+    error backtrace
   when 'parallel_segments.reducer.error'
-    error "Parallel segments reducer error occurred: #{error}"
-    error details
+    error "Parallel segments reducer error occurred: #{details}"
+    error backtrace
   when 'parallel_segments.partitioner.error'
-    error "Parallel segments partitioner error occurred: #{error}"
-    error details
+    error "Parallel segments partitioner error occurred: #{details}"
+    error backtrace
   when 'virtual_partitions.partitioner.error'
-    error "Virtual partitions partitioner error occurred: #{error}"
-    error details
+    error "Virtual partitions partitioner error occurred: #{details}"
+    error backtrace
   # This handles any custom errors coming from places like Web-UI, etc
   else
-    error "#{type} error occurred: #{error.class} - #{error}"
-    error details
+    error "#{type} error occurred: #{error.class} - #{details}"
+    error backtrace
   end
 end
 
data/lib/karafka/instrumentation/logger_listener.rb
@@ -453,6 +456,62 @@ module Karafka
 def log_polling?
   @log_polling
 end
+
+# Extracts structural location details of the error
+# @param event [Karafka::Core::Monitoring::Event] event details including payload
+# @note It uses similar approach to the Web UI but here we collect less info because it goes
+#   to the logs.
+def error_details(event)
+  caller_ref = event[:caller]
+
+  # Collect extra info if it was a consumer related error.
+  # Those come from user code
+  details = case caller_ref
+            when Karafka::BaseConsumer
+              extract_consumer_info(caller_ref)
+            when Karafka::Connection::Client
+              extract_client_info(caller_ref)
+            when Karafka::Connection::Listener
+              extract_listener_info(caller_ref)
+            else
+              {}
+            end
+
+  return nil if details.empty?
+
+  "[#{details.map { |label, value| "#{label}: #{value}" }.join(', ')}]"
+end
+
+# @param consumer [::Karafka::BaseConsumer]
+# @return [Hash] hash with consumer specific info for details of error
+def extract_consumer_info(consumer)
+  {
+    consumer_group: consumer.topic.consumer_group.id,
+    subscription_group: consumer.topic.subscription_group.id,
+    topic: consumer.topic.name,
+    partition: consumer.partition,
+    first_offset: consumer.messages.metadata.first_offset,
+    last_offset: consumer.messages.metadata.last_offset
+  }
+end
+
+# @param client [::Karafka::Connection::Client]
+# @return [Hash] hash with client specific info for details of error
+def extract_client_info(client)
+  {
+    consumer_group: client.subscription_group.consumer_group.id,
+    subscription_group: client.subscription_group.id
+  }
+end
+
+# @param listener [::Karafka::Connection::Listener]
+# @return [Hash] hash with listener specific info for details of error
+def extract_listener_info(listener)
+  {
+    consumer_group: listener.subscription_group.consumer_group.id,
+    subscription_group: listener.subscription_group.id
+  }
+end
 end
 end
 end
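
With `error_details` wired in, the user-facing line carries the processing location next to the error message, while the backtrace moves to its own log entry. An illustrative (not captured) example of the resulting output for a consume error:

    Consumer consuming error: divided by 0 [consumer_group: app, subscription_group: app_0, topic: orders_states, partition: 4, first_offset: 1201, last_offset: 1249]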
data/lib/karafka/pro/active_job/dispatcher.rb
@@ -83,6 +83,11 @@ module Karafka
 # @param job [Object] job we want to enqueue
 # @param timestamp [Time] time when job should run
 def dispatch_at(job, timestamp)
+  # If request is in the past, we can dispatch it directly without any envelope wrapping
+  # and proxy pass via scheduled messages. This improves latency and also allows for things
+  # like immediate continuation to work without having to have scheduled messages
+  return dispatch(job) if timestamp.to_f <= Time.now.to_f
+
   target_message = dispatch_details(job).merge!(
     topic: job.queue_name,
     payload: ::ActiveSupport::JSON.encode(serialize_job(job))
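
Hedged usage sketch of the new fast path: a job scheduled with a timestamp at or before now skips the scheduled-messages envelope and is produced directly, so the first call below behaves like a plain `perform_later`. `VisibilityJob` is a placeholder for any ActiveJob class routed through the Karafka adapter:

    class VisibilityJob < ActiveJob::Base
      queue_as :events

      def perform(payload); end
    end

    VisibilityJob.set(wait_until: 1.minute.ago).perform_later('data') # direct dispatch
    VisibilityJob.set(wait: 5.minutes).perform_later('data')          # scheduled-messages path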
data/lib/karafka/pro/cleaner/messages/messages.rb
@@ -9,22 +9,32 @@ module Karafka
 module Messages
   # Extensions to the messages batch allowing for automatic cleaning of each message after
   # message is processed.
+  #
+  # This module is prepended to Karafka::Messages::Messages to add cleaning functionality.
+  # The implementation calls super() to maintain compatibility with other libraries that
+  # also prepend modules to modify the #each method (e.g., DataDog tracing).
+  # See: https://github.com/DataDog/dd-trace-rb/issues/4867
   module Messages
     # @param clean [Boolean] do we want to clean each message after we're done working with
     #   it.
-    # @yield block we want to execute per each message
+    # @param block [Proc] block we want to execute per each message
     #
     # @note Cleaning messages after we're done with each of them and did not fail does not
     #   affect any other functionalities. The only thing that is crucial is to make sure,
     #   that if DLQ is used, that we mark each message as consumed when using this API as
     #   otherwise a cleaned message may be dispatched and that should never happen
-    def each(clean: false)
-      @messages_array.each do |message|
-        yield(message)
-
-        next unless clean
-
-        message.clean!
+    #
+    # @note This method calls super() to ensure compatibility with other libraries that
+    #   may have prepended modules to modify #each behavior. This preserves the method
+    #   chain and allows instrumentation libraries to function correctly.
+    def each(clean: false, &block)
+      if clean
+        super() do |message|
+          yield(message)
+          message.clean!
+        end
+      else
+        super(&block)
       end
     end
   end
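
A minimal sketch of why `super` matters here: when several gems prepend onto the same `#each`, every layer must call `super` for the whole chain to run (for instance an APM tracer prepended alongside the cleaner). Names below are illustrative:

    class Batch
      def initialize(items)
        @items = items
      end

      def each(&block)
        @items.each(&block)
      end
    end

    module Tracing
      def each(&block)
        puts 'trace: start'
        super
        puts 'trace: stop'
      end
    end

    module Cleaning
      def each(clean: false, &block)
        if clean
          super() do |item|
            yield(item)
            puts "cleaned #{item}" # stand-in for message.clean!
          end
        else
          super(&block)
        end
      end
    end

    Batch.prepend(Tracing)
    Batch.prepend(Cleaning) # lookup order: Cleaning -> Tracing -> Batch

    Batch.new([1, 2]).each(clean: true) { |i| puts i }
    # => trace: start / 1 / cleaned 1 / 2 / cleaned 2 / trace: stop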
data/lib/karafka/pro/cli/contracts/server.rb
@@ -0,0 +1,106 @@
+# frozen_string_literal: true
+
+# This code is part of Karafka Pro, a commercial component not licensed under LGPL.
+# See LICENSE for details.
+
+module Karafka
+  module Pro
+    module Cli
+      # CLI related contracts
+      module Contracts
+        # Contract for validating correctness of the server cli command options.
+        # It differs slightly from the OSS one because it is aware of the routing patterns
+        class Server < ::Karafka::Cli::Contracts::Server
+          configure do |config|
+            config.error_messages = YAML.safe_load(
+              File.read(
+                File.join(Karafka.gem_root, 'config', 'locales', 'errors.yml')
+              )
+            ).fetch('en').fetch('validations').fetch('cli').fetch('server')
+          end
+
+          %i[
+            include
+            exclude
+          ].each do |action|
+            optional(:"#{action}_consumer_groups") { |cg| cg.is_a?(Array) }
+            optional(:"#{action}_subscription_groups") { |sg| sg.is_a?(Array) }
+            optional(:"#{action}_topics") { |topics| topics.is_a?(Array) }
+
+            virtual do |data, errors|
+              next unless errors.empty?
+
+              value = data.fetch(:"#{action}_consumer_groups")
+
+              # If there were no consumer_groups declared in the server cli, it means that we will
+              # run all of them and no need to validate them here at all
+              next if value.empty?
+              next if (value - Karafka::App.consumer_groups.map(&:name)).empty?
+
+              # Found unknown consumer groups
+              [[[:"#{action}_consumer_groups"], :consumer_groups_inclusion]]
+            end
+
+            virtual do |data, errors|
+              next unless errors.empty?
+
+              value = data.fetch(:"#{action}_subscription_groups")
+
+              # If there were no subscription_groups declared in the server cli, it means that we
+              # will run all of them and no need to validate them here at all
+              next if value.empty?
+
+              subscription_groups = Karafka::App
+                                    .consumer_groups
+                                    .map(&:subscription_groups)
+                                    .flatten
+                                    .map(&:name)
+
+              next if (value - subscription_groups).empty?
+
+              # Found unknown subscription groups
+              [[[:"#{action}_subscription_groups"], :subscription_groups_inclusion]]
+            end
+
+            virtual do |data, errors|
+              next unless errors.empty?
+
+              value = data.fetch(:"#{action}_topics")
+
+              # If there were no topics declared in the server cli, it means that we will
+              # run all of them and no need to validate them here at all
+              next if value.empty?
+
+              topics = Karafka::App
+                       .consumer_groups
+                       .map(&:subscription_groups)
+                       .flatten
+                       .map(&:topics)
+                       .map { |gtopics| gtopics.map(&:name) }
+                       .flatten
+
+              next if (value - topics).empty?
+
+              # If there are any patterns defined, we cannot report on topics inclusions because
+              # topics may be added during boot or runtime. We go with simple assumption:
+              # if there are patterns defined, we do not check the inclusions at all
+              next unless Karafka::App.consumer_groups.map(&:patterns).flatten.empty?
+
+              # Found unknown topics
+              [[[:"#{action}_topics"], :topics_inclusion]]
+            end
+          end
+
+          # Makes sure we have anything to subscribe to when we start the server
+          virtual do |_, errors|
+            next unless errors.empty?
+
+            next unless Karafka::App.subscription_groups.empty?
+
+            [[%i[include_topics], :topics_missing]]
+          end
+        end
+      end
+    end
+  end
+end
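
A hedged sketch of how this contract is exercised; it assumes karafka-core's contract API (`#call` returning a result responding to `success?`), with option keys mirroring the server CLI flags such as `--include-consumer-groups`:

    contract = Karafka::Pro::Cli::Contracts::Server.new

    result = contract.call(
      include_consumer_groups: %w[group_a],
      exclude_consumer_groups: [],
      include_subscription_groups: [],
      exclude_subscription_groups: [],
      include_topics: [],
      exclude_topics: []
    )

    # false with :consumer_groups_inclusion unless group_a exists in the routing
    result.success?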
data/lib/karafka/pro/encryption/contracts/config.rb
@@ -15,7 +15,7 @@ module Karafka
   File.read(
     File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
   )
-).fetch('en').fetch('validations').fetch('config')
+).fetch('en').fetch('validations').fetch('setup').fetch('config')
 end
 
 nested(:encryption) do
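
This `.fetch` change, and the matching ones in the contracts below, track a reorganization of the locale files: validation messages now sit under intermediate `setup` and `routing` keys. A sketch of the assumed new nesting in `pro_errors.yml`:

    require 'yaml'

    # en:
    #   validations:
    #     setup:
    #       config: ...          # previously validations.config
    #     routing:
    #       topic: ...           # previously validations.topic
    #       consumer_group: ...  # previously validations.consumer_group
    YAML.safe_load(File.read('config/locales/pro_errors.yml'))
        .fetch('en').fetch('validations').fetch('setup').fetch('config')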
data/lib/karafka/pro/loader.rb
@@ -87,7 +87,7 @@ module Karafka
 def reconfigure(config)
   icfg = config.internal
 
-  icfg.cli.contract = Contracts::ServerCliOptions.new
+  icfg.cli.contract = Cli::Contracts::Server.new
 
   # Use manager that supports multiplexing
   icfg.connection.manager = Connection::Manager.new
data/lib/karafka/pro/recurring_tasks/contracts/config.rb
@@ -15,7 +15,7 @@ module Karafka
   File.read(
     File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
   )
-).fetch('en').fetch('validations').fetch('config')
+).fetch('en').fetch('validations').fetch('setup').fetch('config')
 end
 
 nested(:recurring_tasks) do
data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb
@@ -17,7 +17,7 @@ module Karafka
   File.read(
     File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
   )
-).fetch('en').fetch('validations').fetch('topic')
+).fetch('en').fetch('validations').fetch('routing').fetch('topic')
 end
 
 nested(:adaptive_iterator) do
data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb
@@ -10,6 +10,15 @@ module Karafka
 class AdaptiveIterator < Base
   # Topic extension allowing us to enable and configure adaptive iterator
   module Topic
+    # This method calls the parent class initializer and then sets up the
+    # extra instance variable to nil. The explicit initialization
+    # to nil is included as an optimization for Ruby's object shapes system,
+    # which improves memory layout and access performance.
+    def initialize(...)
+      super
+      @adaptive_iterator = nil
+    end
+
     # @param active [Boolean] should we use the automatic adaptive iterator
     # @param safety_margin [Integer]
     #   How big of a margin we leave ourselves so we can safely communicate back with
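
This `initialize(...)` pattern repeats across the topic extensions below (`@dead_letter_queue`, `@delaying`, `@direct_assignments`, and so on) and follows one idiom: on Ruby 3.2+, objects that define the same instance variables in the same order share an object shape, so eagerly assigning the memoization variable keeps every Topic instance on a single shape instead of branching when a feature is first configured. An illustrative sketch, not Karafka code:

    class Topic
      def initialize
        @name = 'events'
        # Defined eagerly so all instances share one object shape, whether or
        # not the feature is ever configured later
        @adaptive_iterator = nil
      end

      def adaptive_iterator(active: false)
        @adaptive_iterator ||= { active: active }
      end
    end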
data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb
@@ -17,7 +17,7 @@ module Karafka
   File.read(
     File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
   )
-).fetch('en').fetch('validations').fetch('topic')
+).fetch('en').fetch('validations').fetch('routing').fetch('topic')
 end
 
 nested(:dead_letter_queue) do
data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb
@@ -10,6 +10,15 @@ module Karafka
 class DeadLetterQueue < Base
   # Expansions to the topic API in DLQ
   module Topic
+    # This method calls the parent class initializer and then sets up the
+    # extra instance variable to nil. The explicit initialization
+    # to nil is included as an optimization for Ruby's object shapes system,
+    # which improves memory layout and access performance.
+    def initialize(...)
+      super
+      @dead_letter_queue = nil
+    end
+
     # @param strategy [#call, nil] Strategy we want to use or nil if a default strategy
     #   (same as in OSS) should be applied
     # @param args [Hash] Pro DLQ arguments
data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb
@@ -17,7 +17,7 @@ module Karafka
   File.read(
     File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
   )
-).fetch('en').fetch('validations').fetch('topic')
+).fetch('en').fetch('validations').fetch('routing').fetch('topic')
 end
 
 nested(:delaying) do
data/lib/karafka/pro/routing/features/delaying/topic.rb
@@ -10,6 +10,15 @@ module Karafka
 class Delaying < Base
   # Topic delaying API extensions
   module Topic
+    # This method calls the parent class initializer and then sets up the
+    # extra instance variable to nil. The explicit initialization
+    # to nil is included as an optimization for Ruby's object shapes system,
+    # which improves memory layout and access performance.
+    def initialize(...)
+      super
+      @delaying = nil
+    end
+
     # @param delay [Integer, nil] minimum age of a message we want to process
     def delaying(delay = nil)
       # Those settings are used for validation
data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb
@@ -16,7 +16,7 @@ module Karafka
   File.read(
     File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
   )
-).fetch('en').fetch('validations').fetch('consumer_group')
+).fetch('en').fetch('validations').fetch('routing').fetch('consumer_group')
 
 virtual do |data, errors|
   next unless errors.empty?
data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb
@@ -17,7 +17,7 @@ module Karafka
   File.read(
     File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
   )
-).fetch('en').fetch('validations').fetch('topic')
+).fetch('en').fetch('validations').fetch('routing').fetch('topic')
 end
 
 nested(:direct_assignments) do
data/lib/karafka/pro/routing/features/direct_assignments/topic.rb
@@ -10,6 +10,15 @@ module Karafka
 class DirectAssignments < Base
   # Topic extensions for direct assignments
   module Topic
+    # This method calls the parent class initializer and then sets up the
+    # extra instance variable to nil. The explicit initialization
+    # to nil is included as an optimization for Ruby's object shapes system,
+    # which improves memory layout and access performance.
+    def initialize(...)
+      super
+      @direct_assignments = nil
+    end
+
     # Allows for direct assignment of
     # @param partitions_or_all [true, Array<Integer>] informs Karafka that we want
     #   to use direct assignments instead of automatic for this topic. It also allows us
data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb
@@ -17,7 +17,7 @@ module Karafka
   File.read(
     File.join(Karafka.gem_root, 'config', 'locales', 'pro_errors.yml')
   )
-).fetch('en').fetch('validations').fetch('topic')
+).fetch('en').fetch('validations').fetch('routing').fetch('topic')
 end
 
 nested(:expiring) do