karafka 2.5.0 → 2.5.1.beta1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/{ci.yml → ci_linux_ubuntu_x86_64_gnu.yml} +54 -30
  3. data/.github/workflows/ci_macos_arm64.yml +148 -0
  4. data/.github/workflows/push.yml +2 -2
  5. data/.github/workflows/trigger-wiki-refresh.yml +30 -0
  6. data/.github/workflows/verify-action-pins.yml +1 -1
  7. data/.ruby-version +1 -1
  8. data/CHANGELOG.md +28 -1
  9. data/Gemfile +2 -1
  10. data/Gemfile.lock +55 -26
  11. data/README.md +2 -2
  12. data/bin/integrations +3 -1
  13. data/bin/verify_kafka_warnings +2 -1
  14. data/config/locales/errors.yml +153 -152
  15. data/config/locales/pro_errors.yml +135 -134
  16. data/karafka.gemspec +3 -3
  17. data/lib/active_job/queue_adapters/karafka_adapter.rb +30 -1
  18. data/lib/karafka/active_job/dispatcher.rb +19 -9
  19. data/lib/karafka/admin/acl.rb +7 -8
  20. data/lib/karafka/admin/configs/config.rb +2 -2
  21. data/lib/karafka/admin/configs/resource.rb +2 -2
  22. data/lib/karafka/admin/configs.rb +3 -7
  23. data/lib/karafka/admin/consumer_groups.rb +351 -0
  24. data/lib/karafka/admin/topics.rb +206 -0
  25. data/lib/karafka/admin.rb +42 -451
  26. data/lib/karafka/base_consumer.rb +22 -0
  27. data/lib/karafka/{pro/contracts/server_cli_options.rb → cli/contracts/server.rb} +4 -12
  28. data/lib/karafka/cli/info.rb +1 -1
  29. data/lib/karafka/cli/install.rb +0 -2
  30. data/lib/karafka/connection/client.rb +8 -0
  31. data/lib/karafka/connection/listener.rb +5 -1
  32. data/lib/karafka/connection/status.rb +12 -9
  33. data/lib/karafka/errors.rb +0 -8
  34. data/lib/karafka/instrumentation/assignments_tracker.rb +16 -0
  35. data/lib/karafka/instrumentation/logger_listener.rb +109 -50
  36. data/lib/karafka/pro/active_job/dispatcher.rb +5 -0
  37. data/lib/karafka/pro/cleaner/messages/messages.rb +18 -8
  38. data/lib/karafka/pro/cli/contracts/server.rb +106 -0
  39. data/lib/karafka/pro/encryption/contracts/config.rb +1 -1
  40. data/lib/karafka/pro/loader.rb +1 -1
  41. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +1 -1
  42. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +1 -1
  43. data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb +9 -0
  44. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
  45. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +9 -0
  46. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +1 -1
  47. data/lib/karafka/pro/routing/features/delaying/topic.rb +9 -0
  48. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +1 -1
  49. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +1 -1
  50. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +9 -0
  51. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +1 -1
  52. data/lib/karafka/pro/routing/features/expiring/topic.rb +9 -0
  53. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +1 -1
  54. data/lib/karafka/pro/routing/features/filtering/topic.rb +9 -0
  55. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +1 -1
  56. data/lib/karafka/pro/routing/features/inline_insights/topic.rb +9 -0
  57. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +1 -1
  58. data/lib/karafka/pro/routing/features/long_running_job/topic.rb +9 -0
  59. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +1 -1
  60. data/lib/karafka/pro/routing/features/multiplexing.rb +1 -1
  61. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +1 -1
  62. data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +9 -0
  63. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +1 -1
  64. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +1 -1
  65. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +1 -1
  66. data/lib/karafka/pro/routing/features/patterns/topic.rb +9 -0
  67. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +1 -1
  68. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +1 -1
  69. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +9 -0
  70. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +1 -1
  71. data/lib/karafka/pro/routing/features/recurring_tasks/topic.rb +9 -0
  72. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +1 -1
  73. data/lib/karafka/pro/routing/features/scheduled_messages/topic.rb +9 -0
  74. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +1 -1
  75. data/lib/karafka/pro/routing/features/swarm/topic.rb +9 -0
  76. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +1 -1
  77. data/lib/karafka/pro/routing/features/throttling/topic.rb +9 -0
  78. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +1 -1
  79. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +9 -0
  80. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +1 -1
  81. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +9 -3
  82. data/lib/karafka/pro/swarm/liveness_listener.rb +17 -2
  83. data/lib/karafka/processing/executor.rb +1 -1
  84. data/lib/karafka/routing/builder.rb +0 -3
  85. data/lib/karafka/routing/consumer_group.rb +1 -4
  86. data/lib/karafka/routing/contracts/consumer_group.rb +84 -0
  87. data/lib/karafka/routing/contracts/routing.rb +61 -0
  88. data/lib/karafka/routing/contracts/topic.rb +83 -0
  89. data/lib/karafka/routing/features/active_job/contracts/topic.rb +1 -1
  90. data/lib/karafka/routing/features/active_job/topic.rb +9 -0
  91. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
  92. data/lib/karafka/routing/features/dead_letter_queue/topic.rb +9 -0
  93. data/lib/karafka/routing/features/declaratives/contracts/topic.rb +1 -1
  94. data/lib/karafka/routing/features/declaratives/topic.rb +9 -0
  95. data/lib/karafka/routing/features/deserializers/contracts/topic.rb +1 -1
  96. data/lib/karafka/routing/features/deserializers/topic.rb +9 -0
  97. data/lib/karafka/routing/features/eofed/contracts/topic.rb +1 -1
  98. data/lib/karafka/routing/features/eofed/topic.rb +9 -0
  99. data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +1 -1
  100. data/lib/karafka/routing/features/inline_insights/topic.rb +9 -0
  101. data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +1 -1
  102. data/lib/karafka/routing/features/manual_offset_management/topic.rb +9 -0
  103. data/lib/karafka/routing/subscription_group.rb +1 -10
  104. data/lib/karafka/routing/topic.rb +9 -1
  105. data/lib/karafka/server.rb +2 -7
  106. data/lib/karafka/setup/attributes_map.rb +36 -0
  107. data/lib/karafka/setup/config.rb +6 -7
  108. data/lib/karafka/setup/contracts/config.rb +217 -0
  109. data/lib/karafka/setup/defaults_injector.rb +3 -1
  110. data/lib/karafka/swarm/node.rb +66 -6
  111. data/lib/karafka/swarm.rb +2 -2
  112. data/lib/karafka/templates/karafka.rb.erb +2 -7
  113. data/lib/karafka/version.rb +1 -1
  114. data/lib/karafka.rb +17 -18
  115. metadata +18 -15
  116. data/lib/karafka/contracts/config.rb +0 -210
  117. data/lib/karafka/contracts/consumer_group.rb +0 -81
  118. data/lib/karafka/contracts/routing.rb +0 -59
  119. data/lib/karafka/contracts/server_cli_options.rb +0 -92
  120. data/lib/karafka/contracts/topic.rb +0 -81
  121. data/lib/karafka/swarm/pidfd.rb +0 -147
data/bin/integrations CHANGED
@@ -46,7 +46,9 @@ class Scenario
  'shutdown/on_hanging_listener_and_shutdown_spec.rb' => [2].freeze,
  'swarm/forceful_shutdown_of_hanging_spec.rb' => [2].freeze,
  'swarm/with_blocking_at_exit_spec.rb' => [2].freeze,
- 'instrumentation/post_errors_instrumentation_error_spec.rb' => [1].freeze,
+ # Segfault in the below spec can be expected because we pretty much force terminate handing
+ # C stuff. This spec is still useful as it catches other things
+ 'instrumentation/post_errors_instrumentation_error_spec.rb' => [1, 139].freeze,
  'cli/declaratives/delete/existing_with_exit_code_spec.rb' => [2].freeze,
  'cli/declaratives/create/new_with_exit_code_spec.rb' => [2].freeze,
  'cli/declaratives/plan/when_changes_with_detailed_exit_code_spec.rb' => [2].freeze,
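Note on the newly accepted exit code: a process killed by a signal is reported by the shell as 128 plus the signal number, and SIGSEGV is signal 11, so a segfault surfaces as status 139. A minimal Ruby sketch of such a check; the spec path and accepted codes come from the hunk above, the runner logic itself is illustrative and not the gem's actual integrations script:

    # Accepted exit codes for the spec, as listed in the diff above
    accepted = {
      'instrumentation/post_errors_instrumentation_error_spec.rb' => [1, 139].freeze
    }

    spec = 'instrumentation/post_errors_instrumentation_error_spec.rb'
    exitstatus = 139 # hypothetical status from a wrapping shell (128 + SIGSEGV, i.e. 128 + 11)

    puts accepted.fetch(spec).include?(exitstatus) ? 'accepted' : 'unexpected failure'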
data/bin/verify_kafka_warnings CHANGED
@@ -10,10 +10,11 @@ allowed_patterns=(
  "Replayed PartitionRecord for"
  "Previous leader None and previous leader epoch"
  "Creating new"
+ "Unloaded transaction metadata"
  )

  # Get all warnings
- warnings=$(docker logs --since=0 kafka | grep WARN)
+ warnings=$(docker logs --since=0 kafka | grep "] WARN ")
  exit_code=0

  while IFS= read -r line; do
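The narrowed pattern "] WARN " presumably matches only the severity field of broker log lines (e.g. "[timestamp] WARN ..."), so lines that merely contain the substring WARN elsewhere no longer count as warnings. A small Ruby illustration with made-up sample lines:

    lines = [
      '[2025-06-01 12:00:00,000] WARN Unloaded transaction metadata ...', # a real WARN entry
      '[2025-06-01 12:00:01,000] INFO some.setting=WARN_LIKE_VALUE ...'   # merely contains WARN
    ]

    puts lines.grep(/WARN/).size      # => 2, the old overly broad match
    puts lines.grep(/\] WARN /).size  # => 1, the new severity-field match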
data/config/locales/errors.yml CHANGED
@@ -1,161 +1,162 @@
  en:
  validations:
- config:
- license.entity_format: needs to be a string
- license.token_format: needs to be either false or a string
- license.expires_on_format: needs to be a valid date
-
- missing: needs to be present
- client_id_format: 'needs to be a string with a Kafka accepted format'
- group_id_format: 'needs to be a string with a Kafka accepted format'
- concurrency_format: needs to be an integer bigger than 0
- consumer_persistence_format: needs to be either true or false
- pause_timeout_format: needs to be an integer bigger than 0
- pause_max_timeout_format: needs to be an integer bigger than 0
- pause_with_exponential_backoff_format: needs to be either true or false
- strict_topics_namespacing_format: needs to be either true or false
- strict_declarative_topics_format: needs to be either true or false
- shutdown_timeout_format: needs to be an integer bigger than 0
- max_wait_time_format: needs to be an integer bigger than 0
- max_wait_time_max_wait_time_vs_swarm_node_report_timeout: >
- cannot be more than 80% of internal.swarm.node_report_timeout.
- Decrease max_wait_time or increase node_report_timeout
- kafka_format: needs to be a filled hash
- key_must_be_a_symbol: All keys under the kafka settings scope need to be symbols
- max_timeout_vs_pause_max_timeout: pause_timeout must be less or equal to pause_max_timeout
- shutdown_timeout_vs_max_wait_time: shutdown_timeout must be more than max_wait_time
- worker_thread_priority_format: must be between -3 and 3
-
- oauth.token_provider_listener_format: 'must be false or respond to #on_oauthbearer_token_refresh'
-
- internal.processing.jobs_builder_format: cannot be nil
- internal.processing.jobs_queue_class_format: cannot be nil
- internal.processing.scheduler_class_format: cannot be nil
- internal.processing.coordinator_class_format: cannot be nil
- internal.processing.partitioner_class_format: cannot be nil
- internal.processing.strategy_selector_format: cannot be nil
- internal.processing.expansions_selector_format: cannot be nil
- internal.processing.executor_class_format: cannot be nil
- internal.processing.worker_job_call_wrapper_format: 'needs to be false or respond to #wrap'
- internal.processing.errors_tracker_class_format: 'needs to be nil or a class'
-
- internal.active_job.dispatcher_format: cannot be nil
- internal.active_job.job_options_contract_format: cannot be nil
- internal.active_job.consumer_class_format: cannot be nil
-
- internal.status_format: needs to be present
- internal.process_format: needs to be present
- internal.tick_interval_format: needs to be an integer bigger or equal to 1000
- internal.supervision_sleep_format: needs to be an integer bigger than 0
- internal.forceful_exit_code_format: needs to be an integer bigger or equal to 0
-
- internal.routing.builder_format: needs to be present
- internal.routing.subscription_groups_builder_format: needs to be present
-
- internal.connection.manager_format: needs to be present
- internal.connection.conductor_format: needs to be present
- internal.connection.reset_backoff_format: needs to be an integer bigger or equal to 1000
- internal.connection.proxy.query_watermark_offsets.timeout_format: needs to be an integer bigger than 0
- internal.connection.proxy.query_watermark_offsets.max_attempts_format: needs to be an integer bigger than 0
- internal.connection.proxy.query_watermark_offsets.wait_time_format: needs to be an integer bigger than 0
- internal.connection.proxy.offsets_for_times.timeout_format: needs to be an integer bigger than 0
- internal.connection.proxy.offsets_for_times.max_attempts_format: needs to be an integer bigger than 0
- internal.connection.proxy.offsets_for_times.wait_time_format: needs to be an integer bigger than 0
- internal.connection.proxy.committed.timeout_format: needs to be an integer bigger than 0
- internal.connection.proxy.committed.max_attempts_format: needs to be an integer bigger than 0
- internal.connection.proxy.committed.wait_time_format: needs to be an integer bigger than 0
- internal.connection.proxy.commit.max_attempts_format: needs to be an integer bigger than 0
- internal.connection.proxy.commit.wait_time_format: needs to be an integer bigger than 0
- internal.connection.proxy.metadata.timeout_format: needs to be an integer bigger than 0
- internal.connection.proxy.metadata.max_attempts_format: needs to be an integer bigger than 0
- internal.connection.proxy.metadata.wait_time_format: needs to be an integer bigger than 0
- internal.connection.listener_thread_priority_format: must be between -3 and 3
-
- internal.swarm.manager_format: cannot be nil
- internal.swarm.orphaned_exit_code_format: needs to be an integer bigger or equal to 0
- internal.swarm.pidfd_open_syscall_format: needs to be an integer bigger or equal to 0
- internal.swarm.pidfd_signal_syscall_format: needs to be an integer bigger or equal to 0
- internal.swarm.supervision_interval_format: needs to be an integer bigger or equal to 1000
- internal.swarm.liveness_interval_format: needs to be an integer bigger or equal to 1000
- internal.swarm.liveness_listener_format: cannot be nil
- internal.swarm.node_report_timeout_format: needs to be an integer bigger or equal to 1000
- internal.swarm.node_restart_timeout_format: needs to be an integer bigger or equal to 1000
-
- admin.kafka_format: needs to be a hash
- admin.group_id_format: 'needs to be a string with a Kafka accepted format'
- admin.max_wait_time_format: 'needs to be an integer bigger than 0'
- admin.retry_backoff_format: 'needs to be an integer bigger than 100'
- admin.max_retries_duration_format: 'needs to be an integer bigger than 1000'
-
- swarm.nodes_format: 'needs to be an integer bigger than 0'
- swarm.node_format: needs to be false or node instance
-
- server_cli_options:
- missing: needs to be present
- consumer_groups_inclusion: Unknown consumer group name
- subscription_groups_inclusion: Unknown subscription group name
- topics_inclusion: Unknown topic name
- topics_missing: No topics to subscribe to
-
- topic:
- kafka: needs to be a hash with kafka scope settings details
- kafka_format: needs to be a filled hash
- missing: needs to be present
- max_messages_format: 'needs to be an integer bigger than 0'
- max_wait_time_format: 'needs to be an integer bigger than 0'
- name_format: 'needs to be a string with a Kafka accepted format'
- deserializers_format: needs to be present
- consumer_format: needs to be present
- id_format: 'needs to be a string with a Kafka accepted format'
- initial_offset_format: needs to be either earliest or latest
- subscription_group_details.name_format: must be a non-empty string
- manual_offset_management.active_format: needs to be either true or false
- manual_offset_management_must_be_enabled: cannot be disabled for ActiveJob topics
- inline_insights.active_format: needs to be either true or false
- consumer_active_job_missing: ActiveJob needs to be available
-
- dead_letter_queue.max_retries_format: needs to be equal or bigger than 0
- dead_letter_queue.topic_format: 'needs to be a string with a Kafka accepted format'
- dead_letter_queue.active_format: needs to be either true or false
- dead_letter_queue.independent_format: needs to be either true or false
- dead_letter_queue.transactional_format: needs to be either true or false
- dead_letter_queue.dispatch_method_format: 'needs to be either #produce_sync or #produce_async'
- dead_letter_queue.marking_method_format: 'needs to be either #mark_as_consumed or #mark_as_consumed!'
- dead_letter_queue.mark_after_dispatch_format: 'needs to be true, false or nil'
-
- active_format: needs to be either true or false
-
- eofed.active_format: needs to be either true or false
- eofed.kafka_enable: 'cannot be enabled without enable.partition.eof set to true'
-
- declaratives.partitions_format: needs to be more or equal to 1
- declaratives.active_format: needs to be true
- declaratives.replication_factor_format: needs to be more or equal to 1
- declaratives.details_format: needs to be a hash with only symbol keys
-
- inconsistent_namespacing: |
- needs to follow a consistent namespacing style using either dots (.) or underscores (_), but not both.
- This ensures proper Kafka metrics reporting and avoids name collisions.
- To disable this validation, set config.strict_topics_namespacing to false.
-
- deserializers.active_format: 'needs to be true'
- deserializers.payload_format: 'needs to respond to #call'
- deserializers.key_format: 'needs to respond to #call'
- deserializers.headers_format: 'needs to respond to #call'
-
- consumer_group:
- missing: needs to be present
- topics_names_not_unique: all topic names within a single consumer group must be unique
- topics_many_consumers_same_topic: 'topic within a single consumer group cannot have distinct consumers'
- id_format: 'needs to be a string with a Kafka accepted format'
- topics_format: needs to be a non-empty array
- topics_namespaced_names_not_unique: |
- all topic names within a single consumer group must be unique considering namespacing styles
- disable this validation by setting config.strict_topics_namespacing to false
+ setup:
+ config:
+ license.entity_format: needs to be a string
+ license.token_format: needs to be either false or a string
+ license.expires_on_format: needs to be a valid date
+
+ missing: needs to be present
+ client_id_format: 'needs to be a string with a Kafka accepted format'
+ group_id_format: 'needs to be a string with a Kafka accepted format'
+ concurrency_format: needs to be an integer bigger than 0
+ consumer_persistence_format: needs to be either true or false
+ pause_timeout_format: needs to be an integer bigger than 0
+ pause_max_timeout_format: needs to be an integer bigger than 0
+ pause_with_exponential_backoff_format: needs to be either true or false
+ strict_topics_namespacing_format: needs to be either true or false
+ strict_declarative_topics_format: needs to be either true or false
+ shutdown_timeout_format: needs to be an integer bigger than 0
+ max_wait_time_format: needs to be an integer bigger than 0
+ max_wait_time_max_wait_time_vs_swarm_node_report_timeout: >
+ cannot be more than 80% of internal.swarm.node_report_timeout.
+ Decrease max_wait_time or increase node_report_timeout
+ kafka_format: needs to be a filled hash
+ key_must_be_a_symbol: All keys under the kafka settings scope need to be symbols
+ max_timeout_vs_pause_max_timeout: pause_timeout must be less or equal to pause_max_timeout
+ shutdown_timeout_vs_max_wait_time: shutdown_timeout must be more than max_wait_time
+ worker_thread_priority_format: must be between -3 and 3
+
+ oauth.token_provider_listener_format: 'must be false or respond to #on_oauthbearer_token_refresh'
+
+ internal.processing.jobs_builder_format: cannot be nil
+ internal.processing.jobs_queue_class_format: cannot be nil
+ internal.processing.scheduler_class_format: cannot be nil
+ internal.processing.coordinator_class_format: cannot be nil
+ internal.processing.partitioner_class_format: cannot be nil
+ internal.processing.strategy_selector_format: cannot be nil
+ internal.processing.expansions_selector_format: cannot be nil
+ internal.processing.executor_class_format: cannot be nil
+ internal.processing.worker_job_call_wrapper_format: 'needs to be false or respond to #wrap'
+ internal.processing.errors_tracker_class_format: 'needs to be nil or a class'
+
+ internal.active_job.dispatcher_format: cannot be nil
+ internal.active_job.job_options_contract_format: cannot be nil
+ internal.active_job.consumer_class_format: cannot be nil
+
+ internal.status_format: needs to be present
+ internal.process_format: needs to be present
+ internal.tick_interval_format: needs to be an integer bigger or equal to 1000
+ internal.supervision_sleep_format: needs to be an integer bigger than 0
+ internal.forceful_exit_code_format: needs to be an integer bigger or equal to 0
+ internal.forceful_shutdown_wait_format: needs to be an integer bigger or equal to 0
+
+ internal.routing.builder_format: needs to be present
+ internal.routing.subscription_groups_builder_format: needs to be present
+
+ internal.connection.manager_format: needs to be present
+ internal.connection.conductor_format: needs to be present
+ internal.connection.reset_backoff_format: needs to be an integer bigger or equal to 1000
+ internal.connection.proxy.query_watermark_offsets.timeout_format: needs to be an integer bigger than 0
+ internal.connection.proxy.query_watermark_offsets.max_attempts_format: needs to be an integer bigger than 0
+ internal.connection.proxy.query_watermark_offsets.wait_time_format: needs to be an integer bigger than 0
+ internal.connection.proxy.offsets_for_times.timeout_format: needs to be an integer bigger than 0
+ internal.connection.proxy.offsets_for_times.max_attempts_format: needs to be an integer bigger than 0
+ internal.connection.proxy.offsets_for_times.wait_time_format: needs to be an integer bigger than 0
+ internal.connection.proxy.committed.timeout_format: needs to be an integer bigger than 0
+ internal.connection.proxy.committed.max_attempts_format: needs to be an integer bigger than 0
+ internal.connection.proxy.committed.wait_time_format: needs to be an integer bigger than 0
+ internal.connection.proxy.commit.max_attempts_format: needs to be an integer bigger than 0
+ internal.connection.proxy.commit.wait_time_format: needs to be an integer bigger than 0
+ internal.connection.proxy.metadata.timeout_format: needs to be an integer bigger than 0
+ internal.connection.proxy.metadata.max_attempts_format: needs to be an integer bigger than 0
+ internal.connection.proxy.metadata.wait_time_format: needs to be an integer bigger than 0
+ internal.connection.listener_thread_priority_format: must be between -3 and 3
+
+ internal.swarm.manager_format: cannot be nil
+ internal.swarm.orphaned_exit_code_format: needs to be an integer bigger or equal to 0
+ internal.swarm.supervision_interval_format: needs to be an integer bigger or equal to 1000
+ internal.swarm.liveness_interval_format: needs to be an integer bigger or equal to 1000
+ internal.swarm.liveness_listener_format: cannot be nil
+ internal.swarm.node_report_timeout_format: needs to be an integer bigger or equal to 1000
+ internal.swarm.node_restart_timeout_format: needs to be an integer bigger or equal to 1000
+
+ admin.kafka_format: needs to be a hash
+ admin.group_id_format: 'needs to be a string with a Kafka accepted format'
+ admin.max_wait_time_format: 'needs to be an integer bigger than 0'
+ admin.retry_backoff_format: 'needs to be an integer bigger than 100'
+ admin.max_retries_duration_format: 'needs to be an integer bigger than 1000'
+
+ swarm.nodes_format: 'needs to be an integer bigger than 0'
+ swarm.node_format: needs to be false or node instance
+
+ cli:
+ server:
+ missing: needs to be present
+ consumer_groups_inclusion: Unknown consumer group name
+ subscription_groups_inclusion: Unknown subscription group name
+ topics_inclusion: Unknown topic name
+ topics_missing: No topics to subscribe to

  routing:
  without_declarative_definition: lacks explicit declarative topics definition

+ topic:
+ kafka: needs to be a hash with kafka scope settings details
+ kafka_format: needs to be a filled hash
+ missing: needs to be present
+ max_messages_format: 'needs to be an integer bigger than 0'
+ max_wait_time_format: 'needs to be an integer bigger than 0'
+ name_format: 'needs to be a string with a Kafka accepted format'
+ deserializers_format: needs to be present
+ consumer_format: needs to be present
+ id_format: 'needs to be a string with a Kafka accepted format'
+ initial_offset_format: needs to be either earliest or latest
+ subscription_group_details.name_format: must be a non-empty string
+ manual_offset_management.active_format: needs to be either true or false
+ manual_offset_management_must_be_enabled: cannot be disabled for ActiveJob topics
+ inline_insights.active_format: needs to be either true or false
+ consumer_active_job_missing: ActiveJob needs to be available
+
+ dead_letter_queue.max_retries_format: needs to be equal or bigger than 0
+ dead_letter_queue.topic_format: 'needs to be a string with a Kafka accepted format'
+ dead_letter_queue.active_format: needs to be either true or false
+ dead_letter_queue.independent_format: needs to be either true or false
+ dead_letter_queue.transactional_format: needs to be either true or false
+ dead_letter_queue.dispatch_method_format: 'needs to be either #produce_sync or #produce_async'
+ dead_letter_queue.marking_method_format: 'needs to be either #mark_as_consumed or #mark_as_consumed!'
+ dead_letter_queue.mark_after_dispatch_format: 'needs to be true, false or nil'
+
+ active_format: needs to be either true or false
+
+ eofed.active_format: needs to be either true or false
+ eofed.kafka_enable: 'cannot be enabled without enable.partition.eof set to true'
+
+ declaratives.partitions_format: needs to be more or equal to 1
+ declaratives.active_format: needs to be true
+ declaratives.replication_factor_format: needs to be more or equal to 1
+ declaratives.details_format: needs to be a hash with only symbol keys
+
+ inconsistent_namespacing: |
+ needs to follow a consistent namespacing style using either dots (.) or underscores (_), but not both.
+ This ensures proper Kafka metrics reporting and avoids name collisions.
+ To disable this validation, set config.strict_topics_namespacing to false.
+
+ deserializers.active_format: 'needs to be true'
+ deserializers.payload_format: 'needs to respond to #call'
+ deserializers.key_format: 'needs to respond to #call'
+ deserializers.headers_format: 'needs to respond to #call'
+
+ consumer_group:
+ missing: needs to be present
+ topics_names_not_unique: all topic names within a single consumer group must be unique
+ topics_many_consumers_same_topic: 'topic within a single consumer group cannot have distinct consumers'
+ id_format: 'needs to be a string with a Kafka accepted format'
+ topics_format: needs to be a non-empty array
+ topics_namespaced_names_not_unique: |
+ all topic names within a single consumer group must be unique considering namespacing styles
+ disable this validation by setting config.strict_topics_namespacing to false
+
  job_options:
  missing: needs to be present
  dispatch_method_format: needs to be either :produce_async or :produce_sync
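The restructuring above mirrors the contract relocations elsewhere in this release (setup config validation under lib/karafka/setup/contracts/, the server CLI contract under lib/karafka/cli/contracts/, routing contracts under lib/karafka/routing/contracts/), so error messages move from flat keys such as config and server_cli_options to nested scopes such as setup.config and cli.server. A minimal lookup sketch, assuming only the file layout shown in this diff; the helper below is illustrative and not Karafka's own loading code:

    require 'yaml'

    # Dig out the error messages for a given validation scope from the
    # reorganized locale file (path relative to the unpacked gem root).
    def error_messages_for(*scope)
      YAML.safe_load(File.read('config/locales/errors.yml'))
          .fetch('en')
          .fetch('validations')
          .dig(*scope)
    end

    error_messages_for('setup', 'config') # e.g. { 'missing' => 'needs to be present', ... }
    error_messages_for('cli', 'server')   # e.g. { 'topics_missing' => 'No topics to subscribe to', ... }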