karafka 2.5.0.rc2 → 2.5.1.beta1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/{ci.yml → ci_linux_ubuntu_x86_64_gnu.yml} +54 -30
  3. data/.github/workflows/ci_macos_arm64.yml +148 -0
  4. data/.github/workflows/push.yml +2 -2
  5. data/.github/workflows/trigger-wiki-refresh.yml +30 -0
  6. data/.github/workflows/verify-action-pins.yml +1 -1
  7. data/.ruby-version +1 -1
  8. data/CHANGELOG.md +29 -2
  9. data/Gemfile +2 -1
  10. data/Gemfile.lock +56 -27
  11. data/README.md +2 -2
  12. data/bin/integrations +3 -1
  13. data/bin/verify_kafka_warnings +2 -1
  14. data/config/locales/errors.yml +153 -152
  15. data/config/locales/pro_errors.yml +135 -134
  16. data/karafka.gemspec +3 -3
  17. data/lib/active_job/queue_adapters/karafka_adapter.rb +30 -1
  18. data/lib/karafka/active_job/dispatcher.rb +19 -9
  19. data/lib/karafka/admin/acl.rb +7 -8
  20. data/lib/karafka/admin/configs/config.rb +2 -2
  21. data/lib/karafka/admin/configs/resource.rb +2 -2
  22. data/lib/karafka/admin/configs.rb +3 -7
  23. data/lib/karafka/admin/consumer_groups.rb +351 -0
  24. data/lib/karafka/admin/topics.rb +206 -0
  25. data/lib/karafka/admin.rb +42 -451
  26. data/lib/karafka/base_consumer.rb +22 -0
  27. data/lib/karafka/{pro/contracts/server_cli_options.rb → cli/contracts/server.rb} +4 -12
  28. data/lib/karafka/cli/info.rb +1 -1
  29. data/lib/karafka/cli/install.rb +0 -2
  30. data/lib/karafka/connection/client.rb +8 -0
  31. data/lib/karafka/connection/listener.rb +5 -1
  32. data/lib/karafka/connection/status.rb +12 -9
  33. data/lib/karafka/errors.rb +0 -8
  34. data/lib/karafka/instrumentation/assignments_tracker.rb +16 -0
  35. data/lib/karafka/instrumentation/logger_listener.rb +109 -50
  36. data/lib/karafka/pro/active_job/dispatcher.rb +5 -0
  37. data/lib/karafka/pro/cleaner/messages/messages.rb +18 -8
  38. data/lib/karafka/pro/cli/contracts/server.rb +106 -0
  39. data/lib/karafka/pro/encryption/contracts/config.rb +1 -1
  40. data/lib/karafka/pro/loader.rb +1 -1
  41. data/lib/karafka/pro/recurring_tasks/contracts/config.rb +1 -1
  42. data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +1 -1
  43. data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb +9 -0
  44. data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
  45. data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +9 -0
  46. data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +1 -1
  47. data/lib/karafka/pro/routing/features/delaying/topic.rb +9 -0
  48. data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +1 -1
  49. data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +1 -1
  50. data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +9 -0
  51. data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +1 -1
  52. data/lib/karafka/pro/routing/features/expiring/topic.rb +9 -0
  53. data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +1 -1
  54. data/lib/karafka/pro/routing/features/filtering/topic.rb +9 -0
  55. data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +1 -1
  56. data/lib/karafka/pro/routing/features/inline_insights/topic.rb +9 -0
  57. data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +1 -1
  58. data/lib/karafka/pro/routing/features/long_running_job/topic.rb +9 -0
  59. data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +1 -1
  60. data/lib/karafka/pro/routing/features/multiplexing.rb +1 -1
  61. data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +1 -1
  62. data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +9 -0
  63. data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +1 -1
  64. data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +1 -1
  65. data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +1 -1
  66. data/lib/karafka/pro/routing/features/patterns/topic.rb +9 -0
  67. data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +1 -1
  68. data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +1 -1
  69. data/lib/karafka/pro/routing/features/periodic_job/topic.rb +9 -0
  70. data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +1 -1
  71. data/lib/karafka/pro/routing/features/recurring_tasks/topic.rb +9 -0
  72. data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +1 -1
  73. data/lib/karafka/pro/routing/features/scheduled_messages/topic.rb +9 -0
  74. data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +1 -1
  75. data/lib/karafka/pro/routing/features/swarm/topic.rb +9 -0
  76. data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +1 -1
  77. data/lib/karafka/pro/routing/features/throttling/topic.rb +9 -0
  78. data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +1 -1
  79. data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +9 -0
  80. data/lib/karafka/pro/scheduled_messages/contracts/config.rb +1 -1
  81. data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +9 -3
  82. data/lib/karafka/pro/swarm/liveness_listener.rb +17 -2
  83. data/lib/karafka/processing/executor.rb +1 -1
  84. data/lib/karafka/routing/builder.rb +0 -3
  85. data/lib/karafka/routing/consumer_group.rb +1 -4
  86. data/lib/karafka/routing/contracts/consumer_group.rb +84 -0
  87. data/lib/karafka/routing/contracts/routing.rb +61 -0
  88. data/lib/karafka/routing/contracts/topic.rb +83 -0
  89. data/lib/karafka/routing/features/active_job/contracts/topic.rb +1 -1
  90. data/lib/karafka/routing/features/active_job/topic.rb +9 -0
  91. data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
  92. data/lib/karafka/routing/features/dead_letter_queue/topic.rb +9 -0
  93. data/lib/karafka/routing/features/declaratives/contracts/topic.rb +1 -1
  94. data/lib/karafka/routing/features/declaratives/topic.rb +9 -0
  95. data/lib/karafka/routing/features/deserializers/contracts/topic.rb +1 -1
  96. data/lib/karafka/routing/features/deserializers/topic.rb +9 -0
  97. data/lib/karafka/routing/features/eofed/contracts/topic.rb +1 -1
  98. data/lib/karafka/routing/features/eofed/topic.rb +9 -0
  99. data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +1 -1
  100. data/lib/karafka/routing/features/inline_insights/topic.rb +9 -0
  101. data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +1 -1
  102. data/lib/karafka/routing/features/manual_offset_management/topic.rb +9 -0
  103. data/lib/karafka/routing/subscription_group.rb +1 -10
  104. data/lib/karafka/routing/topic.rb +9 -1
  105. data/lib/karafka/server.rb +2 -7
  106. data/lib/karafka/setup/attributes_map.rb +36 -0
  107. data/lib/karafka/setup/config.rb +6 -7
  108. data/lib/karafka/setup/contracts/config.rb +217 -0
  109. data/lib/karafka/setup/defaults_injector.rb +3 -1
  110. data/lib/karafka/swarm/node.rb +66 -6
  111. data/lib/karafka/swarm.rb +2 -2
  112. data/lib/karafka/templates/karafka.rb.erb +2 -7
  113. data/lib/karafka/version.rb +1 -1
  114. data/lib/karafka.rb +17 -18
  115. metadata +18 -15
  116. data/lib/karafka/contracts/config.rb +0 -210
  117. data/lib/karafka/contracts/consumer_group.rb +0 -81
  118. data/lib/karafka/contracts/routing.rb +0 -59
  119. data/lib/karafka/contracts/server_cli_options.rb +0 -92
  120. data/lib/karafka/contracts/topic.rb +0 -81
  121. data/lib/karafka/swarm/pidfd.rb +0 -147
@@ -1,105 +1,108 @@
1
1
  en:
2
2
  validations:
3
- topic:
4
- virtual_partitions.partitioner_respond_to_call: needs to be defined and needs to respond to `#call`
5
- virtual_partitions.max_partitions_format: needs to be equal or more than 1
6
- virtual_partitions.offset_metadata_strategy_format: needs to be either :exact or :current
7
- virtual_partitions.reducer_format: "needs to respond to `#call`"
8
- virtual_partitions.distribution_format: "needs to be either :consistent or :balanced"
9
-
10
- long_running_job.active_format: needs to be either true or false
11
-
12
- dead_letter_queue_with_virtual_partitions: when using Dead Letter Queue with Virtual Partitions, at least one retry is required.
13
- dead_letter_queue.strategy_format: 'needs to respond to #call'
14
- dead_letter_queue.strategy_missing: needs to be present
15
-
16
- throttling.active_format: needs to be either true or false
17
- throttling.limit_format: needs to be equal or more than 1
18
- throttling.interval_format: needs to be equal or more than 1
19
-
20
- filtering.active_missing: needs to be present
21
- filtering.factory_format: 'needs to respond to #call'
22
- filtering.factories_format: 'needs to contain only factories responding to #call'
23
- filtering.active_format: 'needs to be boolean'
24
-
25
- expiring.ttl_format: 'needs to be equal or more than 0 and an integer'
26
- expiring.active_format: 'needs to be boolean'
27
-
28
- delaying.delay_format: 'needs to be equal or more than 0 and an integer'
29
- delaying.active_format: 'needs to be boolean'
30
-
31
- pause_timeout_format: needs to be an integer bigger than 0
32
- pause_max_timeout_format: needs to be an integer bigger than 0
33
- pause_with_exponential_backoff_format: needs to be either true or false
34
- pause_timeout_max_timeout_vs_pause_max_timeout: pause_timeout must be less or equal to pause_max_timeout
35
-
36
- patterns.active_format: 'needs to be boolean'
37
- patterns.type_format: 'needs to be :matcher, :discovered or :regular'
38
-
39
- periodic_job.active_missing: needs to be present
40
- periodic_job.active_format: 'needs to be boolean'
41
- periodic_job.interval_missing: 'needs to be present'
42
- periodic_job.interval_format: 'needs to be an integer equal or more than 100'
43
- periodic_job.during_pause_format: 'needs to be boolean'
44
- periodic_job.during_retry_format: 'needs to be boolean'
45
- periodic_job.materialized_format: 'needs to be boolean'
46
- periodic_job.materialized_missing: 'needs to be present'
47
-
48
- inline_insights.active_format: 'needs to be boolean'
49
- inline_insights.required_format: 'needs to be boolean'
50
-
51
- offset_metadata.active_format: 'needs to be boolean'
52
- offset_metadata.cache_format: 'needs to be boolean'
53
- offset_metadata.deserializer_missing: needs to be present
54
- offset_metadata.deserializer_format: 'needs to respond to #call'
55
-
56
- subscription_group_details.multiplexing_min_format: 'needs to be an integer equal or more than 1'
57
- subscription_group_details.multiplexing_max_format: 'needs to be an integer equal or more than 1'
58
- subscription_group_details_multiplexing_min_max_mismatch: 'min needs to be equal or less than max'
59
- subscription_group_details_multiplexing_boot_mismatch: 'boot needs to be between min and max'
60
- subscription_group_details.multiplexing_boot_format: 'needs to be an integer equal or more than 1'
61
- subscription_group_details.multiplexing_boot_not_dynamic: 'needs to be equal to max when not in dynamic mode'
62
- subscription_group_details_multiplexing_one_not_enough: 'min and max cannot equal 1'
63
- subscription_group_details.multiplexing_scale_delay_format: 'needs to be an integer equal or more than 1000'
64
-
65
- swarm.active_format: needs to be true
66
- swarm.nodes_format: needs to be a range, array of nodes ids or a hash with direct assignments
67
- swarm_nodes_with_non_existent_nodes: includes unreachable nodes ids
68
-
69
- recurring_tasks.active_format: 'needs to be boolean'
70
- scheduled_messages.active_format: 'needs to be boolean'
71
- scheduled_messages.active_missing: 'needs to be boolean'
72
-
73
- direct_assignments.active_missing: needs to be present
74
- direct_assignments.active_format: 'needs to be boolean'
75
- direct_assignments.partitions_missing: 'needs to be present'
76
- direct_assignments.partitions_format: 'needs to be true, list of partitions or a range of partitions (finite)'
77
- direct_assignments_active_but_empty: 'cannot be empty and active at the same time'
78
- direct_assignments_swarm_not_complete: 'cannot have partitions that are assigned but not allocated'
79
- direct_assignments_swarm_overbooked: 'cannot allocate partitions in swarm that were not assigned'
80
- direct_assignments_patterns_active: 'patterns cannot be used with direct assignments'
81
-
82
- adaptive_iterator.active_missing: needs to be present
83
- adaptive_iterator.active_format: 'needs to be boolean'
84
- adaptive_iterator.marking_method_format: 'needs to be either #mark_as_consumed or #mark_as_consumed!'
85
- adaptive_iterator.clean_after_yielding_format: 'needs to be boolean'
86
- adaptive_iterator.safety_margin_format: 'needs to be between 1 and 99'
87
- adaptive_iterator_with_virtual_partitions: 'cannot be used with virtual partitions'
88
- adaptive_iterator_with_long_running_job: 'cannot be used with long running jobs'
89
-
90
- consumer_group:
91
- patterns_format: must be an array with hashes
92
- patterns_missing: needs to be present
93
- patterns_regexps_not_unique: 'must be unique within consumer group'
94
-
95
- direct_assignments_homogenous: 'single consumer group cannot mix regular and direct assignments'
96
-
97
- parallel_segments.partitioner_format: needs to be defined and needs to respond to `#call`
98
- parallel_segments.partitioner_respond_to_call: needs to be defined and needs to respond to `#call`
99
- parallel_segments.count_format: needs to be equal or more than 1
100
- parallel_segments.active_format: needs to be boolean
101
- parallel_segments.reducer_format: "needs to respond to `#call`"
102
- parallel_segments.merge_key_format: "needs to be a non-empty string"
3
+ routing:
4
+ swarm_nodes_not_used: 'At least one of the nodes has no assignments'
5
+
6
+ topic:
7
+ virtual_partitions.partitioner_respond_to_call: needs to be defined and needs to respond to `#call`
8
+ virtual_partitions.max_partitions_format: needs to be equal or more than 1
9
+ virtual_partitions.offset_metadata_strategy_format: needs to be either :exact or :current
10
+ virtual_partitions.reducer_format: "needs to respond to `#call`"
11
+ virtual_partitions.distribution_format: "needs to be either :consistent or :balanced"
12
+
13
+ long_running_job.active_format: needs to be either true or false
14
+
15
+ dead_letter_queue_with_virtual_partitions: when using Dead Letter Queue with Virtual Partitions, at least one retry is required.
16
+ dead_letter_queue.strategy_format: 'needs to respond to #call'
17
+ dead_letter_queue.strategy_missing: needs to be present
18
+
19
+ throttling.active_format: needs to be either true or false
20
+ throttling.limit_format: needs to be equal or more than 1
21
+ throttling.interval_format: needs to be equal or more than 1
22
+
23
+ filtering.active_missing: needs to be present
24
+ filtering.factory_format: 'needs to respond to #call'
25
+ filtering.factories_format: 'needs to contain only factories responding to #call'
26
+ filtering.active_format: 'needs to be boolean'
27
+
28
+ expiring.ttl_format: 'needs to be equal or more than 0 and an integer'
29
+ expiring.active_format: 'needs to be boolean'
30
+
31
+ delaying.delay_format: 'needs to be equal or more than 0 and an integer'
32
+ delaying.active_format: 'needs to be boolean'
33
+
34
+ pause_timeout_format: needs to be an integer bigger than 0
35
+ pause_max_timeout_format: needs to be an integer bigger than 0
36
+ pause_with_exponential_backoff_format: needs to be either true or false
37
+ pause_timeout_max_timeout_vs_pause_max_timeout: pause_timeout must be less or equal to pause_max_timeout
38
+
39
+ patterns.active_format: 'needs to be boolean'
40
+ patterns.type_format: 'needs to be :matcher, :discovered or :regular'
41
+
42
+ periodic_job.active_missing: needs to be present
43
+ periodic_job.active_format: 'needs to be boolean'
44
+ periodic_job.interval_missing: 'needs to be present'
45
+ periodic_job.interval_format: 'needs to be an integer equal or more than 100'
46
+ periodic_job.during_pause_format: 'needs to be boolean'
47
+ periodic_job.during_retry_format: 'needs to be boolean'
48
+ periodic_job.materialized_format: 'needs to be boolean'
49
+ periodic_job.materialized_missing: 'needs to be present'
50
+
51
+ inline_insights.active_format: 'needs to be boolean'
52
+ inline_insights.required_format: 'needs to be boolean'
53
+
54
+ offset_metadata.active_format: 'needs to be boolean'
55
+ offset_metadata.cache_format: 'needs to be boolean'
56
+ offset_metadata.deserializer_missing: needs to be present
57
+ offset_metadata.deserializer_format: 'needs to respond to #call'
58
+
59
+ subscription_group_details.multiplexing_min_format: 'needs to be an integer equal or more than 1'
60
+ subscription_group_details.multiplexing_max_format: 'needs to be an integer equal or more than 1'
61
+ subscription_group_details_multiplexing_min_max_mismatch: 'min needs to be equal or less than max'
62
+ subscription_group_details_multiplexing_boot_mismatch: 'boot needs to be between min and max'
63
+ subscription_group_details.multiplexing_boot_format: 'needs to be an integer equal or more than 1'
64
+ subscription_group_details.multiplexing_boot_not_dynamic: 'needs to be equal to max when not in dynamic mode'
65
+ subscription_group_details_multiplexing_one_not_enough: 'min and max cannot equal 1'
66
+ subscription_group_details.multiplexing_scale_delay_format: 'needs to be an integer equal or more than 1000'
67
+
68
+ swarm.active_format: needs to be true
69
+ swarm.nodes_format: needs to be a range, array of nodes ids or a hash with direct assignments
70
+ swarm_nodes_with_non_existent_nodes: includes unreachable nodes ids
71
+
72
+ recurring_tasks.active_format: 'needs to be boolean'
73
+ scheduled_messages.active_format: 'needs to be boolean'
74
+ scheduled_messages.active_missing: 'needs to be boolean'
75
+
76
+ direct_assignments.active_missing: needs to be present
77
+ direct_assignments.active_format: 'needs to be boolean'
78
+ direct_assignments.partitions_missing: 'needs to be present'
79
+ direct_assignments.partitions_format: 'needs to be true, list of partitions or a range of partitions (finite)'
80
+ direct_assignments_active_but_empty: 'cannot be empty and active at the same time'
81
+ direct_assignments_swarm_not_complete: 'cannot have partitions that are assigned but not allocated'
82
+ direct_assignments_swarm_overbooked: 'cannot allocate partitions in swarm that were not assigned'
83
+ direct_assignments_patterns_active: 'patterns cannot be used with direct assignments'
84
+
85
+ adaptive_iterator.active_missing: needs to be present
86
+ adaptive_iterator.active_format: 'needs to be boolean'
87
+ adaptive_iterator.marking_method_format: 'needs to be either #mark_as_consumed or #mark_as_consumed!'
88
+ adaptive_iterator.clean_after_yielding_format: 'needs to be boolean'
89
+ adaptive_iterator.safety_margin_format: 'needs to be between 1 and 99'
90
+ adaptive_iterator_with_virtual_partitions: 'cannot be used with virtual partitions'
91
+ adaptive_iterator_with_long_running_job: 'cannot be used with long running jobs'
92
+
93
+ consumer_group:
94
+ patterns_format: must be an array with hashes
95
+ patterns_missing: needs to be present
96
+ patterns_regexps_not_unique: 'must be unique within consumer group'
97
+
98
+ direct_assignments_homogenous: 'single consumer group cannot mix regular and direct assignments'
99
+
100
+ parallel_segments.partitioner_format: needs to be defined and needs to respond to `#call`
101
+ parallel_segments.partitioner_respond_to_call: needs to be defined and needs to respond to `#call`
102
+ parallel_segments.count_format: needs to be equal or more than 1
103
+ parallel_segments.active_format: needs to be boolean
104
+ parallel_segments.reducer_format: "needs to respond to `#call`"
105
+ parallel_segments.merge_key_format: "needs to be a non-empty string"
103
106
 
104
107
  pattern:
105
108
  regexp_format: must be a regular expression
@@ -107,40 +110,38 @@ en:
107
110
  regexp_string_format: 'needs to be a string and start with ^'
108
111
  missing: needs to be present
109
112
 
110
- config:
111
- encryption.active_format: 'needs to be either true or false'
112
- encryption.public_key_invalid: 'is not a valid public RSA key'
113
- encryption.public_key_needs_to_be_public: 'is a private RSA key not a public one'
114
- encryption.private_keys_format: 'needs to be a hash of version and private key value'
115
- encryption.private_keys_need_to_be_private: 'all keys need to be private'
116
- encryption.version_format: must be a non-empty string
117
- encryption.public_key_format: 'is not a valid public RSA key'
118
- encryption.private_keys_invalid: 'contains an invalid private RSA key string'
119
- encryption.fingerprinter_missing: 'needs to be false or respond to #hexdigest method'
120
- encryption.fingerprinter_format: 'needs to be false or respond to #hexdigest method'
121
-
122
- patterns.ttl_format: needs to be an integer bigger than 0
123
- patterns.ttl_missing: needs to be present
124
-
125
- recurring_tasks.consumer_class_format: 'needs to inherit from Karafka::BaseConsumer'
126
- recurring_tasks.group_id_format: 'needs to be a string with a Kafka accepted format'
127
- recurring_tasks.topics.schedules.name_format: 'needs to be a string with a Kafka accepted format'
128
- recurring_tasks.topics.logs.name_format: 'needs to be a string with a Kafka accepted format'
129
- recurring_tasks.interval_format: 'needs to be equal or more than 1000 and an integer'
130
- recurring_tasks.deserializer_format: 'needs to be configured'
131
- recurring_tasks.logging_format: needs to be a boolean
132
-
133
- scheduled_messages.consumer_class_format: 'must be a class'
134
- scheduled_messages.dispatcher_class_format: 'must be a class'
135
- scheduled_messages.flush_batch_size_format: needs to be an integer bigger than 0
136
- scheduled_messages.interval_format: needs to be an integer bigger or equal to 1000
137
- scheduled_messages.deserializers.headers_format: cannot be nil
138
- scheduled_messages.deserializers.payload_format: cannot be nil
139
- scheduled_messages.group_id_format: 'needs to be a string with a Kafka accepted format'
140
- scheduled_messages.states_postfix_format: 'needs to be a string with a Kafka accepted format'
141
-
142
- routing:
143
- swarm_nodes_not_used: 'At least one of the nodes has no assignments'
113
+ setup:
114
+ config:
115
+ encryption.active_format: 'needs to be either true or false'
116
+ encryption.public_key_invalid: 'is not a valid public RSA key'
117
+ encryption.public_key_needs_to_be_public: 'is a private RSA key not a public one'
118
+ encryption.private_keys_format: 'needs to be a hash of version and private key value'
119
+ encryption.private_keys_need_to_be_private: 'all keys need to be private'
120
+ encryption.version_format: must be a non-empty string
121
+ encryption.public_key_format: 'is not a valid public RSA key'
122
+ encryption.private_keys_invalid: 'contains an invalid private RSA key string'
123
+ encryption.fingerprinter_missing: 'needs to be false or respond to #hexdigest method'
124
+ encryption.fingerprinter_format: 'needs to be false or respond to #hexdigest method'
125
+
126
+ patterns.ttl_format: needs to be an integer bigger than 0
127
+ patterns.ttl_missing: needs to be present
128
+
129
+ recurring_tasks.consumer_class_format: 'needs to inherit from Karafka::BaseConsumer'
130
+ recurring_tasks.group_id_format: 'needs to be a string with a Kafka accepted format'
131
+ recurring_tasks.topics.schedules.name_format: 'needs to be a string with a Kafka accepted format'
132
+ recurring_tasks.topics.logs.name_format: 'needs to be a string with a Kafka accepted format'
133
+ recurring_tasks.interval_format: 'needs to be equal or more than 1000 and an integer'
134
+ recurring_tasks.deserializer_format: 'needs to be configured'
135
+ recurring_tasks.logging_format: needs to be a boolean
136
+
137
+ scheduled_messages.consumer_class_format: 'must be a class'
138
+ scheduled_messages.dispatcher_class_format: 'must be a class'
139
+ scheduled_messages.flush_batch_size_format: needs to be an integer bigger than 0
140
+ scheduled_messages.interval_format: needs to be an integer bigger or equal to 1000
141
+ scheduled_messages.deserializers.headers_format: cannot be nil
142
+ scheduled_messages.deserializers.payload_format: cannot be nil
143
+ scheduled_messages.group_id_format: 'needs to be a string with a Kafka accepted format'
144
+ scheduled_messages.states_postfix_format: 'needs to be a string with a Kafka accepted format'
144
145
 
145
146
  recurring_tasks:
146
147
  id_format: 'can include only alphanumeric characters (a-z, A-Z, 0-9), hyphens (-), and underscores (_)'
data/karafka.gemspec CHANGED
@@ -22,12 +22,12 @@ Gem::Specification.new do |spec|
22
22
  DESC
23
23
 
24
24
  spec.add_dependency 'base64', '~> 0.2'
25
- spec.add_dependency 'karafka-core', '>= 2.5.2', '< 2.6.0'
26
- spec.add_dependency 'karafka-rdkafka', '>= 0.19.5'
25
+ spec.add_dependency 'karafka-core', '>= 2.5.6', '< 2.6.0'
26
+ spec.add_dependency 'karafka-rdkafka', '>= 0.21.0'
27
27
  spec.add_dependency 'waterdrop', '>= 2.8.3', '< 3.0.0'
28
28
  spec.add_dependency 'zeitwerk', '~> 2.3'
29
29
 
30
- spec.required_ruby_version = '>= 3.0.0'
30
+ spec.required_ruby_version = '>= 3.1.0'
31
31
 
32
32
  spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(spec)/}) }
33
33
  spec.executables = %w[karafka]
@@ -4,9 +4,33 @@
4
4
  module ActiveJob
5
5
  # ActiveJob queue adapters
6
6
  module QueueAdapters
7
+ # Determine the appropriate base class for the Karafka adapter.
8
+ #
9
+ # This complex inheritance logic addresses a Rails 7.1 compatibility issue where
10
+ # ActiveJob::QueueAdapters::AbstractAdapter is not properly autoloaded during
11
+ # early initialization phases, causing "uninitialized constant" errors.
12
+ #
13
+ # The issue occurs because:
14
+ # 1. AbstractAdapter is autoloaded, not directly required in Rails 7+
15
+ # 2. Rails 7.1 has specific timing issues during the boot process
16
+ # 3. Queue adapters may be loaded before Rails completes initialization
17
+ #
18
+ # Inheritance strategy:
19
+ # - Rails 7.1: Inherit from Object (avoids AbstractAdapter autoloading issues)
20
+ # - Other Rails versions: Inherit from AbstractAdapter (normal behavior)
21
+ # - No Rails: Inherit from Object (standalone ActiveJob usage)
22
+ #
23
+ # @see https://github.com/sidekiq/sidekiq/issues/6746 Similar issue in Sidekiq
24
+ base = if defined?(Rails) && defined?(Rails::VERSION)
25
+ (Rails::VERSION::MAJOR == 7 && Rails::VERSION::MINOR < 2 ? Object : AbstractAdapter)
26
+ else
27
+ # Fallback when Rails is not loaded
28
+ Object
29
+ end
30
+
7
31
  # Karafka adapter for enqueuing jobs
8
32
  # This is here for ease of integration with ActiveJob.
9
- class KarafkaAdapter
33
+ class KarafkaAdapter < base
10
34
  include Karafka::Helpers::ConfigImporter.new(
11
35
  dispatcher: %i[internal active_job dispatcher]
12
36
  )
@@ -40,6 +64,11 @@ module ActiveJob
40
64
  def enqueue_after_transaction_commit?
41
65
  true
42
66
  end
67
+
68
+ # @return [Boolean] should we stop the job. Used by the ActiveJob continuation feature
69
+ def stopping?
70
+ Karafka::App.done?
71
+ end
43
72
  end
44
73
  end
45
74
  end
@@ -46,17 +46,27 @@ module Karafka
46
46
  end
47
47
  end
48
48
 
49
- # Raises info, that Karafka backend does not support scheduling jobs
49
+ # Raises info, that Karafka backend does not support scheduling jobs if someone wants to
50
+ # schedule jobs in the future. It works for past and present because we want to support
51
+ # things like continuation and `#retry_on` API with no wait and no jitter
50
52
  #
51
- # @param _job [Object] job we cannot enqueue
52
- # @param _timestamp [Time] time when job should run
53
+ # @param job [Object] job we cannot enqueue
54
+ # @param timestamp [Time] time when job should run
53
55
  #
54
- # @note Karafka Pro supports this feature
55
- def dispatch_at(_job, _timestamp)
56
- raise NotImplementedError, <<~ERROR_MESSAGE
57
- This queueing backend does not support scheduling jobs.
58
- Consider using Karafka Pro, which supports this via the Scheduled Messages feature.
59
- ERROR_MESSAGE
56
+ # @note Karafka Pro supports future jobs
57
+ #
58
+ # @note In order for jobs to work with this you need to set jitter to false and no wait
59
+ def dispatch_at(job, timestamp)
60
+ # Dispatch at is used by some of the ActiveJob features that actually do not back-off
61
+ # but things go via this API nonetheless.
62
+ if timestamp.to_f <= Time.now.to_f
63
+ dispatch(job)
64
+ else
65
+ raise NotImplementedError, <<~ERROR_MESSAGE
66
+ This queueing backend does not support scheduling future jobs.
67
+ Consider using Karafka Pro, which supports this via the Scheduled Messages feature.
68
+ ERROR_MESSAGE
69
+ end
60
70
  end
61
71
 
62
72
  private
@@ -1,7 +1,7 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Karafka
4
- module Admin
4
+ class Admin
5
5
  # Struct and set of operations for ACLs management that simplifies their usage.
6
6
  # It allows to use Ruby symbol based definitions instead of usage of librdkafka types
7
7
  # (it allows to use rdkafka numerical types as well out of the box)
@@ -10,11 +10,7 @@ module Karafka
10
10
  #
11
11
  # This API works based on ability to create a `Karafka:Admin::Acl` object that can be then used
12
12
  # using `#create`, `#delete` and `#describe` class API.
13
- class Acl
14
- extend Helpers::ConfigImporter.new(
15
- max_wait_time: %i[admin max_wait_time]
16
- )
17
-
13
+ class Acl < Admin
18
14
  # Types of resources for which we can assign permissions.
19
15
  #
20
16
  # Resource refers to any entity within the Kafka ecosystem for which access control can be
@@ -31,7 +27,9 @@ module Karafka
31
27
  # use when you want to assign acl to a given consumer group
32
28
  consumer_group: Rdkafka::Bindings::RD_KAFKA_RESOURCE_GROUP,
33
29
  # use when you want to assign acl to a given broker
34
- broker: Rdkafka::Bindings::RD_KAFKA_RESOURCE_BROKER
30
+ broker: Rdkafka::Bindings::RD_KAFKA_RESOURCE_BROKER,
31
+ # use when you want to assign acl to a transactional id
32
+ transactional_id: Rdkafka::Bindings::RD_KAFKA_RESOURCE_TRANSACTIONAL_ID
35
33
  }.freeze
36
34
 
37
35
  # Resource pattern types define how ACLs (Access Control Lists) are applied to resources,
@@ -165,7 +163,7 @@ module Karafka
165
163
  # Yields admin instance, allows to run Acl operations and awaits on the final result
166
164
  # Makes sure that admin is closed afterwards.
167
165
  def with_admin_wait
168
- Admin.with_admin do |admin|
166
+ with_admin do |admin|
169
167
  yield(admin).wait(max_wait_timeout: max_wait_time)
170
168
  end
171
169
  end
@@ -229,6 +227,7 @@ module Karafka
229
227
  @host = host
230
228
  @operation = map(operation, OPERATIONS_MAP)
231
229
  @permission_type = map(permission_type, PERMISSION_TYPES_MAP)
230
+ super()
232
231
  freeze
233
232
  end
234
233
 
@@ -1,8 +1,8 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Karafka
4
- module Admin
5
- module Configs
4
+ class Admin
5
+ class Configs
6
6
  # Represents a single config entry that is related to a resource
7
7
  class Config
8
8
  attr_reader :name, :value, :synonyms
@@ -1,8 +1,8 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Karafka
4
- module Admin
5
- module Configs
4
+ class Admin
5
+ class Configs
6
6
  # Represents a single resource in the context of configuration management
7
7
  class Resource
8
8
  # Types of resources that have workable configs.
@@ -1,7 +1,7 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Karafka
4
- module Admin
4
+ class Admin
5
5
  # Namespace for admin operations related to configuration management
6
6
  #
7
7
  # At the moment Karafka supports configuration management for brokers and topics
@@ -9,11 +9,7 @@ module Karafka
9
9
  # You can describe configuration as well as alter it.
10
10
  #
11
11
  # Altering is done in the incremental way.
12
- module Configs
13
- extend Helpers::ConfigImporter.new(
14
- max_wait_time: %i[admin max_wait_time]
15
- )
16
-
12
+ class Configs < Admin
17
13
  class << self
18
14
  # Fetches given resources configurations from Kafka
19
15
  #
@@ -97,7 +93,7 @@ module Karafka
97
93
  # Yields admin instance, allows to run Acl operations and awaits on the final result
98
94
  # Makes sure that admin is closed afterwards.
99
95
  def with_admin_wait
100
- Admin.with_admin do |admin|
96
+ with_admin do |admin|
101
97
  yield(admin).wait(max_wait_timeout: max_wait_time)
102
98
  end
103
99
  end