karafka 2.5.0.rc2 → 2.5.1.beta1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/{ci.yml → ci_linux_ubuntu_x86_64_gnu.yml} +54 -30
- data/.github/workflows/ci_macos_arm64.yml +148 -0
- data/.github/workflows/push.yml +2 -2
- data/.github/workflows/trigger-wiki-refresh.yml +30 -0
- data/.github/workflows/verify-action-pins.yml +1 -1
- data/.ruby-version +1 -1
- data/CHANGELOG.md +29 -2
- data/Gemfile +2 -1
- data/Gemfile.lock +56 -27
- data/README.md +2 -2
- data/bin/integrations +3 -1
- data/bin/verify_kafka_warnings +2 -1
- data/config/locales/errors.yml +153 -152
- data/config/locales/pro_errors.yml +135 -134
- data/karafka.gemspec +3 -3
- data/lib/active_job/queue_adapters/karafka_adapter.rb +30 -1
- data/lib/karafka/active_job/dispatcher.rb +19 -9
- data/lib/karafka/admin/acl.rb +7 -8
- data/lib/karafka/admin/configs/config.rb +2 -2
- data/lib/karafka/admin/configs/resource.rb +2 -2
- data/lib/karafka/admin/configs.rb +3 -7
- data/lib/karafka/admin/consumer_groups.rb +351 -0
- data/lib/karafka/admin/topics.rb +206 -0
- data/lib/karafka/admin.rb +42 -451
- data/lib/karafka/base_consumer.rb +22 -0
- data/lib/karafka/{pro/contracts/server_cli_options.rb → cli/contracts/server.rb} +4 -12
- data/lib/karafka/cli/info.rb +1 -1
- data/lib/karafka/cli/install.rb +0 -2
- data/lib/karafka/connection/client.rb +8 -0
- data/lib/karafka/connection/listener.rb +5 -1
- data/lib/karafka/connection/status.rb +12 -9
- data/lib/karafka/errors.rb +0 -8
- data/lib/karafka/instrumentation/assignments_tracker.rb +16 -0
- data/lib/karafka/instrumentation/logger_listener.rb +109 -50
- data/lib/karafka/pro/active_job/dispatcher.rb +5 -0
- data/lib/karafka/pro/cleaner/messages/messages.rb +18 -8
- data/lib/karafka/pro/cli/contracts/server.rb +106 -0
- data/lib/karafka/pro/encryption/contracts/config.rb +1 -1
- data/lib/karafka/pro/loader.rb +1 -1
- data/lib/karafka/pro/recurring_tasks/contracts/config.rb +1 -1
- data/lib/karafka/pro/routing/features/adaptive_iterator/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/adaptive_iterator/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/dead_letter_queue/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/delaying/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/delaying/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/direct_assignments/contracts/consumer_group.rb +1 -1
- data/lib/karafka/pro/routing/features/direct_assignments/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/direct_assignments/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/expiring/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/expiring/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/filtering/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/filtering/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/inline_insights/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/inline_insights/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/long_running_job/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/long_running_job/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/multiplexing/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/multiplexing.rb +1 -1
- data/lib/karafka/pro/routing/features/offset_metadata/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/offset_metadata/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/parallel_segments/contracts/consumer_group.rb +1 -1
- data/lib/karafka/pro/routing/features/patterns/contracts/consumer_group.rb +1 -1
- data/lib/karafka/pro/routing/features/patterns/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/patterns/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/pausing/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/periodic_job/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/periodic_job/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/recurring_tasks/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/recurring_tasks/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/scheduled_messages/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/scheduled_messages/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/swarm/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/swarm/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/throttling/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/throttling/topic.rb +9 -0
- data/lib/karafka/pro/routing/features/virtual_partitions/contracts/topic.rb +1 -1
- data/lib/karafka/pro/routing/features/virtual_partitions/topic.rb +9 -0
- data/lib/karafka/pro/scheduled_messages/contracts/config.rb +1 -1
- data/lib/karafka/pro/scheduled_messages/daily_buffer.rb +9 -3
- data/lib/karafka/pro/swarm/liveness_listener.rb +17 -2
- data/lib/karafka/processing/executor.rb +1 -1
- data/lib/karafka/routing/builder.rb +0 -3
- data/lib/karafka/routing/consumer_group.rb +1 -4
- data/lib/karafka/routing/contracts/consumer_group.rb +84 -0
- data/lib/karafka/routing/contracts/routing.rb +61 -0
- data/lib/karafka/routing/contracts/topic.rb +83 -0
- data/lib/karafka/routing/features/active_job/contracts/topic.rb +1 -1
- data/lib/karafka/routing/features/active_job/topic.rb +9 -0
- data/lib/karafka/routing/features/dead_letter_queue/contracts/topic.rb +1 -1
- data/lib/karafka/routing/features/dead_letter_queue/topic.rb +9 -0
- data/lib/karafka/routing/features/declaratives/contracts/topic.rb +1 -1
- data/lib/karafka/routing/features/declaratives/topic.rb +9 -0
- data/lib/karafka/routing/features/deserializers/contracts/topic.rb +1 -1
- data/lib/karafka/routing/features/deserializers/topic.rb +9 -0
- data/lib/karafka/routing/features/eofed/contracts/topic.rb +1 -1
- data/lib/karafka/routing/features/eofed/topic.rb +9 -0
- data/lib/karafka/routing/features/inline_insights/contracts/topic.rb +1 -1
- data/lib/karafka/routing/features/inline_insights/topic.rb +9 -0
- data/lib/karafka/routing/features/manual_offset_management/contracts/topic.rb +1 -1
- data/lib/karafka/routing/features/manual_offset_management/topic.rb +9 -0
- data/lib/karafka/routing/subscription_group.rb +1 -10
- data/lib/karafka/routing/topic.rb +9 -1
- data/lib/karafka/server.rb +2 -7
- data/lib/karafka/setup/attributes_map.rb +36 -0
- data/lib/karafka/setup/config.rb +6 -7
- data/lib/karafka/setup/contracts/config.rb +217 -0
- data/lib/karafka/setup/defaults_injector.rb +3 -1
- data/lib/karafka/swarm/node.rb +66 -6
- data/lib/karafka/swarm.rb +2 -2
- data/lib/karafka/templates/karafka.rb.erb +2 -7
- data/lib/karafka/version.rb +1 -1
- data/lib/karafka.rb +17 -18
- metadata +18 -15
- data/lib/karafka/contracts/config.rb +0 -210
- data/lib/karafka/contracts/consumer_group.rb +0 -81
- data/lib/karafka/contracts/routing.rb +0 -59
- data/lib/karafka/contracts/server_cli_options.rb +0 -92
- data/lib/karafka/contracts/topic.rb +0 -81
- data/lib/karafka/swarm/pidfd.rb +0 -147
data/config/locales/pro_errors.yml
CHANGED
@@ -1,105 +1,108 @@
  en:
  validations:
- [removed lines 3-102 not captured in this view]
+ routing:
+ swarm_nodes_not_used: 'At least one of the nodes has no assignments'
+
+ topic:
+ virtual_partitions.partitioner_respond_to_call: needs to be defined and needs to respond to `#call`
+ virtual_partitions.max_partitions_format: needs to be equal or more than 1
+ virtual_partitions.offset_metadata_strategy_format: needs to be either :exact or :current
+ virtual_partitions.reducer_format: "needs to respond to `#call`"
+ virtual_partitions.distribution_format: "needs to be either :consistent or :balanced"
+
+ long_running_job.active_format: needs to be either true or false
+
+ dead_letter_queue_with_virtual_partitions: when using Dead Letter Queue with Virtual Partitions, at least one retry is required.
+ dead_letter_queue.strategy_format: 'needs to respond to #call'
+ dead_letter_queue.strategy_missing: needs to be present
+
+ throttling.active_format: needs to be either true or false
+ throttling.limit_format: needs to be equal or more than 1
+ throttling.interval_format: needs to be equal or more than 1
+
+ filtering.active_missing: needs to be present
+ filtering.factory_format: 'needs to respond to #call'
+ filtering.factories_format: 'needs to contain only factories responding to #call'
+ filtering.active_format: 'needs to be boolean'
+
+ expiring.ttl_format: 'needs to be equal or more than 0 and an integer'
+ expiring.active_format: 'needs to be boolean'
+
+ delaying.delay_format: 'needs to be equal or more than 0 and an integer'
+ delaying.active_format: 'needs to be boolean'
+
+ pause_timeout_format: needs to be an integer bigger than 0
+ pause_max_timeout_format: needs to be an integer bigger than 0
+ pause_with_exponential_backoff_format: needs to be either true or false
+ pause_timeout_max_timeout_vs_pause_max_timeout: pause_timeout must be less or equal to pause_max_timeout
+
+ patterns.active_format: 'needs to be boolean'
+ patterns.type_format: 'needs to be :matcher, :discovered or :regular'
+
+ periodic_job.active_missing: needs to be present
+ periodic_job.active_format: 'needs to be boolean'
+ periodic_job.interval_missing: 'needs to be present'
+ periodic_job.interval_format: 'needs to be an integer equal or more than 100'
+ periodic_job.during_pause_format: 'needs to be boolean'
+ periodic_job.during_retry_format: 'needs to be boolean'
+ periodic_job.materialized_format: 'needs to be boolean'
+ periodic_job.materialized_missing: 'needs to be present'
+
+ inline_insights.active_format: 'needs to be boolean'
+ inline_insights.required_format: 'needs to be boolean'
+
+ offset_metadata.active_format: 'needs to be boolean'
+ offset_metadata.cache_format: 'needs to be boolean'
+ offset_metadata.deserializer_missing: needs to be present
+ offset_metadata.deserializer_format: 'needs to respond to #call'
+
+ subscription_group_details.multiplexing_min_format: 'needs to be an integer equal or more than 1'
+ subscription_group_details.multiplexing_max_format: 'needs to be an integer equal or more than 1'
+ subscription_group_details_multiplexing_min_max_mismatch: 'min needs to be equal or less than max'
+ subscription_group_details_multiplexing_boot_mismatch: 'boot needs to be between min and max'
+ subscription_group_details.multiplexing_boot_format: 'needs to be an integer equal or more than 1'
+ subscription_group_details.multiplexing_boot_not_dynamic: 'needs to be equal to max when not in dynamic mode'
+ subscription_group_details_multiplexing_one_not_enough: 'min and max cannot equal 1'
+ subscription_group_details.multiplexing_scale_delay_format: 'needs to be an integer equal or more than 1000'
+
+ swarm.active_format: needs to be true
+ swarm.nodes_format: needs to be a range, array of nodes ids or a hash with direct assignments
+ swarm_nodes_with_non_existent_nodes: includes unreachable nodes ids
+
+ recurring_tasks.active_format: 'needs to be boolean'
+ scheduled_messages.active_format: 'needs to be boolean'
+ scheduled_messages.active_missing: 'needs to be boolean'
+
+ direct_assignments.active_missing: needs to be present
+ direct_assignments.active_format: 'needs to be boolean'
+ direct_assignments.partitions_missing: 'needs to be present'
+ direct_assignments.partitions_format: 'needs to be true, list of partitions or a range of partitions (finite)'
+ direct_assignments_active_but_empty: 'cannot be empty and active at the same time'
+ direct_assignments_swarm_not_complete: 'cannot have partitions that are assigned but not allocated'
+ direct_assignments_swarm_overbooked: 'cannot allocate partitions in swarm that were not assigned'
+ direct_assignments_patterns_active: 'patterns cannot be used with direct assignments'
+
+ adaptive_iterator.active_missing: needs to be present
+ adaptive_iterator.active_format: 'needs to be boolean'
+ adaptive_iterator.marking_method_format: 'needs to be either #mark_as_consumed or #mark_as_consumed!'
+ adaptive_iterator.clean_after_yielding_format: 'needs to be boolean'
+ adaptive_iterator.safety_margin_format: 'needs to be between 1 and 99'
+ adaptive_iterator_with_virtual_partitions: 'cannot be used with virtual partitions'
+ adaptive_iterator_with_long_running_job: 'cannot be used with long running jobs'
+
+ consumer_group:
+ patterns_format: must be an array with hashes
+ patterns_missing: needs to be present
+ patterns_regexps_not_unique: 'must be unique within consumer group'
+
+ direct_assignments_homogenous: 'single consumer group cannot mix regular and direct assignments'
+
+ parallel_segments.partitioner_format: needs to be defined and needs to respond to `#call`
+ parallel_segments.partitioner_respond_to_call: needs to be defined and needs to respond to `#call`
+ parallel_segments.count_format: needs to be equal or more than 1
+ parallel_segments.active_format: needs to be boolean
+ parallel_segments.reducer_format: "needs to respond to `#call`"
+ parallel_segments.merge_key_format: "needs to be a non-empty string"

  pattern:
  regexp_format: must be a regular expression
@@ -107,40 +110,38 @@ en:
  regexp_string_format: 'needs to be a string and start with ^'
  missing: needs to be present

- [removed lines 110-141 not captured in this view]
- routing:
- swarm_nodes_not_used: 'At least one of the nodes has no assignments'
+ setup:
+ config:
+ encryption.active_format: 'needs to be either true or false'
+ encryption.public_key_invalid: 'is not a valid public RSA key'
+ encryption.public_key_needs_to_be_public: 'is a private RSA key not a public one'
+ encryption.private_keys_format: 'needs to be a hash of version and private key value'
+ encryption.private_keys_need_to_be_private: 'all keys need to be private'
+ encryption.version_format: must be a non-empty string
+ encryption.public_key_format: 'is not a valid public RSA key'
+ encryption.private_keys_invalid: 'contains an invalid private RSA key string'
+ encryption.fingerprinter_missing: 'needs to be false or respond to #hexdigest method'
+ encryption.fingerprinter_format: 'needs to be false or respond to #hexdigest method'
+
+ patterns.ttl_format: needs to be an integer bigger than 0
+ patterns.ttl_missing: needs to be present
+
+ recurring_tasks.consumer_class_format: 'needs to inherit from Karafka::BaseConsumer'
+ recurring_tasks.group_id_format: 'needs to be a string with a Kafka accepted format'
+ recurring_tasks.topics.schedules.name_format: 'needs to be a string with a Kafka accepted format'
+ recurring_tasks.topics.logs.name_format: 'needs to be a string with a Kafka accepted format'
+ recurring_tasks.interval_format: 'needs to be equal or more than 1000 and an integer'
+ recurring_tasks.deserializer_format: 'needs to be configured'
+ recurring_tasks.logging_format: needs to be a boolean
+
+ scheduled_messages.consumer_class_format: 'must be a class'
+ scheduled_messages.dispatcher_class_format: 'must be a class'
+ scheduled_messages.flush_batch_size_format: needs to be an integer bigger than 0
+ scheduled_messages.interval_format: needs to be an integer bigger or equal to 1000
+ scheduled_messages.deserializers.headers_format: cannot be nil
+ scheduled_messages.deserializers.payload_format: cannot be nil
+ scheduled_messages.group_id_format: 'needs to be a string with a Kafka accepted format'
+ scheduled_messages.states_postfix_format: 'needs to be a string with a Kafka accepted format'

  recurring_tasks:
  id_format: 'can include only alphanumeric characters (a-z, A-Z, 0-9), hyphens (-), and underscores (_)'
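These Pro locale keys surface as boot-time contract validation errors when a routing feature is misconfigured. A minimal sketch of a Pro routing block that would trip the 'virtual_partitions.max_partitions_format' message above (consumer class, topic name and partitioner are illustrative, not part of this diff):

class KarafkaApp < Karafka::App
  routes.draw do
    topic :orders do
      consumer OrdersConsumer # assumed consumer class
      virtual_partitions(
        partitioner: ->(message) { message.headers['order_id'] },
        max_partitions: 0 # invalid: 'needs to be equal or more than 1'
      )
    end
  end
end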
data/karafka.gemspec
CHANGED
@@ -22,12 +22,12 @@ Gem::Specification.new do |spec|
  DESC

  spec.add_dependency 'base64', '~> 0.2'
- spec.add_dependency 'karafka-core', '>= 2.5.…
- spec.add_dependency 'karafka-rdkafka', '>= 0.…
+ spec.add_dependency 'karafka-core', '>= 2.5.6', '< 2.6.0'
+ spec.add_dependency 'karafka-rdkafka', '>= 0.21.0'
  spec.add_dependency 'waterdrop', '>= 2.8.3', '< 3.0.0'
  spec.add_dependency 'zeitwerk', '~> 2.3'

- spec.required_ruby_version = '>= 3.…
+ spec.required_ruby_version = '>= 3.1.0'

  spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(spec)/}) }
  spec.executables = %w[karafka]
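For applications upgrading to this release, the tightened requirements above mean Ruby 3.1+ plus newer karafka-core and karafka-rdkafka. An illustrative Gemfile entry (Bundler resolves the transitive floors from the gemspec):

source 'https://rubygems.org'

# Pulls in karafka-core >= 2.5.6 (< 2.6.0) and karafka-rdkafka >= 0.21.0 transitively;
# the gem itself now requires Ruby >= 3.1.0 at install time
gem 'karafka', '>= 2.5.1.beta1'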
data/lib/active_job/queue_adapters/karafka_adapter.rb
CHANGED
@@ -4,9 +4,33 @@
  module ActiveJob
  # ActiveJob queue adapters
  module QueueAdapters
+ # Determine the appropriate base class for the Karafka adapter.
+ #
+ # This complex inheritance logic addresses a Rails 7.1 compatibility issue where
+ # ActiveJob::QueueAdapters::AbstractAdapter is not properly autoloaded during
+ # early initialization phases, causing "uninitialized constant" errors.
+ #
+ # The issue occurs because:
+ # 1. AbstractAdapter is autoloaded, not directly required in Rails 7+
+ # 2. Rails 7.1 has specific timing issues during the boot process
+ # 3. Queue adapters may be loaded before Rails completes initialization
+ #
+ # Inheritance strategy:
+ # - Rails 7.1: Inherit from Object (avoids AbstractAdapter autoloading issues)
+ # - Other Rails versions: Inherit from AbstractAdapter (normal behavior)
+ # - No Rails: Inherit from Object (standalone ActiveJob usage)
+ #
+ # @see https://github.com/sidekiq/sidekiq/issues/6746 Similar issue in Sidekiq
+ base = if defined?(Rails) && defined?(Rails::VERSION)
+ (Rails::VERSION::MAJOR == 7 && Rails::VERSION::MINOR < 2 ? Object : AbstractAdapter)
+ else
+ # Fallback when Rails is not loaded
+ Object
+ end
+
  # Karafka adapter for enqueuing jobs
  # This is here for ease of integration with ActiveJob.
- class KarafkaAdapter
+ class KarafkaAdapter < base
  include Karafka::Helpers::ConfigImporter.new(
  dispatcher: %i[internal active_job dispatcher]
  )
@@ -40,6 +64,11 @@ module ActiveJob
  def enqueue_after_transaction_commit?
  true
  end
+
+ # @return [Boolean] should we stop the job. Used by the ActiveJob continuation feature
+ def stopping?
+ Karafka::App.done?
+ end
  end
  end
  end
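For context, the adapter above is selected the standard ActiveJob way and the base-class logic runs transparently when the file is loaded. A minimal sketch, assuming a typical Rails application (the application module name is illustrative):

# config/application.rb
module ExampleApp
  class Application < Rails::Application
    # Jobs enqueued with #perform_later are produced to Kafka by the adapter above
    config.active_job.queue_adapter = :karafka
  end
end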
data/lib/karafka/active_job/dispatcher.rb
CHANGED
@@ -46,17 +46,27 @@ module Karafka
  end
  end

- # Raises info, that Karafka backend does not support scheduling jobs
+ # Raises info, that Karafka backend does not support scheduling jobs if someone wants to
+ # schedule jobs in the future. It works for past and present because we want to support
+ # things like continuation and `#retry_on` API with no wait and no jitter
  #
- # @param …
- # @param …
+ # @param job [Object] job we cannot enqueue
+ # @param timestamp [Time] time when job should run
  #
- # @note Karafka Pro supports …
- [removed lines 55-59 not captured in this view]
+ # @note Karafka Pro supports future jobs
+ #
+ # @note In order for jobs to work with this you need to set jitter to false and no wait
+ def dispatch_at(job, timestamp)
+ # Dispatch at is used by some of the ActiveJob features that actually do not back-off
+ # but things go via this API nonetheless.
+ if timestamp.to_f <= Time.now.to_f
+ dispatch(job)
+ else
+ raise NotImplementedError, <<~ERROR_MESSAGE
+ This queueing backend does not support scheduling future jobs.
+ Consider using Karafka Pro, which supports this via the Scheduled Messages feature.
+ ERROR_MESSAGE
+ end
+ end
  end

  private
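The reworked dispatch_at above only accepts timestamps that resolve to the present or the past, which is exactly what #retry_on produces when wait and jitter are disabled, per the notes in the diff. A hedged sketch (job class, error and arguments are illustrative):

class SyncJob < ActiveJob::Base
  self.queue_adapter = :karafka

  # wait: 0 and jitter: false make the retry timestamp resolve to "now",
  # so the OSS dispatcher takes the regular dispatch path instead of raising
  retry_on Timeout::Error, wait: 0, jitter: false, attempts: 5

  def perform(record_id)
    # work that may time out
  end
end

# Scheduling into the future still raises NotImplementedError without Karafka Pro:
#   SyncJob.set(wait: 600).perform_later(42)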
data/lib/karafka/admin/acl.rb
CHANGED
@@ -1,7 +1,7 @@
  # frozen_string_literal: true

  module Karafka
- [removed line not captured in this view]
+ class Admin
  # Struct and set of operations for ACLs management that simplifies their usage.
  # It allows to use Ruby symbol based definitions instead of usage of librdkafka types
  # (it allows to use rdkafka numerical types as well out of the box)
@@ -10,11 +10,7 @@ module Karafka
  #
  # This API works based on ability to create a `Karafka:Admin::Acl` object that can be then used
  # using `#create`, `#delete` and `#describe` class API.
- class Acl
- extend Helpers::ConfigImporter.new(
- max_wait_time: %i[admin max_wait_time]
- )
- [removed line not captured in this view]
+ class Acl < Admin
  # Types of resources for which we can assign permissions.
  #
  # Resource refers to any entity within the Kafka ecosystem for which access control can be
@@ -31,7 +27,9 @@ module Karafka
  # use when you want to assign acl to a given consumer group
  consumer_group: Rdkafka::Bindings::RD_KAFKA_RESOURCE_GROUP,
  # use when you want to assign acl to a given broker
- broker: Rdkafka::Bindings::RD_KAFKA_RESOURCE_BROKER
+ broker: Rdkafka::Bindings::RD_KAFKA_RESOURCE_BROKER,
+ # use when you want to assign acl to a transactional id
+ transactional_id: Rdkafka::Bindings::RD_KAFKA_RESOURCE_TRANSACTIONAL_ID
  }.freeze

  # Resource pattern types define how ACLs (Access Control Lists) are applied to resources,
@@ -165,7 +163,7 @@ module Karafka
  # Yields admin instance, allows to run Acl operations and awaits on the final result
  # Makes sure that admin is closed afterwards.
  def with_admin_wait
- [removed line not captured in this view]
+ with_admin do |admin|
  yield(admin).wait(max_wait_timeout: max_wait_time)
  end
  end
@@ -229,6 +227,7 @@ module Karafka
  @host = host
  @operation = map(operation, OPERATIONS_MAP)
  @permission_type = map(permission_type, PERMISSION_TYPES_MAP)
+ super()
  freeze
  end
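With the new transactional_id resource type, ACLs for transactional producers can be managed through the same Karafka::Admin::Acl API. A sketch based on the documented Acl interface (principal, host and id values are illustrative):

acl = Karafka::Admin::Acl.new(
  resource_type: :transactional_id,
  resource_name: 'checkout-tx',
  resource_pattern_type: :literal,
  principal: 'User:orders-service',
  host: '*',
  operation: :write,
  permission_type: :allow
)

Karafka::Admin::Acl.create(acl)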
data/lib/karafka/admin/configs.rb
CHANGED
@@ -1,7 +1,7 @@
  # frozen_string_literal: true

  module Karafka
- [removed line not captured in this view]
+ class Admin
  # Namespace for admin operations related to configuration management
  #
  # At the moment Karafka supports configuration management for brokers and topics
@@ -9,11 +9,7 @@ module Karafka
  # You can describe configuration as well as alter it.
  #
  # Altering is done in the incremental way.
- [removed line not captured in this view]
- extend Helpers::ConfigImporter.new(
- max_wait_time: %i[admin max_wait_time]
- )
- [removed line not captured in this view]
+ class Configs < Admin
  class << self
  # Fetches given resources configurations from Kafka
  #
@@ -97,7 +93,7 @@ module Karafka
  # Yields admin instance, allows to run Acl operations and awaits on the final result
  # Makes sure that admin is closed afterwards.
  def with_admin_wait
- [removed line not captured in this view]
+ with_admin do |admin|
  yield(admin).wait(max_wait_timeout: max_wait_time)
  end
  end
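Admin::Configs now inherits the shared admin plumbing (including #with_admin and max_wait_time) from Karafka::Admin rather than importing it itself; the public class API is unchanged. A short usage sketch based on the documented Configs API (topic name is illustrative):

resource = Karafka::Admin::Configs::Resource.new(type: :topic, name: 'payments')

Karafka::Admin::Configs.describe(resource).each do |described_resource|
  described_resource.configs.each do |config|
    puts "#{config.name}: #{config.value}"
  end
end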