karafka 2.4.5 → 2.4.7
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/CHANGELOG.md +15 -1
- data/Gemfile.lock +10 -10
- data/LICENSE +1 -1
- data/docker-compose.yml +1 -1
- data/karafka.gemspec +1 -1
- data/lib/karafka/cli/server.rb +1 -0
- data/lib/karafka/cli/swarm.rb +1 -0
- data/lib/karafka/connection/client.rb +58 -7
- data/lib/karafka/embedded.rb +2 -1
- data/lib/karafka/routing/consumer_group.rb +9 -14
- data/lib/karafka/routing/subscription_group.rb +8 -0
- data/lib/karafka/server.rb +16 -0
- data/lib/karafka/setup/attributes_map.rb +4 -0
- data/lib/karafka/setup/config.rb +0 -49
- data/lib/karafka/setup/defaults_injector.rb +64 -0
- data/lib/karafka/swarm/node.rb +2 -0
- data/lib/karafka/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +4 -3
- metadata.gz.sig +0 -0
checksums.yaml CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6c94c3a4e646233f535e63bcf7a65c88d656f534bfccd21cad87be59b525adb1
+  data.tar.gz: 63b6ad0491880325d6ac0e5a7367f2a938e8a36245bd1cff48d3de73da6266c6
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6c04bcd1f7ed17855140e519a82a52c0cae93a3c41126120ab797bbcf172315f7f7720f12e233cc85eed4af2dd437a3364f7f5613f730c9b2e32f8b79225570f
+  data.tar.gz: 3441c4ac2230c5903dacd210b54c80c05f72c2804490229334d10656672c4ef9d9b3160c12cdd4d22916d55f1687a730980b0b3fc21099a003f36238f58025eb
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED

@@ -1,4 +1,18 @@
-# Karafka
+# Karafka Framework Changelog
+
+## 2.4.7 (2024-08-01)
+- [Enhancement] Introduce `Karafka::Server.execution_mode` to check in what mode the Karafka process operates (`standalone`, `swarm`, `supervisor`, `embedded`).
+- [Enhancement] Ensure `max.poll.interval.ms` is always present and populate it with the librdkafka default.
+- [Enhancement] Introduce a shutdown time limit for the unsubscription wait.
+- [Enhancement] Tag each of the running swarm consumers with `mode:swarm`.
+- [Change] Tag the embedded consumers with `mode:embedded` instead of `embedded`.
+- [Fix] License identifier `LGPL-3.0` is deprecated for SPDX (#2177).
+- [Fix] Fix an issue where custom clusters would not have default settings populated the same way as the primary cluster.
+- [Fix] Fix RSpec warnings about nil mocks.
+- [Maintenance] Cover `cooperative-sticky` librdkafka issues with an integration spec.
+
+## 2.4.6 (2024-07-22)
+- [Fix] Mitigate `rd_kafka_cgrp_terminated` and other `librdkafka` shutdown issues by unsubscribing fully prior to shutdown.
 
 ## 2.4.5 (2024-07-18)
 - [Change] Inject `client.id` when building the subscription group and not during the initial setup.
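For readers who want to verify the `max.poll.interval.ms` guarantee mentioned above, here is a hedged sketch of inspecting a built subscription group's final consumer config (assumes a booted app; the `sg` naming is ours, not Karafka's):

    # Since 2.4.7 the consumer config of every subscription group should
    # contain max.poll.interval.ms, injected with the librdkafka default
    # (300_000 ms) unless the user set it explicitly.
    sg = Karafka::App.subscription_groups.values.flatten.first
    sg.kafka[:'max.poll.interval.ms'] # => 300000 (or the user-provided value)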
data/Gemfile.lock CHANGED

@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    karafka (2.4.5)
+    karafka (2.4.7)
       base64 (~> 0.2)
       karafka-core (>= 2.4.3, < 2.5.0)
       waterdrop (>= 2.7.3, < 3.0.0)
@@ -39,13 +39,13 @@ GEM
     activesupport (>= 6.1)
     i18n (1.14.5)
       concurrent-ruby (~> 1.0)
-    karafka-core (2.4.
-      karafka-rdkafka (>= 0.15.0, < 0.
-    karafka-rdkafka (0.
+    karafka-core (2.4.4)
+      karafka-rdkafka (>= 0.15.0, < 0.18.0)
+    karafka-rdkafka (0.17.1)
       ffi (~> 1.15)
       mini_portile2 (~> 2.6)
       rake (> 12)
-    karafka-testing (2.4.
+    karafka-testing (2.4.6)
       karafka (>= 2.4.0, < 2.5.0)
       waterdrop (>= 2.7.0)
     karafka-web (0.9.1)
@@ -58,7 +58,7 @@ GEM
     minitest (5.24.0)
     mutex_m (0.2.0)
     ostruct (0.6.0)
-    rack (3.1.
+    rack (3.1.5)
     rake (13.2.1)
     roda (3.81.0)
       rack
@@ -84,11 +84,11 @@ GEM
     tilt (2.4.0)
     tzinfo (2.0.6)
       concurrent-ruby (~> 1.0)
-    waterdrop (2.7.
-      karafka-core (>= 2.4.
+    waterdrop (2.7.4)
+      karafka-core (>= 2.4.3, < 3.0.0)
       karafka-rdkafka (>= 0.15.1)
       zeitwerk (~> 2.3)
-    zeitwerk (2.6.
+    zeitwerk (2.6.17)
 
 PLATFORMS
   ruby
@@ -106,4 +106,4 @@ DEPENDENCIES
   simplecov
 
 BUNDLED WITH
-   2.5.
+   2.5.11
data/LICENSE CHANGED

@@ -1,7 +1,7 @@
 Copyright (c) Maciej Mensfeld
 
 Karafka is an Open Source project licensed under the terms of
-the LGPLv3 license. Please see <https://github.com/karafka/karafka/blob/master/LGPL>
+the LGPLv3 license. Please see <https://github.com/karafka/karafka/blob/master/LICENSE-LGPL>
 for license text.
 
 Karafka has also commercial-friendly license, commercial support and commercial components.
data/docker-compose.yml CHANGED
data/karafka.gemspec CHANGED

@@ -12,7 +12,7 @@ Gem::Specification.new do |spec|
   spec.authors = ['Maciej Mensfeld']
   spec.email = %w[contact@karafka.io]
   spec.homepage = 'https://karafka.io'
-  spec.licenses = %w[LGPL-3.0 Commercial]
+  spec.licenses = %w[LGPL-3.0-only Commercial]
   spec.summary = 'Karafka is Ruby and Rails efficient Kafka processing framework.'
   spec.description = <<-DESC
     Karafka is Ruby and Rails efficient Kafka processing framework.
data/lib/karafka/cli/server.rb CHANGED

data/lib/karafka/cli/swarm.rb CHANGED
data/lib/karafka/connection/client.rb CHANGED

@@ -8,6 +8,8 @@ module Karafka
   # It is threadsafe and provides some security measures so we won't end up operating on a
   # closed consumer instance as it causes Ruby VM process to crash.
   class Client
+    include ::Karafka::Core::Helpers::Time
+
     attr_reader :rebalance_manager
 
     # @return [Karafka::Routing::SubscriptionGroup] subscription group to which this client
@@ -24,7 +26,12 @@ module Karafka
     # How many times should we retry polling in case of a failure
     MAX_POLL_RETRIES = 20
 
-    private_constant :MAX_POLL_RETRIES
+    # How much of the total shutdown time can we wait for our manual unsubscribe before
+    # attempting to close without unsubscribe. We try to wait for 50% of the shutdown time
+    # before we move to a regular close.
+    COOP_UNSUBSCRIBE_FACTOR = 0.5
+
+    private_constant :MAX_POLL_RETRIES, :COOP_UNSUBSCRIBE_FACTOR
@@ -257,14 +264,36 @@ module Karafka
 
     # Gracefully stops topic consumption.
     def stop
-      #
-      #
-      #
+      # librdkafka has several constant issues when shutting down during rebalance. This is
+      # an issue that comes back every few versions of librdkafka in a limited scope, for
+      # example for cooperative-sticky assignments, or in a general scope. This is why we
+      # unsubscribe and wait until we no longer have any assignments. That way librdkafka
+      # consumer shutdown should never happen with a rebalance associated with the given
+      # consumer instance. Since we do not want to wait forever, we also impose a limit on how
+      # long we should wait. This prioritizes shutdown stability over an endless wait.
+      #
+      # The `@unsubscribing` flag ensures that when there is a direct close attempt, it
+      # won't get into this loop again. This can happen when supervision decides it should
+      # close things faster.
+      #
+      # @see https://github.com/confluentinc/librdkafka/issues/4792
       # @see https://github.com/confluentinc/librdkafka/issues/4527
-      if
+      if unsubscribe?
+        @unsubscribing = true
+
+        # Give 50% of the time for the final close before we reach the forceful shutdown
+        max_wait = ::Karafka::App.config.shutdown_timeout * COOP_UNSUBSCRIBE_FACTOR
+        used = 0
+        stopped_at = monotonic_now
+
         unsubscribe
 
         until assignment.empty?
+          used += monotonic_now - stopped_at
+          stopped_at = monotonic_now
+
+          break if used >= max_wait
+
           sleep(0.1)
 
           ping
@@ -658,8 +687,13 @@ module Karafka
       subscriptions = @subscription_group.subscriptions
       assignments = @subscription_group.assignments(consumer)
 
-      consumer.subscribe(*subscriptions) if subscriptions
-      consumer.assign(assignments) if assignments
+      if subscriptions
+        consumer.subscribe(*subscriptions)
+        @mode = :subscribe
+      elsif assignments
+        consumer.assign(assignments)
+        @mode = :assign
+      end
 
       consumer
     end
@@ -690,6 +724,23 @@ module Karafka
       @kafka.start
       @kafka
     end
+
+    # Decides whether or not we should unsubscribe prior to closing.
+    #
+    # We cannot do it when there is a static group membership assignment as it would get
+    # reassigned.
+    # We also cannot do it in the assign mode because then there are no subscriptions.
+    # We also do not do it if there are no assignments at all as it does not make sense.
+    #
+    # @return [Boolean] should we unsubscribe prior to shutdown
+    def unsubscribe?
+      return false if @unsubscribing
+      return false if @subscription_group.kafka.key?(:'group.instance.id')
+      return false if @mode != :subscribe
+      return false if assignment.empty?
+
+      true
+    end
   end
 end
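The new time-boxed wait in `#stop` can be read in isolation. Below is a simplified sketch of the same accounting loop, with `assignment_empty?` standing in for the real client state (the method names are ours, not Karafka's API):

    # Simplified model of the bounded unsubscribe wait from Client#stop.
    # shutdown_timeout_ms is in milliseconds, like Karafka's setting.
    def bounded_wait(shutdown_timeout_ms, factor: 0.5)
      max_wait = shutdown_timeout_ms * factor
      used = 0.0
      stopped_at = Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond)

      until assignment_empty?
        now = Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond)
        used += now - stopped_at
        stopped_at = now

        # Prioritize shutdown stability over an endless wait
        break if used >= max_wait

        sleep(0.1)
      end
    end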
data/lib/karafka/embedded.rb CHANGED

@@ -27,7 +27,8 @@ module Karafka
       Thread.new do
         Thread.current.name = 'karafka.embedded'
 
-        Karafka::Process.tags.add(:execution_mode, 'embedded')
+        Karafka::Process.tags.add(:execution_mode, 'mode:embedded')
+        Karafka::Server.execution_mode = :embedded
         Karafka::Server.start
       end
     end
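For context, the embedded API this change touches is started from within a host process; a minimal usage sketch (the initializer placement is illustrative):

    # Start Karafka inside another Ruby process (e.g. from an app initializer).
    # After this change such a process tags itself with mode:embedded and
    # Karafka::Server.execution_mode returns :embedded.
    Karafka::Embedded.start

    # Stop it together with the host process
    at_exit { Karafka::Embedded.stop }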
data/lib/karafka/routing/consumer_group.rb CHANGED

@@ -8,6 +8,12 @@ module Karafka
     # @note A single consumer group represents Kafka consumer group, but it may not match 1:1
     #   with subscription groups. There can be more subscription groups than consumer groups
     class ConsumerGroup
+      include Helpers::ConfigImporter.new(
+        activity_manager: %i[internal routing activity_manager],
+        builder: %i[internal routing builder],
+        subscription_groups_builder: %i[internal routing subscription_groups_builder]
+      )
+
       attr_reader :id, :topics, :name
 
       # This is a "virtual" attribute that is not building subscription groups.
@@ -32,7 +38,7 @@ module Karafka
 
       # @return [Boolean] true if this consumer group should be active in our current process
       def active?
-
+        activity_manager.active?(:consumer_groups, name)
       end
 
       # Builds a topic representation inside of a current consumer group route
@@ -43,7 +49,7 @@ module Karafka
         topic = Topic.new(name, self)
         @topics << Proxy.new(
           topic,
-
+          builder.defaults,
           &block
         ).target
         built_topic = @topics.last
@@ -72,11 +78,7 @@ module Karafka
       # @return [Array<Routing::SubscriptionGroup>] all the subscription groups built based on
       #   the consumer group topics
       def subscription_groups
-        @subscription_groups ||= config
-          .internal
-          .routing
-          .subscription_groups_builder
-          .call(topics)
+        @subscription_groups ||= subscription_groups_builder.call(topics)
       end
 
       # Hashed version of consumer group that can be used for validation purposes
@@ -88,13 +90,6 @@ module Karafka
           id: id
         }.freeze
       end
-
-      private
-
-      # @return [Karafka::Core::Configurable::Node] root node config
-      def config
-        ::Karafka::App.config
-      end
     end
   end
 end
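`Helpers::ConfigImporter` comes from karafka-core; roughly, it is a `Module` subclass that defines readers resolving nested config paths, which is what lets the ad-hoc `::Karafka::App.config` digging above disappear. A conceptual sketch of the pattern only, not the karafka-core source:

    # Conceptual illustration of the ConfigImporter pattern (NOT karafka-core's
    # actual implementation): a mixin whose readers walk the app config tree.
    class ConfigImporterSketch < Module
      def initialize(**mappings)
        super()
        mappings.each do |name, path|
          define_method(name) do
            # Walk e.g. %i[internal routing activity_manager] down the config
            path.reduce(::Karafka::App.config) { |node, key| node.public_send(key) }
          end
        end
      end
    end

    # Usage mirrors the diff above:
    # include ConfigImporterSketch.new(activity_manager: %i[internal routing activity_manager])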
data/lib/karafka/routing/subscription_group.rb CHANGED

@@ -120,6 +120,7 @@ module Karafka
       def build_kafka
         kafka = Setup::AttributesMap.consumer(@topics.first.kafka.dup)
 
+        inject_defaults(kafka)
         inject_group_instance_id(kafka)
         inject_client_id(kafka)
 
@@ -132,6 +133,13 @@ module Karafka
         kafka
       end
 
+      # Injects (if needed) the defaults
+      #
+      # @param kafka [Hash] kafka level config
+      def inject_defaults(kafka)
+        Setup::DefaultsInjector.consumer(kafka)
+      end
+
       # Sets (if needed) the client.id attribute
       #
       # @param kafka [Hash] kafka level config
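Because `inject_defaults` only fills keys that are absent, explicit routing-level `kafka` settings keep winning, including on secondary clusters; a hedged routing sketch (`EventsConsumer` and the broker addresses are placeholders):

    class KarafkaApp < Karafka::App
      setup do |config|
        config.kafka = { 'bootstrap.servers': 'localhost:9092' }
      end

      routes.draw do
        topic :events do
          # A per-topic cluster override. Defaults such as statistics.interval.ms
          # and max.poll.interval.ms are injected into this hash too, but the
          # explicit value below is never replaced.
          kafka('bootstrap.servers': 'other-cluster:9092', 'statistics.interval.ms': 1_000)
          consumer EventsConsumer
        end
      end
    end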
data/lib/karafka/server.rb CHANGED

@@ -19,6 +19,17 @@ module Karafka
       # Jobs queue
       attr_accessor :jobs_queue
 
+      # Mode in which the Karafka server is executed. It can be:
+      #
+      # - :standalone - regular karafka consumer process
+      # - :embedded - embedded in a different process and not supervised
+      # - :supervisor - swarm supervisor process
+      # - :swarm - one of the swarm processes
+      #
+      # Sometimes it is important to know in what mode we operate, especially from a UI
+      # perspective, as not everything is possible when operating in a non-standalone mode.
+      attr_accessor :execution_mode
+
       # Method which runs app
       def run
         self.listeners = []
@@ -171,5 +182,10 @@ module Karafka
         config.internal.process
       end
     end
+
+    # Always start with standalone so there always is a value for the execution mode.
+    # This is overwritten quickly during boot, but just in case someone would reach it prior
+    # to booting, we want to have the default value.
+    self.execution_mode = :standalone
   end
 end
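Reading the new accessor is straightforward; the branch values below come directly from the documentation added in this diff:

    # :standalone is the boot-time default; the CLI, swarm and embedded entry
    # points overwrite it early during startup.
    case Karafka::Server.execution_mode
    when :standalone then puts 'regular karafka consumer process'
    when :embedded   then puts 'embedded in a different process, not supervised'
    when :supervisor then puts 'swarm supervisor process'
    when :swarm      then puts 'one of the swarm processes'
    end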
data/lib/karafka/setup/attributes_map.rb CHANGED

@@ -38,6 +38,7 @@ module Karafka
       default_topic_conf
       enable.auto.commit
       enable.auto.offset.store
+      enable.metrics.push
       enable.partition.eof
       enable.random.seed
       enable.sasl.oauthbearer.unsecure.jwt
@@ -89,6 +90,8 @@ module Karafka
       reconnect.backoff.max.ms
       reconnect.backoff.ms
       resolve_cb
+      retry.backoff.max.ms
+      retry.backoff.ms
       sasl.kerberos.keytab
       sasl.kerberos.kinit.cmd
       sasl.kerberos.min.time.before.relogin
@@ -182,6 +185,7 @@ module Karafka
       dr_msg_cb
       enable.gapless.guarantee
       enable.idempotence
+      enable.metrics.push
       enable.random.seed
       enable.sasl.oauthbearer.unsecure.jwt
       enable.ssl.certificate.verification
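The newly whitelisted keys can now be passed through the regular `kafka` config; `enable.metrics.push` is, to our knowledge, librdkafka's KIP-714 client-telemetry toggle (treat that attribution as an assumption). A sketch:

    # Opting out of telemetry push and tuning retry backoff, now that the
    # attribute map allows these keys through to librdkafka.
    class KarafkaApp < Karafka::App
      setup do |config|
        config.kafka = {
          'bootstrap.servers': 'localhost:9092',
          'enable.metrics.push': false,  # consumer + producer scope per this diff
          'retry.backoff.ms': 100,       # consumer scope per this diff
          'retry.backoff.max.ms': 1_000  # consumer scope per this diff
        }
      end
    end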
data/lib/karafka/setup/config.rb CHANGED

@@ -14,34 +14,6 @@ module Karafka
     class Config
       extend ::Karafka::Core::Configurable
 
-      # Defaults for kafka settings, that will be overwritten only if not present already
-      KAFKA_DEFAULTS = {
-        # We emit the statistics by default, so all the instrumentation and web-ui work out of
-        # the box, without requiring users to take any extra actions aside from enabling.
-        'statistics.interval.ms': 5_000,
-        'client.software.name': 'karafka',
-        'client.software.version': [
-          "v#{Karafka::VERSION}",
-          "rdkafka-ruby-v#{Rdkafka::VERSION}",
-          "librdkafka-v#{Rdkafka::LIBRDKAFKA_VERSION}"
-        ].join('-')
-      }.freeze
-
-      # Contains settings that should not be used in production but make life easier in dev
-      KAFKA_DEV_DEFAULTS = {
-        # Will create non-existing topics automatically.
-        # Note that the broker needs to be configured with `auto.create.topics.enable=true`
-        # While it is not recommended in prod, it simplifies work in dev
-        'allow.auto.create.topics': 'true',
-        # We refresh the cluster state often as newly created topics in dev may not be detected
-        # fast enough. Fast enough means within reasonable time to provide decent user experience
-        # While it's only a one time thing for new topics, it can still be irritating to have to
-        # restart the process.
-        'topic.metadata.refresh.interval.ms': 5_000
-      }.freeze
-
-      private_constant :KAFKA_DEFAULTS, :KAFKA_DEV_DEFAULTS
-
       # Available settings
 
       # Namespace for Pro version related license management. If you use LGPL, no need to worry
@@ -350,7 +322,6 @@ module Karafka
         Pro::Loader.pre_setup_all(config) if Karafka.pro?
 
         configure(&block)
-        merge_kafka_defaults!(config)
 
         Contracts::Config.new.validate!(config.to_h)
 
@@ -374,26 +345,6 @@ module Karafka
 
       private
 
-      # Propagates the kafka setting defaults unless they are already present
-      # This makes it easier to set some values that users usually don't change but still allows
-      # them to overwrite the whole hash if they want to
-      # @param config [Karafka::Core::Configurable::Node] config of this producer
-      def merge_kafka_defaults!(config)
-        KAFKA_DEFAULTS.each do |key, value|
-          next if config.kafka.key?(key)
-
-          config.kafka[key] = value
-        end
-
-        return if Karafka::App.env.production?
-
-        KAFKA_DEV_DEFAULTS.each do |key, value|
-          next if config.kafka.key?(key)
-
-          config.kafka[key] = value
-        end
-      end
-
       # Sets up all the components that are based on the user configuration
       # @note At the moment it is only WaterDrop
       def configure_components
data/lib/karafka/setup/defaults_injector.rb ADDED

@@ -0,0 +1,64 @@
+# frozen_string_literal: true
+
+module Karafka
+  module Setup
+    # Injector that enriches each Kafka cluster with needed defaults. User may use more than
+    # one cluster and define them on a per-topic basis. We use this when we build the final
+    # config per subscription group.
+    module DefaultsInjector
+      # Defaults for kafka settings, that will be overwritten only if not present already
+      CONSUMER_KAFKA_DEFAULTS = {
+        # We emit the statistics by default, so all the instrumentation and web-ui work out of
+        # the box, without requiring users to take any extra actions aside from enabling.
+        'statistics.interval.ms': 5_000,
+        'client.software.name': 'karafka',
+        # Same as the librdkafka default; we inject it nonetheless to have it always available,
+        # as some features may use this value for computation and it is better to ensure we
+        # always have it
+        'max.poll.interval.ms': 300_000,
+        'client.software.version': [
+          "v#{Karafka::VERSION}",
+          "rdkafka-ruby-v#{Rdkafka::VERSION}",
+          "librdkafka-v#{Rdkafka::LIBRDKAFKA_VERSION}"
+        ].join('-')
+      }.freeze
+
+      # Contains settings that should not be used in production but make life easier in dev
+      CONSUMER_KAFKA_DEV_DEFAULTS = {
+        # Will create non-existing topics automatically.
+        # Note that the broker needs to be configured with `auto.create.topics.enable=true`
+        # While it is not recommended in prod, it simplifies work in dev
+        'allow.auto.create.topics': 'true',
+        # We refresh the cluster state often as newly created topics in dev may not be
+        # detected fast enough. Fast enough means within reasonable time to provide a decent
+        # user experience. While it's only a one time thing for new topics, it can still be
+        # irritating to have to restart the process.
+        'topic.metadata.refresh.interval.ms': 5_000
+      }.freeze
+
+      private_constant :CONSUMER_KAFKA_DEFAULTS, :CONSUMER_KAFKA_DEV_DEFAULTS
+
+      class << self
+        # Propagates the kafka setting defaults unless they are already present in the
+        # consumer config. This makes it easier to set some values that users usually don't
+        # change but still allows them to overwrite the whole hash if they want to.
+        # @param kafka_config [Hash] kafka scoped config
+        def consumer(kafka_config)
+          CONSUMER_KAFKA_DEFAULTS.each do |key, value|
+            next if kafka_config.key?(key)
+
+            kafka_config[key] = value
+          end
+
+          return if Karafka::App.env.production?
+
+          CONSUMER_KAFKA_DEV_DEFAULTS.each do |key, value|
+            next if kafka_config.key?(key)
+
+            kafka_config[key] = value
+          end
+        end
+      end
+    end
+  end
+end
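Since `DefaultsInjector.consumer` mutates the given hash in place and skips keys that are already present, its behavior can be checked directly:

    cfg = { 'bootstrap.servers': 'localhost:9092', 'statistics.interval.ms': 1_000 }

    Karafka::Setup::DefaultsInjector.consumer(cfg)

    cfg[:'statistics.interval.ms'] # => 1000 (user value preserved)
    cfg[:'max.poll.interval.ms']   # => 300000 (librdkafka default injected)
    cfg[:'client.software.name']   # => "karafka"
    # In non-production environments the dev defaults land too:
    cfg[:'allow.auto.create.topics'] # => "true"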
data/lib/karafka/swarm/node.rb CHANGED

data/lib/karafka/version.rb CHANGED
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: karafka
 version: !ruby/object:Gem::Version
-  version: 2.4.5
+  version: 2.4.7
 platform: ruby
 authors:
 - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
   AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
   msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
   -----END CERTIFICATE-----
-date: 2024-
+date: 2024-08-01 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: base64
@@ -505,6 +505,7 @@ files:
 - lib/karafka/server.rb
 - lib/karafka/setup/attributes_map.rb
 - lib/karafka/setup/config.rb
+- lib/karafka/setup/defaults_injector.rb
 - lib/karafka/setup/dsl.rb
 - lib/karafka/status.rb
 - lib/karafka/swarm.rb
@@ -524,7 +525,7 @@ files:
 - renovate.json
 homepage: https://karafka.io
 licenses:
-- LGPL-3.0
+- LGPL-3.0-only
 - Commercial
 metadata:
   funding_uri: https://karafka.io/#become-pro
metadata.gz.sig CHANGED
Binary file