karafka 2.0.11 → 2.0.13

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 00fdec9b351cb2da897d141a0dbb97816d7b0e53d7548b86d1ff48c5a8d5af3a
- data.tar.gz: 514964e409f4904932ee9e7cc1aa27094820d2cbe875c75ad49c82ce41b8afbf
+ metadata.gz: 0a38f7b176a40f09dc07c58b8abc8b13863f66fc2f3bff4d61fcd889c0bbb213
+ data.tar.gz: 9962f21cb52e566843e4b6994da8ac182752523a937cffa18bd735eefadc409f
  SHA512:
- metadata.gz: ba9281c9bfdb3a7a2a4c4121c26d1debb4bd39af07a190172a15f34705ee65bd9a1c8ce2e3773ccb3fe749dc83025bf96e0dde31664f1c6541e5923cad87b7a5
- data.tar.gz: 95f150e80a77f68c16f0ecce849403a7afdcaf50f67bdaa1471074173f8b27b5a6b66d8a5cda8c3dc39106d8153ba843a35c9123883dc803b7bb05ec2c1f47c3
+ metadata.gz: 5b722f8b0baced05f35e907dadb523c129fe12f64066f5efe9bd81fdc0634ccfb824d463baa8f34b7918410c3f6e8a451ceaf5645bb08a8adeaaccff99ec29f1
+ data.tar.gz: d2649dda560f9c171f804da107df33c1a7fdf81c21bb92d33b7b967aee950a042868c566b63fb32cd495d870e4164246f8ee15aca7c169c66fb82f0994b321ff
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
  # Karafka framework changelog
 
+ ## 2.0.13 (2022-10-14)
+ - Early exit upon attempts to commit the current or an earlier offset twice.
+ - Add more integration specs covering edge cases.
+ - Strip non-producer-related config when the default producer is initialized (#776)
+
+ ## 2.0.12 (2022-10-06)
+ - Commit stored offsets upon the rebalance revocation event to reduce the number of messages that are re-processed.
+ - Support the cooperative-sticky rebalance strategy.
+ - Replace the offset commit after each batch with a per-rebalance commit.
+ - Use instrumentation to publish internal rebalance errors.
+
  ## 2.0.11 (2022-09-29)
  - Report early on errors related to the network and to the max poll interval being exceeded, to indicate critical problems that will be retried but may signal underlying issues in the system.
  - Fix support of Ruby 2.7.0 to 2.7.2 (#1045)
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
  PATH
  remote: .
  specs:
- karafka (2.0.11)
+ karafka (2.0.13)
  karafka-core (>= 2.0.2, < 3.0.0)
  rdkafka (>= 0.12)
  thor (>= 0.20)
@@ -30,7 +30,7 @@ GEM
  activesupport (>= 5.0)
  i18n (1.12.0)
  concurrent-ruby (~> 1.0)
- karafka-core (2.0.2)
+ karafka-core (2.0.3)
  concurrent-ruby (>= 1.1)
  mini_portile2 (2.8.0)
  minitest (5.16.3)
@@ -65,7 +65,7 @@ GEM
  karafka-core (>= 2.0.2, < 3.0.0)
  rdkafka (>= 0.10)
  zeitwerk (~> 2.3)
- zeitwerk (2.6.0)
+ zeitwerk (2.6.1)
 
  PLATFORMS
  x86_64-linux
data/lib/karafka/base_consumer.rb CHANGED
@@ -149,6 +149,9 @@ module Karafka
  # processed but rather at the next one. This applies to both sync and async versions of this
  # method.
  def mark_as_consumed(message)
+ # Ignore offsets earlier than the one we already committed
+ return true if coordinator.seek_offset > message.offset
+
  unless client.mark_as_consumed(message)
  coordinator.revoke
 
@@ -166,6 +169,9 @@ module Karafka
  # @return [Boolean] true if we were able to mark the offset, false otherwise. False indicates
  # that we were not able and that we have lost the partition.
  def mark_as_consumed!(message)
+ # Ignore offsets earlier than the one we already committed
+ return true if coordinator.seek_offset > message.offset
+
  unless client.mark_as_consumed!(message)
  coordinator.revoke
 
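The guard added in both methods makes repeated marking of the same or an earlier message a cheap no-op. A minimal consumer sketch illustrating the effect (the topic and the processing logic are hypothetical):

```ruby
class EventsConsumer < Karafka::BaseConsumer
  def consume
    messages.each do |message|
      # ... process the message ...

      mark_as_consumed(message)

      # Marking the same (or an earlier) message again now returns true
      # immediately, without issuing a second commit to the client
      mark_as_consumed(message)
    end
  end
end
```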
data/lib/karafka/connection/listener.rb CHANGED
@@ -118,10 +118,6 @@ module Karafka
  build_and_schedule_consumption_jobs
 
  wait
-
- # We don't use the `#commit_offsets!` here for performance reasons. This can be achieved
- # if needed by using manual offset management.
- @client.commit_offsets
  end
 
  # If we are stopping we will no longer schedule any jobs despite polling.
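The per-batch commit above is replaced by the per-rebalance commit performed in the bindings patch further down. As the removed comment notes, explicit commit control remains available via manual offset management; a sketch of enabling it in routing (topic and consumer names are placeholders):

```ruby
class KarafkaApp < Karafka::App
  routes.draw do
    topic :events do
      consumer EventsConsumer
      # With manual offset management, Karafka stops marking messages
      # automatically and the consumer decides when offsets are stored
      manual_offset_management true
    end
  end
end
```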
data/lib/karafka/connection/rebalance_manager.rb CHANGED
@@ -14,6 +14,9 @@ module Karafka
  # that are lost, are those that got revoked but did not get re-assigned back. We do not
  # expose this concept outside and we normalize to have them revoked, as it is irrelevant
  # from the rest of the code perspective as only those that are lost are truly revoked.
+ #
+ # @note For cooperative-sticky `#assigned_partitions` holds only the recently assigned
+ #   partitions, not all the partitions that are owned
  class RebalanceManager
  # Empty array for internal usage not to create new objects
  EMPTY_ARRAY = [].freeze
data/lib/karafka/patches/rdkafka/bindings.rb ADDED
@@ -0,0 +1,132 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Patches
+     module Rdkafka
+       # Binding patches that slightly change how rdkafka operates in certain places
+       module Bindings
+         include ::Rdkafka::Bindings
+
+         # Alias internally
+         RB = ::Rdkafka::Bindings
+
+         class << self
+           # Handle assignments on cooperative rebalance
+           #
+           # @param client_ptr [FFI::Pointer]
+           # @param code [Integer]
+           # @param partitions_ptr [FFI::Pointer]
+           def on_cooperative_rebalance(client_ptr, code, partitions_ptr)
+             case code
+             when RB::RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
+               RB.rd_kafka_incremental_assign(client_ptr, partitions_ptr)
+             when RB::RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
+               RB.rd_kafka_commit(client_ptr, nil, false)
+               RB.rd_kafka_incremental_unassign(client_ptr, partitions_ptr)
+             else
+               RB.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+             end
+           end
+
+           # Handle assignments on an eager rebalance
+           #
+           # @param client_ptr [FFI::Pointer]
+           # @param code [Integer]
+           # @param partitions_ptr [FFI::Pointer]
+           def on_eager_rebalance(client_ptr, code, partitions_ptr)
+             case code
+             when RB::RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
+               RB.rd_kafka_assign(client_ptr, partitions_ptr)
+             when RB::RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
+               RB.rd_kafka_commit(client_ptr, nil, false)
+               RB.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+             else
+               RB.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+             end
+           end
+
+           # Trigger Karafka callbacks
+           #
+           # @param code [Integer]
+           # @param opaque [Rdkafka::Opaque]
+           # @param consumer [Rdkafka::Consumer]
+           # @param tpl [Rdkafka::Consumer::TopicPartitionList]
+           def trigger_callbacks(code, opaque, consumer, tpl)
+             case code
+             when RB::RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
+               opaque.call_on_partitions_assigned(consumer, tpl)
+             when RB::RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
+               opaque.call_on_partitions_revoked(consumer, tpl)
+             end
+           rescue StandardError => e
+             Karafka.monitor.instrument(
+               'error.occurred',
+               caller: self,
+               error: e,
+               type: 'connection.client.rebalance_callback.error'
+             )
+           end
+         end
+
+         # This patch changes a few things:
+         # - it commits offsets (if any) upon partition revocation, so fewer jobs need to be
+         #   reprocessed if they are assigned to a different process
+         # - reports callback errors into the errors instrumentation instead of the logger
+         # - catches only StandardError instead of Exception as we fully control the directly
+         #   executed callbacks
+         #
+         # @see https://docs.confluent.io/2.0.0/clients/librdkafka/classRdKafka_1_1RebalanceCb.html
+         RebalanceCallback = FFI::Function.new(
+           :void, %i[pointer int pointer pointer]
+         ) do |client_ptr, code, partitions_ptr, opaque_ptr|
+           # Patch reference
+           pr = ::Karafka::Patches::Rdkafka::Bindings
+
+           if RB.rd_kafka_rebalance_protocol(client_ptr) == 'COOPERATIVE'
+             pr.on_cooperative_rebalance(client_ptr, code, partitions_ptr)
+           else
+             pr.on_eager_rebalance(client_ptr, code, partitions_ptr)
+           end
+
+           opaque = ::Rdkafka::Config.opaques[opaque_ptr.to_i]
+           return unless opaque
+
+           tpl = ::Rdkafka::Consumer::TopicPartitionList.from_native_tpl(partitions_ptr).freeze
+           consumer = ::Rdkafka::Consumer.new(client_ptr)
+
+           pr.trigger_callbacks(code, opaque, consumer, tpl)
+         end
+       end
+     end
+   end
+ end
+
+ # We need to replace the original callback with ours.
+ # At the moment there is no API in rdkafka-ruby to do so
+ ::Rdkafka::Bindings.send(
+   :remove_const,
+   'RebalanceCallback'
+ )
+
+ ::Rdkafka::Bindings.const_set(
+   'RebalanceCallback',
+   Karafka::Patches::Rdkafka::Bindings::RebalanceCallback
+ )
+
+ ::Rdkafka::Bindings.attach_function(
+   :rd_kafka_rebalance_protocol,
+   %i[pointer],
+   :string
+ )
+
+ ::Rdkafka::Bindings.attach_function(
+   :rd_kafka_incremental_assign,
+   %i[pointer pointer],
+   :string
+ )
+
+ ::Rdkafka::Bindings.attach_function(
+   :rd_kafka_incremental_unassign,
+   %i[pointer pointer],
+   :string
+ )
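With this patch in place, the cooperative-sticky strategy announced in 2.0.12 can be enabled through the regular `kafka` settings scope. A minimal setup sketch (broker address and client id are placeholders):

```ruby
class KarafkaApp < Karafka::App
  setup do |config|
    config.client_id = 'my_app'
    config.kafka = {
      'bootstrap.servers': 'localhost:9092',
      # librdkafka then reports the rebalance protocol as COOPERATIVE,
      # routing rebalances through on_cooperative_rebalance above
      'partition.assignment.strategy': 'cooperative-sticky'
    }
  end
end
```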
data/lib/karafka/routing/subscription_group.rb CHANGED
@@ -37,7 +37,7 @@ module Karafka
  # topics but they lack the group.id (unless explicitly provided). To make it compatible
  # with our routing engine, we inject it before it goes to the consumer
  def kafka
- kafka = @topics.first.kafka.dup
+ kafka = Setup::AttributesMap.consumer(@topics.first.kafka.dup)
 
  kafka[:'client.id'] ||= Karafka::App.config.client_id
  kafka[:'group.id'] ||= @topics.first.consumer_group.id
data/lib/karafka/setup/attributes_map.rb ADDED
@@ -0,0 +1,337 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Setup
+     # To simplify the overall design, in Karafka we define all the rdkafka settings in one scope
+     # under `kafka`. rdkafka though does not like when producer options are passed to the
+     # consumer configuration and issues warnings. This target map is used as a filtering layer,
+     # so that the producer and the consumer each receive only the settings applicable to them
+     #
+     # It is built based on https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+     module AttributesMap
+       # List of rdkafka consumer accepted attributes
+       CONSUMER = %i[
+         allow.auto.create.topics
+         api.version.fallback.ms
+         api.version.request
+         api.version.request.timeout.ms
+         auto.commit.enable
+         auto.commit.interval.ms
+         auto.offset.reset
+         background_event_cb
+         bootstrap.servers
+         broker.address.family
+         broker.address.ttl
+         broker.version.fallback
+         builtin.features
+         check.crcs
+         client.id
+         client.rack
+         closesocket_cb
+         connect_cb
+         connections.max.idle.ms
+         consume.callback.max.messages
+         consume_cb
+         coordinator.query.interval.ms
+         debug
+         default_topic_conf
+         enable.auto.commit
+         enable.auto.offset.store
+         enable.partition.eof
+         enable.random.seed
+         enable.sasl.oauthbearer.unsecure.jwt
+         enable.ssl.certificate.verification
+         enabled_events
+         error_cb
+         fetch.error.backoff.ms
+         fetch.max.bytes
+         fetch.message.max.bytes
+         fetch.min.bytes
+         fetch.wait.max.ms
+         group.id
+         group.instance.id
+         group.protocol.type
+         heartbeat.interval.ms
+         interceptors
+         internal.termination.signal
+         isolation.level
+         log.connection.close
+         log.queue
+         log.thread.name
+         log_cb
+         log_level
+         max.in.flight
+         max.in.flight.requests.per.connection
+         max.partition.fetch.bytes
+         max.poll.interval.ms
+         message.copy.max.bytes
+         message.max.bytes
+         metadata.broker.list
+         metadata.max.age.ms
+         oauthbearer_token_refresh_cb
+         offset.store.method
+         offset.store.path
+         offset.store.sync.interval.ms
+         offset_commit_cb
+         opaque
+         open_cb
+         partition.assignment.strategy
+         plugin.library.paths
+         queued.max.messages.kbytes
+         queued.min.messages
+         rebalance_cb
+         receive.message.max.bytes
+         reconnect.backoff.jitter.ms
+         reconnect.backoff.max.ms
+         reconnect.backoff.ms
+         sasl.kerberos.keytab
+         sasl.kerberos.kinit.cmd
+         sasl.kerberos.min.time.before.relogin
+         sasl.kerberos.principal
+         sasl.kerberos.service.name
+         sasl.mechanism
+         sasl.mechanisms
+         sasl.oauthbearer.client.id
+         sasl.oauthbearer.client.secret
+         sasl.oauthbearer.config
+         sasl.oauthbearer.extensions
+         sasl.oauthbearer.method
+         sasl.oauthbearer.scope
+         sasl.oauthbearer.token.endpoint.url
+         sasl.password
+         sasl.username
+         security.protocol
+         session.timeout.ms
+         socket.blocking.max.ms
+         socket.connection.setup.timeout.ms
+         socket.keepalive.enable
+         socket.max.fails
+         socket.nagle.disable
+         socket.receive.buffer.bytes
+         socket.send.buffer.bytes
+         socket.timeout.ms
+         socket_cb
+         ssl.ca.certificate.stores
+         ssl.ca.location
+         ssl.ca.pem
+         ssl.certificate.location
+         ssl.certificate.pem
+         ssl.certificate.verify_cb
+         ssl.cipher.suites
+         ssl.crl.location
+         ssl.curves.list
+         ssl.endpoint.identification.algorithm
+         ssl.engine.id
+         ssl.engine.location
+         ssl.key.location
+         ssl.key.password
+         ssl.key.pem
+         ssl.keystore.location
+         ssl.keystore.password
+         ssl.sigalgs.list
+         ssl_ca
+         ssl_certificate
+         ssl_engine_callback_data
+         ssl_key
+         statistics.interval.ms
+         stats_cb
+         throttle_cb
+         topic.blacklist
+         topic.metadata.propagation.max.ms
+         topic.metadata.refresh.fast.cnt
+         topic.metadata.refresh.fast.interval.ms
+         topic.metadata.refresh.interval.ms
+         topic.metadata.refresh.sparse
+       ].freeze
+
+       # List of rdkafka producer accepted attributes
+       PRODUCER = %i[
+         acks
+         api.version.fallback.ms
+         api.version.request
+         api.version.request.timeout.ms
+         background_event_cb
+         batch.num.messages
+         batch.size
+         bootstrap.servers
+         broker.address.family
+         broker.address.ttl
+         broker.version.fallback
+         builtin.features
+         client.id
+         client.rack
+         closesocket_cb
+         compression.codec
+         compression.level
+         compression.type
+         connect_cb
+         connections.max.idle.ms
+         debug
+         default_topic_conf
+         delivery.report.only.error
+         delivery.timeout.ms
+         dr_cb
+         dr_msg_cb
+         enable.gapless.guarantee
+         enable.idempotence
+         enable.random.seed
+         enable.sasl.oauthbearer.unsecure.jwt
+         enable.ssl.certificate.verification
+         enabled_events
+         error_cb
+         interceptors
+         internal.termination.signal
+         linger.ms
+         log.connection.close
+         log.queue
+         log.thread.name
+         log_cb
+         log_level
+         max.in.flight
+         max.in.flight.requests.per.connection
+         message.copy.max.bytes
+         message.max.bytes
+         message.send.max.retries
+         message.timeout.ms
+         metadata.broker.list
+         metadata.max.age.ms
+         msg_order_cmp
+         oauthbearer_token_refresh_cb
+         opaque
+         open_cb
+         partitioner
+         partitioner_cb
+         plugin.library.paths
+         produce.offset.report
+         queue.buffering.backpressure.threshold
+         queue.buffering.max.kbytes
+         queue.buffering.max.messages
+         queue.buffering.max.ms
+         queuing.strategy
+         receive.message.max.bytes
+         reconnect.backoff.jitter.ms
+         reconnect.backoff.max.ms
+         reconnect.backoff.ms
+         request.required.acks
+         request.timeout.ms
+         retries
+         retry.backoff.ms
+         sasl.kerberos.keytab
+         sasl.kerberos.kinit.cmd
+         sasl.kerberos.min.time.before.relogin
+         sasl.kerberos.principal
+         sasl.kerberos.service.name
+         sasl.mechanism
+         sasl.mechanisms
+         sasl.oauthbearer.client.id
+         sasl.oauthbearer.client.secret
+         sasl.oauthbearer.config
+         sasl.oauthbearer.extensions
+         sasl.oauthbearer.method
+         sasl.oauthbearer.scope
+         sasl.oauthbearer.token.endpoint.url
+         sasl.password
+         sasl.username
+         security.protocol
+         socket.blocking.max.ms
+         socket.connection.setup.timeout.ms
+         socket.keepalive.enable
+         socket.max.fails
+         socket.nagle.disable
+         socket.receive.buffer.bytes
+         socket.send.buffer.bytes
+         socket.timeout.ms
+         socket_cb
+         ssl.ca.certificate.stores
+         ssl.ca.location
+         ssl.ca.pem
+         ssl.certificate.location
+         ssl.certificate.pem
+         ssl.certificate.verify_cb
+         ssl.cipher.suites
+         ssl.crl.location
+         ssl.curves.list
+         ssl.endpoint.identification.algorithm
+         ssl.engine.id
+         ssl.engine.location
+         ssl.key.location
+         ssl.key.password
+         ssl.key.pem
+         ssl.keystore.location
+         ssl.keystore.password
+         ssl.sigalgs.list
+         ssl_ca
+         ssl_certificate
+         ssl_engine_callback_data
+         ssl_key
+         statistics.interval.ms
+         stats_cb
+         sticky.partitioning.linger.ms
+         throttle_cb
+         topic.blacklist
+         topic.metadata.propagation.max.ms
+         topic.metadata.refresh.fast.cnt
+         topic.metadata.refresh.fast.interval.ms
+         topic.metadata.refresh.interval.ms
+         topic.metadata.refresh.sparse
+         transaction.timeout.ms
+         transactional.id
+       ].freeze
+
+       # Location of the file with rdkafka settings list
+       SOURCE = 'https://raw.githubusercontent.com/edenhill/librdkafka/master/CONFIGURATION.md'
+
+       private_constant :SOURCE
+
+       class << self
+         # Filter the provided settings leaving only the ones applicable to the consumer
+         # @param kafka_settings [Hash] all kafka settings
+         # @return [Hash] settings applicable to the consumer
+         def consumer(kafka_settings)
+           kafka_settings.slice(*CONSUMER)
+         end
+
+         # Filter the provided settings leaving only the ones applicable to the producer
+         # @param kafka_settings [Hash] all kafka settings
+         # @return [Hash] settings applicable to the producer
+         def producer(kafka_settings)
+           kafka_settings.slice(*PRODUCER)
+         end
+
+         # @private
+         # @return [Hash<Symbol, Array<Symbol>>] hash with consumer and producer attributes list
+         #   that is sorted.
+         # @note This method should not be used directly. It is only used to generate the
+         #   appropriate options list in case it changes
+         def generate
+           # Not used anywhere else, hence required here
+           require 'open-uri'
+
+           attributes = { consumer: Set.new, producer: Set.new }
+
+           ::URI.parse(SOURCE).open.readlines.each do |line|
+             next unless line.include?('|')
+
+             attribute, attribute_type = line.split('|').map(&:strip)
+
+             case attribute_type
+             when 'C'
+               attributes[:consumer] << attribute
+             when 'P'
+               attributes[:producer] << attribute
+             when '*'
+               attributes[:consumer] << attribute
+               attributes[:producer] << attribute
+             else
+               next
+             end
+           end
+
+           attributes.transform_values!(&:sort)
+           attributes.each_value { |vals| vals.map!(&:to_sym) }
+           attributes
+         end
+       end
+     end
+   end
+ end
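A quick sketch of what this filtering layer does in practice (the mixed settings hash is hypothetical):

```ruby
kafka = {
  'bootstrap.servers': 'localhost:9092', # accepted by both
  'auto.offset.reset': 'earliest',       # consumer-only
  'queue.buffering.max.ms': 5            # producer-only
}

Karafka::Setup::AttributesMap.consumer(kafka)
# => { 'bootstrap.servers': 'localhost:9092', 'auto.offset.reset': 'earliest' }

Karafka::Setup::AttributesMap.producer(kafka)
# => { 'bootstrap.servers': 'localhost:9092', 'queue.buffering.max.ms': 5 }
```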
data/lib/karafka/setup/config.rb CHANGED
@@ -20,7 +20,7 @@ module Karafka
  }.freeze
 
  # Contains settings that should not be used in production but make life easier in dev
- DEV_DEFAULTS = {
+ KAFKA_DEV_DEFAULTS = {
  # Will create non-existing topics automatically.
  # Note that the broker needs to be configured with `auto.create.topics.enable=true`
  # While it is not recommended in prod, it simplifies work in dev
@@ -32,7 +32,7 @@ module Karafka
  'topic.metadata.refresh.interval.ms': 5_000
  }.freeze
 
- private_constant :KAFKA_DEFAULTS, :DEV_DEFAULTS
+ private_constant :KAFKA_DEFAULTS, :KAFKA_DEV_DEFAULTS
 
  # Available settings
 
@@ -172,7 +172,7 @@ module Karafka
 
  return if Karafka::App.env.production?
 
- DEV_DEFAULTS.each do |key, value|
+ KAFKA_DEV_DEFAULTS.each do |key, value|
  next if config.kafka.key?(key)
 
  config.kafka[key] = value
@@ -185,7 +185,7 @@ module Karafka
  config.producer ||= ::WaterDrop::Producer.new do |producer_config|
  # In some cases WaterDrop updates the config and we don't want our consumer config to
  # be polluted by those updates, that's why we copy
- producer_config.kafka = config.kafka.dup
+ producer_config.kafka = AttributesMap.producer(config.kafka.dup)
  producer_config.logger = config.logger
  end
 
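This is the code behind changelog entry #776: the default producer now receives only producer-applicable keys, so consumer-only settings no longer leak into WaterDrop and trigger rdkafka warnings. A hedged sketch of observing the effect, assuming WaterDrop exposes its kafka settings via `producer.config.kafka`:

```ruby
Karafka::App.setup do |config|
  config.kafka = {
    'bootstrap.servers': 'localhost:9092',
    'auto.offset.reset': 'earliest' # consumer-only setting
  }
end

# The consumer-only key is stripped from the default producer config
Karafka::App.config.producer.config.kafka
# => { 'bootstrap.servers': 'localhost:9092' }
```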
data/lib/karafka/version.rb CHANGED
@@ -3,5 +3,5 @@
  # Main module namespace
  module Karafka
  # Current Karafka version
- VERSION = '2.0.11'
+ VERSION = '2.0.13'
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: karafka
  version: !ruby/object:Gem::Version
- version: 2.0.11
+ version: 2.0.13
  platform: ruby
  authors:
  - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
  Qf04B9ceLUaC4fPVEz10FyobjaFoY4i32xRto3XnrzeAgfEe4swLq8bQsR3w/EF3
  MGU0FeSV2Yj7Xc2x/7BzLK8xQn5l7Yy75iPF+KP3vVmDHnNl
  -----END CERTIFICATE-----
- date: 2022-09-29 00:00:00.000000000 Z
+ date: 2022-10-14 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: karafka-core
@@ -212,6 +212,7 @@ files:
  - lib/karafka/messages/messages.rb
  - lib/karafka/messages/metadata.rb
  - lib/karafka/messages/seek.rb
+ - lib/karafka/patches/rdkafka/bindings.rb
  - lib/karafka/patches/rdkafka/consumer.rb
  - lib/karafka/pro.rb
  - lib/karafka/pro/active_job/consumer.rb
@@ -259,6 +260,7 @@ files:
  - lib/karafka/runner.rb
  - lib/karafka/serialization/json/deserializer.rb
  - lib/karafka/server.rb
+ - lib/karafka/setup/attributes_map.rb
  - lib/karafka/setup/config.rb
  - lib/karafka/setup/dsl.rb
  - lib/karafka/status.rb
metadata.gz.sig CHANGED
Binary file