karafka 2.0.12 → 2.0.14

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 2c407cd113e41314102fc910cf0b35e8081c55bdadae66635afe59491b58e390
- data.tar.gz: 336dad28cc65218e33a2b3bc42b69a166ff4e8e6a10c65a435e57f75eca5ac90
+ metadata.gz: 5d21c47bcabdc50e520f7eb81f17fd31f00916a902aba6e596a2a70e1586a927
+ data.tar.gz: eff93034c3d6275d067c5b29b1b7b8d7ad2fb8cca73b08bd64398917338077af
  SHA512:
- metadata.gz: 82ffee28acdaa1d126944426a7b31628380fc04aca24002ea55067f6c80a0b72a885d246a656913ea35e79fa82a9d741c35125e31b75abbbf7fb0022360e3b2b
- data.tar.gz: b64f9dc4fb2461f3cbe3a774231a661365af5b1be88a6c95b1543521d19d8171514eb5111f88ceb53e309b5ab433f982b9535c2bbb4919e4c07850570424605a
+ metadata.gz: adc869f03b9f3774f9c5f4351980be5f12f36606242da7062f8abe2886d85fb8576fa10c8f2615f4e0e30a3329abd27b943ae4bbcfe842607e32d26b988ef58a
+ data.tar.gz: a7412443e62cb84dcba452aedd230769860c1a8ecb84ede8c34d20b47995117fae22cb60598190436347f50f38811b0a05d55e4219b4b41c3dc382a0f5117511
checksums.yaml.gz.sig CHANGED
Binary file
@@ -18,7 +18,7 @@ jobs:
  strategy:
  fail-fast: false
  steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
  with:
  fetch-depth: 0
 
@@ -39,7 +39,7 @@ jobs:
  strategy:
  fail-fast: false
  steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
  with:
  fetch-depth: 0
  - name: Run Coditsu
@@ -64,7 +64,7 @@ jobs:
  - ruby: '3.1'
  coverage: 'true'
  steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
  - name: Install package dependencies
  run: "[ -e $APT_DEPS ] || sudo apt-get install -y --no-install-recommends $APT_DEPS"
 
@@ -97,7 +97,7 @@ jobs:
  - ruby: '3.1'
  coverage: 'true'
  steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
  - name: Install package dependencies
  run: "[ -e $APT_DEPS ] || sudo apt-get install -y --no-install-recommends $APT_DEPS"
 
data/.rspec CHANGED
@@ -1 +1,3 @@
  --require spec_helper
+ # Integration specs run with their own framework, not via RSpec
+ --exclude-pattern "spec/integrations/**/*_spec.rb"
data/CHANGELOG.md CHANGED
@@ -1,5 +1,15 @@
  # Karafka framework changelog
 
+ ## 2.0.14 (2022-10-16)
+ - Prevent consecutive stop signals from starting multiple supervision shutdowns.
+ - Provide `Karafka::Embedded` to simplify the start/stop process when running Karafka from within another process (Puma, Sidekiq, etc.).
+ - Fix a race condition where un-pausing a long-running job exactly upon listener resume would crash the listener loop (#1072).
+
+ ## 2.0.13 (2022-10-14)
+ - Early exit upon attempts to commit the current or an earlier offset twice.
+ - Add more integration specs covering edge cases.
+ - Strip non-producer-related config when the default producer is initialized (#776).
+
  ## 2.0.12 (2022-10-06)
  - Commit stored offsets upon rebalance revocation event to reduce the number of messages that are re-processed.
  - Support cooperative-sticky rebalance strategy.
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
  PATH
  remote: .
  specs:
- karafka (2.0.12)
+ karafka (2.0.14)
  karafka-core (>= 2.0.2, < 3.0.0)
  rdkafka (>= 0.12)
  thor (>= 0.20)
@@ -30,7 +30,7 @@ GEM
  activesupport (>= 5.0)
  i18n (1.12.0)
  concurrent-ruby (~> 1.0)
- karafka-core (2.0.2)
+ karafka-core (2.0.3)
  concurrent-ruby (>= 1.1)
  mini_portile2 (2.8.0)
  minitest (5.16.3)
data/bin/integrations CHANGED
@@ -24,7 +24,7 @@ ROOT_PATH = Pathname.new(File.expand_path(File.join(File.dirname(__FILE__), '../
  # When the value is high, there's a problem with thread allocation on GitHub CI, that is why
  # we limit it. Locally we can run a lot of those, as many of them have sleeps and do not use a lot
  # of CPU
- CONCURRENCY = ENV.key?('CI') ? 3 : Etc.nprocessors * 2
+ CONCURRENCY = ENV.key?('CI') ? 4 : Etc.nprocessors * 2
 
  # How many bytes do we want to keep from the stdout in the buffer for when we need to print it
  MAX_BUFFER_OUTPUT = 51_200
@@ -39,10 +39,10 @@ class Scenario
  # This includes exactly those
  EXIT_CODES = {
  default: [0],
- 'consumption/worker_critical_error_behaviour.rb' => [0, 2].freeze,
- 'shutdown/on_hanging_jobs_and_a_shutdown.rb' => [2].freeze,
- 'shutdown/on_hanging_on_shutdown_job_and_a_shutdown.rb' => [2].freeze,
- 'shutdown/on_hanging_listener_and_shutdown.rb' => [2].freeze
+ 'consumption/worker_critical_error_behaviour_spec.rb' => [0, 2].freeze,
+ 'shutdown/on_hanging_jobs_and_a_shutdown_spec.rb' => [2].freeze,
+ 'shutdown/on_hanging_on_shutdown_job_and_a_shutdown_spec.rb' => [2].freeze,
+ 'shutdown/on_hanging_listener_and_shutdown_spec.rb' => [2].freeze
  }.freeze
 
  private_constant :MAX_RUN_TIME, :EXIT_CODES
@@ -202,7 +202,7 @@ class Scenario
  end
 
  # Load all the specs
- specs = Dir[ROOT_PATH.join('spec/integrations/**/*.rb')]
+ specs = Dir[ROOT_PATH.join('spec/integrations/**/*_spec.rb')]
 
  # If filters are provided, apply them
  # Allows to provide several filters one after another and applies all of them
@@ -210,6 +210,7 @@ ARGV.each do |filter|
  specs.delete_if { |name| !name.include?(filter) }
  end
 
+
  raise ArgumentError, "No integration specs with filters: #{ARGV.join(', ')}" if specs.empty?
 
  # Randomize order
@@ -149,6 +149,9 @@ module Karafka
  # processed but rather at the next one. This applies to both sync and async versions of this
  # method.
  def mark_as_consumed(message)
+ # Ignore offsets earlier than the one we already committed
+ return true if coordinator.seek_offset > message.offset
+
  unless client.mark_as_consumed(message)
  coordinator.revoke
 
@@ -166,6 +169,9 @@ module Karafka
  # @return [Boolean] true if we were able to mark the offset, false otherwise. False indicates
  # that we were not able and that we have lost the partition.
  def mark_as_consumed!(message)
+ # Ignore offsets earlier than the one we already committed
+ return true if coordinator.seek_offset > message.offset
+
  unless client.mark_as_consumed!(message)
  coordinator.revoke
 
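The two guards above implement the 2.0.13 changelog entry about early exit on already-committed offsets. A standalone sketch of the intended semantics; the `Fake*` classes below are illustrative stand-ins, not Karafka's real consumer or coordinator:

```ruby
# Marking an offset at or below the already-committed position is treated as a
# successful no-op instead of issuing another commit request.
FakeMessage = Struct.new(:offset)

class FakeConsumer
  attr_reader :commits

  def initialize
    @seek_offset = 0 # next offset we expect to commit, mirrors coordinator.seek_offset
    @commits = []
  end

  def mark_as_consumed(message)
    # The guard from the diff: ignore offsets earlier than the committed position
    return true if @seek_offset > message.offset

    @commits << message.offset
    @seek_offset = message.offset + 1
    true
  end
end

consumer = FakeConsumer.new
consumer.mark_as_consumed(FakeMessage.new(5)) # commits offset 5
consumer.mark_as_consumed(FakeMessage.new(3)) # no-op: 3 is below the committed position
puts consumer.commits.inspect # => [5]
```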
@@ -0,0 +1,23 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   # Allows to start and stop Karafka as part of a different process
+   module Embedded
+     class << self
+       # Starts Karafka without supervision and without ownership of signals in a background thread
+       # so it won't interrupt other things running
+       def start
+         Thread.new { Karafka::Server.start }
+       end
+
+       # Stops Karafka upon any event
+       #
+       # @note This method is blocking because we want to wait until Karafka is stopped with final
+       #   process shutdown
+       def stop
+         # Stop needs to be blocking to wait for all the things to finalize
+         Karafka::Server.stop
+       end
+     end
+   end
+ end
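A hypothetical usage sketch for the new `Karafka::Embedded` API shown above. The `on_worker_boot` / `on_worker_shutdown` hooks come from Puma's config DSL; wiring things up this way is an illustrative assumption, not an official recommendation from this release:

```ruby
# Hypothetical puma.rb fragment (illustrative only): embedding Karafka in a Puma worker
on_worker_boot do
  # Runs the Karafka server in a background thread without signal trapping, so Puma
  # keeps ownership of INT/TERM and Kernel.exit! is never forced (process not supervised)
  Karafka::Embedded.start
end

on_worker_shutdown do
  # Blocks until Karafka finishes ongoing work and closes its Kafka clients
  Karafka::Embedded.stop
end
```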
@@ -29,12 +29,19 @@ module Karafka
  # Creates an instance of process and creates empty hash for callbacks
  def initialize
  @callbacks = Hash.new { |hsh, key| hsh[key] = [] }
+ @supervised = false
  end
 
  # Method catches all HANDLED_SIGNALS and performs appropriate callbacks (if defined)
  # @note If there are no callbacks, this method will just ignore a given signal that was sent
  def supervise
  HANDLED_SIGNALS.each { |signal| trap_signal(signal) }
+ @supervised = true
+ end
+
+ # Is the current process supervised and are trap signals installed
+ def supervised?
+ @supervised
  end
 
  private
@@ -37,7 +37,7 @@ module Karafka
  # topics but they lack the group.id (unless explicitly) provided. To make it compatible
  # with our routing engine, we inject it before it will go to the consumer
  def kafka
- kafka = @topics.first.kafka.dup
+ kafka = Setup::AttributesMap.consumer(@topics.first.kafka.dup)
 
  kafka[:'client.id'] ||= Karafka::App.config.client_id
  kafka[:'group.id'] ||= @topics.first.consumer_group.id
@@ -31,6 +31,7 @@ module Karafka
  process.on_sigint { Thread.new { stop } }
  process.on_sigquit { Thread.new { stop } }
  process.on_sigterm { Thread.new { stop } }
+ process.supervise
 
  # Start is blocking until stop is called and when we stop, it will wait until
  # all of the things are ready to stop
@@ -61,7 +62,6 @@ module Karafka
  # @note We don't need to sleep because Karafka::Fetcher is locking and waiting to
  # finish loop (and it won't happen until we explicitly want to stop)
  def start
- process.supervise
  Karafka::App.run!
  Karafka::Runner.new.call
  end
@@ -73,6 +73,9 @@ module Karafka
  # lock them forever. If you need to run Karafka shutdown from within workers threads,
  # please start a separate thread to do so.
  def stop
+ # Initialize the stopping process only if Karafka was running
+ return unless Karafka::App.running?
+
  Karafka::App.stop!
 
  timeout = Karafka::App.config.shutdown_timeout
@@ -110,8 +113,12 @@ module Karafka
 
  Karafka::App.producer.close
 
+ # We also do not forcefully terminate everything when running in the embedded mode,
+ # otherwise we would overwrite the shutdown process of the process that started Karafka
+ return unless process.supervised?
+
  # exit! is not within the instrumentation as it would not trigger due to exit
- Kernel.exit! FORCEFUL_EXIT_CODE
+ Kernel.exit!(FORCEFUL_EXIT_CODE)
  ensure
  Karafka::App.stopped!
  end
@@ -0,0 +1,337 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Setup
+     # To simplify the overall design, in Karafka we define all the rdkafka settings in one scope
+     # under `kafka`. rdkafka though does not like when producer options are passed to the
+     # consumer configuration and issues warnings. This target map is used as a filtering layer, so
+     # only appropriate settings go to both producer and consumer
+     #
+     # It is built based on https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+     module AttributesMap
+       # List of rdkafka consumer accepted attributes
+       CONSUMER = %i[
+         allow.auto.create.topics
+         api.version.fallback.ms
+         api.version.request
+         api.version.request.timeout.ms
+         auto.commit.enable
+         auto.commit.interval.ms
+         auto.offset.reset
+         background_event_cb
+         bootstrap.servers
+         broker.address.family
+         broker.address.ttl
+         broker.version.fallback
+         builtin.features
+         check.crcs
+         client.id
+         client.rack
+         closesocket_cb
+         connect_cb
+         connections.max.idle.ms
+         consume.callback.max.messages
+         consume_cb
+         coordinator.query.interval.ms
+         debug
+         default_topic_conf
+         enable.auto.commit
+         enable.auto.offset.store
+         enable.partition.eof
+         enable.random.seed
+         enable.sasl.oauthbearer.unsecure.jwt
+         enable.ssl.certificate.verification
+         enabled_events
+         error_cb
+         fetch.error.backoff.ms
+         fetch.max.bytes
+         fetch.message.max.bytes
+         fetch.min.bytes
+         fetch.wait.max.ms
+         group.id
+         group.instance.id
+         group.protocol.type
+         heartbeat.interval.ms
+         interceptors
+         internal.termination.signal
+         isolation.level
+         log.connection.close
+         log.queue
+         log.thread.name
+         log_cb
+         log_level
+         max.in.flight
+         max.in.flight.requests.per.connection
+         max.partition.fetch.bytes
+         max.poll.interval.ms
+         message.copy.max.bytes
+         message.max.bytes
+         metadata.broker.list
+         metadata.max.age.ms
+         oauthbearer_token_refresh_cb
+         offset.store.method
+         offset.store.path
+         offset.store.sync.interval.ms
+         offset_commit_cb
+         opaque
+         open_cb
+         partition.assignment.strategy
+         plugin.library.paths
+         queued.max.messages.kbytes
+         queued.min.messages
+         rebalance_cb
+         receive.message.max.bytes
+         reconnect.backoff.jitter.ms
+         reconnect.backoff.max.ms
+         reconnect.backoff.ms
+         sasl.kerberos.keytab
+         sasl.kerberos.kinit.cmd
+         sasl.kerberos.min.time.before.relogin
+         sasl.kerberos.principal
+         sasl.kerberos.service.name
+         sasl.mechanism
+         sasl.mechanisms
+         sasl.oauthbearer.client.id
+         sasl.oauthbearer.client.secret
+         sasl.oauthbearer.config
+         sasl.oauthbearer.extensions
+         sasl.oauthbearer.method
+         sasl.oauthbearer.scope
+         sasl.oauthbearer.token.endpoint.url
+         sasl.password
+         sasl.username
+         security.protocol
+         session.timeout.ms
+         socket.blocking.max.ms
+         socket.connection.setup.timeout.ms
+         socket.keepalive.enable
+         socket.max.fails
+         socket.nagle.disable
+         socket.receive.buffer.bytes
+         socket.send.buffer.bytes
+         socket.timeout.ms
+         socket_cb
+         ssl.ca.certificate.stores
+         ssl.ca.location
+         ssl.ca.pem
+         ssl.certificate.location
+         ssl.certificate.pem
+         ssl.certificate.verify_cb
+         ssl.cipher.suites
+         ssl.crl.location
+         ssl.curves.list
+         ssl.endpoint.identification.algorithm
+         ssl.engine.id
+         ssl.engine.location
+         ssl.key.location
+         ssl.key.password
+         ssl.key.pem
+         ssl.keystore.location
+         ssl.keystore.password
+         ssl.sigalgs.list
+         ssl_ca
+         ssl_certificate
+         ssl_engine_callback_data
+         ssl_key
+         statistics.interval.ms
+         stats_cb
+         throttle_cb
+         topic.blacklist
+         topic.metadata.propagation.max.ms
+         topic.metadata.refresh.fast.cnt
+         topic.metadata.refresh.fast.interval.ms
+         topic.metadata.refresh.interval.ms
+         topic.metadata.refresh.sparse
+       ].freeze
+
+       # List of rdkafka producer accepted attributes
+       PRODUCER = %i[
+         acks
+         api.version.fallback.ms
+         api.version.request
+         api.version.request.timeout.ms
+         background_event_cb
+         batch.num.messages
+         batch.size
+         bootstrap.servers
+         broker.address.family
+         broker.address.ttl
+         broker.version.fallback
+         builtin.features
+         client.id
+         client.rack
+         closesocket_cb
+         compression.codec
+         compression.level
+         compression.type
+         connect_cb
+         connections.max.idle.ms
+         debug
+         default_topic_conf
+         delivery.report.only.error
+         delivery.timeout.ms
+         dr_cb
+         dr_msg_cb
+         enable.gapless.guarantee
+         enable.idempotence
+         enable.random.seed
+         enable.sasl.oauthbearer.unsecure.jwt
+         enable.ssl.certificate.verification
+         enabled_events
+         error_cb
+         interceptors
+         internal.termination.signal
+         linger.ms
+         log.connection.close
+         log.queue
+         log.thread.name
+         log_cb
+         log_level
+         max.in.flight
+         max.in.flight.requests.per.connection
+         message.copy.max.bytes
+         message.max.bytes
+         message.send.max.retries
+         message.timeout.ms
+         metadata.broker.list
+         metadata.max.age.ms
+         msg_order_cmp
+         oauthbearer_token_refresh_cb
+         opaque
+         open_cb
+         partitioner
+         partitioner_cb
+         plugin.library.paths
+         produce.offset.report
+         queue.buffering.backpressure.threshold
+         queue.buffering.max.kbytes
+         queue.buffering.max.messages
+         queue.buffering.max.ms
+         queuing.strategy
+         receive.message.max.bytes
+         reconnect.backoff.jitter.ms
+         reconnect.backoff.max.ms
+         reconnect.backoff.ms
+         request.required.acks
+         request.timeout.ms
+         retries
+         retry.backoff.ms
+         sasl.kerberos.keytab
+         sasl.kerberos.kinit.cmd
+         sasl.kerberos.min.time.before.relogin
+         sasl.kerberos.principal
+         sasl.kerberos.service.name
+         sasl.mechanism
+         sasl.mechanisms
+         sasl.oauthbearer.client.id
+         sasl.oauthbearer.client.secret
+         sasl.oauthbearer.config
+         sasl.oauthbearer.extensions
+         sasl.oauthbearer.method
+         sasl.oauthbearer.scope
+         sasl.oauthbearer.token.endpoint.url
+         sasl.password
+         sasl.username
+         security.protocol
+         socket.blocking.max.ms
+         socket.connection.setup.timeout.ms
+         socket.keepalive.enable
+         socket.max.fails
+         socket.nagle.disable
+         socket.receive.buffer.bytes
+         socket.send.buffer.bytes
+         socket.timeout.ms
+         socket_cb
+         ssl.ca.certificate.stores
+         ssl.ca.location
+         ssl.ca.pem
+         ssl.certificate.location
+         ssl.certificate.pem
+         ssl.certificate.verify_cb
+         ssl.cipher.suites
+         ssl.crl.location
+         ssl.curves.list
+         ssl.endpoint.identification.algorithm
+         ssl.engine.id
+         ssl.engine.location
+         ssl.key.location
+         ssl.key.password
+         ssl.key.pem
+         ssl.keystore.location
+         ssl.keystore.password
+         ssl.sigalgs.list
+         ssl_ca
+         ssl_certificate
+         ssl_engine_callback_data
+         ssl_key
+         statistics.interval.ms
+         stats_cb
+         sticky.partitioning.linger.ms
+         throttle_cb
+         topic.blacklist
+         topic.metadata.propagation.max.ms
+         topic.metadata.refresh.fast.cnt
+         topic.metadata.refresh.fast.interval.ms
+         topic.metadata.refresh.interval.ms
+         topic.metadata.refresh.sparse
+         transaction.timeout.ms
+         transactional.id
+       ].freeze
+
+       # Location of the file with rdkafka settings list
+       SOURCE = 'https://raw.githubusercontent.com/edenhill/librdkafka/master/CONFIGURATION.md'
+
+       private_constant :SOURCE
+
+       class << self
+         # Filter the provided settings leaving only the ones applicable to the consumer
+         # @param kafka_settings [Hash] all kafka settings
+         # @return [Hash] settings applicable to the consumer
+         def consumer(kafka_settings)
+           kafka_settings.slice(*CONSUMER)
+         end
+
+         # Filter the provided settings leaving only the ones applicable to the producer
+         # @param kafka_settings [Hash] all kafka settings
+         # @return [Hash] settings applicable to the producer
+         def producer(kafka_settings)
+           kafka_settings.slice(*PRODUCER)
+         end
+
+         # @private
+         # @return [Hash<Symbol, Array<Symbol>>] hash with consumer and producer attributes list
+         #   that is sorted.
+         # @note This method should not be used directly. It is only used to generate appropriate
+         #   options list in case it would change
+         def generate
+           # Not used anywhere else, hence required here
+           require 'open-uri'
+
+           attributes = { consumer: Set.new, producer: Set.new }
+
+           ::URI.parse(SOURCE).open.readlines.each do |line|
+             next unless line.include?('|')
+
+             attribute, attribute_type = line.split('|').map(&:strip)
+
+             case attribute_type
+             when 'C'
+               attributes[:consumer] << attribute
+             when 'P'
+               attributes[:producer] << attribute
+             when '*'
+               attributes[:consumer] << attribute
+               attributes[:producer] << attribute
+             else
+               next
+             end
+           end
+
+           attributes.transform_values!(&:sort)
+           attributes.each_value { |vals| vals.map!(&:to_sym) }
+           attributes
+         end
+       end
+     end
+   end
+ end
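An illustrative sketch (not part of the gem itself) of what this filtering layer does: one shared `kafka` hash is sliced into only the keys each rdkafka client understands, which is how the code above keeps producer-only options away from the consumer and vice versa. It assumes the karafka gem from this release is installed:

```ruby
require 'karafka'

# A single, shared kafka scope mixing consumer-only and producer-only options
kafka_settings = {
  'bootstrap.servers': 'localhost:9092',
  'auto.offset.reset': 'earliest',   # consumer-only
  'transactional.id': 'my-producer'  # producer-only
}

Karafka::Setup::AttributesMap.consumer(kafka_settings)
# => { 'bootstrap.servers': 'localhost:9092', 'auto.offset.reset': 'earliest' }

Karafka::Setup::AttributesMap.producer(kafka_settings)
# => { 'bootstrap.servers': 'localhost:9092', 'transactional.id': 'my-producer' }
```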
@@ -20,7 +20,7 @@ module Karafka
  }.freeze
 
  # Contains settings that should not be used in production but make life easier in dev
- DEV_DEFAULTS = {
+ KAFKA_DEV_DEFAULTS = {
  # Will create non-existing topics automatically.
  # Note that the broker needs to be configured with `auto.create.topics.enable=true`
  # While it is not recommended in prod, it simplifies work in dev
@@ -32,7 +32,7 @@ module Karafka
  'topic.metadata.refresh.interval.ms': 5_000
  }.freeze
 
- private_constant :KAFKA_DEFAULTS, :DEV_DEFAULTS
+ private_constant :KAFKA_DEFAULTS, :KAFKA_DEV_DEFAULTS
 
  # Available settings
 
@@ -172,7 +172,7 @@ module Karafka
 
  return if Karafka::App.env.production?
 
- DEV_DEFAULTS.each do |key, value|
+ KAFKA_DEV_DEFAULTS.each do |key, value|
  next if config.kafka.key?(key)
 
  config.kafka[key] = value
@@ -185,7 +185,7 @@ module Karafka
  config.producer ||= ::WaterDrop::Producer.new do |producer_config|
  # In some cases WaterDrop updates the config and we don't want our consumer config to
  # be polluted by those updates, that's why we copy
- producer_config.kafka = config.kafka.dup
+ producer_config.kafka = AttributesMap.producer(config.kafka.dup)
  producer_config.logger = config.logger
  end
 
@@ -25,6 +25,9 @@ module Karafka
  end
 
  define_method transition do
+ # Do nothing if the transition would not change anything (same state)
+ return if @status == state
+
  @status = state
 
  # Skip on creation (initializing)
@@ -3,6 +3,12 @@
  module Karafka
  module TimeTrackers
  # Handles Kafka topic partition pausing and resuming with exponential back-offs.
+ # Since expiring and pausing can happen from both consumer and listener, this needs to be
+ # thread-safe.
+ #
+ # @note We do not have to worry about performance implications of a mutex wrapping most of the
+ # code here, as this is not a frequently used tracker. It is active only once per batch in
+ # case of long-running-jobs and upon errors.
  class Pause < Base
  attr_reader :count
 
@@ -36,6 +42,7 @@ module Karafka
  @timeout = timeout
  @max_timeout = max_timeout
  @exponential_backoff = exponential_backoff
+ @mutex = Mutex.new
  super()
  end
 
@@ -45,35 +52,47 @@ module Karafka
  # @note Providing this value can be useful when we explicitly want to pause for a certain
  # period of time, outside of any regular pausing logic
  def pause(timeout = backoff_interval)
- @started_at = now
- @ends_at = @started_at + timeout
- @count += 1
+ @mutex.synchronize do
+ @started_at = now
+ @ends_at = @started_at + timeout
+ @count += 1
+ end
  end
 
  # Marks the pause as resumed.
  def resume
- @started_at = nil
- @ends_at = nil
+ @mutex.synchronize do
+ @started_at = nil
+ @ends_at = nil
+ end
  end
 
  # Expires the pause, so it can be considered expired
  def expire
- @ends_at = nil
+ @mutex.synchronize do
+ @ends_at = nil
+ end
  end
 
  # @return [Boolean] are we paused from processing
  def paused?
- !@started_at.nil?
+ @mutex.synchronize do
+ !@started_at.nil?
+ end
  end
 
  # @return [Boolean] did the pause expire
  def expired?
- @ends_at ? now >= @ends_at : true
+ @mutex.synchronize do
+ @ends_at ? now >= @ends_at : true
+ end
  end
 
  # Resets the pause counter.
  def reset
- @count = 0
+ @mutex.synchronize do
+ @count = 0
+ end
  end
 
  private
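For illustration, a self-contained sketch of the pattern introduced above; the class below is a stand-in, not the real `Karafka::TimeTrackers::Pause`. Every read and write of the pause state goes through one `Mutex`, so a consumer thread un-pausing and the listener thread checking expiration never observe a half-updated tracker:

```ruby
# Stand-in pause tracker guarded by a single Mutex (illustrative only)
class TinyPause
  def initialize(timeout_ms)
    @timeout = timeout_ms
    @count = 0
    @mutex = Mutex.new
  end

  # Exponential backoff: 1x, 2x, 4x ... of the base timeout
  def pause
    @mutex.synchronize do
      @started_at = monotonic_now
      @ends_at = @started_at + @timeout * (2**@count)
      @count += 1
    end
  end

  def resume
    @mutex.synchronize { @started_at = @ends_at = nil }
  end

  def expired?
    @mutex.synchronize { @ends_at ? monotonic_now >= @ends_at : true }
  end

  private

  def monotonic_now
    Process.clock_gettime(Process::CLOCK_MONOTONIC, :millisecond)
  end
end
```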
@@ -2,7 +2,8 @@
 
  module Karafka
  module TimeTrackers
- # Object used to keep track of time we've used running certain operations.
+ # Object used to keep track of time we've used running certain operations. Polling is
+ # running in a single thread, thus we do not have to worry about this being thread-safe.
  #
  # @example Keep track of sleeping and stop after 3 seconds of 0.1 sleep intervals
  # time_poll = Poll.new(3000)
@@ -3,5 +3,5 @@
  # Main module namespace
  module Karafka
  # Current Karafka version
- VERSION = '2.0.12'
+ VERSION = '2.0.14'
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: karafka
  version: !ruby/object:Gem::Version
- version: 2.0.12
+ version: 2.0.14
  platform: ruby
  authors:
  - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
  Qf04B9ceLUaC4fPVEz10FyobjaFoY4i32xRto3XnrzeAgfEe4swLq8bQsR3w/EF3
  MGU0FeSV2Yj7Xc2x/7BzLK8xQn5l7Yy75iPF+KP3vVmDHnNl
  -----END CERTIFICATE-----
- date: 2022-10-06 00:00:00.000000000 Z
+ date: 2022-10-16 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: karafka-core
@@ -188,6 +188,7 @@ files:
  - lib/karafka/contracts/consumer_group.rb
  - lib/karafka/contracts/consumer_group_topic.rb
  - lib/karafka/contracts/server_cli_options.rb
+ - lib/karafka/embedded.rb
  - lib/karafka/env.rb
  - lib/karafka/errors.rb
  - lib/karafka/helpers/async.rb
@@ -260,6 +261,7 @@ files:
  - lib/karafka/runner.rb
  - lib/karafka/serialization/json/deserializer.rb
  - lib/karafka/server.rb
+ - lib/karafka/setup/attributes_map.rb
  - lib/karafka/setup/config.rb
  - lib/karafka/setup/dsl.rb
  - lib/karafka/status.rb
metadata.gz.sig CHANGED
Binary file