karafka 2.4.14 → 2.4.15

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 22f45da117cf90a2ecbec04dbcaf39634b1e1b85e00f521d4880954c65268ecf
4
- data.tar.gz: ce5318aaa8f52954a80981662a41ad17ab314e9706a00f2edf3e840604b87b32
3
+ metadata.gz: 5b49de31147b2a64d8927c91e25e6b607fdf1a8a7280b7cc3ba8c3663cf96b6f
4
+ data.tar.gz: 4a3d9e439e21b79b9ac6a2c8bf4e76219f970be1f6bb7316feb5c7f2f792271e
5
5
  SHA512:
6
- metadata.gz: d8ec82f91aea2bdba595fa290feec1ca0b25dbd73cd0eafac6554538d3506c47ad8db42036ed838b199a3016341bf67d6daf4f03dfced3ddcf783850c0dc97bf
7
- data.tar.gz: 2b750fce3294dda031f5a97bf5343711dcf52d1dbc00c61e029b602b302a549e9a70bb6ac4e952842e1ad840d7dd7b28115ed72118aad54917497672593de133
6
+ metadata.gz: 527a1c169ddc5a0f978f69dc3dbae6d30288997d2cfc95104ad6343d7a468c67afbde4c55051c1e7902d2c161fc1e615f8d703b9dfba123f734b9fbd7ccec5d1
7
+ data.tar.gz: a895657a8dece0c9bdb65afeeb00bc8df2e03e1957d345bc71ceab9d6cce74e5d0b4afcc251e26dfe230778a05b8742e40505a779695325844c6be630307ad82
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,5 +1,9 @@
1
1
  # Karafka Framework Changelog
2
2
 
3
+ ## 2.4.15 (2024-12-04)
4
+ - [Fix] Assignment tracker current state fetch during a rebalance loop can cause an error on multi CG setup.
5
+ - [Fix] Prevent double post-transaction offset dispatch to Kafka.
6
+
3
7
  ## 2.4.14 (2024-11-25)
4
8
  - [Enhancement] Improve low-level critical error reporting.
5
9
  - [Enhancement] Expand Kubernetes Liveness state reporting with critical errors detection.
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- karafka (2.4.14)
4
+ karafka (2.4.15)
5
5
  base64 (~> 0.2)
6
6
  karafka-core (>= 2.4.4, < 2.5.0)
7
7
  karafka-rdkafka (>= 0.17.2)
@@ -49,8 +49,8 @@ GEM
49
49
  activesupport (>= 6.1)
50
50
  i18n (1.14.6)
51
51
  concurrent-ruby (~> 1.0)
52
- karafka-core (2.4.4)
53
- karafka-rdkafka (>= 0.15.0, < 0.18.0)
52
+ karafka-core (2.4.5)
53
+ karafka-rdkafka (>= 0.17.6, < 0.18.0)
54
54
  karafka-rdkafka (0.17.6)
55
55
  ffi (~> 1.15)
56
56
  mini_portile2 (~> 2.6)
@@ -58,18 +58,18 @@ GEM
58
58
  karafka-testing (2.4.6)
59
59
  karafka (>= 2.4.0, < 2.5.0)
60
60
  waterdrop (>= 2.7.0)
61
- karafka-web (0.10.3)
61
+ karafka-web (0.10.4)
62
62
  erubi (~> 1.4)
63
63
  karafka (>= 2.4.10, < 2.5.0)
64
64
  karafka-core (>= 2.4.0, < 2.5.0)
65
65
  roda (~> 3.68, >= 3.69)
66
66
  tilt (~> 2.0)
67
67
  logger (1.6.1)
68
- mini_portile2 (2.8.7)
68
+ mini_portile2 (2.8.8)
69
69
  minitest (5.25.1)
70
70
  ostruct (0.6.1)
71
71
  raabro (1.4.0)
72
- rack (3.1.7)
72
+ rack (3.1.8)
73
73
  rake (13.2.1)
74
74
  roda (3.84.0)
75
75
  rack
data/docker-compose.yml CHANGED
@@ -3,7 +3,7 @@ version: '2'
3
3
  services:
4
4
  kafka:
5
5
  container_name: kafka
6
- image: confluentinc/cp-kafka:7.7.1
6
+ image: confluentinc/cp-kafka:7.8.0
7
7
 
8
8
  ports:
9
9
  - 9092:9092
@@ -31,8 +31,13 @@ module Karafka
31
31
  def current
32
32
  assignments = {}
33
33
 
34
- @assignments.each do |topic, partitions|
35
- assignments[topic] = partitions.dup.freeze
34
+ # Since the `@assignments` state can change during a rebalance, iterating over it
35
+ # exactly during a state change would end up with the following error:
36
+ # RuntimeError: can't add a new key into hash during iteration
37
+ @mutex.synchronize do
38
+ @assignments.each do |topic, partitions|
39
+ assignments[topic] = partitions.dup.freeze
40
+ end
36
41
  end
37
42
 
38
43
  assignments.freeze
@@ -63,6 +63,16 @@ module Karafka
63
63
  # Ignore earlier offsets than the one we already committed
64
64
  return true if coordinator.seek_offset > message.offset
65
65
  return false if revoked?
66
+
67
+ # If we have already marked this successfully in a transaction that was running,
68
+ # we should not mark it again via the client offset delegation; instead we should
69
+ # just align the in-memory state
70
+ if @_in_transaction_marked
71
+ coordinator.seek_offset = message.offset + 1
72
+
73
+ return true
74
+ end
75
+
66
76
  return revoked? unless client.mark_as_consumed(message, offset_metadata)
67
77
 
68
78
  coordinator.seek_offset = message.offset + 1
@@ -90,6 +100,12 @@ module Karafka
90
100
  return true if coordinator.seek_offset > message.offset
91
101
  return false if revoked?
92
102
 
103
+ if @_in_transaction_marked
104
+ coordinator.seek_offset = message.offset + 1
105
+
106
+ return true
107
+ end
108
+
93
109
  return revoked? unless client.mark_as_consumed!(message, offset_metadata)
94
110
 
95
111
  coordinator.seek_offset = message.offset + 1
@@ -132,6 +148,7 @@ module Karafka
132
148
  transaction_started = true
133
149
  @_transaction_marked = []
134
150
  @_in_transaction = true
151
+ @_in_transaction_marked = false
135
152
 
136
153
  producer.transaction(&block)
137
154
 
@@ -143,6 +160,11 @@ module Karafka
143
160
  # @note We never need to use the blocking `#mark_as_consumed!` here because the offset
144
161
  # anyhow was already stored during the transaction
145
162
  #
163
+ # @note Since the offset may have already been stored in Kafka (may, because
164
+ # you can have transactions without marking), we use the `@_in_transaction_marked`
165
+ # state to decide whether we need to dispatch the offset via the client at all
166
+ # (if it was marked inside the transaction, then we do not have to)
167
+ #
146
168
  # @note In theory we could only keep reference to the most recent marking and reject
147
169
  # others. We however do not do it for two reasons:
148
170
  # - User may have non standard flow relying on some alternative order and we want to
@@ -158,6 +180,7 @@ module Karafka
158
180
  if transaction_started
159
181
  @_transaction_marked.clear
160
182
  @_in_transaction = false
183
+ @_in_transaction_marked = false
161
184
  end
162
185
  end
163
186
 
@@ -178,6 +201,7 @@ module Karafka
178
201
  offset_metadata
179
202
  )
180
203
 
204
+ @_in_transaction_marked = true
181
205
  @_transaction_marked ||= []
182
206
  @_transaction_marked << [message, offset_metadata, async]
183
207
  end
@@ -67,6 +67,20 @@ module Karafka
67
67
  end
68
68
  end
69
69
 
70
+ # Clear out the drawn routes.
71
+ alias array_clear clear
72
+ private :array_clear
73
+
74
+ # Clear routes and draw them again with the given block. Helpful for testing purposes.
75
+ # @param block [Proc] block we will evaluate within the builder context
76
+ def redraw(&block)
77
+ @mutex.synchronize do
78
+ @draws.clear
79
+ array_clear
80
+ end
81
+ draw(&block)
82
+ end
83
+
70
84
  # @return [Array<Karafka::Routing::ConsumerGroup>] only active consumer groups that
71
85
  # we want to use. Since Karafka supports multi-process setup, we need to be able
72
86
  # to pick only those consumer groups that should be active in our given process context
@@ -79,7 +93,7 @@ module Karafka
79
93
  @mutex.synchronize do
80
94
  @defaults = EMPTY_DEFAULTS
81
95
  @draws.clear
82
- super
96
+ array_clear
83
97
  end
84
98
  end
85
99
 
@@ -3,5 +3,5 @@
3
3
  # Main module namespace
4
4
  module Karafka
5
5
  # Current Karafka version
6
- VERSION = '2.4.14'
6
+ VERSION = '2.4.15'
7
7
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: karafka
3
3
  version: !ruby/object:Gem::Version
4
- version: 2.4.14
4
+ version: 2.4.15
5
5
  platform: ruby
6
6
  authors:
7
7
  - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
35
35
  i9zWxov0mr44TWegTVeypcWGd/0nxu1+QHVNHJrpqlPBRvwQsUm7fwmRInGpcaB8
36
36
  ap8wNYvryYzrzvzUxIVFBVM5PacgkFqRmolCa8I7tdKQN+R1
37
37
  -----END CERTIFICATE-----
38
- date: 2024-11-25 00:00:00.000000000 Z
38
+ date: 2024-12-04 00:00:00.000000000 Z
39
39
  dependencies:
40
40
  - !ruby/object:Gem::Dependency
41
41
  name: base64
metadata.gz.sig CHANGED
Binary file