karafka 2.3.0.alpha1 → 2.3.0.rc1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: de7ea23762cefa19d5f3620e92a39f0030cd8ff78f318c92f30c494d79b78163
-  data.tar.gz: 775cfbd40d181036004dcf72dbcb84394dc8367bed0f6d69812f2324dc179d6f
+  metadata.gz: b07678937b0dc9c4154c90ccea2e9bbbc25aa2669461d937e859b77e134ddc48
+  data.tar.gz: a2e3eee8a9351a789dc33a1f78a48899a007da29b81205b4383e75a9fa0cc13c
 SHA512:
-  metadata.gz: d68a4122a35afad517e4280b94f6f3d7cb3cab94fb37c11729e5e5c7a7aca082a7a272a52ff09a86e2f55ad0e078e234c88be79ce3b730527a2f6e7629ef259c
-  data.tar.gz: aa2ddb108cc39caa8ad5c95a86d07006b5be374647e703414a7761ffd5c333010d7e53b9fd2c42216780e35f00639d8cf80126c291a59d0585424077840cc6b5
+  metadata.gz: d2331d328849f4ffb65aa5deb921bf8d12d732f06b30acc5b17ef1fb844d5da23166dbcd27a6b53a6d57d76c6a7d508e9c5479e064a73aa6e0c2eaed69597c8a
+  data.tar.gz: da8afa92e4b5fc552319b15322b3b3e883c2ec5c2b46ec887d098d34f7a0fbd7a7408558a89d781699eb521309c8fe69d2df7e6a1f46de19259451fb3050e391
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,6 +1,7 @@
 # Karafka framework changelog
 
 ## 2.3.0 (Unreleased)
+- **[Feature]** Introduce Exactly-Once Semantics within consumers `#transaction` block (Pro)
 - **[Feature]** Provide ability to multiplex subscription groups (Pro)
 - **[Feature]** Provide `Karafka::Admin::Acl` for Kafka ACL management via the Admin APIs.
 - **[Feature]** Periodic Jobs (Pro)
@@ -13,6 +14,7 @@
 - [Enhancement] Provide an `:independent` configuration to DLQ allowing to reset pause count track on each marking as consumed when retrying.
 - [Enhancement] Remove no longer needed shutdown patches for `librdkafka` improving multi-sg shutdown times for `cooperative-sticky`.
 - [Enhancement] Allow for parallel closing of connections from independent consumer groups.
+- [Enhancement] Provide recovery flow for cases where DLQ dispatch would fail.
 - [Change] Make `Kubernetes::LivenessListener` not start until Karafka app starts running.
 - [Change] Remove the legacy "inside of topics" way of defining subscription groups names
 - [Change] Update supported instrumentation to report on `#tick`.
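
For orientation, here is a minimal sketch of how the new Exactly-Once Semantics feature is meant to be used from a consumer: offset storage happens inside the producer `#transaction` block, so produced messages and the consumed offset commit or abort together. Class and topic names below are illustrative, and a transactional WaterDrop producer (one with a `transactional.id` configured) is assumed:

```ruby
# Illustrative consumer using the new EOS flow (Pro). Assumes the default
# producer is configured as transactional in WaterDrop.
class EventsConsumer < ApplicationConsumer
  def consume
    transaction do
      messages.each do |message|
        # The dispatch below and the offset storage are part of one Kafka
        # transaction: either both take effect or neither does
        producer.produce_async(
          topic: 'events_enriched',
          payload: message.raw_payload
        )

        mark_as_consumed(message)
      end
    end
  end
end
```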
data/Gemfile.lock CHANGED
@@ -1,18 +1,18 @@
 PATH
   remote: .
   specs:
-    karafka (2.3.0.alpha1)
-      karafka-core (>= 2.3.0.alpha1, < 2.4.0)
+    karafka (2.3.0.rc1)
+      karafka-core (>= 2.3.0.rc1, < 2.4.0)
       waterdrop (>= 2.6.12, < 3.0.0)
       zeitwerk (~> 2.3)
 
 GEM
   remote: https://rubygems.org/
   specs:
-    activejob (7.1.2)
-      activesupport (= 7.1.2)
+    activejob (7.1.3)
+      activesupport (= 7.1.3)
       globalid (>= 0.3.6)
-    activesupport (7.1.2)
+    activesupport (7.1.3)
       base64
       bigdecimal
       concurrent-ruby (~> 1.0, >= 1.0.2)
@@ -23,9 +23,9 @@ GEM
       mutex_m
       tzinfo (~> 2.0)
     base64 (0.2.0)
-    bigdecimal (3.1.5)
+    bigdecimal (3.1.6)
     byebug (11.1.3)
-    concurrent-ruby (1.2.2)
+    concurrent-ruby (1.2.3)
     connection_pool (2.4.1)
     diff-lcs (1.5.0)
     docile (1.4.0)
@@ -39,7 +39,7 @@ GEM
       activesupport (>= 6.1)
     i18n (1.14.1)
       concurrent-ruby (~> 1.0)
-    karafka-core (2.3.0.alpha1)
+    karafka-core (2.3.0.rc1)
       karafka-rdkafka (>= 0.14.7, < 0.15.0)
     karafka-rdkafka (0.14.7)
       ffi (~> 1.15)
@@ -52,11 +52,11 @@ GEM
       roda (~> 3.68, >= 3.69)
       tilt (~> 2.0)
     mini_portile2 (2.8.5)
-    minitest (5.20.0)
+    minitest (5.21.2)
     mutex_m (0.2.0)
     rack (3.0.8)
     rake (13.1.0)
-    roda (3.75.0)
+    roda (3.76.0)
       rack
     rspec (3.12.0)
       rspec-core (~> 3.12.0)
@@ -100,4 +100,4 @@ DEPENDENCIES
   simplecov
 
 BUNDLED WITH
-   2.5.3
+   2.5.4
data/bin/integrations CHANGED
@@ -28,7 +28,7 @@ ROOT_PATH = Pathname.new(File.expand_path(File.join(File.dirname(__FILE__), '../
 CONCURRENCY = ENV.key?('CI') ? 5 : Etc.nprocessors * 3
 
 # How may bytes do we want to keep from the stdout in the buffer for when we need to print it
-MAX_BUFFER_OUTPUT = 51_200
+MAX_BUFFER_OUTPUT = 102_400
 
 # Abstraction around a single test scenario execution process
 class Scenario
@@ -75,6 +75,7 @@ en:
       dead_letter_queue.topic_format: 'needs to be a string with a Kafka accepted format'
       dead_letter_queue.active_format: needs to be either true or false
       dead_letter_queue.independent_format: needs to be either true or false
+      dead_letter_queue.transactional_format: needs to be either true or false
       active_format: needs to be either true or false
       declaratives.partitions_format: needs to be more or equal to 1
       declaratives.active_format: needs to be true
data/karafka.gemspec CHANGED
@@ -21,7 +21,7 @@ Gem::Specification.new do |spec|
     without having to focus on things that are not your business domain.
   DESC
 
-  spec.add_dependency 'karafka-core', '>= 2.3.0.alpha1', '< 2.4.0'
+  spec.add_dependency 'karafka-core', '>= 2.3.0.rc1', '< 2.4.0'
   spec.add_dependency 'waterdrop', '>= 2.6.12', '< 3.0.0'
   spec.add_dependency 'zeitwerk', '~> 2.3'
 
@@ -79,10 +79,25 @@ module Karafka
     end
 
     # @private
+    #
     # @note This should not be used by the end users as it is part of the lifecycle of things but
     #   not as part of the public api.
+    #
+    # @note We handle and report errors here because of flows that could fail. For example a DLQ
+    #   flow could fail if it was not able to dispatch the DLQ message. Other "non-user" based
+    #   flows do not interact with external systems and their errors are expected to bubble up
     def on_after_consume
       handle_after_consume
+    rescue StandardError => e
+      Karafka.monitor.instrument(
+        'error.occurred',
+        error: e,
+        caller: self,
+        seek_offset: coordinator.seek_offset,
+        type: 'consumer.after_consume.error'
+      )
+
+      retry_after_pause
     end
 
     # Can be used to run code prior to scheduling of idle execution
@@ -76,5 +76,9 @@ module Karafka
 
     # Raised when we want to un-pause listener that was not paused
     InvalidListenerPauseError = Class.new(BaseError)
+
+    # Raised in transactions when we attempt to store offset for a partition that we have lost
+    # This does not affect producer only transactions, hence we raise it only on offset storage
+    AssignmentLostError = Class.new(BaseError)
   end
 end
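
Given the new error class, application code wrapping transactions can react to a lost assignment explicitly. A hedged sketch (the rescue placement is illustrative; per the diff, the error is raised only on offset storage attempts, not in producer-only transactions):

```ruby
def consume
  transaction do
    producer.produce_async(topic: 'events', payload: messages.last.raw_payload)

    # Raises Karafka::Errors::AssignmentLostError when the partition was
    # revoked, preventing offset storage for an assignment we no longer own
    mark_as_consumed(messages.last)
  end
rescue Karafka::Errors::AssignmentLostError
  # The transaction did not commit; the consumer that received this
  # partition will reprocess from the last committed offset
end
```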
@@ -248,7 +248,10 @@ module Karafka
         error "Consumer on shutdown failed due to an error: #{error}"
         error details
       when 'consumer.tick.error'
-        error "Consumer tick failed due to an error: #{error}"
+        error "Consumer on tick failed due to an error: #{error}"
+        error details
+      when 'consumer.after_consume.error'
+        error "Consumer on after_consume failed due to an error: #{error}"
         error details
       when 'worker.process.error'
         fatal "Worker processing failed due to an error: #{error}"
@@ -24,18 +24,9 @@ module Karafka
      # @note Manager operations relate to consumer groups and not subscription groups. Since
      #   cluster operations can cause consumer group wide effects, we always apply only one
      #   change on a consumer group.
-      #
-      # @note Since we collect statistical data from listeners and this happens in a background
-      #   thread, we need to make sure we lock not to have race conditions with expired data
-      #   eviction.
      class Manager < Karafka::Connection::Manager
        include Core::Helpers::Time
 
-        # How long should we keep stale stats before evicting them completely
-        EVICTION_DELAY = 5 * 60 * 1_000
-
-        private_constant :EVICTION_DELAY
-
        # How long should we wait after a rebalance before doing anything on a consumer group
        #
        # @param scale_delay [Integer] How long should we wait before making any changes. Any
@@ -50,7 +41,6 @@ module Karafka
            state: '',
            join_state: '',
            state_age: 0,
-            state_age_sync: monotonic_now,
            changed_at: monotonic_now
          }
        end
@@ -65,6 +55,9 @@ module Karafka
        def register(listeners)
          @listeners = listeners
 
+          # Preload all the keys into the hash so we never add keys to changes but just change them
+          listeners.each { |listener| @changes[listener.subscription_group.id] }
+
          in_sg_families do |first_subscription_group, sg_listeners|
            multiplexing = first_subscription_group.multiplexing
 
@@ -86,25 +79,22 @@ module Karafka
        # @note Please note that while we collect here per subscription group, we use those metrics
        #   collectively on a whole consumer group. This reduces the friction.
        def notice(subscription_group_id, statistics)
-          @mutex.synchronize do
-            times = []
-            # stateage is in microseconds
-            # We monitor broker changes to make sure we do not introduce extra friction
-            times << statistics['brokers'].values.map { |stats| stats['stateage'] }.min / 1_000
-            times << statistics['cgrp']['rebalance_age']
-            times << statistics['cgrp']['stateage']
-
-            # Keep the previous change age for changes that were triggered by us
-            previous_changed_at = @changes[subscription_group_id][:changed_at]
-
-            @changes[subscription_group_id] = {
-              state_age: times.min,
-              changed_at: previous_changed_at,
-              join_state: statistics['cgrp']['join_state'],
-              state: statistics['cgrp']['state'],
-              state_age_sync: monotonic_now
-            }
-          end
+          times = []
+          # stateage is in microseconds
+          # We monitor broker changes to make sure we do not introduce extra friction
+          times << statistics['brokers'].values.map { |stats| stats['stateage'] }.min / 1_000
+          times << statistics['cgrp']['rebalance_age']
+          times << statistics['cgrp']['stateage']
+
+          # Keep the previous change age for changes that were triggered by us
+          previous_changed_at = @changes[subscription_group_id][:changed_at]
+
+          @changes[subscription_group_id].merge!(
+            state_age: times.min,
+            changed_at: previous_changed_at,
+            join_state: statistics['cgrp']['join_state'],
+            state: statistics['cgrp']['state']
+          )
        end
 
        # Shuts down all the listeners when it is time (including moving to quiet) or rescales
@@ -158,8 +148,6 @@ module Karafka
        #
        # We always run scaling down and up because it may be applicable to different CGs
        def rescale
-          evict
-
          scale_down
          scale_up
        end
@@ -232,23 +220,11 @@ module Karafka
          end
        end
 
-        # Removes states that are no longer being reported for stopped/pending listeners
-        def evict
-          @mutex.synchronize do
-            @changes.delete_if do |_, details|
-              monotonic_now - details[:state_age_sync] >= EVICTION_DELAY
-            end
-          end
-        end
-
        # Indicates, that something has changed on a subscription group. We consider every single
        #   change we make as a change to the setup as well.
        # @param subscription_group_id [String]
        def touch(subscription_group_id)
-          @mutex.synchronize do
-            @changes[subscription_group_id][:changed_at] = 0
-            @changes[subscription_group_id][:state_age_sync] = monotonic_now
-          end
+          @changes[subscription_group_id][:changed_at] = 0
        end
 
        # @param sg_listeners [Array<Listener>] listeners from one multiplexed sg
@@ -257,17 +233,10 @@ module Karafka
        #   are also stable. This is a strong indicator that no rebalances or other operations are
        #   happening at a given moment.
        def stable?(sg_listeners)
-          # If none of listeners has changes reported it means we did not yet start collecting
-          # metrics about any of them and at least one must be present. We do not consider it
-          # stable in such case as we still are waiting for metrics.
-          return false if sg_listeners.none? do |sg_listener|
-            @changes.key?(sg_listener.subscription_group.id)
-          end
-
          sg_listeners.all? do |sg_listener|
-            # Not all SGs may be started initially or may be stopped, we ignore them here as they
-            # are irrelevant from the point of view of establishing stability
-            next true unless @changes.key?(sg_listener.subscription_group.id)
+            # If a listener is not active, we do not take it into consideration when looking at
+            # the stability data
+            next true unless sg_listener.active?
 
            state = @changes[sg_listener.subscription_group.id]
 
@@ -27,6 +27,21 @@ module Karafka
          # Apply strategy for a non-feature based flow
          FEATURES = %i[].freeze
 
+          # Allows to set offset metadata that will be used with the upcoming marking as consumed
+          # as long as a different offset metadata was not used. After it was used either via
+          # `#mark_as_consumed` or `#mark_as_consumed!` it will be set back to `nil`. It is done
+          # that way to provide the end user with ability to influence metadata on the non-user
+          # initiated markings in complex flows.
+          #
+          # @param offset_metadata [String, nil] metadata we want to store with the upcoming
+          #   marking as consumed
+          #
+          # @note Please be aware, that offset metadata set this way will be passed to any marking
+          #   as consumed even if it was not user initiated. For example in the DLQ flow.
+          def store_offset_metadata(offset_metadata)
+            @_current_offset_metadata = offset_metadata
+          end
+
          # Marks message as consumed in an async way.
          #
          # @param message [Messages::Message] last successfully processed message.
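
A usage sketch for the new `#store_offset_metadata` API (the metadata string and the `process` helper are illustrative): the stored value rides along with the next marking, including framework-initiated ones like the DLQ flow, and is cleared once used:

```ruby
def consume
  messages.each do |message|
    # `process` is a placeholder for domain logic
    process(message)
  end

  # Will be attached to the next marking as consumed, whether triggered
  # here or by a non-user initiated flow such as a DLQ dispatch
  store_offset_metadata("worker:#{Process.pid}")

  mark_as_consumed(messages.last)
end
```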
@@ -38,7 +53,7 @@ module Karafka
          #   processing another message. In case like this we do not pause on the message we've
          #   already processed but rather at the next one. This applies to both sync and async
          #   versions of this method.
-          def mark_as_consumed(message, offset_metadata = nil)
+          def mark_as_consumed(message, offset_metadata = @_current_offset_metadata)
            if @_in_transaction
              mark_in_transaction(message, offset_metadata, true)
            else
@@ -54,6 +69,8 @@ module Karafka
            end
 
            true
+          ensure
+            @_current_offset_metadata = nil
          end
 
          # Marks message as consumed in a sync way.
@@ -62,7 +79,7 @@ module Karafka
          # @param offset_metadata [String, nil] offset metadata string or nil if nothing
          # @return [Boolean] true if we were able to mark the offset, false otherwise.
          #   False indicates that we were not able and that we have lost the partition.
-          def mark_as_consumed!(message, offset_metadata = nil)
+          def mark_as_consumed!(message, offset_metadata = @_current_offset_metadata)
            if @_in_transaction
              mark_in_transaction(message, offset_metadata, false)
            else
@@ -79,6 +96,8 @@ module Karafka
            end
 
            true
+          ensure
+            @_current_offset_metadata = nil
          end
 
          # Starts producer transaction, saves the transaction context for transactional marking
@@ -88,8 +107,23 @@ module Karafka
          #   allow to mark offsets inside of the transaction. If the transaction is initialized
          #   only from the consumer, the offset will be stored in a regular fashion.
          #
+          # @param active_producer [WaterDrop::Producer] alternative producer instance we may want
+          #   to use. It is useful when we have connection pool or any other selective engine for
+          #   managing multiple producers. If not provided, default producer taken from `#producer`
+          #   will be used.
+          #
          # @param block [Proc] code that we want to run in a transaction
-          def transaction(&block)
+          #
+          # @note Please note, that if you provide the producer, it will reassign the producer of
+          #   the consumer for the transaction time. This means, that in case you would even
+          #   accidentally refer to `Consumer#producer` from other threads, it will contain the
+          #   reassigned producer and not the initially used/assigned producer. It is done that
+          #   way, so the message producing aliases operate from within transactions and since the
+          #   producer in transaction is locked, it will prevent other threads from using it.
+          def transaction(active_producer = producer, &block)
+            default_producer = producer
+            self.producer = active_producer
+
            transaction_started = false
 
            # Prevent from nested transactions. It would not make any sense
@@ -119,6 +153,8 @@ module Karafka
              marking.pop ? mark_as_consumed(*marking) : mark_as_consumed!(*marking)
            end
          ensure
+            self.producer = default_producer
+
            if transaction_started
              @_transaction_marked.clear
              @_in_transaction = false
@@ -134,6 +170,7 @@ module Karafka
          #   transaction state synchronization usage as within transaction it is always sync)
          def mark_in_transaction(message, offset_metadata, async)
            raise Errors::TransactionRequiredError unless @_in_transaction
+            raise Errors::AssignmentLostError if revoked?
 
            producer.transaction_mark_as_consumed(
              client,
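
A sketch of the new `active_producer` argument in use with a pool of transactional producers (`PRODUCERS` here is a hypothetical `ConnectionPool` instance; names are illustrative). Note the caveat from the diff: `#producer` points at the reassigned producer for the duration of the block and is restored afterwards:

```ruby
def consume
  # Check out a dedicated transactional producer instead of the default one
  PRODUCERS.with do |transactional_producer|
    transaction(transactional_producer) do
      # Within the block, `#producer` refers to `transactional_producer`
      producer.produce_async(topic: 'events', payload: messages.last.raw_payload)

      mark_as_consumed(messages.last)
    end
  end
end
```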
@@ -35,7 +35,7 @@ module Karafka
            # @see `Strategies::Default#mark_as_consumed` for more details
            # @param message [Messages::Message]
            # @param offset_metadata [String, nil]
-            def mark_as_consumed(message, offset_metadata = nil)
+            def mark_as_consumed(message, offset_metadata = @_current_offset_metadata)
              return super unless retrying?
              return super unless topic.dead_letter_queue.independent?
              return false unless super
@@ -43,6 +43,8 @@ module Karafka
              coordinator.pause_tracker.reset
 
              true
+            ensure
+              @_current_offset_metadata = nil
            end
 
            # Override of the standard `#mark_as_consumed!`. Resets the pause tracker count in case
@@ -51,7 +53,7 @@ module Karafka
            # @see `Strategies::Default#mark_as_consumed!` for more details
            # @param message [Messages::Message]
            # @param offset_metadata [String, nil]
-            def mark_as_consumed!(message, offset_metadata = nil)
+            def mark_as_consumed!(message, offset_metadata = @_current_offset_metadata)
              return super unless retrying?
              return super unless topic.dead_letter_queue.independent?
              return false unless super
@@ -59,6 +61,8 @@ module Karafka
              coordinator.pause_tracker.reset
 
              true
+            ensure
+              @_current_offset_metadata = nil
            end
 
            # When we encounter non-recoverable message, we skip it and go on with our lives
@@ -80,9 +84,7 @@ module Karafka
              else
                # We reset the pause to indicate we will now consider it as "ok".
                coordinator.pause_tracker.reset
-                skippable_message, = find_skippable_message
-                dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
-                mark_as_consumed(skippable_message)
+                dispatch_if_needed_and_mark_as_consumed
                pause(coordinator.seek_offset, nil, false)
              end
            end
@@ -129,6 +131,25 @@ module Karafka
              )
            end
 
+            # Dispatches the message to the DLQ (when needed and when applicable based on settings)
+            # and marks this message as consumed for non MOM flows.
+            #
+            # If producer is transactional and config allows, uses transaction to do that
+            def dispatch_if_needed_and_mark_as_consumed
+              skippable_message, = find_skippable_message
+
+              dispatch = lambda do
+                dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
+                mark_as_consumed(skippable_message)
+              end
+
+              if dispatch_in_a_transaction?
+                transaction { dispatch.call }
+              else
+                dispatch.call
+              end
+            end
+
            # @param skippable_message [Array<Karafka::Messages::Message>]
            # @return [Hash] dispatch DLQ message
            def build_dlq_message(skippable_message)
@@ -164,6 +185,13 @@ module Karafka
            def dispatch_to_dlq?
              topic.dead_letter_queue.topic
            end
+
+            # @return [Boolean] should we use a transaction to move the data to the DLQ.
+            #   We can do it only when producer is transactional and configuration for DLQ
+            #   transactional dispatches is not set to false.
+            def dispatch_in_a_transaction?
+              producer.transactional? && topic.dead_letter_queue.transactional?
+            end
          end
        end
      end
@@ -46,9 +46,9 @@ module Karafka
                retry_after_pause
              else
                coordinator.pause_tracker.reset
-                skippable_message, = find_skippable_message
-                dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
-                mark_as_consumed(skippable_message)
+
+                dispatch_if_needed_and_mark_as_consumed
+
                pause(coordinator.seek_offset, nil, false)
              end
            end
@@ -60,9 +60,8 @@ module Karafka
 
              return resume if revoked?
 
-              skippable_message, = find_skippable_message
-              dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
-              mark_as_consumed(skippable_message)
+              dispatch_if_needed_and_mark_as_consumed
+
              pause(coordinator.seek_offset, nil, false)
            end
          end
@@ -49,9 +49,8 @@ module Karafka
 
              return resume if revoked?
 
-              skippable_message, = find_skippable_message
-              dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
-              mark_as_consumed(skippable_message)
+              dispatch_if_needed_and_mark_as_consumed
+
              pause(coordinator.seek_offset, nil, false)
            end
          end
@@ -33,7 +33,7 @@ module Karafka
            # @note This virtual offset management uses a regular default marking API underneath.
            #   We do not alter the "real" marking API, as VPs are just one of many cases we want
            #   to support and we do not want to impact them with collective offsets management
-            def mark_as_consumed(message, offset_metadata = nil)
+            def mark_as_consumed(message, offset_metadata = @_current_offset_metadata)
              if @_in_transaction && !collapsed?
                mark_in_transaction(message, offset_metadata, true)
              elsif collapsed?
@@ -55,11 +55,13 @@ module Karafka
                  manager.markable? ? super(*manager.markable) : revoked?
                end
              end
+            ensure
+              @_current_offset_metadata = nil
            end
 
            # @param message [Karafka::Messages::Message] blocking marks message as consumed
            # @param offset_metadata [String, nil]
-            def mark_as_consumed!(message, offset_metadata = nil)
+            def mark_as_consumed!(message, offset_metadata = @_current_offset_metadata)
              if @_in_transaction && !collapsed?
                mark_in_transaction(message, offset_metadata, false)
              elsif collapsed?
@@ -73,6 +75,8 @@ module Karafka
                  manager.markable? ? super(*manager.markable) : revoked?
                end
              end
+            ensure
+              @_current_offset_metadata = nil
            end
 
            # Stores the next offset for processing inside of the transaction when collapsed and
@@ -93,6 +97,8 @@ module Karafka
            #   transaction state synchronization usage as within transaction it is always sync)
            def mark_in_transaction(message, offset_metadata, async)
              raise Errors::TransactionRequiredError unless @_in_transaction
+              # Prevent from attempts of offset storage when we no longer own the assignment
+              raise Errors::AssignmentLostError if revoked?
 
              return super if collapsed?
 
@@ -13,10 +13,13 @@ module Karafka
            :topic,
            # Should retries be handled collectively on a batch or independently per message
            :independent,
+            # Move to DLQ and mark as consumed in transactional mode (if applicable)
+            :transactional,
            keyword_init: true
          ) do
            alias_method :active?, :active
            alias_method :independent?, :independent
+            alias_method :transactional?, :transactional
          end
        end
      end
@@ -20,6 +20,7 @@ module Karafka
          required(:active) { |val| [true, false].include?(val) }
          required(:independent) { |val| [true, false].include?(val) }
          required(:max_retries) { |val| val.is_a?(Integer) && val >= 0 }
+          required(:transactional) { |val| [true, false].include?(val) }
        end
 
        # Validate topic name only if dlq is active
@@ -16,17 +16,21 @@ module Karafka
          #   if we do not want to move it anywhere and just skip
          # @param independent [Boolean] needs to be true in order for each marking as consumed
          #   in a retry flow to reset the errors counter
+          # @param transactional [Boolean] if applicable, should transaction be used to move
+          #   given message to the dead-letter topic and mark it as consumed.
          # @return [Config] defined config
          def dead_letter_queue(
            max_retries: DEFAULT_MAX_RETRIES,
            topic: nil,
-            independent: false
+            independent: false,
+            transactional: true
          )
            @dead_letter_queue ||= Config.new(
              active: !topic.nil?,
              max_retries: max_retries,
              topic: topic,
-              independent: independent
+              independent: independent,
+              transactional: transactional
            )
          end
 
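Routing-wise, the new `transactional` flag defaults to `true`, so DLQ dispatches from a transactional producer get wrapped in a transaction automatically and the flag only needs to be set when opting out. A sketch with illustrative topic and consumer names:

```ruby
class KarafkaApp < Karafka::App
  routes.draw do
    topic :orders_states do
      consumer OrdersStatesConsumer

      dead_letter_queue(
        topic: 'dead_messages',
        max_retries: 2,
        independent: true,
        # Opt out of wrapping the DLQ dispatch and offset marking in a
        # Kafka transaction even when the producer is transactional
        transactional: false
      )
    end
  end
end
```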
@@ -3,5 +3,5 @@
 # Main module namespace
 module Karafka
   # Current Karafka version
-  VERSION = '2.3.0.alpha1'
+  VERSION = '2.3.0.rc1'
 end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: karafka
 version: !ruby/object:Gem::Version
-  version: 2.3.0.alpha1
+  version: 2.3.0.rc1
 platform: ruby
 authors:
 - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
   AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
   msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
   -----END CERTIFICATE-----
-date: 2024-01-15 00:00:00.000000000 Z
+date: 2024-01-21 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: karafka-core
@@ -43,7 +43,7 @@ dependencies:
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 2.3.0.alpha1
+      version: 2.3.0.rc1
   - - "<"
     - !ruby/object:Gem::Version
       version: 2.4.0
@@ -53,7 +53,7 @@ dependencies:
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 2.3.0.alpha1
+      version: 2.3.0.rc1
   - - "<"
     - !ruby/object:Gem::Version
       version: 2.4.0
metadata.gz.sig CHANGED
Binary file