karafka 2.1.5.beta1 → 2.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/CHANGELOG.md +20 -1
- data/Gemfile.lock +9 -9
- data/karafka.gemspec +2 -2
- data/lib/karafka/admin.rb +34 -3
- data/lib/karafka/base_consumer.rb +16 -3
- data/lib/karafka/connection/client.rb +110 -88
- data/lib/karafka/errors.rb +4 -1
- data/lib/karafka/messages/seek.rb +3 -0
- data/lib/karafka/pro/iterator/expander.rb +95 -0
- data/lib/karafka/pro/iterator/tpl_builder.rb +145 -0
- data/lib/karafka/pro/iterator.rb +2 -87
- data/lib/karafka/pro/processing/filters_applier.rb +1 -0
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom.rb +3 -1
- data/lib/karafka/pro/processing/strategies/aj/dlq_ftr_lrj_mom_vp.rb +3 -1
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom.rb +3 -1
- data/lib/karafka/pro/processing/strategies/aj/dlq_lrj_mom_vp.rb +3 -1
- data/lib/karafka/pro/processing/strategies/aj/ftr_lrj_mom_vp.rb +3 -1
- data/lib/karafka/pro/processing/strategies/aj/lrj_mom_vp.rb +4 -1
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj.rb +2 -2
- data/lib/karafka/pro/processing/strategies/dlq/ftr_lrj_mom.rb +2 -2
- data/lib/karafka/pro/processing/strategies/dlq/lrj.rb +2 -1
- data/lib/karafka/pro/processing/strategies/dlq/lrj_mom.rb +3 -1
- data/lib/karafka/pro/processing/strategies/ftr/default.rb +8 -1
- data/lib/karafka/pro/processing/strategies/lrj/default.rb +1 -1
- data/lib/karafka/pro/processing/strategies/lrj/ftr.rb +2 -2
- data/lib/karafka/pro/processing/strategies/lrj/ftr_mom.rb +2 -2
- data/lib/karafka/pro/processing/strategies/lrj/mom.rb +3 -1
- data/lib/karafka/pro/processing/virtual_offset_manager.rb +1 -1
- data/lib/karafka/processing/coordinator.rb +14 -0
- data/lib/karafka/processing/strategies/default.rb +12 -14
- data/lib/karafka/railtie.rb +2 -2
- data/lib/karafka/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +10 -8
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a6994a6d579728a877f84c87086d093aae8a1f830b891fcb4904883085432fe4
+  data.tar.gz: 13b21009a471194a72971ca81ddc718e044bb96587db0e8f186974f554e9ec62
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e4711880bde1d2cd1cb34959f740459979b74ff4d28a671a232f88adbe7473cf67e366fc2b492fac761c572f3a6dfc147a59d46fc08e1c5e18df8ac5f108afdd
+  data.tar.gz: c094600c2bd421ce309c0125d60ea82ed0106d5ce4566b3bb8c1aab13c553e7bd2f6651b98029e42ac831b132563b2c502dd1c76defbf8307cd9bd2393b258f7
checksums.yaml.gz.sig
CHANGED
Binary file
data/CHANGELOG.md
CHANGED

@@ -1,11 +1,30 @@
 # Karafka framework changelog
 
-## 2.1.
+## 2.1.6 (2023-06-29)
+- [Improvement] Provide time support for iterator
+- [Improvement] Provide time support for admin `#read_topic`
+- [Improvement] Provide time support for consumer `#seek`.
+- [Improvement] Remove no longer needed locks for client operations.
+- [Improvement] Raise `Karafka::Errors::TopicNotFoundError` when trying to iterate over non-existing topic.
+- [Improvement] Ensure that Kafka multi-command operations run under mutex together.
+- [Change] Require `waterdrop` `>= 2.6.2`
+- [Change] Require `karafka-core` `>= 2.1.1`
+- [Refactor] Clean-up iterator code.
+- [Fix] Improve performance in dev environment for a Rails app (juike)
+- [Fix] Rename `InvalidRealOffsetUsage` to `InvalidRealOffsetUsageError` to align with naming of other errors.
+- [Fix] Fix unstable spec.
+- [Fix] Fix a case where automatic `#seek` would overwrite manual seek of a user when running LRJ.
+- [Fix] Make sure, that user direct `#seek` and `#pause` operations take precedence over system actions.
+- [Fix] Make sure, that `#pause` and `#resume` with one underlying connection do not race-condition.
+
+## 2.1.5 (2023-06-19)
 - [Improvement] Drastically improve `#revoked?` response quality by checking the real time assignment lost state on librdkafka.
 - [Improvement] Improve eviction of saturated jobs that would run on already revoked assignments.
 - [Improvement] Expose `#commit_offsets` and `#commit_offsets!` methods in the consumer to provide ability to commit offsets directly to Kafka without having to mark new messages as consumed.
 - [Improvement] No longer skip offset commit when no messages marked as consumed as `librdkafka` has fixed the crashes there.
 - [Improvement] Remove no longer needed patches.
+- [Improvement] Ensure, that the coordinator revocation status is switched upon revocation detection when using `#revoked?`
+- [Improvement] Add benchmarks for marking as consumed (sync and async).
 - [Change] Require `karafka-core` `>= 2.1.0`
 - [Change] Require `waterdrop` `>= 2.6.1`
 
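Taken together, the time-support entries above mean the public APIs touched in this release accept a `Time` where an `Integer` offset used to be required. A minimal sketch (the topic name and consumer class are illustrative; the signatures follow the diffs later in this report):

```ruby
# Admin: read up to 100 messages of 'events' partition 0, starting from
# wherever that partition was one hour ago
messages = Karafka::Admin.read_topic('events', 0, 100, Time.now - 3600)

# Consumer: seek back five minutes; resolving a Time into a real offset is
# a blocking Kafka round trip (see the #seek notes below)
class EventsConsumer < Karafka::BaseConsumer
  def consume
    seek(Time.now - 300)
  end
end
```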
data/Gemfile.lock
CHANGED

@@ -1,10 +1,10 @@
 PATH
   remote: .
   specs:
-    karafka (2.1.
-      karafka-core (>= 2.1.
+    karafka (2.1.6)
+      karafka-core (>= 2.1.1, < 2.2.0)
       thor (>= 0.20)
-      waterdrop (>= 2.6.
+      waterdrop (>= 2.6.2, < 3.0.0)
       zeitwerk (~> 2.3)
 
 GEM
@@ -30,14 +30,14 @@ GEM
       activesupport (>= 5.0)
     i18n (1.14.1)
       concurrent-ruby (~> 1.0)
-    karafka-core (2.1.
+    karafka-core (2.1.1)
       concurrent-ruby (>= 1.1)
-      karafka-rdkafka (>= 0.13.
-    karafka-rdkafka (0.13.
+      karafka-rdkafka (>= 0.13.1, < 0.14.0)
+    karafka-rdkafka (0.13.1)
       ffi (~> 1.15)
       mini_portile2 (~> 2.6)
       rake (> 12)
-    karafka-web (0.6.
+    karafka-web (0.6.1)
       erubi (~> 1.4)
       karafka (>= 2.1.4, < 3.0.0)
       karafka-core (>= 2.0.13, < 3.0.0)
@@ -72,8 +72,8 @@ GEM
     tilt (2.2.0)
     tzinfo (2.0.6)
       concurrent-ruby (~> 1.0)
-    waterdrop (2.6.
-      karafka-core (>= 2.1.0
+    waterdrop (2.6.2)
+      karafka-core (>= 2.1.0, < 3.0.0)
       zeitwerk (~> 2.3)
     zeitwerk (2.6.8)
 
data/karafka.gemspec
CHANGED

@@ -21,9 +21,9 @@ Gem::Specification.new do |spec|
     without having to focus on things that are not your business domain.
   DESC
 
-  spec.add_dependency 'karafka-core', '>= 2.1.
+  spec.add_dependency 'karafka-core', '>= 2.1.1', '< 2.2.0'
   spec.add_dependency 'thor', '>= 0.20'
-  spec.add_dependency 'waterdrop', '>= 2.6.
+  spec.add_dependency 'waterdrop', '>= 2.6.2', '< 3.0.0'
  spec.add_dependency 'zeitwerk', '~> 2.3'
 
   if $PROGRAM_NAME.end_with?('gem')
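In application terms, an app picks these dependency floors up by bundling the new release; a hypothetical Gemfile line:

```ruby
gem 'karafka', '2.1.6' # transitively requires karafka-core >= 2.1.1 and waterdrop >= 2.6.2
```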
data/lib/karafka/admin.rb
CHANGED

@@ -18,6 +18,9 @@ module Karafka
     # retry after checking that the operation was finished or failed using external factor.
     MAX_WAIT_TIMEOUT = 1
 
+    # Max time for a TPL request. We increase it to compensate for remote clusters latency
+    TPL_REQUEST_TIMEOUT = 2_000
+
     # How many times should be try. 1 x 60 => 60 seconds wait in total
     MAX_ATTEMPTS = 60
 
@@ -34,7 +37,8 @@ module Karafka
      'enable.auto.commit': false
     }.freeze
 
-    private_constant :Topic, :CONFIG_DEFAULTS, :MAX_WAIT_TIMEOUT, :
+    private_constant :Topic, :CONFIG_DEFAULTS, :MAX_WAIT_TIMEOUT, :TPL_REQUEST_TIMEOUT,
+                     :MAX_ATTEMPTS
 
    class << self
       # Allows us to read messages from the topic
@@ -42,8 +46,9 @@ module Karafka
      # @param name [String, Symbol] topic name
      # @param partition [Integer] partition
      # @param count [Integer] how many messages we want to get at most
-      # @param start_offset [Integer] offset from which we should start. If -1 is provided
-      #   (default) we will start from the latest offset
+      # @param start_offset [Integer, Time] offset from which we should start. If -1 is provided
+      #   (default) we will start from the latest offset. If time is provided, the appropriate
+      #   offset will be resolved.
      # @param settings [Hash] kafka extra settings (optional)
      #
      # @return [Array<Karafka::Messages::Message>] array with messages
@@ -53,6 +58,9 @@ module Karafka
       low_offset, high_offset = nil
 
       with_consumer(settings) do |consumer|
+        # Convert the time offset (if needed)
+        start_offset = resolve_offset(consumer, name.to_s, partition, start_offset)
+
         low_offset, high_offset = consumer.query_watermark_offsets(name, partition)
 
         # Select offset dynamically if -1 or less
@@ -243,6 +251,29 @@ module Karafka
 
        ::Rdkafka::Config.new(config_hash)
      end
+
+      # Resolves the offset if offset is in a time format. Otherwise returns the offset without
+      # resolving.
+      # @param consumer [::Rdkafka::Consumer]
+      # @param name [String, Symbol] expected topic name
+      # @param partition [Integer]
+      # @param offset [Integer, Time]
+      # @return [Integer] expected offset
+      def resolve_offset(consumer, name, partition, offset)
+        if offset.is_a?(Time)
+          tpl = ::Rdkafka::Consumer::TopicPartitionList.new
+          tpl.add_topic_and_partitions_with_offsets(
+            name, partition => offset
+          )
+
+          real_offsets = consumer.offsets_for_times(tpl, TPL_REQUEST_TIMEOUT)
+          detected_offset = real_offsets.to_h.dig(name, partition)
+
+          detected_offset&.offset || raise(Errors::InvalidTimeBasedOffsetError)
+        else
+          offset
+        end
+      end
    end
  end
end
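The `resolve_offset` helper added above delegates the time lookup to librdkafka. Here is the same pattern in isolation, assuming an already-built `Rdkafka::Consumer` in a local `consumer` variable (the topic name is illustrative; the calls mirror the diff):

```ruby
tpl = Rdkafka::Consumer::TopicPartitionList.new
tpl.add_topic_and_partitions_with_offsets('events', 0 => Time.now - 60)

# Blocking broker round trip: for each partition, returns the earliest offset
# whose message timestamp is at or after the requested time
real_offsets = consumer.offsets_for_times(tpl, 2_000)
offset = real_offsets.to_h.dig('events', 0)&.offset

# A nil here means the lookup did not yield a usable entry, which is exactly
# the case where the admin code raises Errors::InvalidTimeBasedOffsetError
```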
data/lib/karafka/base_consumer.rb
CHANGED

@@ -70,6 +70,7 @@ module Karafka
    #
    # @return [Boolean] true if there was no exception, otherwise false.
    #
+    # @private
    # @note We keep the seek offset tracking, and use it to compensate for async offset flushing
    #   that may not yet kick in when error occurs. That way we pause always on the last processed
    #   message.
@@ -203,8 +204,15 @@ module Karafka
 
    # Seeks in the context of current topic and partition
    #
-    # @param offset [Integer] offset where we want to seek
-    def seek(offset)
+    # @param offset [Integer, Time] offset where we want to seek or time of the offset where we
+    #   want to seek.
+    # @param manual_seek [Boolean] Flag to differentiate between user seek and system/strategy
+    #   based seek. User seek operations should take precedence over system actions, hence we need
+    #   to know who invoked it.
+    # @note Please note, that if you are seeking to a time offset, getting the offset is blocking
+    def seek(offset, manual_seek = true)
+      coordinator.manual_seek if manual_seek
+
      client.seek(
        Karafka::Messages::Seek.new(
          topic.name,
@@ -221,7 +229,12 @@ module Karafka
    # even before we poll but it gets reset when polling happens, hence we also need to switch
    # the coordinator state after the revocation (but prior to running more jobs)
    def revoked?
-      coordinator.revoked?
+      return true if coordinator.revoked?
+      return false unless client.assignment_lost?
+
+      coordinator.revoke
+
+      true
    end
 
    # @return [Boolean] are we retrying processing after an error. This can be used to provide a
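Together, the `manual_seek` flag and the sharper `#revoked?` tighten the interplay between user actions and framework actions. A sketch of a consumer relying on the improved `#revoked?` (the `process` method is a hypothetical domain call):

```ruby
class OrdersConsumer < Karafka::BaseConsumer
  def consume
    messages.each do |message|
      # revoked? now consults both the coordinator state and librdkafka's
      # live assignment-lost state, so this check is much more accurate
      return if revoked?

      process(message)
      mark_as_consumed(message)
    end
  end
end
```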
data/lib/karafka/connection/client.rb
CHANGED

@@ -20,11 +20,14 @@
      # How many times should we retry polling in case of a failure
      MAX_POLL_RETRIES = 20
 
+      # Max time for a TPL request. We increase it to compensate for remote clusters latency
+      TPL_REQUEST_TIMEOUT = 2_000
+
      # We want to make sure we never close several clients in the same moment to prevent
      # potential race conditions and other issues
      SHUTDOWN_MUTEX = Mutex.new
 
-      private_constant :MAX_POLL_RETRIES, :SHUTDOWN_MUTEX
+      private_constant :MAX_POLL_RETRIES, :SHUTDOWN_MUTEX, :TPL_REQUEST_TIMEOUT
 
      # Creates a new consumer instance.
      #
@@ -35,12 +38,16 @@
        @id = SecureRandom.hex(6)
        # Name is set when we build consumer
        @name = ''
-        @mutex = Mutex.new
        @closed = false
        @subscription_group = subscription_group
        @buffer = RawMessagesBuffer.new
        @rebalance_manager = RebalanceManager.new
        @kafka = build_consumer
+        # There are few operations that can happen in parallel from the listener threads as well
+        # as from the workers. They are not fully thread-safe because they may be composed out of
+        # few calls to Kafka or out of few internal state changes. That is why we mutex them.
+        # It mostly revolves around pausing and resuming.
+        @mutex = Mutex.new
        # We need to keep track of what we have paused for resuming
        # In case we loose partition, we still need to resume it, otherwise it won't be fetched
        # again if we get reassigned to it later on. We need to keep them as after revocation we
@@ -101,16 +108,12 @@
      #
      # @param message [Karafka::Messages::Message]
      def store_offset(message)
-        @mutex.synchronize do
-          internal_store_offset(message)
-        end
+        internal_store_offset(message)
      end
 
      # @return [Boolean] true if our current assignment has been lost involuntarily.
      def assignment_lost?
-        @mutex.synchronize do
-          @kafka.assignment_lost?
-        end
+        @kafka.assignment_lost?
      end
 
      # Commits the offset on a current consumer in a non-blocking or blocking way.
@@ -122,12 +125,12 @@
      # @note This will commit all the offsets for the whole consumer. In order to achieve
      #   granular control over where the offset should be for particular topic partitions, the
      #   store_offset should be used to only store new offset when we want them to be flushed
+      #
+      # @note This method for async may return `true` despite involuntary partition revocation as
+      #   it does **not** resolve to `lost_assignment?`. It returns only the commit state operation
+      #   result.
      def commit_offsets(async: true)
-        @mutex.lock
-
        internal_commit_offsets(async: async)
-      ensure
-        @mutex.unlock
      end
 
      # Commits offset in a synchronous way.
@@ -140,13 +143,11 @@
      # Seek to a particular message. The next poll on the topic/partition will return the
      # message at the given offset.
      #
-      # @param message [Messages::Message, Messages::Seek] message to which we want to seek to
+      # @param message [Messages::Message, Messages::Seek] message to which we want to seek to.
+      #   It can have the time based offset.
+      # @note Please note, that if you are seeking to a time offset, getting the offset is blocking
      def seek(message)
-        @mutex.lock
-
-        @kafka.seek(message)
-      ensure
-        @mutex.unlock
+        @mutex.synchronize { internal_seek(message) }
      end
 
      # Pauses given partition and moves back to last successful offset processed.
@@ -157,37 +158,34 @@
      #   be reprocessed after getting back to processing)
      # @note This will pause indefinitely and requires manual `#resume`
      def pause(topic, partition, offset)
-        @mutex.lock
-
-        # Do not pause if the client got closed, would not change anything
-        return if @closed
-
-        pause_msg = Messages::Seek.new(topic, partition, offset)
+        @mutex.synchronize do
+          # Do not pause if the client got closed, would not change anything
+          return if @closed
 
-        internal_commit_offsets(async: true)
+          pause_msg = Messages::Seek.new(topic, partition, offset)
 
-        # Here we do not use our cached tpls because we should not try to pause something we do
-        # not own anymore.
-        tpl = topic_partition_list(topic, partition)
+          internal_commit_offsets(async: true)
 
-        return unless tpl
+          # Here we do not use our cached tpls because we should not try to pause something we do
+          # not own anymore.
+          tpl = topic_partition_list(topic, partition)
 
-        Karafka.monitor.instrument(
-          'client.pause',
-          caller: self,
-          subscription_group: @subscription_group,
-          topic: topic,
-          partition: partition,
-          offset: offset
-        )
+          return unless tpl
 
-        @paused_tpls[topic][partition] = tpl
+          Karafka.monitor.instrument(
+            'client.pause',
+            caller: self,
+            subscription_group: @subscription_group,
+            topic: topic,
+            partition: partition,
+            offset: offset
+          )
 
-        @kafka.pause(tpl)
+          @paused_tpls[topic][partition] = tpl
 
-        @kafka.seek(pause_msg)
-      ensure
-        @mutex.unlock
+          @kafka.pause(tpl)
+          internal_seek(pause_msg)
+        end
      end
 
      # Resumes processing of a give topic partition after it was paused.
@@ -195,33 +193,31 @@
      # @param topic [String] topic name
      # @param partition [Integer] partition
      def resume(topic, partition)
-        @mutex.lock
-
-        return if @closed
+        @mutex.synchronize do
+          return if @closed
 
-        # We now commit offsets on rebalances, thus we can do it async just to make sure
-        internal_commit_offsets(async: true)
+          # We now commit offsets on rebalances, thus we can do it async just to make sure
+          internal_commit_offsets(async: true)
 
-        # If we were not able, let's try to reuse the one we have (if we have)
-        tpl = topic_partition_list(topic, partition) || @paused_tpls[topic][partition]
+          # If we were not able, let's try to reuse the one we have (if we have)
+          tpl = topic_partition_list(topic, partition) || @paused_tpls[topic][partition]
 
-        return unless tpl
+          return unless tpl
 
-        # If we did not have it, it means we never paused this partition, thus no resume should
-        # happen in the first place
-        return unless @paused_tpls[topic].delete(partition)
+          # If we did not have it, it means we never paused this partition, thus no resume should
+          # happen in the first place
+          return unless @paused_tpls[topic].delete(partition)
 
-        Karafka.monitor.instrument(
-          'client.resume',
-          caller: self,
-          subscription_group: @subscription_group,
-          topic: topic,
-          partition: partition
-        )
+          Karafka.monitor.instrument(
+            'client.resume',
+            caller: self,
+            subscription_group: @subscription_group,
+            topic: topic,
+            partition: partition
+          )
 
-        @kafka.resume(tpl)
-      ensure
-        @mutex.unlock
+          @kafka.resume(tpl)
+        end
      end
 
      # Gracefully stops topic consumption.
@@ -238,9 +234,10 @@
      # @param [Karafka::Messages::Message] message that we want to mark as processed
      # @return [Boolean] true if successful. False if we no longer own given partition
      # @note This method won't trigger automatic offsets commits, rather relying on the offset
-      #   check-pointing trigger that happens with each batch processed
+      #   check-pointing trigger that happens with each batch processed. It will however check the
+      #   `librdkafka` assignment ownership to increase accuracy for involuntary revocations.
      def mark_as_consumed(message)
-        store_offset(message)
+        store_offset(message) && !assignment_lost?
      end
 
      # Marks a given message as consumed and commits the offsets in a blocking way.
@@ -257,11 +254,9 @@
      def reset
        close
 
-        @mutex.synchronize do
-          @closed = false
-          @paused_tpls.clear
-          @kafka = build_consumer
-        end
+        @closed = false
+        @paused_tpls.clear
+        @kafka = build_consumer
      end
 
      # Runs a single poll ignoring all the potential errors
@@ -318,28 +313,55 @@
        raise e
      end
 
+      # Non-mutexed seek that should be used only internally. Outside we expose `#seek` that is
+      # wrapped with a mutex.
+      #
+      # @param message [Messages::Message, Messages::Seek] message to which we want to seek to.
+      #   It can have the time based offset.
+      def internal_seek(message)
+        # If the seek message offset is in a time format, we need to find the closest "real"
+        # offset matching before we seek
+        if message.offset.is_a?(Time)
+          tpl = ::Rdkafka::Consumer::TopicPartitionList.new
+          tpl.add_topic_and_partitions_with_offsets(
+            message.topic,
+            message.partition => message.offset
+          )
+
+          # Now we can overwrite the seek message offset with our resolved offset and we can
+          # then seek to the appropriate message
+          # We set the timeout to 2_000 to make sure that remote clusters handle this well
+          real_offsets = @kafka.offsets_for_times(tpl, TPL_REQUEST_TIMEOUT)
+          detected_partition = real_offsets.to_h.dig(message.topic, message.partition)
+
+          # There always needs to be an offset. In case we seek into the future, where there
+          # are no offsets yet, we get -1 which indicates the most recent offset
+          # We should always detect offset, whether it is 0, -1 or a corresponding
+          message.offset = detected_partition&.offset || raise(Errors::InvalidTimeBasedOffsetError)
+        end
+
+        @kafka.seek(message)
+      end
+
      # Commits the stored offsets in a sync way and closes the consumer.
      def close
        # Allow only one client to be closed at the same time
        SHUTDOWN_MUTEX.synchronize do
-          # Once client is closed, we should not close it again
-          # This could only happen in case of a race-condition when forceful shutdown happens
-          # and triggers this from a different thread
-          return if @closed
-
-          @closed = true
-
-          @mutex.synchronize do
-            # Remove callbacks runners that were registered
-            ::Karafka::Core::Instrumentation.statistics_callbacks.delete(@subscription_group.id)
-            ::Karafka::Core::Instrumentation.error_callbacks.delete(@subscription_group.id)
-
-            @kafka.close
-            @buffer.clear
-
-            # @note We do not clear rebalance manager here as we may still have revocation info
-            # here that we want to consider valid prior to running another reconnection
-          end
+          # Once client is closed, we should not close it again
+          # This could only happen in case of a race-condition when forceful shutdown happens
+          # and triggers this from a different thread
+          return if @closed
+
+          @closed = true
+
+          # Remove callbacks runners that were registered
+          ::Karafka::Core::Instrumentation.statistics_callbacks.delete(@subscription_group.id)
+          ::Karafka::Core::Instrumentation.error_callbacks.delete(@subscription_group.id)
+
+          @kafka.close
+          @buffer.clear
+          # @note We do not clear rebalance manager here as we may still have revocation info
+          # here that we want to consider valid prior to running another reconnection
        end
      end
 
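One design note on the refactor above: the public `#seek` now takes `@mutex` and delegates to a lock-free `internal_seek`, which is what compound operations such as `#pause` call while already holding the lock. Ruby's `Mutex` is not reentrant, so this split is what prevents self-deadlock. A reduced sketch of the pattern (not the gem's full code; `commit_offsets_async` is a hypothetical stand-in):

```ruby
class MiniClient
  def initialize
    @mutex = Mutex.new
  end

  # Single public operation: takes the lock itself
  def seek(message)
    @mutex.synchronize { internal_seek(message) }
  end

  # Compound public operation: one lock spans the whole sequence, so a
  # concurrent resume cannot interleave between the steps
  def pause(message)
    @mutex.synchronize do
      commit_offsets_async
      internal_seek(message) # calling seek() here would deadlock
    end
  end

  private

  # Lock-free core, only to be called while @mutex is held
  def internal_seek(message)
    # the actual Kafka seek happens here
  end

  def commit_offsets_async; end
end
```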
data/lib/karafka/errors.rb
CHANGED

@@ -48,6 +48,9 @@ module Karafka
    StrategyNotFoundError = Class.new(BaseError)
 
    # This should never happen. Please open an issue if it does.
-    InvalidRealOffsetUsage = Class.new(BaseError)
+    InvalidRealOffsetUsageError = Class.new(BaseError)
+
+    # This should never happen. Please open an issue if it does.
+    InvalidTimeBasedOffsetError = Class.new(BaseError)
  end
end
data/lib/karafka/messages/seek.rb
CHANGED

@@ -4,6 +4,9 @@ module Karafka
  module Messages
    # "Fake" message that we use as an abstraction layer when seeking back.
    # This allows us to encapsulate a seek with a simple abstraction
+    #
+    # @note `#offset` can be either the offset value or the time of the offset
+    #   (first equal or greater)
    Seek = Struct.new(:topic, :partition, :offset)
  end
end
data/lib/karafka/pro/iterator/expander.rb
ADDED

@@ -0,0 +1,95 @@
+# frozen_string_literal: true
+
+# This Karafka component is a Pro component under a commercial license.
+# This Karafka component is NOT licensed under LGPL.
+#
+# All of the commercial components are present in the lib/karafka/pro directory of this
+# repository and their usage requires commercial license agreement.
+#
+# Karafka has also commercial-friendly license, commercial support and commercial components.
+#
+# By sending a pull request to the pro components, you are agreeing to transfer the copyright of
+# your code to Maciej Mensfeld.
+
+module Karafka
+  module Pro
+    class Iterator
+      # There are various ways you can provide topics information for iterating.
+      #
+      # This mapper normalizes this data, resolves offsets and maps the time based offsets into
+      # appropriate once
+      #
+      # Following formats are accepted:
+      #
+      # - 'topic1' - just a string with one topic name
+      # - ['topic1', 'topic2'] - just the names
+      # - { 'topic1' => -100 } - names with negative lookup offset
+      # - { 'topic1' => { 0 => 5 } } - names with exact partitions offsets
+      # - { 'topic1' => { 0 => -5 }, 'topic2' => { 1 => 5 } } - with per partition negative offsets
+      # - { 'topic1' => 100 } - means we run all partitions from the offset 100
+      # - { 'topic1' => Time.now - 60 } - we run all partitions from the message from 60s ago
+      # - { 'topic1' => { 1 => Time.now - 60 } } - partition1 from message 60s ago
+      #
+      class Expander
+        # Expands topics to which we want to subscribe with partitions information in case this
+        # info is not provided.
+        #
+        # @param topics [Array, Hash, String] topics definitions
+        # @return [Hash] expanded and normalized requested topics and partitions data
+        def call(topics)
+          expanded = Hash.new { |h, k| h[k] = {} }
+
+          normalize_format(topics).map do |topic, details|
+            if details.is_a?(Hash)
+              details.each do |partition, offset|
+                expanded[topic][partition] = offset
+              end
+            else
+              partition_count(topic).times do |partition|
+                # If no offsets are provided, we just start from zero
+                expanded[topic][partition] = details || 0
+              end
+            end
+          end
+
+          expanded
+        end
+
+        private
+
+        # Input can be provided in multiple formats. Here we normalize it to one (hash).
+        #
+        # @param topics [Array, Hash, String] requested topics
+        # @return [Hash] normalized hash with topics data
+        def normalize_format(topics)
+          # Simplification for the single topic case
+          topics = [topics] if topics.is_a?(String)
+
+          # If we've got just array with topics, we need to convert that into a representation
+          # that we can expand with offsets
+          topics = topics.map { |name| [name, false] }.to_h if topics.is_a?(Array)
+          # We remap by creating new hash, just in case the hash came as the argument for this
+          # expanded. We do not want to modify user provided hash
+          topics.transform_keys(&:to_s)
+        end
+
+        # List of topics with their partition information for expansion
+        # We cache it so we do not have to run consecutive requests to obtain data about multiple
+        # topics
+        def topics
+          @topics ||= Admin.cluster_info.topics
+        end
+
+        # @param name [String] topic name
+        # @return [Integer] number of partitions of the topic we want to iterate over
+        def partition_count(name)
+          topics
+            .find { |topic| topic.fetch(:topic_name) == name }
+            .tap { |topic| topic || raise(Errors::TopicNotFoundError, name) }
+            .fetch(:partitions)
+            .count
+        end
+      end
+    end
+  end
+end