karafka 2.0.22 → 2.0.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/.ruby-version +1 -1
- data/CHANGELOG.md +7 -0
- data/Gemfile.lock +7 -8
- data/karafka.gemspec +2 -3
- data/lib/karafka/admin.rb +84 -4
- data/lib/karafka/base_consumer.rb +7 -1
- data/lib/karafka/connection/client.rb +4 -4
- data/lib/karafka/pro/loader.rb +1 -1
- data/lib/karafka/pro/processing/strategies/aj_dlq_mom.rb +2 -2
- data/lib/karafka/pro/processing/strategies/aj_lrj_mom_vp.rb +2 -2
- data/lib/karafka/pro/processing/strategies/aj_mom_vp.rb +1 -1
- data/lib/karafka/pro/processing/strategies/default.rb +5 -1
- data/lib/karafka/pro/processing/strategies/dlq.rb +4 -2
- data/lib/karafka/pro/processing/strategies/dlq_lrj.rb +3 -1
- data/lib/karafka/pro/processing/strategies/dlq_lrj_mom.rb +1 -1
- data/lib/karafka/pro/processing/strategies/dlq_mom.rb +2 -2
- data/lib/karafka/pro/processing/strategies/lrj.rb +4 -2
- data/lib/karafka/pro/processing/strategies/lrj_mom.rb +2 -2
- data/lib/karafka/pro/processing/strategies/mom.rb +1 -1
- data/lib/karafka/processing/coordinator.rb +15 -0
- data/lib/karafka/processing/strategies/aj_dlq_mom.rb +2 -2
- data/lib/karafka/processing/strategies/default.rb +7 -1
- data/lib/karafka/processing/strategies/dlq.rb +4 -2
- data/lib/karafka/processing/strategies/dlq_mom.rb +2 -2
- data/lib/karafka/processing/strategies/mom.rb +1 -1
- data/lib/karafka/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +7 -22
- metadata.gz.sig +0 -0
- data/lib/karafka/instrumentation.rb +0 -21
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 84d8130c528081b283889f9f1ebe89b8829b800a2c5d1f4ca99f6d6ce9b4c9df
+  data.tar.gz: 3cc30f65586226bcb6d8ed4fdac912a72c1a4a7eb5691862b378629d9de1347f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8e1f3fc0a3c73035fdb38093eb0594c999d0a715752f41ed95079b1d8f2e89a5a0fa0abdf054272891e89ffa4dd8d010dd2225540d4480067d8bc0359bf2b7b8
+  data.tar.gz: 7c1f3f958ef52682e46efb11e8a3ddcbe641ba85c62956fefb84ec6eacadbe9d28ee509bdfd61fcd73ea8f52b1ab867de4909990503024cc5c603788931264ca
checksums.yaml.gz.sig
CHANGED
Binary file
data/.ruby-version
CHANGED
@@ -1 +1 @@
-3.1.
+3.1.3
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,12 @@
 # Karafka framework changelog

+## 2.0.23 (2022-12-07)
+- [Maintenance] Align with `waterdrop` and `karafka-core`
+- [Improvement] Provide `Admin#read_topic` API to get topic data without subscribing.
+- [Improvement] Upon an end user `#pause`, do not commit the offset in automatic offset management mode. This will prevent from a scenario where pause is needed but during it a rebalance occurs and a different assigned process starts not from the pause location but from the automatic offset that may be different. This still allows for using the `#mark_as_consumed`.
+- [Fix] Fix a scenario where manual `#pause` would be overwritten by a resume initiated by the strategy.
+- [Fix] Fix a scenario where manual `#pause` in LRJ would cause infinite pause.
+
 ## 2.0.22 (2022-12-02)
 - [Improvement] Load Pro components upon Karafka require so they can be altered prior to setup.
 - [Improvement] Do not run LRJ jobs that were added to the jobs queue but were revoked meanwhile.
data/Gemfile.lock
CHANGED
@@ -1,11 +1,10 @@
 PATH
   remote: .
   specs:
-    karafka (2.0.
-      karafka-core (>= 2.0.
-      rdkafka (>= 0.12)
+    karafka (2.0.23)
+      karafka-core (>= 2.0.6, < 3.0.0)
       thor (>= 0.20)
-      waterdrop (>= 2.4.
+      waterdrop (>= 2.4.3, < 3.0.0)
       zeitwerk (~> 2.3)

 GEM
@@ -30,8 +29,9 @@ GEM
       activesupport (>= 5.0)
     i18n (1.12.0)
       concurrent-ruby (~> 1.0)
-    karafka-core (2.0.
+    karafka-core (2.0.6)
       concurrent-ruby (>= 1.1)
+      rdkafka (>= 0.12)
     mini_portile2 (2.8.0)
     minitest (5.16.3)
     rake (13.0.6)
@@ -61,9 +61,8 @@ GEM
     thor (1.2.1)
     tzinfo (2.0.5)
       concurrent-ruby (~> 1.0)
-    waterdrop (2.4.
-      karafka-core (>= 2.0.
-      rdkafka (>= 0.10)
+    waterdrop (2.4.3)
+      karafka-core (>= 2.0.6, < 3.0.0)
       zeitwerk (~> 2.3)
     zeitwerk (2.6.6)

data/karafka.gemspec
CHANGED
@@ -21,10 +21,9 @@ Gem::Specification.new do |spec|
     without having to focus on things that are not your business domain.
   DESC

-  spec.add_dependency 'karafka-core', '>= 2.0.
-  spec.add_dependency 'rdkafka', '>= 0.12'
+  spec.add_dependency 'karafka-core', '>= 2.0.6', '< 3.0.0'
   spec.add_dependency 'thor', '>= 0.20'
-  spec.add_dependency 'waterdrop', '>= 2.4.
+  spec.add_dependency 'waterdrop', '>= 2.4.3', '< 3.0.0'
   spec.add_dependency 'zeitwerk', '~> 2.3'

   spec.required_ruby_version = '>= 2.7.0'
data/lib/karafka/admin.rb
CHANGED
@@ -9,7 +9,70 @@ module Karafka
   # @note It always uses the primary defined cluster and does not support multi-cluster work.
   #   If you need this, just replace the cluster info for the time you use this
   module Admin
+    # A fake admin topic representation that we use for messages fetched using this API
+    # We cannot use the topics directly because we may want to request data from topics that we
+    # do not have in the routing
+    Topic = Struct.new(:name, :deserializer)
+
+    # Defaults for config
+    CONFIG_DEFAULTS = {
+      'group.id': 'karafka_admin',
+      # We want to know when there is no more data not to end up with an endless loop
+      'enable.partition.eof': true,
+      'statistics.interval.ms': 0
+    }.freeze
+
+    private_constant :Topic, :CONFIG_DEFAULTS
+
     class << self
+      # Allows us to read messages from the topic
+      #
+      # @param name [String, Symbol] topic name
+      # @param partition [Integer] partition
+      # @param count [Integer] how many messages we want to get at most
+      # @param offset [Integer] offset from which we should start. If -1 is provided (default) we
+      #   will start from the latest offset
+      #
+      # @return [Array<Karafka::Messages::Message>] array with messages
+      def read_topic(name, partition, count, offset = -1)
+        messages = []
+        tpl = Rdkafka::Consumer::TopicPartitionList.new
+
+        with_consumer do |consumer|
+          if offset.negative?
+            offsets = consumer.query_watermark_offsets(name, partition)
+            offset = offsets.last - count
+          end
+
+          offset = offset.negative? ? 0 : offset
+
+          tpl.add_topic_and_partitions_with_offsets(name, partition => offset)
+          consumer.assign(tpl)
+
+          # We should poll as long as we don't have all the messages that we need or as long as
+          # we do not read all the messages from the topic
+          loop do
+            break if messages.size >= count
+
+            message = consumer.poll(200)
+            messages << message if message
+          rescue Rdkafka::RdkafkaError => e
+            # End of partition
+            break if e.code == :partition_eof
+
+            raise e
+          end
+        end
+
+        messages.map do |message|
+          Messages::Builders::Message.call(
+            message,
+            Topic.new(name, Karafka::App.config.deserializer),
+            Time.now
+          )
+        end
+      end
+
       # Creates Kafka topic with given settings
       #
       # @param name [String] topic name
@@ -52,15 +115,32 @@ module Karafka

       # Creates admin instance and yields it. After usage it closes the admin instance
       def with_admin
-
-        config = Karafka::Setup::AttributesMap.producer(Karafka::App.config.kafka.dup)
-
-        admin = ::Rdkafka::Config.new(config).admin
+        admin = config(:producer).admin
         result = yield(admin)
         result
       ensure
         admin&.close
       end
+
+      # Creates consumer instance and yields it. After usage it closes the consumer instance
+      def with_consumer
+        consumer = config(:consumer).consumer
+        result = yield(consumer)
+        result
+      ensure
+        consumer&.close
+      end
+
+      # @param type [Symbol] type of config we want
+      # @return [::Rdkafka::Config] rdkafka config
+      def config(type)
+        config_hash = Karafka::Setup::AttributesMap.public_send(
+          type,
+          Karafka::App.config.kafka.dup.merge(CONFIG_DEFAULTS)
+        )

+        ::Rdkafka::Config.new(config_hash)
+      end
     end
   end
 end
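For orientation, a minimal usage sketch of the `Admin#read_topic` API introduced above. The topic name, partition and counts below are illustrative values, not part of the released code:

```ruby
# Fetch up to 10 of the most recent messages from partition 0 of a hypothetical 'events' topic
messages = Karafka::Admin.read_topic('events', 0, 10)

messages.each do |message|
  puts "#{message.offset}: #{message.raw_payload}"
end

# The optional fourth argument starts reading from an explicit offset instead of the latest one
earlier = Karafka::Admin.read_topic('events', 0, 10, 100)
```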
data/lib/karafka/base_consumer.rb
CHANGED
@@ -190,7 +190,10 @@ module Karafka
     # @param offset [Integer] offset from which we want to restart the processing
     # @param timeout [Integer, nil] how long in milliseconds do we want to pause or nil to use the
     #   default exponential pausing strategy defined for retries
-
+    # @param manual_pause [Boolean] Flag to differentiate between user pause and system/strategy
+    #   based pause. While they both pause in exactly the same way, the strategy application
+    #   may need to differentiate between them.
+    def pause(offset, timeout = nil, manual_pause = true)
       timeout ? coordinator.pause_tracker.pause(timeout) : coordinator.pause_tracker.pause

       client.pause(
@@ -198,6 +201,9 @@ module Karafka
         messages.metadata.partition,
         offset
       )
+
+      # Indicate, that user took a manual action of pausing
+      coordinator.manual_pause if manual_pause
     end

     # Resumes processing of the current topic partition
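A hedged sketch of what the new `manual_pause` flag means for application code. The consumer class, topic and the `throttle?`/`process` helpers below are hypothetical; only `pause` and `mark_as_consumed` come from the API shown above:

```ruby
class EventsConsumer < Karafka::BaseConsumer
  def consume
    messages.each do |message|
      if throttle? # hypothetical application-level predicate
        # A user-initiated pause; as of 2.0.23 this no longer commits the offset in automatic
        # offset management mode, so a rebalance during the pause resumes from the pause
        # location rather than from an auto-committed offset. 30_000 ms is an arbitrary timeout.
        pause(message.offset, 30_000)
        return
      end

      process(message) # hypothetical processing step

      # Explicitly marking messages as consumed remains available
      mark_as_consumed(message)
    end
  end
end
```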
data/lib/karafka/connection/client.rb
CHANGED
@@ -308,8 +308,8 @@ module Karafka
       @closed = true

       # Remove callbacks runners that were registered
-      ::Karafka::Instrumentation.statistics_callbacks.delete(@subscription_group.id)
-      ::Karafka::Instrumentation.error_callbacks.delete(@subscription_group.id)
+      ::Karafka::Core::Instrumentation.statistics_callbacks.delete(@subscription_group.id)
+      ::Karafka::Core::Instrumentation.error_callbacks.delete(@subscription_group.id)

       @kafka.close
       @buffer.clear
@@ -397,7 +397,7 @@ module Karafka
       @name = consumer.name

       # Register statistics runner for this particular type of callbacks
-      ::Karafka::Instrumentation.statistics_callbacks.add(
+      ::Karafka::Core::Instrumentation.statistics_callbacks.add(
         @subscription_group.id,
         Instrumentation::Callbacks::Statistics.new(
           @subscription_group.id,
@@ -408,7 +408,7 @@ module Karafka
       )

       # Register error tracking callback
-      ::Karafka::Instrumentation.error_callbacks.add(
+      ::Karafka::Core::Instrumentation.error_callbacks.add(
         @subscription_group.id,
         Instrumentation::Callbacks::Error.new(
           @subscription_group.id,
data/lib/karafka/pro/loader.rb
CHANGED
@@ -53,7 +53,7 @@ module Karafka
       private

       # Sets proper config options to use pro components
-      # @param config [
+      # @param config [::Karafka::Core::Configurable::Node] root config node
       def reconfigure(config)
         icfg = config.internal

@@ -42,7 +42,7 @@ module Karafka
             # Do NOT commit offsets, they are comitted after each job in the AJ consumer.
             coordinator.pause_tracker.reset
           elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           else
             coordinator.pause_tracker.reset
             skippable_message = find_skippable_message
@@ -51,7 +51,7 @@ module Karafka
             # since AJ consumer commits the offset after each job, we also know that the
             # previous job was successful
             mark_as_consumed(skippable_message)
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           end
         end
       end
@@ -33,7 +33,7 @@ module Karafka
         # No actions needed for the standard flow here
         def handle_before_enqueue
           coordinator.on_enqueued do
-            pause(coordinator.seek_offset, Lrj::MAX_PAUSE_TIME)
+            pause(coordinator.seek_offset, Lrj::MAX_PAUSE_TIME, false)
           end
         end

@@ -51,7 +51,7 @@ module Karafka
             # If processing failed, we need to pause
             # For long running job this will overwrite the default never-ending pause and will
             # cause the processing to keep going after the error backoff
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           end
         end
       end
@@ -73,9 +73,13 @@ module Karafka
           if coordinator.success?
             coordinator.pause_tracker.reset

+            # Do not mark last message if pause happened. This prevents a scenario where pause
+            # is overridden upon rebalance by marking
+            return if coordinator.manual_pause?
+
             mark_as_consumed(last_group_message)
           else
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           end
         end
       end
@@ -32,9 +32,11 @@ module Karafka
           if coordinator.success?
             coordinator.pause_tracker.reset

+            return if coordinator.manual_pause?
+
             mark_as_consumed(messages.last)
           elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           # If we've reached number of retries that we could, we need to skip the first message
           # that was not marked as consumed, pause and continue, while also moving this message
           # to the dead topic
@@ -44,7 +46,7 @@ module Karafka
             skippable_message = find_skippable_message
             dispatch_to_dlq(skippable_message) if dispatch_to_dlq?
             mark_as_consumed(skippable_message)
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           end
         end
       end
@@ -34,12 +34,14 @@ module Karafka
           if coordinator.success?
             coordinator.pause_tracker.reset

+            return if coordinator.manual_pause?
+
             mark_as_consumed(last_group_message) unless revoked?
             seek(coordinator.seek_offset) unless revoked?

             resume
           elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           else
             coordinator.pause_tracker.reset

@@ -35,7 +35,7 @@ module Karafka
           if coordinator.success?
             coordinator.pause_tracker.reset
           elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           # If we've reached number of retries that we could, we need to skip the first message
           # that was not marked as consumed, pause and continue, while also moving this message
           # to the dead topic.
@@ -51,7 +51,7 @@ module Karafka
               dispatch_to_dlq(skippable_message)
             end

-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           end
         end
       end
@@ -37,7 +37,7 @@ module Karafka
           #
           # For VP it applies the same way and since VP cannot be used with MOM we should not
           # have any edge cases here.
-          pause(coordinator.seek_offset, MAX_PAUSE_TIME)
+          pause(coordinator.seek_offset, MAX_PAUSE_TIME, false)
         end
       end

@@ -47,6 +47,8 @@ module Karafka
           if coordinator.success?
             coordinator.pause_tracker.reset

+            return if coordinator.manual_pause?
+
             mark_as_consumed(last_group_message) unless revoked?
             seek(coordinator.seek_offset) unless revoked?

@@ -55,7 +57,7 @@ module Karafka
             # If processing failed, we need to pause
             # For long running job this will overwrite the default never-ending pause and will
             # cause the processing to keep going after the error backoff
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           end
         end
       end
@@ -36,7 +36,7 @@ module Karafka
           #
           # For VP it applies the same way and since VP cannot be used with MOM we should not
           # have any edge cases here.
-          pause(coordinator.seek_offset, Lrj::MAX_PAUSE_TIME)
+          pause(coordinator.seek_offset, Lrj::MAX_PAUSE_TIME, false)
         end
       end

@@ -50,7 +50,7 @@ module Karafka

             resume
           else
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, false)
           end
         end
       end
data/lib/karafka/processing/coordinator.rb
CHANGED
@@ -21,6 +21,7 @@ module Karafka
       @revoked = false
       @consumptions = {}
       @running_jobs = 0
+      @manual_pause = false
       @mutex = Mutex.new
     end

@@ -34,6 +35,9 @@ module Karafka
       # consumption results of consumer instances we no longer control
       @consumptions.clear

+      # When starting to run, no pause is expected and no manual pause as well
+      @manual_pause = false
+
       # We set it on the first encounter and never again, because then the offset setting
       # should be up to the consumers logic (our or the end user)
       # Seek offset needs to be always initialized as for case where manual offset management
@@ -98,6 +102,17 @@ module Karafka
     def revoked?
       @revoked
     end
+
+    # Store in the coordinator info, that this pause was done manually by the end user and not
+    # by the system itself
+    def manual_pause
+      @mutex.synchronize { @manual_pause = true }
+    end
+
+    # @return [Boolean] are we in a pause that was initiated by the user
+    def manual_pause?
+      @pause_tracker.paused? && @manual_pause
+    end
   end
 end
 end
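The coordinator change above is essentially a mutex-guarded boolean that lets strategies tell a user-initiated pause apart from one the framework applied itself. A simplified, standalone sketch of that pattern (this is not the actual `Karafka::Processing::Coordinator`, which additionally consults its `pause_tracker`):

```ruby
# Simplified illustration of the manual-pause flag pattern used above
class ManualPauseFlag
  def initialize
    @mutex = Mutex.new
    @manual_pause = false
  end

  # Set when the end user pauses explicitly (pause called with manual_pause = true)
  def manual_pause
    @mutex.synchronize { @manual_pause = true }
  end

  # Cleared whenever a new batch starts running
  def reset
    @mutex.synchronize { @manual_pause = false }
  end

  # Strategies skip the automatic mark_as_consumed when this returns true
  def manual_pause?
    @manual_pause
  end
end
```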
@@ -26,7 +26,7 @@ module Karafka
             # Do NOT commit offsets, they are comitted after each job in the AJ consumer.
             coordinator.pause_tracker.reset
           elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           else
             coordinator.pause_tracker.reset
             skippable_message = find_skippable_message
@@ -35,7 +35,7 @@ module Karafka
             # since AJ consumer commits the offset after each job, we also know that the
             # previous job was successful
             mark_as_consumed(skippable_message)
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           end
         end
       end
@@ -51,9 +51,15 @@ module Karafka
           if coordinator.success?
             coordinator.pause_tracker.reset

+            # We should not move the offset automatically when the partition was paused
+            # If we would not do this upon a revocation during the pause time, a different process
+            # would pick not from the place where we paused but from the offset that would be
+            # automatically committed here
+            return if coordinator.manual_pause?
+
             mark_as_consumed(messages.last)
           else
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           end
         end

@@ -22,9 +22,11 @@ module Karafka
           if coordinator.success?
             coordinator.pause_tracker.reset

+            return if coordinator.manual_pause?
+
             mark_as_consumed(messages.last)
           elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           # If we've reached number of retries that we could, we need to skip the first message
           # that was not marked as consumed, pause and continue, while also moving this message
           # to the dead topic
@@ -43,7 +45,7 @@ module Karafka
           return if revoked?

           # We pause to backoff once just in case.
-          pause(coordinator.seek_offset)
+          pause(coordinator.seek_offset, nil, false)
         end
       end

@@ -21,7 +21,7 @@ module Karafka
           if coordinator.success?
             coordinator.pause_tracker.reset
           elsif coordinator.pause_tracker.attempt <= topic.dead_letter_queue.max_retries
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           # If we've reached number of retries that we could, we need to skip the first message
           # that was not marked as consumed, pause and continue, while also moving this message
           # to the dead topic
@@ -33,7 +33,7 @@ module Karafka
             dispatch_to_dlq(skippable_message)

             # We pause to backoff once just in case.
-            pause(coordinator.seek_offset)
+            pause(coordinator.seek_offset, nil, false)
           end
         end
       end
data/lib/karafka/version.rb
CHANGED
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: karafka
 version: !ruby/object:Gem::Version
-  version: 2.0.
+  version: 2.0.23
 platform: ruby
 authors:
 - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
   Qf04B9ceLUaC4fPVEz10FyobjaFoY4i32xRto3XnrzeAgfEe4swLq8bQsR3w/EF3
   MGU0FeSV2Yj7Xc2x/7BzLK8xQn5l7Yy75iPF+KP3vVmDHnNl
   -----END CERTIFICATE-----
-date: 2022-12-
+date: 2022-12-07 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: karafka-core
@@ -43,7 +43,7 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 2.0.
+        version: 2.0.6
     - - "<"
       - !ruby/object:Gem::Version
         version: 3.0.0
@@ -53,24 +53,10 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 2.0.
+        version: 2.0.6
     - - "<"
       - !ruby/object:Gem::Version
         version: 3.0.0
-- !ruby/object:Gem::Dependency
-  name: rdkafka
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0.12'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0.12'
 - !ruby/object:Gem::Dependency
   name: thor
   requirement: !ruby/object:Gem::Requirement
@@ -91,7 +77,7 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 2.4.
+        version: 2.4.3
     - - "<"
       - !ruby/object:Gem::Version
         version: 3.0.0
@@ -101,7 +87,7 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 2.4.
+        version: 2.4.3
     - - "<"
       - !ruby/object:Gem::Version
         version: 3.0.0
@@ -200,7 +186,6 @@ files:
 - lib/karafka/helpers/async.rb
 - lib/karafka/helpers/colorize.rb
 - lib/karafka/helpers/multi_delegator.rb
-- lib/karafka/instrumentation.rb
 - lib/karafka/instrumentation/callbacks/error.rb
 - lib/karafka/instrumentation/callbacks/statistics.rb
 - lib/karafka/instrumentation/logger.rb
@@ -355,7 +340,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.3.
+rubygems_version: 3.3.26
 signing_key:
 specification_version: 4
 summary: Karafka is Ruby and Rails efficient Kafka processing framework.
metadata.gz.sig
CHANGED
Binary file
data/lib/karafka/instrumentation.rb
DELETED
@@ -1,21 +0,0 @@
-# frozen_string_literal: true
-
-module Karafka
-  # @note Since we can only have one statistics callbacks manager and one error callbacks manager
-  #   we use WaterDrops one that is already configured.
-  module Instrumentation
-    class << self
-      # Returns a manager for statistics callbacks
-      # @return [::WaterDrop::CallbacksManager]
-      def statistics_callbacks
-        ::WaterDrop::Instrumentation.statistics_callbacks
-      end
-
-      # Returns a manager for error callbacks
-      # @return [::WaterDrop::CallbacksManager]
-      def error_callbacks
-        ::WaterDrop::Instrumentation.error_callbacks
-      end
-    end
-  end
-end