karafka 2.1.5.beta1 → 2.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 967412d28c31483df6a0c151ec0578367da24e503184608789791774347ffc53
- data.tar.gz: '0828ba1db27f9f287b3ab9f80928816a81ae45ad79ce092e02cccd33b9bac204'
+ metadata.gz: 82a8b28b55f1db0808be3d1e48616f6b2389466332c9116e263e37cab992fc65
+ data.tar.gz: 2f29bb9bb1c3f949d206c5c8453b35ad163219babb48687e2270e13914e78aba
  SHA512:
- metadata.gz: 3774daedd74efe8e2498e97e503226cc01848e7a7be6ca255852bcaf112bc790ff98953584c771de8afa96142e8244e9f1b036b18490305766b428671b136bce
- data.tar.gz: b575cf2be3d4cdcb73fec9cdfd65cd705408d1efae39ed200ae2d815e7c9241f582d5ebb8b802347289de910ee50589f0551bb4c194a161905e38425d05bd296
+ metadata.gz: 93a66f4aeb49cea810bfd90cf424b3334d1dae992035e0bd9613bbd3c42f642f94fd0efd979d57df5083a46f66f522a7d3952c9e24340b8a4dc4c23aff165a0f
+ data.tar.gz: 4ee03b442b3029aecf0ffd636ddccb054e51f2a448c3dd642993464bfc32aa45595f26835db8a9b5b01940ab5b532e0bc22a9a3cdbcc9899320b55010473c749
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,11 +1,13 @@
  # Karafka framework changelog
 
- ## 2.1.5 (Unreleased)
+ ## 2.1.5 (2023-06-19)
  - [Improvement] Drastically improve `#revoked?` response quality by checking the real time assignment lost state on librdkafka.
  - [Improvement] Improve eviction of saturated jobs that would run on already revoked assignments.
  - [Improvement] Expose `#commit_offsets` and `#commit_offsets!` methods in the consumer to provide ability to commit offsets directly to Kafka without having to mark new messages as consumed.
  - [Improvement] No longer skip offset commit when no messages marked as consumed as `librdkafka` has fixed the crashes there.
  - [Improvement] Remove no longer needed patches.
+ - [Improvement] Ensure, that the coordinator revocation status is switched upon revocation detection when using `#revoked?`
+ - [Improvement] Add benchmarks for marking as consumed (sync and async).
  - [Change] Require `karafka-core` `>= 2.1.0`
  - [Change] Require `waterdrop` `>= 2.6.1`
 
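For the `#commit_offsets` / `#commit_offsets!` bullet above, a minimal sketch of the consumer-level usage; the consumer class, processing step and flush interval are illustrative assumptions, only the Karafka consumer API calls come from this release:

```ruby
# Hypothetical consumer that flushes already stored offsets to Kafka mid-batch
# via the consumer-level #commit_offsets exposed in this release, instead of
# waiting for the automatic end-of-batch check-pointing.
class MetricsConsumer < Karafka::BaseConsumer
  FLUSH_EVERY = 100 # illustrative flush interval

  def consume
    messages.each_with_index do |message, index|
      MetricsStore.ingest(message.payload) # hypothetical processing step
      mark_as_consumed(message)            # stores the offset locally

      # Asynchronously push the stored offsets to Kafka every N messages
      commit_offsets if (index + 1) % FLUSH_EVERY == 0
    end
  end
end
```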
data/Gemfile.lock CHANGED
@@ -1,10 +1,10 @@
  PATH
  remote: .
  specs:
- karafka (2.1.5.beta1)
- karafka-core (>= 2.1.0.beta1, < 2.2.0)
+ karafka (2.1.5)
+ karafka-core (>= 2.1.0, < 2.2.0)
  thor (>= 0.20)
- waterdrop (>= 2.6.1.beta1, < 3.0.0)
+ waterdrop (>= 2.6.1, < 3.0.0)
  zeitwerk (~> 2.3)
 
  GEM
@@ -30,10 +30,10 @@ GEM
  activesupport (>= 5.0)
  i18n (1.14.1)
  concurrent-ruby (~> 1.0)
- karafka-core (2.1.0.beta1)
+ karafka-core (2.1.0)
  concurrent-ruby (>= 1.1)
- karafka-rdkafka (>= 0.13.0.beta2, < 0.14.0)
- karafka-rdkafka (0.13.0.beta2)
+ karafka-rdkafka (>= 0.13.0, < 0.14.0)
+ karafka-rdkafka (0.13.0)
  ffi (~> 1.15)
  mini_portile2 (~> 2.6)
  rake (> 12)
@@ -72,8 +72,8 @@ GEM
  tilt (2.2.0)
  tzinfo (2.0.6)
  concurrent-ruby (~> 1.0)
- waterdrop (2.6.1.beta1)
- karafka-core (>= 2.1.0.beta1, < 3.0.0)
+ waterdrop (2.6.1)
+ karafka-core (>= 2.1.0, < 3.0.0)
  zeitwerk (~> 2.3)
  zeitwerk (2.6.8)
 
data/karafka.gemspec CHANGED
@@ -21,9 +21,9 @@ Gem::Specification.new do |spec|
  without having to focus on things that are not your business domain.
  DESC
 
- spec.add_dependency 'karafka-core', '>= 2.1.0.beta1', '< 2.2.0'
+ spec.add_dependency 'karafka-core', '>= 2.1.0', '< 2.2.0'
  spec.add_dependency 'thor', '>= 0.20'
- spec.add_dependency 'waterdrop', '>= 2.6.1.beta1', '< 3.0.0'
+ spec.add_dependency 'waterdrop', '>= 2.6.1', '< 3.0.0'
  spec.add_dependency 'zeitwerk', '~> 2.3'
 
  if $PROGRAM_NAME.end_with?('gem')
@@ -221,7 +221,12 @@ module Karafka
  # even before we poll but it gets reset when polling happens, hence we also need to switch
  # the coordinator state after the revocation (but prior to running more jobs)
  def revoked?
- client.assignment_lost? || coordinator.revoked?
+ return true if coordinator.revoked?
+ return false unless client.assignment_lost?
+
+ coordinator.revoke
+
+ true
  end
 
  # @return [Boolean] are we retrying processing after an error. This can be used to provide a
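A brief sketch of the pattern this `#revoked?` change targets: a long-running batch that stops as soon as librdkafka reports the assignment as lost. The consumer class and the processing call are hypothetical:

```ruby
# Hypothetical long-running consumer that bails out early once #revoked?
# returns true. With this change, #revoked? also consults librdkafka's
# real-time assignment-lost state and switches the coordinator accordingly.
class AuditLogsConsumer < Karafka::BaseConsumer
  def consume
    messages.each do |message|
      # Do not keep processing messages for a partition we no longer own
      return if revoked?

      AuditLog.persist(message.payload) # hypothetical processing step
      mark_as_consumed(message)
    end
  end
end
```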
@@ -122,6 +122,10 @@ module Karafka
  # @note This will commit all the offsets for the whole consumer. In order to achieve
  # granular control over where the offset should be for particular topic partitions, the
  # store_offset should be used to only store new offset when we want them to be flushed
+ #
+ # @note This method for async may return `true` despite involuntary partition revocation as
+ # it does **not** resolve to `lost_assignment?`. It returns only the commit state operation
+ # result.
  def commit_offsets(async: true)
  @mutex.lock
 
@@ -238,9 +242,10 @@ module Karafka
  # @param [Karafka::Messages::Message] message that we want to mark as processed
  # @return [Boolean] true if successful. False if we no longer own given partition
  # @note This method won't trigger automatic offsets commits, rather relying on the offset
- # check-pointing trigger that happens with each batch processed
+ # check-pointing trigger that happens with each batch processed. It will however check the
+ # `librdkafka` assignment ownership to increase accuracy for involuntary revocations.
  def mark_as_consumed(message)
- store_offset(message)
+ store_offset(message) && !assignment_lost?
  end
 
  # Marks a given message as consumed and commits the offsets in a blocking way.
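Since marking as consumed now also reflects `librdkafka` assignment ownership, its boolean result can short-circuit a batch. A hedged sketch at the consumer level (consumer and handler names are assumptions):

```ruby
# Hypothetical consumer reacting to the marking result. A false return now
# covers both a failed offset store and an involuntarily lost assignment.
class OrdersConsumer < Karafka::BaseConsumer
  def consume
    messages.each do |message|
      OrdersImporter.call(message.payload) # hypothetical processing step

      # mark_as_consumed only stores the offset (flushed by the regular
      # check-pointing flow); mark_as_consumed!(message) would commit it
      # in a blocking way instead.
      next if mark_as_consumed(message)

      # Either the offset could not be stored or the partition was lost,
      # so processing the rest of this batch would be wasted work.
      break
    end
  end
end
```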
@@ -27,12 +27,7 @@ module Karafka
  # Ignore earlier offsets than the one we already committed
  return true if coordinator.seek_offset > message.offset
  return false if revoked?
-
- unless client.mark_as_consumed(message)
- coordinator.revoke
-
- return false
- end
+ return revoked? unless client.mark_as_consumed(message)
 
  coordinator.seek_offset = message.offset + 1
 
@@ -49,11 +44,7 @@ module Karafka
  return true if coordinator.seek_offset > message.offset
  return false if revoked?
 
- unless client.mark_as_consumed!(message)
- coordinator.revoke
-
- return false
- end
+ return revoked? unless client.mark_as_consumed!(message)
 
  coordinator.seek_offset = message.offset + 1
 
@@ -62,11 +53,18 @@ module Karafka
 
  # Triggers an async offset commit
  #
+ # @param async [Boolean] should we use async (default) or sync commit
  # @return [Boolean] true if we still own the partition.
  # @note Due to its async nature, this may not fully represent the offset state in some
  # edge cases (like for example going beyond max.poll.interval)
- def commit_offsets
- client.commit_offsets(async: true)
+ def commit_offsets(async: true)
+ # Do not commit if we already lost the assignment
+ return false if revoked?
+ return true if client.commit_offsets(async: async)
+
+ # This will once more check the librdkafka revocation status and will revoke the
+ # coordinator in case it was not revoked
+ revoked?
  end
 
  # Triggers a synchronous offsets commit to Kafka
@@ -75,7 +73,7 @@ module Karafka
  # @note This is fully synchronous, hence the result of this can be used in DB transactions
  # etc as a way of making sure, that we still own the partition.
  def commit_offsets!
- client.commit_offsets(async: false)
+ commit_offsets(async: false)
  end
 
  # No actions needed for the standard flow here
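The note above suggests using the synchronous commit result inside DB transactions; a sketch of that pattern under the assumption of an ActiveRecord-backed application (the model, bulk insert and consumer class are not part of this diff):

```ruby
# Hypothetical flow that ties the database write and the offset commit
# together: when the blocking commit shows the partition is no longer owned,
# the database changes are rolled back so another process can redo the batch.
class PaymentsConsumer < Karafka::BaseConsumer
  def consume
    ActiveRecord::Base.transaction do
      Payment.insert_all(messages.payloads) # hypothetical bulk persistence

      messages.each { |message| mark_as_consumed(message) }

      # Fully synchronous commit; abort the transaction if it indicates that
      # we no longer own the partition.
      raise ActiveRecord::Rollback unless commit_offsets!
    end
  end
end
```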
@@ -3,5 +3,5 @@
  # Main module namespace
  module Karafka
  # Current Karafka version
- VERSION = '2.1.5.beta1'
+ VERSION = '2.1.5'
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: karafka
  version: !ruby/object:Gem::Version
- version: 2.1.5.beta1
+ version: 2.1.5
  platform: ruby
  authors:
  - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
  Qf04B9ceLUaC4fPVEz10FyobjaFoY4i32xRto3XnrzeAgfEe4swLq8bQsR3w/EF3
  MGU0FeSV2Yj7Xc2x/7BzLK8xQn5l7Yy75iPF+KP3vVmDHnNl
  -----END CERTIFICATE-----
- date: 2023-06-18 00:00:00.000000000 Z
+ date: 2023-06-19 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: karafka-core
@@ -43,7 +43,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 2.1.0.beta1
+ version: 2.1.0
  - - "<"
  - !ruby/object:Gem::Version
  version: 2.2.0
@@ -53,7 +53,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 2.1.0.beta1
+ version: 2.1.0
  - - "<"
  - !ruby/object:Gem::Version
  version: 2.2.0
@@ -77,7 +77,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 2.6.1.beta1
+ version: 2.6.1
  - - "<"
  - !ruby/object:Gem::Version
  version: 3.0.0
@@ -87,7 +87,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 2.6.1.beta1
+ version: 2.6.1
  - - "<"
  - !ruby/object:Gem::Version
  version: 3.0.0
@@ -414,9 +414,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
  version: '0'
  required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
- - - ">"
+ - - ">="
  - !ruby/object:Gem::Version
- version: 1.3.1
+ version: '0'
  requirements: []
  rubygems_version: 3.4.10
  signing_key:
metadata.gz.sig CHANGED
Binary file