rdkafka 0.13.0.beta.6 → 0.13.0.beta.8

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 1d9baa27220c65598729e786ff2c585cdb23db317b964fd96aee7067b5922dd1
-  data.tar.gz: 4b072016e30266c9ef8ed1ff5e88ff6caa16dd5ede48eec79e2ea22b14cbf222
+  metadata.gz: e774c8eb7ac27fd5d89e3b28e5786bfbbc1354e7a35da0d89e7e1ebc488c0b80
+  data.tar.gz: d21b5a39de5fd22e38e9d2342a984c552b317d525a67d4128a9c78a07e0b1737
 SHA512:
-  metadata.gz: a42852f8a2bb57d28d8a55df808ebcc2cd7ca08daf1acdd139c6e8662ac81ad31da417193e5e6a6f9ed0df9f842e3842b5ad01461f1338e12deb1ca01d56830f
-  data.tar.gz: 8dde39b477b76a7afae0f9f51ec3e1942b2d4d599828bbe7acdd777c17f8fec49f4cc359e3f850db30921d0119bbe32b04c1dbef49e16f0e46d3de313cd56744
+  metadata.gz: 0cb979f4cc0a693f183e817fad944f8f24f09190ccd5d6363f3df9696441ef8b76056e05286f0214443095cd8b11b2abf9e3b8223d73a43bc09583dbab9b90dc
+  data.tar.gz: dded899763cf50da00fda25dcb03974946ccc0199eaececa2f20bc043971e3bf8a14c85610258f207424f8b76c69eac1a025f1e4b30297a2eedd4937ca705dee
data/CHANGELOG.md CHANGED
@@ -5,7 +5,7 @@
 * Add topic name to delivery report (maeve)
 * Allow string partitioner config (mollyegibson)
 * Fix documented type for DeliveryReport#error (jimmydo)
-* Bump librdkafka to 1.9.2 (thijsc)
+* Bump librdkafka to 2.0.2 (lmaia)
 * Use finalizers to cleanly exit producer and admin (thijsc)
 * Lock access to the native kafka client (thijsc)
 * Fix potential race condition in multi-threaded producer (mensfeld)
data/lib/rdkafka/consumer.rb CHANGED
@@ -27,9 +27,11 @@ module Rdkafka
     def close
       return if closed?
       ObjectSpace.undefine_finalizer(self)
-      @native_kafka.with_inner do |inner|
+
+      @native_kafka.synchronize do |inner|
         Rdkafka::Bindings.rd_kafka_consumer_close(inner)
       end
+
       @native_kafka.close
     end
 
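Consumer#close now funnels the librdkafka shutdown through NativeKafka#synchronize instead of with_inner, so the handle is only closed once no other thread is mid-operation on it. A minimal sketch of what this means for callers, assuming a local broker and made-up topic and group names:

    require "rdkafka"

    consumer = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092", # assumed local broker
      "group.id"          => "example-group"   # hypothetical group id
    ).consumer

    poller = Thread.new do
      consumer.subscribe("example_topic") # hypothetical topic
      consumer.poll(1_000)                # long-running poll in another thread
    end

    sleep(0.1)     # give the poll a chance to start
    consumer.close # waits for the in-flight poll before destroying the client
    poller.join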
data/lib/rdkafka/error.rb CHANGED
@@ -92,4 +92,10 @@ module Rdkafka
       super("Illegal call to #{method.to_s} on a closed admin")
     end
   end
+
+  class ClosedInnerError < BaseError
+    def initialize
+      super("Illegal call to a closed inner librdkafka instance")
+    end
+  end
 end
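The new ClosedInnerError complements the existing ClosedConsumerError, ClosedProducerError and ClosedAdminError: those guard the public objects, while this one is raised from NativeKafka#with_inner when the inner librdkafka pointer has already been released. An illustrative sketch of rescuing it (the native_kafka variable is hypothetical; application code would normally hit the higher-level Closed*Error classes first):

    begin
      native_kafka.with_inner do |inner|
        # use the raw librdkafka handle here
      end
    rescue Rdkafka::ClosedInnerError => e
      puts e.message # "Illegal call to a closed inner librdkafka instance"
    end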
data/lib/rdkafka/native_kafka.rb CHANGED
@@ -10,6 +10,8 @@ module Rdkafka
       @access_mutex = Mutex.new
       # Lock around internal polling
       @poll_mutex = Mutex.new
+      # counter for operations in progress using inner
+      @operations_in_progress = 0
 
       if run_polling_thread
         # Start thread to poll client for delivery callbacks,
@@ -35,10 +37,26 @@ module Rdkafka
     end
 
     def with_inner
-      return if @inner.nil?
+      if @access_mutex.owned?
+        @operations_in_progress += 1
+      else
+        @access_mutex.synchronize { @operations_in_progress += 1 }
+      end
+
+      @inner.nil? ? raise(ClosedInnerError) : yield(@inner)
+    ensure
+      @operations_in_progress -= 1
+    end
 
+    def synchronize(&block)
       @access_mutex.synchronize do
-        yield @inner
+        # Wait for any commands using the inner to finish
+        # This can take a while on blocking operations like polling but is essential not to proceed
+        # with certain types of operations like resources destruction as it can cause the process
+        # to hang or crash
+        sleep(0.01) until @operations_in_progress.zero?
+
+        with_inner(&block)
       end
     end
 
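The comments above describe the core mechanism: with_inner only bumps an operations counter (taking the access mutex briefly, or skipping it when the caller already owns it, so synchronize can reuse with_inner without deadlocking on the non-reentrant mutex), while synchronize grabs the mutex and then spins until the counter drains before yielding. Destructive work such as rd_kafka_destroy therefore never overlaps a blocking call like poll. A stripped-down sketch of the same pattern outside rdkafka, with invented class and method names for illustration:

    # Guards a raw resource: many concurrent "use" calls, one exclusive "destroy".
    class GuardedHandle
      def initialize(raw)
        @raw = raw
        @mutex = Mutex.new
        @in_progress = 0
      end

      # Concurrent access path (mirrors NativeKafka#with_inner).
      def use
        @mutex.synchronize { @in_progress += 1 }
        raise "handle already destroyed" if @raw.nil?
        yield @raw
      ensure
        # Deliberately not under the mutex: destroy may be holding it while it
        # waits for this counter to drain.
        @in_progress -= 1
      end

      # Exclusive path (mirrors NativeKafka#synchronize followed by close).
      def destroy
        @mutex.synchronize do
          # New users are blocked on the mutex; wait for current ones to finish.
          sleep(0.01) until @in_progress.zero?
          @raw = nil
        end
      end
    end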
@@ -53,31 +71,31 @@ module Rdkafka
     def close(object_id=nil)
       return if closed?
 
-      @access_mutex.lock
+      synchronize do
+        # Indicate to the outside world that we are closing
+        @closing = true
 
-      # Indicate to the outside world that we are closing
-      @closing = true
+        if @polling_thread
+          # Indicate to polling thread that we're closing
+          @polling_thread[:closing] = true
 
-      if @polling_thread
-        # Indicate to polling thread that we're closing
-        @polling_thread[:closing] = true
-
-        # Wait for the polling thread to finish up,
-        # this can be aborted in practice if this
-        # code runs from a finalizer.
-        @polling_thread.join
-      end
+          # Wait for the polling thread to finish up,
+          # this can be aborted in practice if this
+          # code runs from a finalizer.
+          @polling_thread.join
+        end
 
-      # Destroy the client after locking both mutexes
-      @poll_mutex.lock
+        # Destroy the client after locking both mutexes
+        @poll_mutex.lock
 
-      # This check prevents a race condition, where we would enter the close in two threads
-      # and after unlocking the primary one that hold the lock but finished, ours would be unlocked
-      # and would continue to run, trying to destroy inner twice
-      return unless @inner
+        # This check prevents a race condition, where we would enter the close in two threads
+        # and after unlocking the primary one that hold the lock but finished, ours would be unlocked
+        # and would continue to run, trying to destroy inner twice
+        return unless @inner
 
-      Rdkafka::Bindings.rd_kafka_destroy(@inner)
-      @inner = nil
+        Rdkafka::Bindings.rd_kafka_destroy(@inner)
+        @inner = nil
+      end
     end
   end
 end
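Wrapping the whole teardown in synchronize keeps the existing double-close guard intact: two threads may still race into close, but only the first one past the "return unless @inner" check reaches rd_kafka_destroy. Illustrative only, assuming a local broker; any rdkafka handle backed by NativeKafka behaves the same way:

    producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

    # Closing concurrently (or repeatedly) is safe: later callers either return
    # early from closed? or bail out before destroying the inner handle again.
    threads = 2.times.map { Thread.new { producer.close } }
    threads.each(&:join)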
data/lib/rdkafka/version.rb CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 module Rdkafka
-  VERSION = "0.13.0.beta.6"
-  LIBRDKAFKA_VERSION = "1.9.2"
-  LIBRDKAFKA_SOURCE_SHA256 = "3fba157a9f80a0889c982acdd44608be8a46142270a389008b22d921be1198ad"
+  VERSION = "0.13.0.beta.8"
+  LIBRDKAFKA_VERSION = "2.0.2"
+  LIBRDKAFKA_SOURCE_SHA256 = "f321bcb1e015a34114c83cf1aa7b99ee260236aab096b85c003170c90a47ca9d"
 end
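The bumped constants are also readable at runtime, which is a quick way to confirm a deployment actually picked up librdkafka 2.0.2:

    require "rdkafka"

    puts Rdkafka::VERSION            # "0.13.0.beta.8"
    puts Rdkafka::LIBRDKAFKA_VERSION # "2.0.2"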
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -286,6 +286,29 @@ describe Rdkafka::Consumer do
         consumer.poll(100)
       }.to raise_error(Rdkafka::ClosedConsumerError, /poll/)
     end
+
+    context 'when there are outgoing operations in other threads' do
+      it 'should wait and not crash' do
+        times = []
+
+        # Run a long running poll
+        thread = Thread.new do
+          times << Time.now
+          consumer.subscribe("empty_test_topic")
+          times << Time.now
+          consumer.poll(1_000)
+          times << Time.now
+        end
+
+        # Make sure it starts before we close
+        sleep(0.1)
+        consumer.close
+        close_time = Time.now
+        thread.join
+
+        times.each { |op_time| expect(op_time).to be < close_time }
+      end
+    end
   end
 
   describe "#commit, #committed and #store_offset" do
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: rdkafka
 version: !ruby/object:Gem::Version
-  version: 0.13.0.beta.6
+  version: 0.13.0.beta.8
 platform: ruby
 authors:
 - Thijs Cadier
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-04-28 00:00:00.000000000 Z
+date: 2023-06-21 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ffi
@@ -220,7 +220,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: 1.3.1
 requirements: []
-rubygems_version: 3.3.13
+rubygems_version: 3.4.1
 signing_key:
 specification_version: 4
 summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.