karafka-rdkafka 0.13.0 → 0.13.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 91b097ee3e4b5df5b9deb4009297c84b9d59ffa1ec0aa0f0b0afa230418478aa
-  data.tar.gz: b0897c92e746ebe9194b735ebe0b813ae274c0a472c5381e9b097a4d059b2966
+  metadata.gz: 8d216b3535772b9039cb4f81113f75b1f1262052282c3bb159c78095cddccf3a
+  data.tar.gz: ce7e3a921cd0c4a0da1b1d00c4af672949445b0ed05e325c975a3b8533e8a134
 SHA512:
-  metadata.gz: e6fb584d20d1958e66133e45806d2d25ba885f413ccb683e1380c65611f068d65198aa459920871b2a15b0d84e434d1824ae5eb10cba04a6a118538b471cd5d8
-  data.tar.gz: ac0ff2519d7580018a7d6359f3e7bd7410f6c52aeb0e95a5b4cc749e6becc2863ddea229e11d8ef04f74865e608b4f7ee2e057730a5d837e4e6da5696817b3a8
+  metadata.gz: 5acb41af183c6a101c5b7bc88166573c22f2e84516d060b1afb78c6d958b44ffc1b13bcdb08fd8891f16c16ebd6acdabea6c51131d58c30c93e86fe7e3f714dd
+  data.tar.gz: a8daa33c897782dfb3875e9e8be078f40ed40a97c926c809f9cb349689d78d1fdeec8bfb70ca1e7a88dadade9f70dcd74ece2e647c30ed3fb855a4dfb3495aa7
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,3 +1,10 @@
+# 0.13.2
+* Ensure operations counter decrement is fully thread-safe
+* Bump librdkafka to 2.1.1
+
+# 0.13.1
+* Add offsets_for_times method on consumer (timflapper)
+
 # 0.13.0
 * Support cooperative sticky partition assignment in the rebalance callback (methodmissing)
 * Support both string and symbol header keys (ColinDKelley)
data/lib/rdkafka/bindings.rb CHANGED
@@ -193,6 +193,7 @@ module Rdkafka
   attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
   attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
   attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int, blocking: true
+  attach_function :rd_kafka_offsets_for_times, [:pointer, :pointer, :int], :int, blocking: true
 
   # Headers
   attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
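The added binding maps librdkafka's C function `rd_kafka_offsets_for_times(rk, offsets, timeout_ms)` into Ruby via FFI; `blocking: true` tells FFI to release the GVL during the native call so other Ruby threads keep running. A minimal sketch of a direct call, where `client_ptr` and `tpl_ptr` stand in for hypothetical valid native pointers:

    err = Rdkafka::Bindings.rd_kafka_offsets_for_times(client_ptr, tpl_ptr, 1_000)
    raise Rdkafka::RdkafkaError.new(err) unless err.zero? # non-zero is a librdkafka error code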
data/lib/rdkafka/consumer/topic_partition_list.rb CHANGED
@@ -142,11 +142,13 @@ module Rdkafka
       )
 
       if p.offset
+        offset = p.offset.is_a?(Time) ? p.offset.to_f * 1_000 : p.offset
+
         Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(
           tpl,
           topic,
           p.partition,
-          p.offset
+          offset
         )
       end
     end
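Kafka timestamps are milliseconds since the Unix epoch, so a `Time` offset is converted to milliseconds before being written to the native list; integer offsets pass through untouched. For example:

    t = Time.at(1505069646, 250_000) # seconds, microseconds
    t.to_f * 1_000                   # => 1505069646250.0 milliseconds since epoch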
data/lib/rdkafka/consumer.rb CHANGED
@@ -34,9 +34,11 @@ module Rdkafka
     def close
       return if closed?
       ObjectSpace.undefine_finalizer(self)
-      @native_kafka.with_inner do |inner|
+
+      @native_kafka.synchronize do |inner|
         Rdkafka::Bindings.rd_kafka_consumer_close(inner)
       end
+
       @native_kafka.close
     end
 
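Switching `#close` from `#with_inner` to `#synchronize` means closing now waits for any in-flight operation on the inner handle instead of racing it. A hedged sketch of the resulting behavior, assuming a subscribed consumer:

    poller = Thread.new { consumer.poll(1_000) } # long-running native call
    sleep(0.1)                                   # let the poll start first
    consumer.close                               # blocks until the poll finishes
    poller.join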
@@ -429,6 +431,39 @@ module Rdkafka
     end
   end
 
+  # Lookup offset for the given partitions by timestamp.
+  #
+  # @param list [TopicPartitionList] The TopicPartitionList with timestamps instead of offsets
+  #
+  # @raise [RdkafkaError] When the OffsetForTimes lookup fails
+  #
+  # @return [TopicPartitionList]
+  def offsets_for_times(list, timeout_ms = 1000)
+    closed_consumer_check(__method__)
+
+    if !list.is_a?(TopicPartitionList)
+      raise TypeError.new("list has to be a TopicPartitionList")
+    end
+
+    tpl = list.to_native_tpl
+
+    response = @native_kafka.with_inner do |inner|
+      Rdkafka::Bindings.rd_kafka_offsets_for_times(
+        inner,
+        tpl,
+        timeout_ms # timeout
+      )
+    end
+
+    if response != 0
+      raise Rdkafka::RdkafkaError.new(response)
+    end
+
+    TopicPartitionList.from_native_tpl(tpl)
+  ensure
+    Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
+  end
+
   # Manually commit the current offsets of this consumer.
   #
   # To use this set `enable.auto.commit` to `false` to disable automatic triggering
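For reference, a minimal usage sketch of the new method; the broker address and topic name are placeholders:

    config = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092", # assumed local broker
      "group.id" => "example"
    )
    consumer = config.consumer

    # Ask for the first offset at or after a point in time, per partition
    list = Rdkafka::Consumer::TopicPartitionList.new
    list.add_topic_and_partitions_with_offsets("events", 0 => Time.now - 3600)

    result = consumer.offsets_for_times(list, 2_000)
    result.to_h["events"].each do |partition|
      puts "partition #{partition.partition} => offset #{partition.offset}"
    end
    consumer.close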
data/lib/rdkafka/error.rb CHANGED
@@ -92,4 +92,10 @@ module Rdkafka
       super("Illegal call to #{method.to_s} on a closed admin")
     end
   end
+
+  class ClosedInnerError < BaseError
+    def initialize
+      super("Illegal call to a closed inner librdkafka instance")
+    end
+  end
 end
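Previously `#with_inner` silently returned `nil` when the inner instance was already gone; the new error class makes that race explicit so callers can rescue it. An illustrative (timing-dependent) handling sketch:

    begin
      consumer.poll(100)
    rescue Rdkafka::ClosedInnerError
      # another thread closed the underlying librdkafka instance mid-operation
    end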
data/lib/rdkafka/native_kafka.rb CHANGED
@@ -10,6 +10,22 @@ module Rdkafka
     @access_mutex = Mutex.new
     # Lock around internal polling
     @poll_mutex = Mutex.new
+    # Lock around decrementing the operations in progress counter.
+    # We have two mutexes - one for increment (`@access_mutex`) and one for decrement
+    # (`@decrement_mutex`) - because they serve different purposes:
+    #
+    # - `@access_mutex` allows us to lock the execution and make sure that any operation within
+    #   the `#synchronize` is the only one running and that there are no other running
+    #   operations.
+    # - `@decrement_mutex` ensures that our decrement operation is thread-safe for any Ruby
+    #   implementation.
+    #
+    # We do not use the same mutex, because it could create a deadlock when an already
+    # incremented operation cannot decrement because `@access_mutex` is now owned by a different
+    # thread in a synchronized mode and the synchronized mode is waiting on the decrement.
+    @decrement_mutex = Mutex.new
+    # Counter for operations in progress using inner
+    @operations_in_progress = 0
 
     if run_polling_thread
       # Start thread to poll client for delivery callbacks,
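The deadlock the comment describes is easy to reproduce with a plain `Mutex` and counter. A standalone sketch (not gem code) of what would happen if the decrement also had to go through `@access_mutex`:

    access  = Mutex.new
    counter = 0

    worker = Thread.new do
      access.synchronize { counter += 1 } # increment, as in #with_inner
      sleep(0.1)                          # stands in for a blocking librdkafka call
      access.synchronize { counter -= 1 } # single-mutex decrement needs the lock back
    end

    sleep(0.05)
    closer = Thread.new do
      access.synchronize do               # as in #synchronize below
        sleep(0.01) until counter.zero?   # waits on a decrement that can never run
      end
    end

    puts closer.join(1) ? "finished" : "deadlocked" # => deadlocked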
@@ -35,9 +51,27 @@ module Rdkafka
     end
 
     def with_inner
-      return if @inner.nil?
+      if @access_mutex.owned?
+        @operations_in_progress += 1
+      else
+        @access_mutex.synchronize { @operations_in_progress += 1 }
+      end
+
+      @inner.nil? ? raise(ClosedInnerError) : yield(@inner)
+    ensure
+      @decrement_mutex.synchronize { @operations_in_progress -= 1 }
+    end
 
-      yield @inner
+    def synchronize(&block)
+      @access_mutex.synchronize do
+        # Wait for any commands using the inner to finish.
+        # This can take a while on blocking operations like polling but is essential not to
+        # proceed with certain types of operations like resource destruction, as it can cause
+        # the process to hang or crash.
+        sleep(0.01) until @operations_in_progress.zero?
+
+        with_inner(&block)
+      end
     end
 
     def finalizer
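Note the `@access_mutex.owned?` check: Ruby's `Mutex` is not reentrant, and `#synchronize` calls `#with_inner` while already holding the lock, so the increment must skip re-locking in that case. In plain Ruby:

    mutex = Mutex.new
    mutex.synchronize do
      mutex.owned?            # => true for the holding thread
      # mutex.synchronize { } # would raise ThreadError (recursive locking)
    end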
@@ -51,31 +85,31 @@
     def close(object_id=nil)
       return if closed?
 
-      @access_mutex.lock
+      synchronize do
+        # Indicate to the outside world that we are closing
+        @closing = true
 
-      # Indicate to the outside world that we are closing
-      @closing = true
+        if @polling_thread
+          # Indicate to polling thread that we're closing
+          @polling_thread[:closing] = true
 
-      if @polling_thread
-        # Indicate to polling thread that we're closing
-        @polling_thread[:closing] = true
-
-        # Wait for the polling thread to finish up,
-        # this can be aborted in practice if this
-        # code runs from a finalizer.
-        @polling_thread.join
-      end
+          # Wait for the polling thread to finish up,
+          # this can be aborted in practice if this
+          # code runs from a finalizer.
+          @polling_thread.join
+        end
 
-      # Destroy the client after locking both mutexes
-      @poll_mutex.lock
+        # Destroy the client after locking both mutexes
+        @poll_mutex.lock
 
-      # This check prevents a race condition, where we would enter the close in two threads
-      # and after unlocking the primary one that hold the lock but finished, ours would be unlocked
-      # and would continue to run, trying to destroy inner twice
-      return unless @inner
+        # This check prevents a race condition where we would enter the close in two threads;
+        # after the primary one that held the lock finished, ours would be unblocked and would
+        # continue to run, trying to destroy inner twice
+        return unless @inner
 
-      Rdkafka::Bindings.rd_kafka_destroy(@inner)
-      @inner = nil
+        Rdkafka::Bindings.rd_kafka_destroy(@inner)
+        @inner = nil
+      end
     end
   end
 end
data/lib/rdkafka/version.rb CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 module Rdkafka
-  VERSION = "0.13.0"
-  LIBRDKAFKA_VERSION = "2.0.2"
-  LIBRDKAFKA_SOURCE_SHA256 = "f321bcb1e015a34114c83cf1aa7b99ee260236aab096b85c003170c90a47ca9d"
+  VERSION = "0.13.2"
+  LIBRDKAFKA_VERSION = "2.1.1"
+  LIBRDKAFKA_SOURCE_SHA256 = "7be1fc37ab10ebdc037d5c5a9b35b48931edafffae054b488faaff99e60e0108"
 end
data/spec/rdkafka/consumer/topic_partition_list_spec.rb CHANGED
@@ -221,5 +221,24 @@ describe Rdkafka::Consumer::TopicPartitionList do
 
     expect(list).to eq other
   end
+
+  it "should create a native list with timestamp offsets if offsets are Time" do
+    list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+      list.add_topic_and_partitions_with_offsets("topic", 0 => Time.at(1505069646, 250_000))
+    end
+
+    tpl = list.to_native_tpl
+
+    compare_list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+      list.add_topic_and_partitions_with_offsets(
+        "topic",
+        0 => (Time.at(1505069646, 250_000).to_f * 1000).floor
+      )
+    end
+
+    native_list = Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+
+    expect(native_list).to eq compare_list
+  end
   end
 end
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -312,6 +312,29 @@ describe Rdkafka::Consumer do
         consumer.poll(100)
       }.to raise_error(Rdkafka::ClosedConsumerError, /poll/)
     end
+
+    context 'when there are ongoing operations in other threads' do
+      it 'should wait and not crash' do
+        times = []
+
+        # Run a long running poll
+        thread = Thread.new do
+          times << Time.now
+          consumer.subscribe("empty_test_topic")
+          times << Time.now
+          consumer.poll(1_000)
+          times << Time.now
+        end
+
+        # Make sure it starts before we close
+        sleep(0.1)
+        consumer.close
+        close_time = Time.now
+        thread.join
+
+        times.each { |op_time| expect(op_time).to be < close_time }
+      end
+    end
   end
 
   describe "#commit, #committed and #store_offset" do
@@ -953,6 +976,69 @@ describe Rdkafka::Consumer do
       end
     end
 
+  describe "#offsets_for_times" do
+    it "should raise when not TopicPartitionList" do
+      expect { consumer.offsets_for_times([]) }.to raise_error(TypeError)
+    end
+
+    it "should raise an error when offsets_for_times fails" do
+      tpl = Rdkafka::Consumer::TopicPartitionList.new
+
+      expect(Rdkafka::Bindings).to receive(:rd_kafka_offsets_for_times).and_return(7)
+
+      expect { consumer.offsets_for_times(tpl) }.to raise_error(Rdkafka::RdkafkaError)
+    end
+
+    context "when subscribed" do
+      let(:timeout) { 1000 }
+
+      before do
+        consumer.subscribe("consume_test_topic")
+
+        # 1. partitions are assigned
+        wait_for_assignment(consumer)
+        expect(consumer.assignment).not_to be_empty
+
+        # 2. eat unrelated messages
+        while(consumer.poll(timeout)) do; end
+      end
+
+      after { consumer.unsubscribe }
+
+      def send_one_message(val)
+        producer.produce(
+          topic: "consume_test_topic",
+          payload: "payload #{val}",
+          key: "key 0",
+          partition: 0
+        ).wait
+      end
+
+      it "returns a TopicPartitionList with updated offsets" do
+        send_one_message("a")
+        send_one_message("b")
+        send_one_message("c")
+
+        consumer.poll(timeout)
+        message = consumer.poll(timeout)
+        consumer.poll(timeout)
+
+        tpl = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+          list.add_topic_and_partitions_with_offsets(
+            "consume_test_topic",
+            [
+              [0, message.timestamp]
+            ]
+          )
+        end
+
+        tpl_response = consumer.offsets_for_times(tpl)
+
+        expect(tpl_response.to_h["consume_test_topic"][0].offset).to eq message.offset
+      end
+    end
+  end
+
   describe "a rebalance listener" do
     let(:consumer) do
       config = rdkafka_consumer_config
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: karafka-rdkafka
 version: !ruby/object:Gem::Version
-  version: 0.13.0
+  version: 0.13.2
 platform: ruby
 authors:
 - Thijs Cadier
@@ -35,7 +35,7 @@ cert_chain:
   Qf04B9ceLUaC4fPVEz10FyobjaFoY4i32xRto3XnrzeAgfEe4swLq8bQsR3w/EF3
   MGU0FeSV2Yj7Xc2x/7BzLK8xQn5l7Yy75iPF+KP3vVmDHnNl
   -----END CERTIFICATE-----
-date: 2023-06-19 00:00:00.000000000 Z
+date: 2023-06-28 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: ffi
@@ -258,7 +258,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.4.1
+rubygems_version: 3.3.4
 signing_key:
 specification_version: 4
 summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
metadata.gz.sig CHANGED
Binary file