karafka-rdkafka 0.13.0.beta2 → 0.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 56a5ffa857d0fc9ea161e2d7df1977483cd3a6fda5ccf6a514325d74300f1e29
4
- data.tar.gz: 608c801509e16e6f95a2d283e7360be9c873881c9d0bd5c6f8bba726bfa0178c
3
+ metadata.gz: 826f1e6186cf6d72d7d1460dc33fd1f35c884ee4e1acf8d13c720f3d5324db87
4
+ data.tar.gz: 5f975ec4eac686b2f0bb21eebd0ba83a0f7162fedef65a7f7357e6172a5f6498
5
5
  SHA512:
6
- metadata.gz: 55c51d05917ac15d112df8fecad5509f395e3ac673d4f7056ae4070b8f75be6c469ffd83d26efd335ec1a642d64538b010b99d04ce6ce4b131da1c9dd00e99bb
7
- data.tar.gz: bc7243f83e91359ccc5d72d269cd20fc2da80b7cf7aa6284bdd86d83dacc44e4054ad6220614ec324b07a5908d3ae16b74338b7b349debdc85f6261982fce18d
6
+ metadata.gz: 1c9e89a124d065581d0a7b04ec0116ddcc752da454091ce8f0d4df473f0ceb346fae7e3d0169b4b0590222c9a36e9f79a35da6a50d379e2cb66c335690ad40fa
7
+ data.tar.gz: 83066cace0c8e9e0e40cf95615db3df46c336c681d1ecb2973d81468f9eb2d106da4250fb35d893e2cc4eba3e5f05ba47baa680beba4887f16a61299499fe875
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,3 +1,6 @@
1
+ # 0.13.1
2
+ * Add offsets_for_times method on consumer (timflapper)
3
+
1
4
  # 0.13.0
2
5
  * Support cooperative sticky partition assignment in the rebalance callback (methodmissing)
3
6
  * Support both string and symbol header keys (ColinDKelley)
@@ -193,6 +193,7 @@ module Rdkafka
193
193
  attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
194
194
  attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
195
195
  attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int, blocking: true
196
+ attach_function :rd_kafka_offsets_for_times, [:pointer, :pointer, :int], :int, blocking: true
196
197
 
197
198
  # Headers
198
199
  attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
@@ -142,11 +142,13 @@ module Rdkafka
142
142
  )
143
143
 
144
144
  if p.offset
145
+ offset = p.offset.is_a?(Time) ? p.offset.to_f * 1_000 : p.offset
146
+
145
147
  Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(
146
148
  tpl,
147
149
  topic,
148
150
  p.partition,
149
- p.offset
151
+ offset
150
152
  )
151
153
  end
152
154
  end
@@ -34,9 +34,11 @@ module Rdkafka
34
34
  def close
35
35
  return if closed?
36
36
  ObjectSpace.undefine_finalizer(self)
37
- @native_kafka.with_inner do |inner|
37
+
38
+ @native_kafka.synchronize do |inner|
38
39
  Rdkafka::Bindings.rd_kafka_consumer_close(inner)
39
40
  end
41
+
40
42
  @native_kafka.close
41
43
  end
42
44
 
@@ -429,6 +431,39 @@ module Rdkafka
429
431
  end
430
432
  end
431
433
 
434
+ # Lookup offset for the given partitions by timestamp.
435
+ #
436
+ # @param list [TopicPartitionList] The TopicPartitionList with timestamps instead of offsets
437
+ #
438
+ # @raise [RdKafkaError] When the OffsetForTimes lookup fails
439
+ #
440
+ # @return [TopicPartitionList]
441
+ def offsets_for_times(list, timeout_ms = 1000)
442
+ closed_consumer_check(__method__)
443
+
444
+ if !list.is_a?(TopicPartitionList)
445
+ raise TypeError.new("list has to be a TopicPartitionList")
446
+ end
447
+
448
+ tpl = list.to_native_tpl
449
+
450
+ response = @native_kafka.with_inner do |inner|
451
+ Rdkafka::Bindings.rd_kafka_offsets_for_times(
452
+ inner,
453
+ tpl,
454
+ timeout_ms # timeout
455
+ )
456
+ end
457
+
458
+ if response != 0
459
+ raise Rdkafka::RdkafkaError.new(response)
460
+ end
461
+
462
+ TopicPartitionList.from_native_tpl(tpl)
463
+ ensure
464
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
465
+ end
466
+
432
467
  # Manually commit the current offsets of this consumer.
433
468
  #
434
469
  # To use this set `enable.auto.commit`to `false` to disable automatic triggering
data/lib/rdkafka/error.rb CHANGED
@@ -92,4 +92,10 @@ module Rdkafka
92
92
  super("Illegal call to #{method.to_s} on a closed admin")
93
93
  end
94
94
  end
95
+
96
+ class ClosedInnerError < BaseError
97
+ def initialize
98
+ super("Illegal call to a closed inner librdkafka instance")
99
+ end
100
+ end
95
101
  end
@@ -10,6 +10,8 @@ module Rdkafka
10
10
  @access_mutex = Mutex.new
11
11
  # Lock around internal polling
12
12
  @poll_mutex = Mutex.new
13
+ # counter for operations in progress using inner
14
+ @operations_in_progress = 0
13
15
 
14
16
  if run_polling_thread
15
17
  # Start thread to poll client for delivery callbacks,
@@ -35,9 +37,27 @@ module Rdkafka
35
37
  end
36
38
 
37
39
  def with_inner
38
- return if @inner.nil?
40
+ if @access_mutex.owned?
41
+ @operations_in_progress += 1
42
+ else
43
+ @access_mutex.synchronize { @operations_in_progress += 1 }
44
+ end
45
+
46
+ @inner.nil? ? raise(ClosedInnerError) : yield(@inner)
47
+ ensure
48
+ @operations_in_progress -= 1
49
+ end
39
50
 
40
- yield @inner
51
+ def synchronize(&block)
52
+ @access_mutex.synchronize do
53
+ # Wait for any commands using the inner to finish
54
+ # This can take a while on blocking operations like polling but is essential not to proceed
55
+ # with certain types of operations like resources destruction as it can cause the process
56
+ # to hang or crash
57
+ sleep(0.01) until @operations_in_progress.zero?
58
+
59
+ with_inner(&block)
60
+ end
41
61
  end
42
62
 
43
63
  def finalizer
@@ -51,31 +71,31 @@ module Rdkafka
51
71
  def close(object_id=nil)
52
72
  return if closed?
53
73
 
54
- @access_mutex.lock
74
+ synchronize do
75
+ # Indicate to the outside world that we are closing
76
+ @closing = true
55
77
 
56
- # Indicate to the outside world that we are closing
57
- @closing = true
78
+ if @polling_thread
79
+ # Indicate to polling thread that we're closing
80
+ @polling_thread[:closing] = true
58
81
 
59
- if @polling_thread
60
- # Indicate to polling thread that we're closing
61
- @polling_thread[:closing] = true
62
-
63
- # Wait for the polling thread to finish up,
64
- # this can be aborted in practice if this
65
- # code runs from a finalizer.
66
- @polling_thread.join
67
- end
82
+ # Wait for the polling thread to finish up,
83
+ # this can be aborted in practice if this
84
+ # code runs from a finalizer.
85
+ @polling_thread.join
86
+ end
68
87
 
69
- # Destroy the client after locking both mutexes
70
- @poll_mutex.lock
88
+ # Destroy the client after locking both mutexes
89
+ @poll_mutex.lock
71
90
 
72
- # This check prevents a race condition, where we would enter the close in two threads
73
- # and after unlocking the primary one that hold the lock but finished, ours would be unlocked
74
- # and would continue to run, trying to destroy inner twice
75
- return unless @inner
91
+ # This check prevents a race condition, where we would enter the close in two threads
92
+ # and after unlocking the primary one that hold the lock but finished, ours would be unlocked
93
+ # and would continue to run, trying to destroy inner twice
94
+ return unless @inner
76
95
 
77
- Rdkafka::Bindings.rd_kafka_destroy(@inner)
78
- @inner = nil
96
+ Rdkafka::Bindings.rd_kafka_destroy(@inner)
97
+ @inner = nil
98
+ end
79
99
  end
80
100
  end
81
101
  end
@@ -1,7 +1,7 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Rdkafka
4
- VERSION = "0.13.0.beta2"
4
+ VERSION = "0.13.1"
5
5
  LIBRDKAFKA_VERSION = "2.0.2"
6
6
  LIBRDKAFKA_SOURCE_SHA256 = "f321bcb1e015a34114c83cf1aa7b99ee260236aab096b85c003170c90a47ca9d"
7
7
  end
@@ -221,5 +221,24 @@ describe Rdkafka::Consumer::TopicPartitionList do
221
221
 
222
222
  expect(list).to eq other
223
223
  end
224
+
225
+ it "should create a native list with timestamp offsets if offsets are Time" do
226
+ list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
227
+ list.add_topic_and_partitions_with_offsets("topic", 0 => Time.at(1505069646, 250_000))
228
+ end
229
+
230
+ tpl = list.to_native_tpl
231
+
232
+ compare_list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
233
+ list.add_topic_and_partitions_with_offsets(
234
+ "topic",
235
+ 0 => (Time.at(1505069646, 250_000).to_f * 1000).floor
236
+ )
237
+ end
238
+
239
+ native_list = Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
240
+
241
+ expect(native_list).to eq compare_list
242
+ end
224
243
  end
225
244
  end
@@ -312,6 +312,29 @@ describe Rdkafka::Consumer do
312
312
  consumer.poll(100)
313
313
  }.to raise_error(Rdkafka::ClosedConsumerError, /poll/)
314
314
  end
315
+
316
+ context 'when there are outgoing operations in other threads' do
317
+ it 'should wait and not crash' do
318
+ times = []
319
+
320
+ # Run a long running poll
321
+ thread = Thread.new do
322
+ times << Time.now
323
+ consumer.subscribe("empty_test_topic")
324
+ times << Time.now
325
+ consumer.poll(1_000)
326
+ times << Time.now
327
+ end
328
+
329
+ # Make sure it starts before we close
330
+ sleep(0.1)
331
+ consumer.close
332
+ close_time = Time.now
333
+ thread.join
334
+
335
+ times.each { |op_time| expect(op_time).to be < close_time }
336
+ end
337
+ end
315
338
  end
316
339
 
317
340
  describe "#commit, #committed and #store_offset" do
@@ -953,6 +976,69 @@ describe Rdkafka::Consumer do
953
976
  end
954
977
  end
955
978
 
979
+ describe "#offsets_for_times" do
980
+ it "should raise when not TopicPartitionList" do
981
+ expect { consumer.offsets_for_times([]) }.to raise_error(TypeError)
982
+ end
983
+
984
+ it "should raise an error when offsets_for_times fails" do
985
+ tpl = Rdkafka::Consumer::TopicPartitionList.new
986
+
987
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_offsets_for_times).and_return(7)
988
+
989
+ expect { consumer.offsets_for_times(tpl) }.to raise_error(Rdkafka::RdkafkaError)
990
+ end
991
+
992
+ context "when subscribed" do
993
+ let(:timeout) { 1000 }
994
+
995
+ before do
996
+ consumer.subscribe("consume_test_topic")
997
+
998
+ # 1. partitions are assigned
999
+ wait_for_assignment(consumer)
1000
+ expect(consumer.assignment).not_to be_empty
1001
+
1002
+ # 2. eat unrelated messages
1003
+ while(consumer.poll(timeout)) do; end
1004
+ end
1005
+
1006
+ after { consumer.unsubscribe }
1007
+
1008
+ def send_one_message(val)
1009
+ producer.produce(
1010
+ topic: "consume_test_topic",
1011
+ payload: "payload #{val}",
1012
+ key: "key 0",
1013
+ partition: 0
1014
+ ).wait
1015
+ end
1016
+
1017
+ it "returns a TopicParticionList with updated offsets" do
1018
+ send_one_message("a")
1019
+ send_one_message("b")
1020
+ send_one_message("c")
1021
+
1022
+ consumer.poll(timeout)
1023
+ message = consumer.poll(timeout)
1024
+ consumer.poll(timeout)
1025
+
1026
+ tpl = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
1027
+ list.add_topic_and_partitions_with_offsets(
1028
+ "consume_test_topic",
1029
+ [
1030
+ [0, message.timestamp]
1031
+ ]
1032
+ )
1033
+ end
1034
+
1035
+ tpl_response = consumer.offsets_for_times(tpl)
1036
+
1037
+ expect(tpl_response.to_h["consume_test_topic"][0].offset).to eq message.offset
1038
+ end
1039
+ end
1040
+ end
1041
+
956
1042
  describe "a rebalance listener" do
957
1043
  let(:consumer) do
958
1044
  config = rdkafka_consumer_config
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: karafka-rdkafka
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.13.0.beta2
4
+ version: 0.13.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Thijs Cadier
@@ -35,7 +35,7 @@ cert_chain:
35
35
  Qf04B9ceLUaC4fPVEz10FyobjaFoY4i32xRto3XnrzeAgfEe4swLq8bQsR3w/EF3
36
36
  MGU0FeSV2Yj7Xc2x/7BzLK8xQn5l7Yy75iPF+KP3vVmDHnNl
37
37
  -----END CERTIFICATE-----
38
- date: 2023-06-16 00:00:00.000000000 Z
38
+ date: 2023-06-26 00:00:00.000000000 Z
39
39
  dependencies:
40
40
  - !ruby/object:Gem::Dependency
41
41
  name: ffi
@@ -254,11 +254,11 @@ required_ruby_version: !ruby/object:Gem::Requirement
254
254
  version: '2.6'
255
255
  required_rubygems_version: !ruby/object:Gem::Requirement
256
256
  requirements:
257
- - - ">"
257
+ - - ">="
258
258
  - !ruby/object:Gem::Version
259
- version: 1.3.1
259
+ version: '0'
260
260
  requirements: []
261
- rubygems_version: 3.1.2
261
+ rubygems_version: 3.3.4
262
262
  signing_key:
263
263
  specification_version: 4
264
264
  summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
metadata.gz.sig CHANGED
Binary file