karafka-rdkafka 0.14.3 → 0.14.5

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 5fee43511074dfab207280d7114f70f8fb5cc7b6055fdb348cd9da302ee037d2
- data.tar.gz: 22656cc0b49a6d3aa7d6fad85c1b3c936b49089802ed07f4f43d8fdd85a16f2d
+ metadata.gz: 5ed56a133dd27e17055cf63b7e68945d9cb97b42690afa6c7eb230f56273754c
+ data.tar.gz: bcb63a1c2c77d599ab403bfdca1df0badd28f1c1cc72eed16169e6d4044fe778
  SHA512:
- metadata.gz: cdc7873ca1dda1d2afc6193c8e189850d5d3f865f19307cd997cc76c3fa39a919f2757ed610cf18854dc1ff61b270d1a9c85f880257c1c833435d7af9992a27a
- data.tar.gz: ed37156d9f17e8be5804720b426424260c3e70567cf2e24dc385ed5e8d66763199fb056a2c47e11fec1ff80ddaae8b2926f9420b05d1c9e88dc38735c32b4eb1
+ metadata.gz: '07185f868e86c3aaa917f8be42a14047cfb9dd9901001351f416f14338bf1dcc942b4d4649749832efed5c10cb3c7e9ea3543706fd7e11bb79fc332206569898'
+ data.tar.gz: c7bafd507d2b86c9ae61daab5ed32b142ec6d9e7dbd6d4cf26a704d432b257c00e84fb8872daab978c685941ab71e3201d7fe05635fa0fc8526eac016faf48d9
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
  # Rdkafka Changelog

+ ## 0.14.5 (2023-12-20)
+ - [Enhancement] Provide a `label` reference on the producer delivery handle and delivery report for improved traceability.
+
+ ## 0.14.4 (2023-12-19)
+ - [Enhancement] Add ability to store offsets in a transaction (mensfeld)
+
  ## 0.14.3 (2023-12-17)
  - [Enhancement] Replace `rd_kafka_offset_store` with `rd_kafka_offsets_store` (mensfeld)
  - [Fix] Missing ACL `RD_KAFKA_RESOURCE_BROKER` constant reference (mensfeld)
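
The 0.14.5 entry is the headline feature of this release: a message can be tagged with an arbitrary label at produce time and traced through to its delivery report. A minimal sketch of that flow, assuming a local broker; the topic name and label value are placeholders:

```ruby
require "rdkafka"

producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

# The label travels with the delivery handle and is copied into the delivery report
handle = producer.produce(topic: "events", payload: "body", label: "order-42")
handle.label # => "order-42"

report = handle.wait(max_wait_timeout: 15)
report.label # => "order-42"

producer.close
```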
data/lib/rdkafka/bindings.rb CHANGED
@@ -205,6 +205,9 @@ module Rdkafka
  attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int, blocking: true
  attach_function :rd_kafka_offsets_for_times, [:pointer, :pointer, :int], :int, blocking: true
  attach_function :rd_kafka_position, [:pointer, :pointer], :int, blocking: true
+ # These two are used for exactly-once semantics (EOS) support
+ attach_function :rd_kafka_consumer_group_metadata, [:pointer], :pointer, blocking: true
+ attach_function :rd_kafka_consumer_group_metadata_destroy, [:pointer], :void, blocking: true

  # Headers
  attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
@@ -276,6 +279,7 @@ module Rdkafka
  callback :delivery_cb, [:pointer, :pointer, :pointer], :void
  attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void
  attach_function :rd_kafka_init_transactions, [:pointer, :int], :pointer, blocking: true
+ attach_function :rd_kafka_send_offsets_to_transaction, [:pointer, :pointer, :pointer, :int], :pointer, blocking: true
  attach_function :rd_kafka_begin_transaction, [:pointer], :pointer, blocking: true
  attach_function :rd_kafka_abort_transaction, [:pointer, :int], :pointer, blocking: true
  attach_function :rd_kafka_commit_transaction, [:pointer, :int], :pointer, blocking: true
@@ -288,7 +288,16 @@ module Rdkafka

  # Call delivery callback on opaque
  if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
-   opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], topic_name, message[:err]), delivery_handle)
+   opaque.call_delivery_callback(
+     Rdkafka::Producer::DeliveryReport.new(
+       message[:partition],
+       message[:offset],
+       topic_name,
+       message[:err],
+       delivery_handle.label
+     ),
+     delivery_handle
+   )
  end
  end
  end
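
Because the handle's `label` is now forwarded into the report here, an application-level `delivery_callback` can tell which logical message a given report belongs to without keeping its own registry of handles. A hedged sketch of that use case; the topic and label values are illustrative:

```ruby
producer.delivery_callback = lambda do |report|
  if report.error
    # The label survives even for failed deliveries, so errors stay traceable
    warn "delivery failed for #{report.label.inspect}: #{report.error}"
  else
    puts "#{report.label.inspect} delivered to " \
         "#{report.topic_name}/#{report.partition}@#{report.offset}"
  end
end

producer.produce(topic: "events", payload: "body", label: "order-42")
```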
data/lib/rdkafka/consumer.rb CHANGED
@@ -671,6 +671,22 @@ module Rdkafka
  end
  end

+ # Returns a pointer to the consumer group metadata. It is used only in the context of
+ # exactly-once semantics in transactions, which is why it is never mapped to a Ruby object
+ #
+ # This API is **not** usable by itself from Ruby
+ #
+ # @note This pointer **needs** to be released with `rd_kafka_consumer_group_metadata_destroy`
+ #
+ # @private
+ def consumer_group_metadata_pointer
+   closed_consumer_check(__method__)
+
+   @native_kafka.with_inner do |inner|
+     Bindings.rd_kafka_consumer_group_metadata(inner)
+   end
+ end
+
  private

  def closed_consumer_check(method)
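
Since the returned `FFI::Pointer` is allocated by librdkafka and never wrapped in a Ruby object, whoever obtains it must free it explicitly, per the `@note` above. A minimal sketch of the expected lifecycle (internal use only; the surrounding consumer setup is assumed):

```ruby
pointer = consumer.consumer_group_metadata_pointer

begin
  # ... hand the raw pointer to rd_kafka_send_offsets_to_transaction ...
ensure
  # Without this, the metadata struct allocated by librdkafka leaks
  Rdkafka::Bindings.rd_kafka_consumer_group_metadata_destroy(pointer)
end
```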
data/lib/rdkafka/producer/delivery_handle.rb CHANGED
@@ -11,6 +11,9 @@ module Rdkafka
    :offset, :int64,
    :topic_name, :pointer

+ # @return [Object, nil] label set during message production or nil by default
+ attr_accessor :label
+
  # @return [String] the name of the operation (e.g. "delivery")
  def operation_name
    "delivery"
@@ -22,7 +25,9 @@ module Rdkafka
    DeliveryReport.new(
      self[:partition],
      self[:offset],
-     self[:topic_name].read_string
+     self[:topic_name].read_string,
+     nil,
+     label
    )
  else
    DeliveryReport.new(
@@ -31,7 +36,8 @@ module Rdkafka
      # For some errors we will not get a topic name reference, and in cases like this
      # we should not return it
      self[:topic_name].null? ? nil : self[:topic_name].read_string,
-     Rdkafka::RdkafkaError.build(self[:response])
+     Rdkafka::RdkafkaError.build(self[:response]),
+     label
    )
  end
  end
data/lib/rdkafka/producer/delivery_report.rb CHANGED
@@ -21,6 +21,9 @@ module Rdkafka
  # @return [Integer]
  attr_reader :error

+ # @return [Object, nil] label set during message production or nil by default
+ attr_reader :label
+
  # We alias the `#topic_name` under `#topic` to make this consistent with `Consumer::Message`
  # where the topic name is under the `#topic` method. That way we have a consistent name that
  # is present in both places
@@ -30,11 +33,12 @@ module Rdkafka

  private

- def initialize(partition, offset, topic_name = nil, error = nil)
+ def initialize(partition, offset, topic_name = nil, error = nil, label = nil)
    @partition = partition
    @offset = offset
    @topic_name = topic_name
    @error = error
+   @label = label
  end
  end
  end
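
The `label` rides along as a fifth positional argument, so existing four-argument constructions keep working unchanged. A small illustration of the updated signature; the values are made up:

```ruby
# partition, offset, topic_name, error, label
report = Rdkafka::Producer::DeliveryReport.new(1, 128, "events", nil, "order-42")

report.topic # => "events" (alias of #topic_name)
report.error # => nil
report.label # => "order-42"
```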
data/lib/rdkafka/producer.rb CHANGED
@@ -81,7 +81,7 @@ module Rdkafka
    @native_kafka.with_inner do |inner|
      response_ptr = Rdkafka::Bindings.rd_kafka_begin_transaction(inner)

-     Rdkafka::RdkafkaError.validate!(response_ptr)
+     Rdkafka::RdkafkaError.validate!(response_ptr) || true
    end
  end

@@ -91,7 +91,7 @@ module Rdkafka
    @native_kafka.with_inner do |inner|
      response_ptr = Rdkafka::Bindings.rd_kafka_commit_transaction(inner, timeout_ms)

-     Rdkafka::RdkafkaError.validate!(response_ptr)
+     Rdkafka::RdkafkaError.validate!(response_ptr) || true
    end
  end

@@ -100,8 +100,35 @@

    @native_kafka.with_inner do |inner|
      response_ptr = Rdkafka::Bindings.rd_kafka_abort_transaction(inner, timeout_ms)
+     Rdkafka::RdkafkaError.validate!(response_ptr) || true
+   end
+ end
+
+ # Sends the provided offsets of a consumer to the transaction so they are committed
+ # together with it
+ #
+ # @param consumer [Consumer] consumer that owns the given tpls
+ # @param tpl [Consumer::TopicPartitionList]
+ # @param timeout_ms [Integer] offsets send timeout
+ # @note Use **only** in the context of an active transaction
+ def send_offsets_to_transaction(consumer, tpl, timeout_ms = 5_000)
+   closed_producer_check(__method__)
+
+   return if tpl.empty?
+
+   cgmetadata = consumer.consumer_group_metadata_pointer
+   native_tpl = tpl.to_native_tpl
+
+   @native_kafka.with_inner do |inner|
+     response_ptr = Bindings.rd_kafka_send_offsets_to_transaction(inner, native_tpl, cgmetadata, timeout_ms)
+
      Rdkafka::RdkafkaError.validate!(response_ptr)
    end
+ ensure
+   if cgmetadata && !cgmetadata.null?
+     Bindings.rd_kafka_consumer_group_metadata_destroy(cgmetadata)
+   end
+
+   Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(native_tpl) unless native_tpl.nil?
  end

  # Close this producer and wait for the internal poll queue to empty.
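
`send_offsets_to_transaction` is the building block for a consume-transform-produce loop with exactly-once semantics: the input offsets are committed atomically with the produced output. A hedged end-to-end sketch, assuming `transactional.id` is configured on the producer; the topic names are placeholders, `transform` stands in for application logic, and error handling is simplified:

```ruby
producer.init_transactions

loop do
  message = consumer.poll(1_000) or next

  producer.begin_transaction

  begin
    producer.produce(topic: "output_topic", payload: transform(message.payload)).wait

    # Commit the input offset inside the same transaction
    tpl = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
      list.add_topic_and_partitions_with_offsets(
        message.topic, message.partition => message.offset + 1
      )
    end

    producer.send_offsets_to_transaction(consumer, tpl)
    producer.commit_transaction
  rescue Rdkafka::RdkafkaError
    producer.abort_transaction
  end
end
```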
@@ -199,11 +226,12 @@ module Rdkafka
  # @param partition_key [String, nil] Optional partition key based on which partition assignment can happen
  # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
  # @param headers [Hash<String,String>] Optional message headers
+ # @param label [Object, nil] an optional label that can be assigned when producing a message; it will be available on both the delivery handle and the delivery report
  #
  # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
  #
  # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
- def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil)
+ def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil, label: nil)
    closed_producer_check(__method__)

    # Start by checking and converting the input
@@ -245,6 +273,7 @@ module Rdkafka
  end

  delivery_handle = DeliveryHandle.new
+ delivery_handle.label = label
  delivery_handle[:pending] = true
  delivery_handle[:response] = -1
  delivery_handle[:partition] = -1
data/lib/rdkafka/version.rb CHANGED
@@ -1,7 +1,7 @@
  # frozen_string_literal: true

  module Rdkafka
-   VERSION = "0.14.3"
+   VERSION = "0.14.5"
    LIBRDKAFKA_VERSION = "2.3.0"
    LIBRDKAFKA_SOURCE_SHA256 = "2d49c35c77eeb3d42fa61c43757fcbb6a206daa560247154e60642bcdcc14d12"
  end
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -360,7 +360,6 @@ describe Rdkafka::Consumer do
  end
  end

-
  describe "#position, #commit, #committed and #store_offset" do
  # Make sure there are messages to work with
  let!(:report) do
@@ -1142,6 +1141,14 @@ describe Rdkafka::Consumer do
  end
  end

+ describe '#consumer_group_metadata_pointer' do
+   it 'expect to return a pointer' do
+     pointer = consumer.consumer_group_metadata_pointer
+     expect(pointer).to be_a(FFI::Pointer)
+     Rdkafka::Bindings.rd_kafka_consumer_group_metadata_destroy(pointer)
+   end
+ end
+
  describe "a rebalance listener" do
  let(:consumer) do
  config = rdkafka_consumer_config
data/spec/rdkafka/producer_spec.rb CHANGED
@@ -34,6 +34,7 @@ describe Rdkafka::Producer do

  producer.delivery_callback = lambda do |report|
    expect(report).not_to be_nil
+   expect(report.label).to eq "label"
    expect(report.partition).to eq 1
    expect(report.offset).to be >= 0
    expect(report.topic_name).to eq "produce_test_topic"
@@ -44,9 +45,12 @@ describe Rdkafka::Producer do
  handle = producer.produce(
    topic: "produce_test_topic",
    payload: "payload",
-   key: "key"
+   key: "key",
+   label: "label"
  )

+ expect(handle.label).to eq "label"
+
  # Wait for it to be delivered
  handle.wait(max_wait_timeout: 15)

@@ -175,11 +179,13 @@ describe Rdkafka::Producer do
  handle = producer.produce(
    topic: "produce_test_topic",
    payload: "payload",
-   key: "key"
+   key: "key",
+   label: "label"
  )

  # Should be pending at first
  expect(handle.pending?).to be true
+ expect(handle.label).to eq "label"

  # Check delivery handle and report
  report = handle.wait(max_wait_timeout: 5)
@@ -187,6 +193,7 @@ describe Rdkafka::Producer do
  expect(report).not_to be_nil
  expect(report.partition).to eq 1
  expect(report.offset).to be >= 0
+ expect(report.label).to eq "label"

  # Flush and close producer
  producer.flush
@@ -567,10 +574,11 @@ describe Rdkafka::Producer do
  end

  it "should contain the error in the response when not deliverable" do
-   handler = producer.produce(topic: 'produce_test_topic', payload: nil)
+   handler = producer.produce(topic: 'produce_test_topic', payload: nil, label: 'na')
    # Wait for the async callbacks and delivery registry to update
    sleep(2)
    expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
+   expect(handler.create_result.label).to eq('na')
  end
  end

@@ -883,5 +891,30 @@ describe Rdkafka::Producer do
  expect { producer2.commit_transaction }.not_to raise_error
  end
  end
+
+ context 'when having a consumer with tpls for exactly once semantics' do
+   let(:tpl) do
+     producer.produce(topic: 'consume_test_topic', payload: 'data1', partition: 0).wait
+     result = producer.produce(topic: 'consume_test_topic', payload: 'data1', partition: 0).wait
+
+     Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+       list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => result.offset + 1)
+     end
+   end
+
+   before do
+     consumer.subscribe("consume_test_topic")
+     wait_for_assignment(consumer)
+     producer.init_transactions
+     producer.begin_transaction
+   end
+
+   after { consumer.unsubscribe }
+
+   it 'expect to store offsets and not crash' do
+     producer.send_offsets_to_transaction(consumer, tpl)
+     producer.commit_transaction
+   end
+ end
  end
  end
data/spec/spec_helper.rb CHANGED
@@ -107,10 +107,6 @@ def wait_for_unassignment(consumer)
  end
  end

- def objects_of_type_count(type)
-   ObjectSpace.each_object(type).count
- end
-
  def notify_listener(listener, &block)
  # 1. subscribe and poll
  consumer.subscribe("consume_test_topic")
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: karafka-rdkafka
  version: !ruby/object:Gem::Version
- version: 0.14.3
+ version: 0.14.5
  platform: ruby
  authors:
  - Thijs Cadier
@@ -35,7 +35,7 @@ cert_chain:
  AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
  msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
  -----END CERTIFICATE-----
- date: 2023-12-17 00:00:00.000000000 Z
+ date: 2023-12-20 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: ffi
metadata.gz.sig CHANGED
Binary file