karafka-rdkafka 0.19.0 → 0.19.2.rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,13 +6,31 @@ module Rdkafka
  include Helpers::Time
  include Helpers::OAuth

- # Cache partitions count for 30 seconds
- PARTITIONS_COUNT_TTL = 30
+ # @private
+ @@partitions_count_cache = PartitionsCountCache.new
+
+ # Global (process wide) partitions cache. We use it to store number of topics partitions,
+ # either from the librdkafka statistics (if enabled) or via direct inline calls every now and
+ # then. Since the partitions count can only grow and should be same for all consumers and
+ # producers, we can use a global cache as long as we ensure that updates only move up.
+ #
+ # @note It is critical to remember, that not all users may have statistics callbacks enabled,
+ #   hence we should not make assumption that this cache is always updated from the stats.
+ #
+ # @return [Rdkafka::Producer::PartitionsCountCache]
+ def self.partitions_count_cache
+   @@partitions_count_cache
+ end
+
+ # @param partitions_count_cache [Rdkafka::Producer::PartitionsCountCache]
+ def self.partitions_count_cache=(partitions_count_cache)
+   @@partitions_count_cache = partitions_count_cache
+ end

  # Empty hash used as a default
  EMPTY_HASH = {}.freeze

- private_constant :PARTITIONS_COUNT_TTL, :EMPTY_HASH
+ private_constant :EMPTY_HASH

  # Raised when there was a critical issue when invoking rd_kafka_topic_new
  # This is a temporary solution until https://github.com/karafka/rdkafka-ruby/issues/451 is
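
The new class-level accessors turn the partitions cache into a process-wide, swappable object instead of a per-producer TTL constant. A minimal usage sketch, assuming only the accessors added above and the `get(topic) { ... }` read-through API used by `partition_count` later in this diff (the topic name and fallback value are illustrative):

  # Hedged sketch: replace the shared cache (e.g. in a test suite) and read through it.
  Rdkafka::Producer.partitions_count_cache = Rdkafka::Producer::PartitionsCountCache.new

  # `get` is assumed to return the cached count when fresh and to run the block
  # to refresh it otherwise; -1 mirrors the "could not fetch" convention in this diff.
  count = Rdkafka::Producer.partitions_count_cache.get("events") { -1 }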
@@ -43,25 +61,6 @@ module Rdkafka

  # Makes sure, that native kafka gets closed before it gets GCed by Ruby
  ObjectSpace.define_finalizer(self, native_kafka.finalizer)
-
- @_partitions_count_cache = Hash.new do |cache, topic|
-   topic_metadata = nil
-
-   @native_kafka.with_inner do |inner|
-     topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
-   end
-
-   partition_count = topic_metadata ? topic_metadata[:partition_count] : -1
-
-   # This approach caches the failure to fetch only for 1 second. This will make sure, that
-   # we do not cache the failure for too long but also "buys" us a bit of time in case there
-   # would be issues in the cluster so we won't overaload it with consecutive requests
-   cache[topic] = if partition_count.positive?
-     [monotonic_now, partition_count]
-   else
-     [monotonic_now - PARTITIONS_COUNT_TTL + 5, partition_count]
-   end
- end
  end

  # Sets alternative set of configuration details that can be set per topic
@@ -284,18 +283,24 @@ module Rdkafka
  # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
  #   auto-created after returning nil.
  #
- # @note We cache the partition count for a given topic for given time.
+ # @note We cache the partition count for a given topic for given time. If statistics are
+ #   enabled for any producer or consumer, it will take precedence over per instance fetching.
+ #
  #   This prevents us in case someone uses `partition_key` from querying for the count with
- #   each message. Instead we query once every 30 seconds at most if we have a valid partition
- #   count or every 5 seconds in case we were not able to obtain number of partitions
+ #   each message. Instead we query at most once every 30 seconds at most if we have a valid
+ #   partition count or every 5 seconds in case we were not able to obtain number of partitions.
  def partition_count(topic)
    closed_producer_check(__method__)

-   @_partitions_count_cache.delete_if do |_, cached|
-     monotonic_now - cached.first > PARTITIONS_COUNT_TTL
-   end
+   self.class.partitions_count_cache.get(topic) do
+     topic_metadata = nil

-   @_partitions_count_cache[topic].last
+     @native_kafka.with_inner do |inner|
+       topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
+     end
+
+     topic_metadata ? topic_metadata[:partition_count] : -1
+   end
  end

  # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
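
For callers the public API is unchanged: `partition_count` keeps its signature and still reports -1 when the count cannot be obtained; only the caching behind it is now shared and possibly statistics-fed. A hedged call-site sketch (broker address and topic are illustrative):

  producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer
  # Returns the cached count when fresh, otherwise triggers a metadata fetch; -1 on failure.
  producer.partition_count("events")
  producer.close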
@@ -309,7 +314,7 @@ module Rdkafka
  # @param partition [Integer,nil] Optional partition to produce to
  # @param partition_key [String, nil] Optional partition key based on which partition assignment can happen
  # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
- # @param headers [Hash<String,String>] Optional message headers
+ # @param headers [Hash<String,String|Array<String>>] Optional message headers. Values can be either a single string or an array of strings to support duplicate headers per KIP-82
  # @param label [Object, nil] a label that can be assigned when producing a message that will be part of the delivery handle and the delivery report
  # @param topic_config [Hash] topic config for given message dispatch. Allows to send messages to topics with different configuration
  #
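
The widened `headers` type means the same key can now be dispatched several times in one message. A hedged example using the `produce` parameters documented above (topic, payload, and header values are illustrative):

  handle = producer.produce(
    topic: "events",
    payload: "body",
    # A String value still yields a single header; an Array yields one librdkafka
    # header entry per element under the same key (KIP-82 style duplicates).
    headers: { "trace-id" => "abc123", "retry-host" => ["host-a", "host-b"] }
  )
  handle.wait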
@@ -401,11 +406,23 @@ module Rdkafka
  if headers
    headers.each do |key0, value0|
      key = key0.to_s
-     value = value0.to_s
-     args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
-     args << :string << key
-     args << :pointer << value
-     args << :size_t << value.bytesize
+     if value0.is_a?(Array)
+       # Handle array of values per KIP-82
+       value0.each do |value|
+         value = value.to_s
+         args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+         args << :string << key
+         args << :pointer << value
+         args << :size_t << value.bytesize
+       end
+     else
+       # Handle single value
+       value = value0.to_s
+       args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+       args << :string << key
+       args << :pointer << value
+       args << :size_t << value.bytesize
+     end
    end
  end

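Functionally the new branch only expands Array values into repeated RD_KAFKA_VTYPE_HEADER tuples for the varargs call. A simplified restatement of that expansion in plain Ruby, not the gem's code, shown only to illustrate the flattening:

  headers = { "k" => ["a", "b"], "single" => "c" }
  expanded = headers.flat_map do |key, value|
    # Array() leaves arrays alone and wraps single values, so both shapes expand uniformly.
    Array(value).map { |v| [key.to_s, v.to_s] }
  end
  expanded # => [["k", "a"], ["k", "b"], ["single", "c"]]
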
@@ -1,7 +1,7 @@
  # frozen_string_literal: true

  module Rdkafka
- VERSION = "0.19.0"
+ VERSION = "0.19.2.rc1"
  LIBRDKAFKA_VERSION = "2.8.0"
  LIBRDKAFKA_SOURCE_SHA256 = "5bd1c46f63265f31c6bfcedcde78703f77d28238eadf23821c2b43fc30be3e25"
  end
data/lib/rdkafka.rb CHANGED
@@ -42,6 +42,7 @@ require "rdkafka/consumer/topic_partition_list"
  require "rdkafka/error"
  require "rdkafka/metadata"
  require "rdkafka/native_kafka"
+ require "rdkafka/producer/partitions_count_cache"
  require "rdkafka/producer"
  require "rdkafka/producer/delivery_handle"
  require "rdkafka/producer/delivery_report"
data/renovate.json CHANGED
@@ -1,6 +1,18 @@
  {
    "$schema": "https://docs.renovatebot.com/renovate-schema.json",
    "extends": [
-     "config:base"
+     "config:recommended"
+   ],
+   "github-actions": {
+     "enabled": true,
+     "pinDigests": true
+   },
+   "packageRules": [
+     {
+       "matchManagers": [
+         "github-actions"
+       ],
+       "minimumReleaseAge": "7 days"
+     }
    ]
  }
@@ -738,17 +738,19 @@ describe Rdkafka::Admin do
  end
  end

- context "when operating from a fork" do
-   # @see https://github.com/ffi/ffi/issues/1114
-   it 'expect to be able to create topics and run other admin operations without hanging' do
-     # If the FFI issue is not mitigated, this will hang forever
-     pid = fork do
-       admin
-         .create_topic(topic_name, topic_partition_count, topic_replication_factor)
-         .wait
-     end
+ unless RUBY_PLATFORM == 'java'
+   context "when operating from a fork" do
+     # @see https://github.com/ffi/ffi/issues/1114
+     it 'expect to be able to create topics and run other admin operations without hanging' do
+       # If the FFI issue is not mitigated, this will hang forever
+       pid = fork do
+         admin
+           .create_topic(topic_name, topic_partition_count, topic_replication_factor)
+           .wait
+       end

-     Process.wait(pid)
+       Process.wait(pid)
+     end
  end
  end
  end
@@ -149,15 +149,6 @@ describe Rdkafka::Bindings do
  end

  describe "oauthbearer set token" do
-
- context "without args" do
-   it "should raise argument error" do
-     expect {
-       Rdkafka::Bindings.rd_kafka_oauthbearer_set_token
-     }.to raise_error(ArgumentError)
-   end
- end
-
  context "with args" do
  before do
  DEFAULT_TOKEN_EXPIRY_SECONDS = 900
@@ -33,23 +33,25 @@ describe Rdkafka::Config do
  expect(log.string).to include "FATAL -- : I love testing"
  end

- it "expect to start new logger thread after fork and work" do
-   reader, writer = IO.pipe
-
-   pid = fork do
-     $stdout.reopen(writer)
-     Rdkafka::Config.logger = Logger.new($stdout)
-     reader.close
-     producer = rdkafka_producer_config(debug: 'all').producer
-     producer.close
+ unless RUBY_PLATFORM == 'java'
+   it "expect to start new logger thread after fork and work" do
+     reader, writer = IO.pipe
+
+     pid = fork do
+       $stdout.reopen(writer)
+       Rdkafka::Config.logger = Logger.new($stdout)
+       reader.close
+       producer = rdkafka_producer_config(debug: 'all').producer
+       producer.close
+       writer.close
+       sleep(1)
+     end
+
  writer.close
- sleep(1)
+     Process.wait(pid)
+     output = reader.read
+     expect(output.split("\n").size).to be >= 20
  end
-
- writer.close
- Process.wait(pid)
- output = reader.read
- expect(output.split("\n").size).to be >= 20
  end
  end

@@ -3,7 +3,7 @@
  describe Rdkafka::Consumer::Headers do
  let(:headers) do
  { # Note String keys!
- "version" => "2.1.3",
+ "version" => ["2.1.3", "2.1.4"],
  "type" => "String"
  }
  end
@@ -17,27 +17,39 @@ describe Rdkafka::Consumer::Headers do
  Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
  end

+ # First version header
  expect(Rdkafka::Bindings).to \
  receive(:rd_kafka_header_get_all)
  .with(headers_ptr, 0, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
- expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null: headers.keys[0]))
- expect(size_ptr).to receive(:[]).with(:value).and_return(headers.keys[0].size)
- expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers.values[0]))
+ expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null: "version"))
+ expect(size_ptr).to receive(:[]).with(:value).and_return(headers["version"][0].size)
+ expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers["version"][0]))
  Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
  end

+ # Second version header
  expect(Rdkafka::Bindings).to \
  receive(:rd_kafka_header_get_all)
  .with(headers_ptr, 1, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
- expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null: headers.keys[1]))
- expect(size_ptr).to receive(:[]).with(:value).and_return(headers.keys[1].size)
- expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers.values[1]))
+ expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null: "version"))
+ expect(size_ptr).to receive(:[]).with(:value).and_return(headers["version"][1].size)
+ expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers["version"][1]))
  Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
  end

+ # Single type header
  expect(Rdkafka::Bindings).to \
  receive(:rd_kafka_header_get_all)
- .with(headers_ptr, 2, anything, anything, anything)
+ .with(headers_ptr, 2, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
+ expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 2", read_string_to_null: "type"))
+ expect(size_ptr).to receive(:[]).with(:value).and_return(headers["type"].size)
+ expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 2", read_string: headers["type"]))
+ Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+ end
+
+ expect(Rdkafka::Bindings).to \
+ receive(:rd_kafka_header_get_all)
+ .with(headers_ptr, 3, anything, anything, anything)
  .and_return(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT)
  end
@@ -46,8 +58,12 @@ describe Rdkafka::Consumer::Headers do
  it { is_expected.to eq(headers) }
  it { is_expected.to be_frozen }

- it 'allows String key' do
-   expect(subject['version']).to eq("2.1.3")
+ it 'returns array for duplicate headers' do
+   expect(subject['version']).to eq(["2.1.3", "2.1.4"])
+ end
+
+ it 'returns string for single headers' do
+   expect(subject['type']).to eq("String")
  end

  it 'does not support symbols mappings' do
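
The specs above pin down the consumer-side contract: a header key seen once stays a String, while duplicates come back as an Array. A hedged consumer-side sketch (the topic loop and handling are illustrative):

  consumer.each do |message|
    value = message.headers["version"]
    # With KIP-82 style duplicates the value may be a String or an Array of Strings,
    # so normalize before use.
    Array(value).each { |v| puts "version header: #{v}" }
  end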