rdkafka 0.15.0 → 0.15.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: d906b2e71dae5b5f45459e915c48dc8cb88e0d51ebb90ded80cef3c8e5531b77
- data.tar.gz: 8f0df2688bbc3b264de22b5943b18462ad41898781cc12e6e534804409133ce0
+ metadata.gz: 8636c80e1798cf24b34cf25a20ca24f35e2951fb179843a3a85a94fe0274ca76
+ data.tar.gz: f115aa7fff4961d42280a7ad6fd78fed40568b936139d588ae2362a2f0f45c25
  SHA512:
- metadata.gz: c35d392b326f4d47077f419bced92b929436be548651afc9364f5ada2eda51883ad75feeeb30183369aa6b15db3ac4630410f408eae449d1b0cc5a007cf011fc
- data.tar.gz: 1487bb54713e6330ce55fd95f656dffd2edc34bcd8bc151d94faf2d6f043b8183276e3407d5f45c6152e99463c8323ccb83c15fff7007ebb96c4b369533002d5
+ metadata.gz: e5b5368a732e42b1c57aff93a7172c95b0bad93ac646dcba495c0509c5b6c29cf8753601d1f04f028c98579846e5db7e7119ae9a6a4cca2f41441316b60b5c9c
+ data.tar.gz: 8596d6944d5151df3ad875d93dbd7cf2aee00c1ded85e053e96880b8c5420ca6ba18a72f57cc867a7bfedf38060be3c9ea4334d79d7a21b2503a078bce1d266a
checksums.yaml.gz.sig CHANGED
Binary file
@@ -22,16 +22,15 @@ jobs:
  fail-fast: false
  matrix:
  ruby:
- - '3.3.0-preview2'
+ - '3.3'
  - '3.2'
  - '3.1'
  - '3.1.0'
  - '3.0'
  - '3.0.0'
  - '2.7'
- - '2.7.0'
  include:
- - ruby: '3.2'
+ - ruby: '3.3'
  coverage: 'true'
  steps:
  - uses: actions/checkout@v4
data/.ruby-version CHANGED
@@ -1 +1 @@
- 3.2.2
+ 3.3.0
data/CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
  # Rdkafka Changelog

+ ## 0.15.1 (2024-01-30)
+ - [Enhancement] Provide support for Nix OS (alexandriainfantino)
+ - [Enhancement] Replace `rd_kafka_offset_store` with `rd_kafka_offsets_store` (mensfeld)
+ - [Enhancement] Alias `topic_name` as `topic` in the delivery report (mensfeld)
+ - [Enhancement] Provide `label` producer handle and report reference for improved traceability (mensfeld)
+ - [Enhancement] Include the error when invoking `create_result` on the producer handle (mensfeld)
+ - [Enhancement] Skip intermediate array creation on delivery report callback execution (one per message) (mensfeld)
+ - [Enhancement] Report `-1` instead of `nil` in case of `partition_count` failure (mensfeld)
+ - [Fix] Fix return type on `#rd_kafka_poll` (mensfeld)
+ - [Fix] `uint8_t` does not exist on Apple Silicon (mensfeld)
+ - [Fix] Missing ACL `RD_KAFKA_RESOURCE_BROKER` constant reference (mensfeld)
+ - [Fix] Partition cache caches invalid nil result for `PARTITIONS_COUNT_TTL` (mensfeld)
+ - [Change] Rename `matching_acl_pattern_type` to `matching_acl_resource_pattern_type` to align the whole API (mensfeld)
+
  ## 0.15.0 (2023-12-03)
  - **[Feature]** Add `Admin#metadata` (mensfeld)
  - **[Feature]** Add `Admin#create_partitions` (mensfeld)
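The 0.15.1 entries above mostly touch the producer surface. Below is a minimal sketch of how the new pieces could fit together, assuming a reachable broker on localhost; the topic name and the "order-42" label are illustrative only and not part of the gem:

```ruby
require "rdkafka"

producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

# `label:` is the new keyword argument; it travels with both the handle and the report.
handle = producer.produce(topic: "events", payload: "hello", label: "order-42")
handle.label # => "order-42"

report = handle.wait(max_wait_timeout: 15)
report.label # => "order-42"
report.topic # => "events" (alias of #topic_name added in this release)
report.error # populated when the delivery failed

producer.close
```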
data/README.md CHANGED
@@ -32,6 +32,7 @@ The most important pieces of a Kafka client are implemented, and we aim to provi
  * [Message Publishing Libraries](#message-publishing-libraries)
  - [Development](#development)
  - [Example](#example)
+ - [Versions](#versions)

  ## Project Scope

@@ -147,3 +148,14 @@ To see everything working, run these in separate tabs:
  bundle exec rake consume_messages
  bundle exec rake produce_messages
  ```
+
+ ## Versions
+
+ | rdkafka-ruby | librdkafka |
+ |-|-|
+ | 0.15.0 (2023-12-03) | 2.3.0 (2023-10-25) |
+ | 0.14.0 (2023-11-21) | 2.2.0 (2023-07-12) |
+ | 0.13.0 (2023-07-24) | 2.0.2 (2023-01-20) |
+ | 0.12.0 (2022-06-17) | 1.9.0 (2022-06-16) |
+ | 0.11.0 (2021-11-17) | 1.8.2 (2021-10-18) |
+ | 0.10.0 (2021-09-07) | 1.5.0 (2020-07-20) |
data/docker-compose.yml CHANGED
@@ -3,7 +3,7 @@ version: '2'
  services:
  kafka:
  container_name: kafka
- image: confluentinc/cp-kafka:7.5.2
+ image: confluentinc/cp-kafka:7.5.3

  ports:
  - 9092:9092
data/ext/Rakefile CHANGED
@@ -1,40 +1,57 @@
  # frozen_string_literal: true

  require File.expand_path('../../lib/rdkafka/version', __FILE__)
- require "mini_portile2"
  require "fileutils"
  require "open-uri"

  task :default => :clean do
- # Download and compile librdkafka
- recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)
+ # For nix users, nix can't locate the file paths because the packages it's requiring aren't managed by the system but are
+ # managed by nix itself, so using the normal file paths doesn't work for nix users.
+ #
+ # Mini_portile causes an issue because its dependencies are downloaded on the fly and therefore don't exist/aren't
+ # accessible in the nix environment
+ if ENV.fetch('RDKAFKA_EXT_PATH', '').empty?
+ # Download and compile librdkafka if RDKAFKA_EXT_PATH is not set
+ require "mini_portile2"
+ recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)

- # Use default homebrew openssl if we're on mac and the directory exists
- # and each of flags is not empty
- if recipe.host&.include?("darwin") && system("which brew &> /dev/null") && Dir.exist?("#{homebrew_prefix = %x(brew --prefix openssl).strip}")
- ENV["CPPFLAGS"] = "-I#{homebrew_prefix}/include" unless ENV["CPPFLAGS"]
- ENV["LDFLAGS"] = "-L#{homebrew_prefix}/lib" unless ENV["LDFLAGS"]
- end
+ # Use default homebrew openssl if we're on mac and the directory exists
+ # and each of flags is not empty
+ if recipe.host&.include?("darwin") && system("which brew &> /dev/null") && Dir.exist?("#{homebrew_prefix = %x(brew --prefix openssl).strip}")
+ ENV["CPPFLAGS"] = "-I#{homebrew_prefix}/include" unless ENV["CPPFLAGS"]
+ ENV["LDFLAGS"] = "-L#{homebrew_prefix}/lib" unless ENV["LDFLAGS"]
+ end

- recipe.files << {
- :url => "https://codeload.github.com/confluentinc/librdkafka/tar.gz/v#{Rdkafka::LIBRDKAFKA_VERSION}",
- :sha256 => Rdkafka::LIBRDKAFKA_SOURCE_SHA256
- }
- recipe.configure_options = ["--host=#{recipe.host}"]
- recipe.cook
- # Move dynamic library we're interested in
- if recipe.host.include?('darwin')
- from_extension = '1.dylib'
- to_extension = 'dylib'
+ recipe.files << {
+ :url => "https://codeload.github.com/edenhill/librdkafka/tar.gz/v#{Rdkafka::LIBRDKAFKA_VERSION}",
+ :sha256 => Rdkafka::LIBRDKAFKA_SOURCE_SHA256
+ }
+ recipe.configure_options = ["--host=#{recipe.host}"]
+ recipe.cook
+ # Move dynamic library we're interested in
+ if recipe.host.include?('darwin')
+ from_extension = '1.dylib'
+ to_extension = 'dylib'
+ else
+ from_extension = 'so.1'
+ to_extension = 'so'
+ end
+ lib_path = File.join(File.dirname(__FILE__), "ports/#{recipe.host}/librdkafka/#{Rdkafka::LIBRDKAFKA_VERSION}/lib/librdkafka.#{from_extension}")
+ FileUtils.mv(lib_path, File.join(File.dirname(__FILE__), "librdkafka.#{to_extension}"))
+ # Cleanup files created by miniportile we don't need in the gem
+ FileUtils.rm_rf File.join(File.dirname(__FILE__), "tmp")
+ FileUtils.rm_rf File.join(File.dirname(__FILE__), "ports")
  else
- from_extension = 'so.1'
- to_extension = 'so'
+ # Otherwise, copy existing libraries to ./ext
+ if ENV['RDKAFKA_EXT_PATH'].nil? || ENV['RDKAFKA_EXT_PATH'].empty?
+ raise "RDKAFKA_EXT_PATH must be set in your nix config when running under nix"
+ end
+ files = [
+ File.join(ENV['RDKAFKA_EXT_PATH'], 'lib', 'librdkafka.dylib'),
+ File.join(ENV['RDKAFKA_EXT_PATH'], 'lib', 'librdkafka.so')
+ ]
+ files.each { |ext| FileUtils.cp(ext, File.dirname(__FILE__)) if File.exist?(ext) }
  end
- lib_path = File.join(File.dirname(__FILE__), "ports/#{recipe.host}/librdkafka/#{Rdkafka::LIBRDKAFKA_VERSION}/lib/librdkafka.#{from_extension}")
- FileUtils.mv(lib_path, File.join(File.dirname(__FILE__), "librdkafka.#{to_extension}"))
- # Cleanup files created by miniportile we don't need in the gem
- FileUtils.rm_rf File.join(File.dirname(__FILE__), "tmp")
- FileUtils.rm_rf File.join(File.dirname(__FILE__), "ports")
  end

  task :clean do
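The rewritten default task above only falls back to mini_portile2 when `RDKAFKA_EXT_PATH` is unset; otherwise it copies prebuilt libraries from that path. A hedged sketch of how a Nix user might take the second branch; the store path is invented for illustration and would normally come from the Nix configuration itself:

```ruby
# Illustrative only: point the ext task at an existing librdkafka build instead
# of letting mini_portile2 download and compile one. The path is hypothetical.
ENV["RDKAFKA_EXT_PATH"] = "/nix/store/example-librdkafka-2.3.0"

# With the variable set, the default task copies <path>/lib/librdkafka.so
# (or .dylib) into ext/ and skips the download and compile step entirely.
```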
@@ -2,36 +2,50 @@

  module Rdkafka
  class Admin
-
  # Extracts attributes of rd_kafka_AclBinding_t
  #
  class AclBindingResult
- attr_reader :result_error, :error_string, :matching_acl_resource_type, :matching_acl_resource_name, :matching_acl_pattern_type, :matching_acl_principal, :matching_acl_host, :matching_acl_operation, :matching_acl_permission_type
+ attr_reader :result_error, :error_string, :matching_acl_resource_type,
+ :matching_acl_resource_name, :matching_acl_resource_pattern_type,
+ :matching_acl_principal, :matching_acl_host, :matching_acl_operation,
+ :matching_acl_permission_type
+
+ # This attribute was initially released under the name that is now an alias.
+ # We keep it for backwards compatibility, but it was renamed for consistency.
+ alias matching_acl_pattern_type matching_acl_resource_pattern_type

  def initialize(matching_acl)
- rd_kafka_error_pointer = Rdkafka::Bindings.rd_kafka_AclBinding_error(matching_acl)
- @result_error = Rdkafka::Bindings.rd_kafka_error_code(rd_kafka_error_pointer)
- error_string = Rdkafka::Bindings.rd_kafka_error_string(rd_kafka_error_pointer)
- if error_string != FFI::Pointer::NULL
- @error_string = error_string.read_string
- end
- @matching_acl_resource_type = Rdkafka::Bindings.rd_kafka_AclBinding_restype(matching_acl)
- matching_acl_resource_name = Rdkafka::Bindings.rd_kafka_AclBinding_name(matching_acl)
- if matching_acl_resource_name != FFI::Pointer::NULL
- @matching_acl_resource_name = matching_acl_resource_name.read_string
- end
- @matching_acl_pattern_type = Rdkafka::Bindings.rd_kafka_AclBinding_resource_pattern_type(matching_acl)
- matching_acl_principal = Rdkafka::Bindings.rd_kafka_AclBinding_principal(matching_acl)
- if matching_acl_principal != FFI::Pointer::NULL
- @matching_acl_principal = matching_acl_principal.read_string
- end
- matching_acl_host = Rdkafka::Bindings.rd_kafka_AclBinding_host(matching_acl)
- if matching_acl_host != FFI::Pointer::NULL
- @matching_acl_host = matching_acl_host.read_string
- end
- @matching_acl_operation = Rdkafka::Bindings.rd_kafka_AclBinding_operation(matching_acl)
- @matching_acl_permission_type = Rdkafka::Bindings.rd_kafka_AclBinding_permission_type(matching_acl)
+ rd_kafka_error_pointer = Rdkafka::Bindings.rd_kafka_AclBinding_error(matching_acl)
+ @result_error = Rdkafka::Bindings.rd_kafka_error_code(rd_kafka_error_pointer)
+ error_string = Rdkafka::Bindings.rd_kafka_error_string(rd_kafka_error_pointer)
+
+ if error_string != FFI::Pointer::NULL
+ @error_string = error_string.read_string
  end
+
+ @matching_acl_resource_type = Rdkafka::Bindings.rd_kafka_AclBinding_restype(matching_acl)
+ matching_acl_resource_name = Rdkafka::Bindings.rd_kafka_AclBinding_name(matching_acl)
+
+ if matching_acl_resource_name != FFI::Pointer::NULL
+ @matching_acl_resource_name = matching_acl_resource_name.read_string
+ end
+
+ @matching_acl_resource_pattern_type = Rdkafka::Bindings.rd_kafka_AclBinding_resource_pattern_type(matching_acl)
+ matching_acl_principal = Rdkafka::Bindings.rd_kafka_AclBinding_principal(matching_acl)
+
+ if matching_acl_principal != FFI::Pointer::NULL
+ @matching_acl_principal = matching_acl_principal.read_string
+ end
+
+ matching_acl_host = Rdkafka::Bindings.rd_kafka_AclBinding_host(matching_acl)
+
+ if matching_acl_host != FFI::Pointer::NULL
+ @matching_acl_host = matching_acl_host.read_string
+ end
+
+ @matching_acl_operation = Rdkafka::Bindings.rd_kafka_AclBinding_operation(matching_acl)
+ @matching_acl_permission_type = Rdkafka::Bindings.rd_kafka_AclBinding_permission_type(matching_acl)
  end
+ end
  end
  end
@@ -32,7 +32,7 @@ module Rdkafka
  # Polling

  attach_function :rd_kafka_flush, [:pointer, :int], :int, blocking: true
- attach_function :rd_kafka_poll, [:pointer, :int], :void, blocking: true
+ attach_function :rd_kafka_poll, [:pointer, :int], :int, blocking: true
  attach_function :rd_kafka_outq_len, [:pointer], :int, blocking: true

  # Metadata
@@ -185,12 +185,15 @@ module Rdkafka
  attach_function :rd_kafka_poll_set_consumer, [:pointer], :void, blocking: true
  attach_function :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: true
  attach_function :rd_kafka_consumer_close, [:pointer], :void, blocking: true
- attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int, blocking: true
+ attach_function :rd_kafka_offsets_store, [:pointer, :pointer], :int, blocking: true
  attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
  attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
  attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int, blocking: true
  attach_function :rd_kafka_offsets_for_times, [:pointer, :pointer, :int], :int, blocking: true
  attach_function :rd_kafka_position, [:pointer, :pointer], :int, blocking: true
+ # those two are used for eos support
+ attach_function :rd_kafka_consumer_group_metadata, [:pointer], :pointer, blocking: true
+ attach_function :rd_kafka_consumer_group_metadata_destroy, [:pointer], :void, blocking: true

  # Headers
  attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
@@ -394,6 +397,7 @@ module Rdkafka
  RD_KAFKA_RESOURCE_ANY = 1
  RD_KAFKA_RESOURCE_TOPIC = 2
  RD_KAFKA_RESOURCE_GROUP = 3
+ RD_KAFKA_RESOURCE_BROKER = 4

  # rd_kafka_ResourcePatternType_t - https://github.com/confluentinc/librdkafka/blob/292d2a66b9921b783f08147807992e603c7af059/src/rdkafka.h#L7320

@@ -436,9 +440,9 @@ module Rdkafka
  class NativeError < FFI::Struct # rd_kafka_error_t
  layout :code, :int32,
  :errstr, :pointer,
- :fatal, :uint8_t,
- :retriable, :uint8_t,
- :txn_requires_abort, :uint8_t
+ :fatal, :u_int8_t,
+ :retriable, :u_int8_t,
+ :txn_requires_abort, :u_int8_t
  end

  attach_function :rd_kafka_group_result_error, [:pointer], NativeError.by_ref # rd_kafka_group_result_t* => rd_kafka_error_t*
@@ -261,7 +261,6 @@ module Rdkafka
  end
  end
  end
-
  end

  # FFI Function used for Message Delivery callbacks
@@ -289,7 +288,16 @@ module Rdkafka

  # Call delivery callback on opaque
  if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
- opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], topic_name, message[:err]), delivery_handle)
+ opaque.call_delivery_callback(
+ Rdkafka::Producer::DeliveryReport.new(
+ message[:partition],
+ message[:offset],
+ topic_name,
+ message[:err],
+ delivery_handle.label
+ ),
+ delivery_handle
+ )
  end
  end
  end
@@ -19,10 +19,6 @@ module Rdkafka
  @native_kafka = native_kafka
  end

- def finalizer
- ->(_) { close }
- end
-
  # @return [String] consumer name
  def name
  @name ||= @native_kafka.with_inner do |inner|
30
26
  end
31
27
  end
32
28
 
29
+ def finalizer
30
+ ->(_) { close }
31
+ end
32
+
33
33
  # Close this consumer
34
34
  # @return [nil]
35
35
  def close
@@ -239,7 +239,7 @@ module Rdkafka
239
  # @param timeout_ms [Integer] The timeout for fetching this information.
  # @return [TopicPartitionList]
  # @raise [RdkafkaError] When getting the committed positions fails.
- def committed(list=nil, timeout_ms=1200)
+ def committed(list=nil, timeout_ms=2000)
  closed_consumer_check(__method__)

  if list.nil?
@@ -387,27 +387,26 @@ module Rdkafka
  def store_offset(message)
  closed_consumer_check(__method__)

- # rd_kafka_offset_store is one of the few calls that does not support
- # a string as the topic, so create a native topic for it.
- native_topic = @native_kafka.with_inner do |inner|
- Rdkafka::Bindings.rd_kafka_topic_new(
+ list = TopicPartitionList.new
+ list.add_topic_and_partitions_with_offsets(
+ message.topic,
+ message.partition => message.offset + 1
+ )
+
+ tpl = list.to_native_tpl
+
+ response = @native_kafka.with_inner do |inner|
+ Rdkafka::Bindings.rd_kafka_offsets_store(
  inner,
- message.topic,
- nil
+ tpl
  )
  end
- response = Rdkafka::Bindings.rd_kafka_offset_store(
- native_topic,
- message.partition,
- message.offset
- )
+
  if response != 0
  raise Rdkafka::RdkafkaError.new(response)
  end
  ensure
- if native_topic && !native_topic.null?
- Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
- end
+ Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
  end

  # Seek to a particular message. The next poll on the topic/partition will return the
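Since `#store_offset` now builds a `TopicPartitionList` and calls `rd_kafka_offsets_store`, it is meant to be paired with `enable.auto.offset.store` set to false (the new spec below even asserts an `invalid_arg` error otherwise). A minimal usage sketch under that assumption; the broker address, group id and topic name are placeholders:

```ruby
config = Rdkafka::Config.new(
  "bootstrap.servers"        => "localhost:9092",
  "group.id"                 => "example-group",
  "enable.auto.offset.store" => false
)

consumer = config.consumer
consumer.subscribe("events")

consumer.each do |message|
  handle_message(message) # hypothetical application logic

  # Stores message.offset + 1 for the message's topic/partition so the offset
  # is only committed once the message has actually been processed.
  consumer.store_offset(message)
end
```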
@@ -673,6 +672,22 @@ module Rdkafka
  end
  end

+ # Returns pointer to the consumer group metadata. It is used only in the context of
+ # exactly-once-semantics in transactions, which is why it is never remapped to Ruby
+ #
+ # This API is **not** usable by itself from Ruby
+ #
+ # @note This pointer **needs** to be removed with `#rd_kafka_consumer_group_metadata_destroy`
+ #
+ # @private
+ def consumer_group_metadata_pointer
+ closed_consumer_check(__method__)
+
+ @native_kafka.with_inner do |inner|
+ Bindings.rd_kafka_consumer_group_metadata(inner)
+ end
+ end
+
  private

  def closed_consumer_check(method)
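`#consumer_group_metadata_pointer` is plumbing for exactly-once/transactional flows, so the raw `FFI::Pointer` it returns has to be released by the caller. A small sketch of the intended lifecycle, mirroring the spec added further down:

```ruby
pointer = consumer.consumer_group_metadata_pointer

begin
  # The pointer would be handed to transactional producer internals here; it is
  # not otherwise usable from Ruby. `hand_over` is a hypothetical placeholder.
  hand_over(pointer)
ensure
  # Not garbage collected; it must be destroyed explicitly once no longer needed.
  Rdkafka::Bindings.rd_kafka_consumer_group_metadata_destroy(pointer)
end
```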
@@ -11,6 +11,9 @@ module Rdkafka
  :offset, :int64,
  :topic_name, :pointer

+ # @return [Object, nil] label set during message production or nil by default
+ attr_accessor :label
+
  # @return [String] the name of the operation (e.g. "delivery")
  def operation_name
  "delivery"
@@ -18,7 +21,15 @@ module Rdkafka

  # @return [DeliveryReport] a report on the delivery of the message
  def create_result
- DeliveryReport.new(self[:partition], self[:offset], self[:topic_name].read_string)
+ DeliveryReport.new(
+ self[:partition],
+ self[:offset],
+ # For some errors we will not get a topic name reference and in such cases
+ # we should not return it
+ self[:topic_name].null? ? nil : self[:topic_name].read_string,
+ self[:response] != 0 ? RdkafkaError.new(self[:response]) : nil,
+ label
+ )
  end
  end
  end
@@ -12,21 +12,34 @@ module Rdkafka
  # @return [Integer]
  attr_reader :offset

- # The name of the topic this message was produced to.
- # @return [String]
+ # The name of the topic this message was produced to, or nil in the case of error reports
+ # where the topic was not reached.
+ #
+ # @return [String, nil]
  attr_reader :topic_name

  # Error in case happen during produce.
  # @return [Integer]
  attr_reader :error

+ # @return [Object, nil] label set during message production or nil by default
+ attr_reader :label
+
+ # We alias the `#topic_name` under `#topic` to make this consistent with `Consumer::Message`
+ # where the topic name is under the `#topic` method. That way we have a consistent name that
+ # is present in both places.
+ #
+ # We do not remove the original `#topic_name` because of backwards compatibility
+ alias topic topic_name
+
  private

- def initialize(partition, offset, topic_name = nil, error = nil)
+ def initialize(partition, offset, topic_name = nil, error = nil, label = nil)
  @partition = partition
  @offset = offset
  @topic_name = topic_name
  @error = error
+ @label = label
  end
  end
  end
@@ -23,6 +23,9 @@ module Rdkafka
  attr_reader :delivery_callback_arity

  # @private
+ # @param native_kafka [NativeKafka]
+ # @param partitioner_name [String, nil] name of the partitioner we want to use or nil to use
+ #   the "consistent_random" default
  def initialize(native_kafka, partitioner_name)
  @native_kafka = native_kafka
  @partitioner_name = partitioner_name || "consistent_random"
@@ -37,10 +40,16 @@ module Rdkafka
  topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
  end

- cache[topic] = [
- monotonic_now,
- topic_metadata ? topic_metadata[:partition_count] : nil
- ]
+ partition_count = topic_metadata ? topic_metadata[:partition_count] : -1
+
+ # This approach caches the failure to fetch only briefly. This will make sure that
+ # we do not cache the failure for too long but also "buys" us a bit of time in case there
+ # would be issues in the cluster so we won't overload it with consecutive requests
+ cache[topic] = if partition_count.positive?
+ [monotonic_now, partition_count]
+ else
+ [monotonic_now - PARTITIONS_COUNT_TTL + 5, partition_count]
+ end
  end
  end

@@ -134,14 +143,15 @@ module Rdkafka
  # Partition count for a given topic.
  #
  # @param topic [String] The topic name.
- # @return [Integer] partition count for a given topic
+ # @return [Integer] partition count for a given topic or `-1` if it could not be obtained.
  #
  # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
  #   auto-created after returning nil.
  #
  # @note We cache the partition count for a given topic for given time.
  #   This prevents us in case someone uses `partition_key` from querying for the count with
- #   each message. Instead we query once every 30 seconds at most
+ #   each message. Instead we query once every 30 seconds at most if we have a valid partition
+ #   count or every 5 seconds in case we were not able to obtain the number of partitions
  def partition_count(topic)
  closed_producer_check(__method__)

@@ -164,11 +174,12 @@ module Rdkafka
  # @param partition_key [String, nil] Optional partition key based on which partition assignment can happen
  # @param timestamp [Time,Integer,nil] Optional timestamp of this message. Integer timestamp is in milliseconds since Jan 1 1970.
  # @param headers [Hash<String,String>] Optional message headers
+ # @param label [Object, nil] a label that can be assigned when producing a message that will be part of the delivery handle and the delivery report
  #
  # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
  #
  # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
- def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil)
+ def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil, label: nil)
  closed_producer_check(__method__)

  # Start by checking and converting the input
@@ -190,7 +201,7 @@ module Rdkafka
  if partition_key
  partition_count = partition_count(topic)
  # If the topic is not present, set to -1
- partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, @partitioner_name) if partition_count
+ partition = Rdkafka::Bindings.partitioner(partition_key, partition_count, @partitioner_name) if partition_count.positive?
  end

  # If partition is nil, use -1 to let librdafka set the partition randomly or
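With the two changes above, a metadata failure no longer poisons the cache with `nil`: `#partition_count` returns `-1`, the failed lookup expires quickly (the updated docstring mentions about five seconds) and `partition_key` based partitioning is skipped via the `positive?` guard. A hedged sketch of how a caller might treat the sentinel; the topic name is illustrative:

```ruby
count = producer.partition_count("events")

if count.positive?
  puts "events currently has #{count} partitions"
else
  # -1 means the metadata request failed; the result is cached only briefly,
  # so a later call will trigger a fresh lookup against the cluster.
  puts "partition count unavailable right now"
end
```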
@@ -210,6 +221,7 @@ module Rdkafka
  end

  delivery_handle = DeliveryHandle.new
+ delivery_handle.label = label
  delivery_handle[:pending] = true
  delivery_handle[:response] = -1
  delivery_handle[:partition] = -1
@@ -256,13 +268,27 @@ module Rdkafka
  delivery_handle
  end

+ # Calls (if registered) the delivery callback
+ #
+ # @param delivery_report [Producer::DeliveryReport]
+ # @param delivery_handle [Producer::DeliveryHandle]
  def call_delivery_callback(delivery_report, delivery_handle)
  return unless @delivery_callback

- args = [delivery_report, delivery_handle].take(@delivery_callback_arity)
- @delivery_callback.call(*args)
+ case @delivery_callback_arity
+ when 0
+ @delivery_callback.call
+ when 1
+ @delivery_callback.call(delivery_report)
+ else
+ @delivery_callback.call(delivery_report, delivery_handle)
+ end
  end

+ # Figures out the arity of a given block/method
+ #
+ # @param callback [#call, Proc]
+ # @return [Integer] arity of the provided block/method
  def arity(callback)
  return callback.arity if callback.respond_to?(:arity)

@@ -271,6 +297,10 @@ module Rdkafka
  private

+ # Ensures no operations can happen on a closed producer
+ #
+ # @param method [Symbol] name of the method that invoked the producer
+ # @raise [Rdkafka::ClosedProducerError]
  def closed_producer_check(method)
  raise Rdkafka::ClosedProducerError.new(method) if closed?
  end
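The reworked callback dispatch in this file branches on the pre-computed arity instead of allocating a throwaway array per delivered message. Observable behaviour should be unchanged: a one-argument callback still receives just the report, a two-argument one also receives the handle. A short sketch under that assumption:

```ruby
# One-argument callback: only the delivery report is yielded.
producer.delivery_callback = lambda do |report|
  puts "delivered #{report.topic}/#{report.partition}@#{report.offset} label=#{report.label}"
end

# Two-argument callback: the delivery handle is passed as well.
producer.delivery_callback = lambda do |report, handle|
  puts "#{handle.label} finished, error=#{report.error.inspect}"
end
```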
@@ -1,7 +1,7 @@
  # frozen_string_literal: true

  module Rdkafka
- VERSION = "0.15.0"
+ VERSION = "0.15.1"
  LIBRDKAFKA_VERSION = "2.3.0"
  LIBRDKAFKA_SOURCE_SHA256 = "2d49c35c77eeb3d42fa61c43757fcbb6a206daa560247154e60642bcdcc14d12"
  end
data/rdkafka.gemspec CHANGED
@@ -3,10 +3,10 @@
  require File.expand_path('lib/rdkafka/version', __dir__)

  Gem::Specification.new do |gem|
- gem.authors = ['Thijs Cadier']
+ gem.authors = ['Thijs Cadier', 'Maciej Mensfeld']
  gem.email = ["contact@karafka.io"]
  gem.description = "Modern Kafka client library for Ruby based on librdkafka"
- gem.summary = "The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka. It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+ and Ruby 2.4+."
+ gem.summary = "The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka. It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+ and Ruby 2.7+."
  gem.license = 'MIT'

  gem.files = `git ls-files`.split($\)
@@ -50,6 +50,7 @@ describe Rdkafka::Admin::DeleteAclReport do
  end

  it "should get deleted acl resource pattern type as Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL" do
+ expect(subject.deleted_acls[0].matching_acl_resource_pattern_type).to eq(Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL)
  expect(subject.deleted_acls[0].matching_acl_pattern_type).to eq(Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL)
  end

@@ -51,6 +51,7 @@ describe Rdkafka::Admin::DescribeAclReport do
  end

  it "should get matching acl resource pattern type as Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL" do
+ expect(subject.acls[0].matching_acl_resource_pattern_type).to eq(Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL)
  expect(subject.acls[0].matching_acl_pattern_type).to eq(Rdkafka::Bindings::RD_KAFKA_RESOURCE_PATTERN_LITERAL)
  end

@@ -480,6 +480,8 @@ describe Rdkafka::Consumer do
  end

  describe "#store_offset" do
+ let(:consumer) { rdkafka_consumer_config('enable.auto.offset.store': false).consumer }
+
  before do
  config = {}
  config[:'enable.auto.offset.store'] = false
@@ -542,6 +544,14 @@ describe Rdkafka::Consumer do
  }.to raise_error(Rdkafka::RdkafkaError)
  end
  end
+
+ context "when trying to use with enable.auto.offset.store set to true" do
+ let(:consumer) { rdkafka_consumer_config('enable.auto.offset.store': true).consumer }
+
+ it "expect to raise invalid configuration error" do
+ expect { consumer.store_offset(message) }.to raise_error(Rdkafka::RdkafkaError, /invalid_arg/)
+ end
+ end
  end
  end
  end
@@ -1123,6 +1133,16 @@ describe Rdkafka::Consumer do
  end
  end

+ describe '#consumer_group_metadata_pointer' do
+ let(:pointer) { consumer.consumer_group_metadata_pointer }
+
+ after { Rdkafka::Bindings.rd_kafka_consumer_group_metadata_destroy(pointer) }
+
+ it 'expect to return a pointer' do
+ expect(pointer).to be_a(FFI::Pointer)
+ end
+ end
+
  describe "a rebalance listener" do
  let(:consumer) do
  config = rdkafka_consumer_config
@@ -15,6 +15,10 @@ describe Rdkafka::Producer::DeliveryReport do
  expect(subject.topic_name).to eq "topic"
  end

+ it "should get the same topic name under topic alias" do
+ expect(subject.topic).to eq "topic"
+ end
+
  it "should get the error" do
  expect(subject.error).to eq -1
  end
@@ -34,6 +34,7 @@ describe Rdkafka::Producer do

  producer.delivery_callback = lambda do |report|
  expect(report).not_to be_nil
+ expect(report.label).to eq "label"
  expect(report.partition).to eq 1
  expect(report.offset).to be >= 0
  expect(report.topic_name).to eq "produce_test_topic"
@@ -44,9 +45,12 @@ describe Rdkafka::Producer do
  handle = producer.produce(
  topic: "produce_test_topic",
  payload: "payload",
- key: "key"
+ key: "key",
+ label: "label"
  )

+ expect(handle.label).to eq "label"
+
  # Wait for it to be delivered
  handle.wait(max_wait_timeout: 15)

@@ -175,11 +179,13 @@ describe Rdkafka::Producer do
  handle = producer.produce(
  topic: "produce_test_topic",
  payload: "payload",
- key: "key"
+ key: "key",
+ label: "label"
  )

  # Should be pending at first
  expect(handle.pending?).to be true
+ expect(handle.label).to eq "label"

  # Check delivery handle and report
  report = handle.wait(max_wait_timeout: 5)
@@ -187,6 +193,7 @@ describe Rdkafka::Producer do
  expect(report).not_to be_nil
  expect(report.partition).to eq 1
  expect(report.offset).to be >= 0
+ expect(report.label).to eq "label"

  # Flush and close producer
  producer.flush
@@ -558,6 +565,23 @@ describe Rdkafka::Producer do
  end
  end

+ context "when not being able to deliver the message" do
+ let(:producer) do
+ rdkafka_producer_config(
+ "bootstrap.servers": "localhost:9093",
+ "message.timeout.ms": 100
+ ).producer
+ end
+
+ it "should contain the error in the response when not deliverable" do
+ handler = producer.produce(topic: 'produce_test_topic', payload: nil, label: 'na')
+ # Wait for the async callbacks and delivery registry to update
+ sleep(2)
+ expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
+ expect(handler.create_result.label).to eq('na')
+ end
+ end
+
  describe '#partition_count' do
  it { expect(producer.partition_count('consume_test_topic')).to eq(3) }
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,10 +1,11 @@
  --- !ruby/object:Gem::Specification
  name: rdkafka
  version: !ruby/object:Gem::Version
- version: 0.15.0
+ version: 0.15.1
  platform: ruby
  authors:
  - Thijs Cadier
+ - Maciej Mensfeld
  autorequire:
  bindir: bin
  cert_chain:
@@ -35,7 +36,7 @@ cert_chain:
  AnG1dJU+yL2BK7vaVytLTstJME5mepSZ46qqIJXMuWob/YPDmVaBF39TDSG9e34s
  msG3BiCqgOgHAnL23+CN3Rt8MsuRfEtoTKpJVcCfoEoNHOkc
  -----END CERTIFICATE-----
- date: 2023-12-03 00:00:00.000000000 Z
+ date: 2024-01-30 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: ffi
@@ -277,12 +278,12 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.4.19
+ rubygems_version: 3.5.3
  signing_key:
  specification_version: 4
  summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
  It wraps the production-ready C client using the ffi gem and targets Kafka 1.0+
- and Ruby 2.4+.
+ and Ruby 2.7+.
  test_files:
  - spec/rdkafka/abstract_handle_spec.rb
  - spec/rdkafka/admin/create_acl_handle_spec.rb
metadata.gz.sig CHANGED
Binary file