rdkafka 0.11.1 → 0.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. checksums.yaml +4 -4
  2. data/.semaphore/semaphore.yml +7 -3
  3. data/CHANGELOG.md +23 -2
  4. data/Gemfile +2 -0
  5. data/README.md +26 -0
  6. data/Rakefile +2 -0
  7. data/dist/librdkafka_2.0.2.tar.gz +0 -0
  8. data/ext/Rakefile +56 -27
  9. data/lib/rdkafka/abstract_handle.rb +2 -0
  10. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  11. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  12. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  13. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  14. data/lib/rdkafka/admin.rb +50 -33
  15. data/lib/rdkafka/bindings.rb +59 -39
  16. data/lib/rdkafka/callbacks.rb +7 -1
  17. data/lib/rdkafka/config.rb +15 -12
  18. data/lib/rdkafka/consumer/headers.rb +24 -7
  19. data/lib/rdkafka/consumer/message.rb +3 -1
  20. data/lib/rdkafka/consumer/partition.rb +2 -0
  21. data/lib/rdkafka/consumer/topic_partition_list.rb +2 -0
  22. data/lib/rdkafka/consumer.rb +86 -44
  23. data/lib/rdkafka/error.rb +15 -0
  24. data/lib/rdkafka/metadata.rb +4 -2
  25. data/lib/rdkafka/native_kafka.rb +115 -0
  26. data/lib/rdkafka/producer/delivery_handle.rb +5 -2
  27. data/lib/rdkafka/producer/delivery_report.rb +9 -2
  28. data/lib/rdkafka/producer.rb +56 -38
  29. data/lib/rdkafka/version.rb +5 -3
  30. data/lib/rdkafka.rb +3 -0
  31. data/rdkafka.gemspec +2 -0
  32. data/spec/rdkafka/abstract_handle_spec.rb +2 -0
  33. data/spec/rdkafka/admin/create_topic_handle_spec.rb +2 -0
  34. data/spec/rdkafka/admin/create_topic_report_spec.rb +2 -0
  35. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +2 -0
  36. data/spec/rdkafka/admin/delete_topic_report_spec.rb +2 -0
  37. data/spec/rdkafka/admin_spec.rb +4 -3
  38. data/spec/rdkafka/bindings_spec.rb +9 -0
  39. data/spec/rdkafka/callbacks_spec.rb +2 -0
  40. data/spec/rdkafka/config_spec.rb +17 -2
  41. data/spec/rdkafka/consumer/headers_spec.rb +62 -0
  42. data/spec/rdkafka/consumer/message_spec.rb +2 -0
  43. data/spec/rdkafka/consumer/partition_spec.rb +2 -0
  44. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +2 -0
  45. data/spec/rdkafka/consumer_spec.rb +123 -27
  46. data/spec/rdkafka/error_spec.rb +2 -0
  47. data/spec/rdkafka/metadata_spec.rb +2 -0
  48. data/spec/rdkafka/native_kafka_spec.rb +124 -0
  49. data/spec/rdkafka/producer/delivery_handle_spec.rb +5 -0
  50. data/spec/rdkafka/producer/delivery_report_spec.rb +8 -2
  51. data/spec/rdkafka/producer_spec.rb +103 -24
  52. data/spec/spec_helper.rb +17 -1
  53. metadata +13 -9
  54. data/bin/console +0 -11
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 9b7042f241be5aad91c403f61aabb8e8ba87afa46674db8ba58c487fe01f88c9
-  data.tar.gz: 5cb00dc3dc8b4069a8e62cda9271e290d1371434332767270426488dedcff641
+  metadata.gz: 2cfb434cf743bf1ab22c99849ea6efc793b0d367c6c638546c572013354b03b0
+  data.tar.gz: db1f892fd0e7d43ca702ce4c560328e120e0f7db8029dd09e5195c14e8355026
 SHA512:
-  metadata.gz: 2b287a0d81aca7909702969778fb64585b6808860971d50e1c5540eb0c43cf73617f9e701605b816e8f52a81e39d9909512e1d113f6a550555e6ca503dc5ff58
-  data.tar.gz: 7924ab06f52646168b52d0478b605484e4aa1a3674ace4f97af5b56588aea6dec8e614d3258b0670f92f18a905509ae6982d2c8999fa719f8ea7451e515ecb3d
+  metadata.gz: 5e761ab003a9f64b20e06d06fb34dcf9114410c58aafd6e577a68851f630c4fdd38b481d2150857f7fce7b12cb208742e6898f5e7f29e4470bfacbaa3ae9ad78
+  data.tar.gz: 8358654b4323144848dd213751e35a91ab731539a4cba0c6f340b7618260304c8dc5310d8ba22cbcca6ea4e40aabfceb28dc1d24e3af6e45ae7669be82649578
data/.semaphore/semaphore.yml CHANGED
@@ -9,15 +9,19 @@ agent:
 blocks:
   - name: Run specs
     task:
+      prologue:
+        commands:
+          - sudo apt install -y valgrind
       jobs:
         - name: bundle exec rspec
           matrix:
             - env_var: RUBY_VERSION
-              values: [ "2.6.8", "2.7.4", "3.0.2", "jruby-9.3.1.0"]
+              values: [ "2.6.10", "2.7.6", "3.0.4", "3.1.2"]
           commands:
             - sem-version ruby $RUBY_VERSION
             - checkout
+            - docker-compose up -d --no-recreate
            - bundle install --path vendor/bundle
            - cd ext && bundle exec rake && cd ..
-            - docker-compose up -d --no-recreate
-            - bundle exec rspec
+            - ulimit -c unlimited
+            - valgrind -v bundle exec rspec
data/CHANGELOG.md CHANGED
@@ -1,5 +1,26 @@
-# 0.11.1
-* Use mini_portile2 2.6, otherwise you can't run nokogiri and rdkafka at the same time.
+# 0.13.1 (2024-07-10)
+- [Fix] Switch to local release of librdkafka to mitigate its unavailability.
+
+# 0.13.0
+* Support cooperative sticky partition assignment in the rebalance callback (methodmissing)
+* Support both string and symbol header keys (ColinDKelley)
+* Handle tombstone messages properly (kgalieva)
+* Add topic name to delivery report (maeve)
+* Allow string partitioner config (mollyegibson)
+* Fix documented type for DeliveryReport#error (jimmydo)
+* Bump librdkafka to 2.0.2 (lmaia)
+* Use finalizers to cleanly exit producer and admin (thijsc)
+* Lock access to the native kafka client (thijsc)
+* Fix potential race condition in multi-threaded producer (mensfeld)
+* Fix leaking FFI resources in specs (mensfeld)
+* Improve specs stability (mensfeld)
+* Make metadata request timeout configurable (mensfeld)
+* call_on_partitions_assigned and call_on_partitions_revoked only get a tpl passed in (thijsc)
+
+# 0.12.0
+* Bumps librdkafka to 1.9.0
+* Fix crash on empty partition key (mensfeld)
+* Pass the delivery handle to the callback (gvisokinskas)
 
 # 0.11.0
 * Upgrade librdkafka to 1.8.2
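Taken together, the 0.13.0 entries center on cooperative rebalancing. As a hedged illustration (broker address, group id, and topic name are assumptions, not part of this diff), opting into librdkafka's cooperative-sticky assignor looks like this; the updated rebalance callback then applies incremental assignments:

```ruby
require "rdkafka"

# Hypothetical consumer setup; with cooperative-sticky enabled, the gem's
# rebalance callback now calls rd_kafka_incremental_assign/unassign instead
# of reassigning the full partition set on every rebalance.
config = Rdkafka::Config.new(
  "bootstrap.servers"             => "localhost:9092",
  "group.id"                      => "example-group",
  "partition.assignment.strategy" => "cooperative-sticky"
)
consumer = config.consumer
consumer.subscribe("example-topic")
```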
data/Gemfile CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 source "https://rubygems.org"
 
 gemspec
data/README.md CHANGED
@@ -23,6 +23,19 @@ The most important pieces of a Kafka client are implemented. We're
 working towards feature completeness; you can track that here:
 https://github.com/appsignal/rdkafka-ruby/milestone/1
 
+## Table of contents
+
+- [Installation](#installation)
+- [Usage](#usage)
+  * [Consuming messages](#consuming-messages)
+  * [Producing messages](#producing-messages)
+- [Higher level libraries](#higher-level-libraries)
+  * [Message processing frameworks](#message-processing-frameworks)
+  * [Message publishing libraries](#message-publishing-libraries)
+- [Development](#development)
+- [Example](#example)
+
+
 ## Installation
 
 This gem downloads and compiles librdkafka when it is installed. If you
@@ -77,6 +90,19 @@ Note that creating a producer consumes some resources that will not be
 released until `#close` is explicitly called, so be sure to call
 `Config#producer` only as necessary.
 
+## Higher level libraries
+
+Currently there are two actively developed frameworks based on rdkafka-ruby that provide a higher-level API for working with Kafka messages, and one library for publishing messages.
+
+### Message processing frameworks
+
+* [Karafka](https://github.com/karafka/karafka) - Ruby and Rails efficient Kafka processing framework.
+* [Racecar](https://github.com/zendesk/racecar) - A simple framework for Kafka consumers in Ruby.
+
+### Message publishing libraries
+
+* [WaterDrop](https://github.com/karafka/waterdrop) – Standalone Karafka library for producing Kafka messages.
+
 ## Development
 
 A Docker Compose file is included to run Kafka and Zookeeper. To run
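To make the resource note above concrete, a minimal sketch of the intended producer lifecycle (broker and topic names are assumptions): create one producer, reuse it, and close it explicitly.

```ruby
require "rdkafka"

producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer
begin
  handle = producer.produce(topic: "example-topic", payload: "hello")
  handle.wait # blocks until the delivery report arrives
ensure
  producer.close # releases the native librdkafka client
end
```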
data/Rakefile CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 # Rakefile
 
 require 'bundler/gem_tasks'
data/dist/librdkafka_2.0.2.tar.gz CHANGED
Binary file (no diff shown)
data/ext/Rakefile CHANGED
@@ -1,38 +1,67 @@
+# frozen_string_literal: true
+
 require File.expand_path('../../lib/rdkafka/version', __FILE__)
-require "mini_portile2"
 require "fileutils"
 require "open-uri"
 
 task :default => :clean do
-  # Download and compile librdkafka
-  recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)
-
-  # Use default homebrew openssl if we're on mac and the directory exists
-  # and each of flags is not empty
-  if recipe.host&.include?("darwin") && system("which brew &> /dev/null") && Dir.exist?("#{homebrew_prefix = %x(brew --prefix openssl).strip}")
-    ENV["CPPFLAGS"] = "-I#{homebrew_prefix}/include" unless ENV["CPPFLAGS"]
-    ENV["LDFLAGS"] = "-L#{homebrew_prefix}/lib" unless ENV["LDFLAGS"]
-  end
+  # For nix users: nix can't locate the file paths because the packages it requires aren't managed by the system but
+  # by nix itself, so using the normal file paths doesn't work for nix users.
+  #
+  # Mini_portile causes an issue because its dependencies are downloaded on the fly and therefore don't exist/aren't
+  # accessible in the nix environment
+  if ENV.fetch('RDKAFKA_EXT_PATH', '').empty?
+    # Download and compile librdkafka if RDKAFKA_EXT_PATH is not set
+    require "mini_portile2"
+    recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)
+
+    # Use default homebrew openssl if we're on mac and the directory exists
+    # and each of flags is not empty
+    if recipe.host&.include?("darwin") && system("which brew &> /dev/null") && Dir.exist?("#{homebrew_prefix = %x(brew --prefix openssl).strip}")
+      ENV["CPPFLAGS"] = "-I#{homebrew_prefix}/include" unless ENV["CPPFLAGS"]
+      ENV["LDFLAGS"] = "-L#{homebrew_prefix}/lib" unless ENV["LDFLAGS"]
+    end
+
+    releases = File.expand_path(File.join(File.dirname(__FILE__), '../dist'))
+
+    recipe.files << {
+      :url => "file://#{releases}/librdkafka_#{Rdkafka::LIBRDKAFKA_VERSION}.tar.gz",
+      :sha256 => Rdkafka::LIBRDKAFKA_SOURCE_SHA256
+    }
+    recipe.configure_options = ["--host=#{recipe.host}"]
 
-  recipe.files << {
-    :url => "https://codeload.github.com/edenhill/librdkafka/tar.gz/v#{Rdkafka::LIBRDKAFKA_VERSION}",
-    :sha256 => Rdkafka::LIBRDKAFKA_SOURCE_SHA256
-  }
-  recipe.configure_options = ["--host=#{recipe.host}"]
-  recipe.cook
-  # Move dynamic library we're interested in
-  if recipe.host.include?('darwin')
-    from_extension = '1.dylib'
-    to_extension = 'dylib'
+    # Disable using libc regex engine in favor of the embedded one
+    # The default regex engine of librdkafka does not always work exactly as most of the users
+    # would expect, hence this flag allows for changing it to the other one
+    if ENV.key?('RDKAFKA_DISABLE_REGEX_EXT')
+      recipe.configure_options << '--disable-regex-ext'
+    end
+
+    recipe.cook
+    # Move dynamic library we're interested in
+    if recipe.host.include?('darwin')
+      from_extension = '1.dylib'
+      to_extension = 'dylib'
+    else
+      from_extension = 'so.1'
+      to_extension = 'so'
+    end
+    lib_path = File.join(File.dirname(__FILE__), "ports/#{recipe.host}/librdkafka/#{Rdkafka::LIBRDKAFKA_VERSION}/lib/librdkafka.#{from_extension}")
+    FileUtils.mv(lib_path, File.join(File.dirname(__FILE__), "librdkafka.#{to_extension}"))
+    # Cleanup files created by miniportile we don't need in the gem
+    FileUtils.rm_rf File.join(File.dirname(__FILE__), "tmp")
+    FileUtils.rm_rf File.join(File.dirname(__FILE__), "ports")
   else
-    from_extension = 'so.1'
-    to_extension = 'so'
+    # Otherwise, copy existing libraries to ./ext
+    if ENV['RDKAFKA_EXT_PATH'].nil? || ENV['RDKAFKA_EXT_PATH'].empty?
+      raise "RDKAFKA_EXT_PATH must be set in your nix config when running under nix"
+    end
+    files = [
+      File.join(ENV['RDKAFKA_EXT_PATH'], 'lib', 'librdkafka.dylib'),
+      File.join(ENV['RDKAFKA_EXT_PATH'], 'lib', 'librdkafka.so')
+    ]
+    files.each { |ext| FileUtils.cp(ext, File.dirname(__FILE__)) if File.exist?(ext) }
   end
-  lib_path = File.join(File.dirname(__FILE__), "ports/#{recipe.host}/librdkafka/#{Rdkafka::LIBRDKAFKA_VERSION}/lib/librdkafka.#{from_extension}")
-  FileUtils.mv(lib_path, File.join(File.dirname(__FILE__), "librdkafka.#{to_extension}"))
-  # Cleanup files created by miniportile we don't need in the gem
-  FileUtils.rm_rf File.join(File.dirname(__FILE__), "tmp")
-  FileUtils.rm_rf File.join(File.dirname(__FILE__), "ports")
 end
 
 task :clean do
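For context, a hedged sketch of how the build switch above behaves; the nix store path is purely illustrative:

```ruby
# With RDKAFKA_EXT_PATH unset, the task compiles the librdkafka tarball
# bundled under dist/ via mini_portile2; when it is set (e.g. under nix),
# the prebuilt shared libraries are copied into ext/ and nothing is compiled.
ENV["RDKAFKA_EXT_PATH"] = "/nix/store/example-librdkafka" # hypothetical path

if ENV.fetch("RDKAFKA_EXT_PATH", "").empty?
  puts "building bundled librdkafka from dist/"
else
  puts "copying prebuilt libraries from #{ENV['RDKAFKA_EXT_PATH']}/lib into ext/"
end
```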
data/lib/rdkafka/abstract_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "ffi"
 
 module Rdkafka
data/lib/rdkafka/admin/create_topic_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class CreateTopicHandle < AbstractHandle
data/lib/rdkafka/admin/create_topic_report.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class CreateTopicReport
data/lib/rdkafka/admin/delete_topic_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class DeleteTopicHandle < AbstractHandle
data/lib/rdkafka/admin/delete_topic_report.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class DeleteTopicReport
data/lib/rdkafka/admin.rb CHANGED
@@ -1,33 +1,31 @@
+# frozen_string_literal: true
+
+require "objspace"
+
 module Rdkafka
   class Admin
     # @private
     def initialize(native_kafka)
       @native_kafka = native_kafka
-      @closing = false
-
-      # Start thread to poll client for callbacks
-      @polling_thread = Thread.new do
-        loop do
-          Rdkafka::Bindings.rd_kafka_poll(@native_kafka, 250)
-          # Exit thread if closing and the poll queue is empty
-          if @closing && Rdkafka::Bindings.rd_kafka_outq_len(@native_kafka) == 0
-            break
-          end
-        end
-      end
-      @polling_thread.abort_on_exception = true
+
+      # Makes sure that the native kafka gets closed before it gets GCed by Ruby
+      ObjectSpace.define_finalizer(self, native_kafka.finalizer)
+    end
+
+    def finalizer
+      ->(_) { close }
     end
 
     # Close this admin instance
     def close
-      return unless @native_kafka
-
-      # Indicate to polling thread that we're closing
-      @closing = true
-      # Wait for the polling thread to finish up
-      @polling_thread.join
-      Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
-      @native_kafka = nil
+      return if closed?
+      ObjectSpace.undefine_finalizer(self)
+      @native_kafka.close
+    end
+
+    # Whether this admin has closed
+    def closed?
+      @native_kafka.closed?
     end
 
     # Create a topic with the given partition count and replication factor
@@ -38,6 +36,7 @@ module Rdkafka
     #
     # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of creating the topic
     def create_topic(topic_name, partition_count, replication_factor, topic_config={})
+      closed_admin_check(__method__)
 
       # Create a rd_kafka_NewTopic_t representing the new topic
       error_buffer = FFI::MemoryPointer.from_string(" " * 256)
@@ -68,7 +67,9 @@ module Rdkafka
       topics_array_ptr.write_array_of_pointer(pointer_array)
 
       # Get a pointer to the queue that our request will be enqueued on
-      queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
+      queue_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
+      end
       if queue_ptr.null?
         Rdkafka::Bindings.rd_kafka_NewTopic_destroy(new_topic_ptr)
         raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -79,18 +80,22 @@ module Rdkafka
       create_topic_handle[:pending] = true
       create_topic_handle[:response] = -1
       CreateTopicHandle.register(create_topic_handle)
-      admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
+      admin_options_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
+      end
       Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_topic_handle.to_ptr)
 
       begin
-        Rdkafka::Bindings.rd_kafka_CreateTopics(
-          @native_kafka,
+        @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_CreateTopics(
+            inner,
             topics_array_ptr,
             1,
             admin_options_ptr,
             queue_ptr
-        )
-      rescue Exception => err
+          )
+        end
+      rescue Exception
        CreateTopicHandle.remove(create_topic_handle.to_ptr.address)
        raise
      ensure
@@ -108,6 +113,7 @@ module Rdkafka
     #
     # @return [DeleteTopicHandle] Delete topic handle that can be used to wait for the result of deleting the topic
     def delete_topic(topic_name)
+      closed_admin_check(__method__)
 
       # Create a rd_kafka_DeleteTopic_t representing the topic to be deleted
       delete_topic_ptr = Rdkafka::Bindings.rd_kafka_DeleteTopic_new(FFI::MemoryPointer.from_string(topic_name))
@@ -118,7 +124,9 @@ module Rdkafka
       topics_array_ptr.write_array_of_pointer(pointer_array)
 
       # Get a pointer to the queue that our request will be enqueued on
-      queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
+      queue_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
+      end
       if queue_ptr.null?
         Rdkafka::Bindings.rd_kafka_DeleteTopic_destroy(delete_topic_ptr)
         raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -129,18 +137,22 @@ module Rdkafka
       delete_topic_handle[:pending] = true
       delete_topic_handle[:response] = -1
       DeleteTopicHandle.register(delete_topic_handle)
-      admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
+      admin_options_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
+      end
       Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, delete_topic_handle.to_ptr)
 
       begin
-        Rdkafka::Bindings.rd_kafka_DeleteTopics(
-          @native_kafka,
+        @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_DeleteTopics(
+            inner,
             topics_array_ptr,
             1,
             admin_options_ptr,
             queue_ptr
-        )
-      rescue Exception => err
+          )
+        end
+      rescue Exception
        DeleteTopicHandle.remove(delete_topic_handle.to_ptr.address)
        raise
      ensure
@@ -151,5 +163,10 @@ module Rdkafka
 
       delete_topic_handle
     end
+
+    private
+    def closed_admin_check(method)
+      raise Rdkafka::ClosedAdminError.new(method) if closed?
+    end
   end
 end
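A hedged usage sketch of the reworked lifecycle: `close` is idempotent, `closed?` is queryable, and operations on a closed admin raise `Rdkafka::ClosedAdminError` (broker address and topic parameters are example values):

```ruby
require "rdkafka"

admin = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").admin

handle = admin.create_topic("example-topic", 3, 1) # 3 partitions, replication 1
handle.wait # block until the CreateTopics result arrives

admin.close
admin.closed? # => true
admin.close   # safe: close returns early via closed?

# Any further operation raises:
# admin.delete_topic("example-topic") # => Rdkafka::ClosedAdminError
```

Even without an explicit `close`, the finalizer registered in `initialize` closes the native client before GC reclaims the object.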
data/lib/rdkafka/bindings.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "ffi"
 require "json"
 require "logger"
@@ -15,7 +17,7 @@ module Rdkafka
     end
   end
 
-  ffi_lib File.join(File.dirname(__FILE__), "../../ext/librdkafka.#{lib_extension}")
+  ffi_lib File.join(__dir__, "../../ext/librdkafka.#{lib_extension}")
 
   RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175
   RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174
@@ -33,15 +35,16 @@ module Rdkafka
 
   # Polling
 
+  attach_function :rd_kafka_flush, [:pointer, :int], :void, blocking: true
   attach_function :rd_kafka_poll, [:pointer, :int], :void, blocking: true
   attach_function :rd_kafka_outq_len, [:pointer], :int, blocking: true
 
   # Metadata
 
-  attach_function :rd_kafka_memberid, [:pointer], :string
-  attach_function :rd_kafka_clusterid, [:pointer], :string
-  attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int
-  attach_function :rd_kafka_metadata_destroy, [:pointer], :void
+  attach_function :rd_kafka_memberid, [:pointer], :string, blocking: true
+  attach_function :rd_kafka_clusterid, [:pointer], :string, blocking: true
+  attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int, blocking: true
+  attach_function :rd_kafka_metadata_destroy, [:pointer], :void, blocking: true
 
   # Message struct
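Many bindings in this file gain `blocking: true`. In Ruby-FFI this releases the GVL for the duration of the native call, so a long poll, flush, or metadata request inside librdkafka no longer stalls other Ruby threads. A minimal, self-contained illustration against libc rather than librdkafka:

```ruby
require "ffi"

module LibcSleep
  extend FFI::Library
  ffi_lib FFI::Library::LIBC

  # blocking: true tells FFI to release the GVL around the call, which is
  # exactly what the rdkafka bindings above now do for poll/flush/metadata.
  attach_function :sleep, [:uint], :uint, blocking: true
end

t = Thread.new { LibcSleep.sleep(2) } # native call runs without holding the GVL
puts "other threads keep running"     # prints immediately
t.join
```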
 
@@ -110,6 +113,7 @@ module Rdkafka
   attach_function :rd_kafka_conf_set_stats_cb, [:pointer, :stats_cb], :void
   callback :error_cb, [:pointer, :int, :string, :pointer], :void
   attach_function :rd_kafka_conf_set_error_cb, [:pointer, :error_cb], :void
+  attach_function :rd_kafka_rebalance_protocol, [:pointer], :string
 
   # Log queue
   attach_function :rd_kafka_set_log_queue, [:pointer, :pointer], :void
@@ -165,24 +169,27 @@ module Rdkafka
   ]
 
   attach_function :rd_kafka_new, [:kafka_type, :pointer, :pointer, :int], :pointer
+
   attach_function :rd_kafka_destroy, [:pointer], :void
 
   # Consumer
 
-  attach_function :rd_kafka_subscribe, [:pointer, :pointer], :int
-  attach_function :rd_kafka_unsubscribe, [:pointer], :int
-  attach_function :rd_kafka_subscription, [:pointer, :pointer], :int
-  attach_function :rd_kafka_assign, [:pointer, :pointer], :int
-  attach_function :rd_kafka_assignment, [:pointer, :pointer], :int
-  attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int
+  attach_function :rd_kafka_subscribe, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_unsubscribe, [:pointer], :int, blocking: true
+  attach_function :rd_kafka_subscription, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_assign, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_incremental_assign, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_incremental_unassign, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_assignment, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int, blocking: true
   attach_function :rd_kafka_commit, [:pointer, :pointer, :bool], :int, blocking: true
-  attach_function :rd_kafka_poll_set_consumer, [:pointer], :void
+  attach_function :rd_kafka_poll_set_consumer, [:pointer], :void, blocking: true
   attach_function :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: true
   attach_function :rd_kafka_consumer_close, [:pointer], :void, blocking: true
-  attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int
-  attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int
-  attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int
-  attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int
+  attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int, blocking: true
+  attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int, blocking: true
 
   # Headers
   attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
@@ -191,30 +198,36 @@ module Rdkafka
   # Rebalance
 
   callback :rebalance_cb_function, [:pointer, :int, :pointer, :pointer], :void
-  attach_function :rd_kafka_conf_set_rebalance_cb, [:pointer, :rebalance_cb_function], :void
+  attach_function :rd_kafka_conf_set_rebalance_cb, [:pointer, :rebalance_cb_function], :void, blocking: true
 
   RebalanceCallback = FFI::Function.new(
     :void, [:pointer, :int, :pointer, :pointer]
   ) do |client_ptr, code, partitions_ptr, opaque_ptr|
     case code
     when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
-      Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
+      if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
+        Rdkafka::Bindings.rd_kafka_incremental_assign(client_ptr, partitions_ptr)
+      else
+        Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
+      end
     else # RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS or errors
-      Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+      if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
+        Rdkafka::Bindings.rd_kafka_incremental_unassign(client_ptr, partitions_ptr)
+      else
+        Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+      end
     end
 
     opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
     return unless opaque
 
     tpl = Rdkafka::Consumer::TopicPartitionList.from_native_tpl(partitions_ptr).freeze
-    consumer = Rdkafka::Consumer.new(client_ptr)
-
     begin
       case code
       when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
-        opaque.call_on_partitions_assigned(consumer, tpl)
+        opaque.call_on_partitions_assigned(tpl)
       when RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
-        opaque.call_on_partitions_revoked(consumer, tpl)
+        opaque.call_on_partitions_revoked(tpl)
       end
     rescue Exception => err
       Rdkafka::Config.logger.error("Unhandled exception: #{err.class} - #{err.message}")
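A consequence of the arity change above: a rebalance listener now receives only the topic-partition list, without a consumer argument. A hedged sketch of a matching listener (class name and config values are illustrative):

```ruby
require "rdkafka"

# Methods match the new call_on_partitions_assigned/_revoked arity.
class LoggingRebalanceListener
  def on_partitions_assigned(tpl)
    puts "assigned: #{tpl.to_h.keys.inspect}"
  end

  def on_partitions_revoked(tpl)
    puts "revoked: #{tpl.to_h.keys.inspect}"
  end
end

config = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092",
  "group.id"          => "example-group"
)
config.consumer_rebalance_listener = LoggingRebalanceListener.new
```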
@@ -241,19 +254,26 @@ module Rdkafka
 
   RD_KAFKA_MSG_F_COPY = 0x2
 
-  attach_function :rd_kafka_producev, [:pointer, :varargs], :int
+  attach_function :rd_kafka_producev, [:pointer, :varargs], :int, blocking: true
   callback :delivery_cb, [:pointer, :pointer, :pointer], :void
   attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void
 
   # Partitioner
-  attach_function :rd_kafka_msg_partitioner_consistent_random, [:pointer, :pointer, :size_t, :int32, :pointer, :pointer], :int32
+  PARTITIONERS = %w(random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random).each_with_object({}) do |name, hsh|
+    method_name = "rd_kafka_msg_partitioner_#{name}".to_sym
+    attach_function method_name, [:pointer, :pointer, :size_t, :int32, :pointer, :pointer], :int32
+    hsh[name] = method_name
+  end
 
-  def self.partitioner(str, partition_count)
+  def self.partitioner(str, partition_count, partitioner_name = "consistent_random")
     # Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
     return -1 unless partition_count&.nonzero?
 
-    str_ptr = FFI::MemoryPointer.from_string(str)
-    rd_kafka_msg_partitioner_consistent_random(nil, str_ptr, str.size, partition_count, nil, nil)
+    str_ptr = str.empty? ? FFI::MemoryPointer::NULL : FFI::MemoryPointer.from_string(str)
+    method_name = PARTITIONERS.fetch(partitioner_name) do
+      raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{partitioner_name}")
+    end
+    public_send(method_name, nil, str_ptr, str.size > 0 ? str.size : 1, partition_count, nil, nil)
   end
 
   # Create Topics
@@ -261,23 +281,23 @@ module Rdkafka
   RD_KAFKA_ADMIN_OP_CREATETOPICS = 1 # rd_kafka_admin_op_t
   RD_KAFKA_EVENT_CREATETOPICS_RESULT = 100 # rd_kafka_event_type_t
 
-  attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void
-  attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer
-  attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32
-  attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void
-  attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer
-  attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer
+  attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void, blocking: true
+  attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer, blocking: true
+  attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32, blocking: true
+  attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void, blocking: true
+  attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer, blocking: true
+  attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer, blocking: true
 
   # Delete Topics
 
   RD_KAFKA_ADMIN_OP_DELETETOPICS = 2 # rd_kafka_admin_op_t
   RD_KAFKA_EVENT_DELETETOPICS_RESULT = 101 # rd_kafka_event_type_t
 
-  attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32
-  attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer
-  attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void
-  attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer
-  attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer
+  attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32, blocking: true
+  attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer, blocking: true
+  attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void, blocking: true
+  attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer, blocking: true
+  attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer, blocking: true
 
   # Background Queue and Callback
data/lib/rdkafka/callbacks.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   module Callbacks
 
@@ -90,14 +92,18 @@ module Rdkafka
         message = Rdkafka::Bindings::Message.new(message_ptr)
         delivery_handle_ptr_address = message[:_private].address
         if delivery_handle = Rdkafka::Producer::DeliveryHandle.remove(delivery_handle_ptr_address)
+          topic_name = Rdkafka::Bindings.rd_kafka_topic_name(message[:rkt])
+
           # Update delivery handle
           delivery_handle[:response] = message[:err]
           delivery_handle[:partition] = message[:partition]
           delivery_handle[:offset] = message[:offset]
+          delivery_handle[:topic_name] = FFI::MemoryPointer.from_string(topic_name)
           delivery_handle[:pending] = false
+
           # Call delivery callback on opaque
           if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
-            opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]))
+            opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], topic_name, message[:err]), delivery_handle)
           end
         end