karafka-rdkafka 0.12.2 → 0.13.0.beta1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (57) hide show
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +1 -1
  4. data/CHANGELOG.md +26 -0
  5. data/Gemfile +2 -0
  6. data/README.md +26 -0
  7. data/Rakefile +2 -0
  8. data/ext/Rakefile +2 -0
  9. data/karafka-rdkafka.gemspec +2 -0
  10. data/lib/rdkafka/abstract_handle.rb +2 -0
  11. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  12. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  13. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  14. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  15. data/lib/rdkafka/admin.rb +95 -73
  16. data/lib/rdkafka/bindings.rb +53 -37
  17. data/lib/rdkafka/callbacks.rb +7 -1
  18. data/lib/rdkafka/config.rb +13 -10
  19. data/lib/rdkafka/consumer/headers.rb +24 -7
  20. data/lib/rdkafka/consumer/message.rb +3 -1
  21. data/lib/rdkafka/consumer/partition.rb +2 -0
  22. data/lib/rdkafka/consumer/topic_partition_list.rb +2 -0
  23. data/lib/rdkafka/consumer.rb +100 -44
  24. data/lib/rdkafka/error.rb +9 -0
  25. data/lib/rdkafka/metadata.rb +25 -2
  26. data/lib/rdkafka/native_kafka.rb +83 -0
  27. data/lib/rdkafka/producer/delivery_handle.rb +5 -2
  28. data/lib/rdkafka/producer/delivery_report.rb +9 -2
  29. data/lib/rdkafka/producer.rb +75 -12
  30. data/lib/rdkafka/version.rb +3 -1
  31. data/lib/rdkafka.rb +3 -1
  32. data/spec/rdkafka/abstract_handle_spec.rb +2 -0
  33. data/spec/rdkafka/admin/create_topic_handle_spec.rb +2 -0
  34. data/spec/rdkafka/admin/create_topic_report_spec.rb +2 -0
  35. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +2 -0
  36. data/spec/rdkafka/admin/delete_topic_report_spec.rb +2 -0
  37. data/spec/rdkafka/admin_spec.rb +4 -3
  38. data/spec/rdkafka/bindings_spec.rb +2 -0
  39. data/spec/rdkafka/callbacks_spec.rb +2 -0
  40. data/spec/rdkafka/config_spec.rb +17 -2
  41. data/spec/rdkafka/consumer/headers_spec.rb +62 -0
  42. data/spec/rdkafka/consumer/message_spec.rb +2 -0
  43. data/spec/rdkafka/consumer/partition_spec.rb +2 -0
  44. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +2 -0
  45. data/spec/rdkafka/consumer_spec.rb +124 -22
  46. data/spec/rdkafka/error_spec.rb +2 -0
  47. data/spec/rdkafka/metadata_spec.rb +2 -0
  48. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -34
  49. data/spec/rdkafka/producer/delivery_handle_spec.rb +5 -0
  50. data/spec/rdkafka/producer/delivery_report_spec.rb +8 -2
  51. data/spec/rdkafka/producer_spec.rb +124 -19
  52. data/spec/spec_helper.rb +17 -1
  53. data.tar.gz.sig +0 -0
  54. metadata +10 -10
  55. metadata.gz.sig +0 -0
  56. data/bin/console +0 -11
  57. data/lib/rdkafka/producer/client.rb +0 -47
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 1eb78fe36d0d36d5ba7b6313105d85fb0e392ecbf9a91214428bf0e77a356e14
4
- data.tar.gz: fcddc2950bdf3cef4f0f495497808a3cb865b7940b80d515ebd4e7ee7a1b52d2
3
+ metadata.gz: 419dcef5e71a6578202fde7eacd628054ac77bae0c91c57845fd6dbd6ade6715
4
+ data.tar.gz: 05e7e1ef8e07c1b2dddf02f1e3f8e8e0da7dfcb75af41ccc2f9dcba2e30bba1f
5
5
  SHA512:
6
- metadata.gz: 44145d9a2d7d9b9231db9d0acfd40cf7adc54fe41eae67821196a067e85ed709d7e233df977f848a1ff4639bff9c383524dae40d17e9e37d2307bfdff4b8bb75
7
- data.tar.gz: 84df98bd3eb668168d61cdc09b0d64e310c6927e9fa70a451594c54324e688aadffd740023ee16ed50eb883f278b4285082d1ff7831350b695c3e48d2394662d
6
+ metadata.gz: 4643afd749af09b9bd2c8d16ae25b989eb03feb39b12c966059956cad47966a8a74e6d0c356bfdd4115a3a419aebbab1d8421410e0bef64bd2f1611d66ba2ab6
7
+ data.tar.gz: 7f526e3bb375126862a84b7824e60872454366cfa7a2b5abb8d6e09f978423dc1e56e1c2a488f36f28ff622f942834b7d576d6fe91e4263cb5864165597c881f
checksums.yaml.gz.sig CHANGED
Binary file
@@ -47,7 +47,7 @@ jobs:
47
47
  GITHUB_COVERAGE: ${{matrix.coverage}}
48
48
 
49
49
  run: |
50
+ docker-compose up -d --no-recreate
50
51
  bundle install --path vendor/bundle
51
52
  cd ext && bundle exec rake && cd ..
52
- docker-compose up -d --no-recreate
53
53
  bundle exec rspec
data/CHANGELOG.md CHANGED
@@ -1,3 +1,27 @@
1
+ # 0.13.0
2
+ * Support cooperative sticky partition assignment in the rebalance callback (methodmissing)
3
+ * Support both string and symbol header keys (ColinDKelley)
4
+ * Handle tombstone messages properly (kgalieva)
5
+ * Add topic name to delivery report (maeve)
6
+ * Allow string partitioner config (mollyegibson)
7
+ * Fix documented type for DeliveryReport#error (jimmydo)
8
+ * Bump librdkafka to 2.0.2 (lmaia)
9
+ * Use finalizers to cleanly exit producer and admin (thijsc)
10
+ * Lock access to the native kafka client (thijsc)
11
+ * Fix potential race condition in multi-threaded producer (mensfeld)
12
+ * Fix leaking FFI resources in specs (mensfeld)
13
+ * Improve specs stability (mensfeld)
14
+ * Make metadata request timeout configurable (mensfeld)
15
+ * call_on_partitions_assigned and call_on_partitions_revoked only get a tpl passed in (thijsc)
16
+ * Support `#assignment_lost?` on a consumer to check for involuntary assignment revocation (mensfeld)
17
+ * Expose `#name` on the consumer and producer (mensfeld)
18
+ * Introduce producer partitions count metadata cache (mensfeld)
19
+ * Retry metadata fetches on certain errors with a backoff (mensfeld)
20
+
21
+ # 0.12.3
22
+ - Include backtrace in non-raised bound errors.
23
+ - Include topic name in the delivery reports
24
+
1
25
  # 0.12.2
2
26
  * Increase the metadata default timeout from 250ms to 2 seconds. This should allow for working with remote clusters.
3
27
 
@@ -7,6 +31,8 @@
7
31
 
8
32
  # 0.12.0
9
33
  * Bumps librdkafka to 1.9.0
34
+ * Fix crash on empty partition key (mensfeld)
35
+ * Pass the delivery handle to the callback (gvisokinskas)
10
36
 
11
37
  # 0.11.0
12
38
  * Upgrade librdkafka to 1.8.2
data/Gemfile CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  source "https://rubygems.org"
2
4
 
3
5
  gemspec
data/README.md CHANGED
@@ -23,6 +23,19 @@ The most important pieces of a Kafka client are implemented. We're
23
23
  working towards feature completeness, you can track that here:
24
24
  https://github.com/appsignal/rdkafka-ruby/milestone/1
25
25
 
26
+ ## Table of contents
27
+
28
+ - [Installation](#installation)
29
+ - [Usage](#usage)
30
+ * [Consuming messages](#consuming-messages)
31
+ * [Producing messages](#producing-messages)
32
+ - [Higher level libraries](#higher-level-libraries)
33
+ * [Message processing frameworks](#message-processing-frameworks)
34
+ * [Message publishing libraries](#message-publishing-libraries)
35
+ - [Development](#development)
36
+ - [Example](#example)
37
+
38
+
26
39
  ## Installation
27
40
 
28
41
  This gem downloads and compiles librdkafka when it is installed. If you
@@ -77,6 +90,19 @@ Note that creating a producer consumes some resources that will not be
77
90
  released until `#close` is explicitly called, so be sure to call
78
91
  `Config#producer` only as necessary.
79
92
 
93
+ ## Higher level libraries
94
+
95
+ Currently, there are two actively developed frameworks based on rdkafka-ruby that provide a higher-level API for working with Kafka messages, and one library for publishing messages.
96
+
97
+ ### Message processing frameworks
98
+
99
+ * [Karafka](https://github.com/karafka/karafka) - Ruby and Rails efficient Kafka processing framework.
100
+ * [Racecar](https://github.com/zendesk/racecar) - A simple framework for Kafka consumers in Ruby
101
+
102
+ ### Message publishing libraries
103
+
104
+ * [WaterDrop](https://github.com/karafka/waterdrop) – Standalone Karafka library for producing Kafka messages.
105
+
80
106
  ## Development
81
107
 
82
108
  A Docker Compose file is included to run Kafka and Zookeeper. To run
data/Rakefile CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # Rakefile
2
4
 
3
5
  require 'bundler/gem_tasks'
data/ext/Rakefile CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require File.expand_path('../../lib/rdkafka/version', __FILE__)
2
4
  require "mini_portile2"
3
5
  require "fileutils"
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require File.expand_path('lib/rdkafka/version', __dir__)
2
4
 
3
5
  Gem::Specification.new do |gem|
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "ffi"
2
4
 
3
5
  module Rdkafka
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Admin
3
5
  class CreateTopicHandle < AbstractHandle
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Admin
3
5
  class CreateTopicReport
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Admin
3
5
  class DeleteTopicHandle < AbstractHandle
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Admin
3
5
  class DeleteTopicReport
data/lib/rdkafka/admin.rb CHANGED
@@ -1,33 +1,31 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "objspace"
4
+
1
5
  module Rdkafka
2
6
  class Admin
3
7
  # @private
4
8
  def initialize(native_kafka)
5
9
  @native_kafka = native_kafka
6
- @closing = false
7
-
8
- # Start thread to poll client for callbacks
9
- @polling_thread = Thread.new do
10
- loop do
11
- Rdkafka::Bindings.rd_kafka_poll(@native_kafka, 250)
12
- # Exit thread if closing and the poll queue is empty
13
- if @closing && Rdkafka::Bindings.rd_kafka_outq_len(@native_kafka) == 0
14
- break
15
- end
16
- end
17
- end
18
- @polling_thread.abort_on_exception = true
10
+
11
+ # Makes sure, that native kafka gets closed before it gets GCed by Ruby
12
+ ObjectSpace.define_finalizer(self, native_kafka.finalizer)
13
+ end
14
+
15
+ def finalizer
16
+ ->(_) { close }
19
17
  end
20
18
 
21
19
  # Close this admin instance
22
20
  def close
23
- return unless @native_kafka
24
-
25
- # Indicate to polling thread that we're closing
26
- @closing = true
27
- # Wait for the polling thread to finish up
28
- @polling_thread.join
29
- Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
30
- @native_kafka = nil
21
+ return if closed?
22
+ ObjectSpace.undefine_finalizer(self)
23
+ @native_kafka.close
24
+ end
25
+
26
+ # Whether this admin has closed
27
+ def closed?
28
+ @native_kafka.closed?
31
29
  end
32
30
 
33
31
  # Create a topic with the given partition count and replication factor
@@ -38,6 +36,7 @@ module Rdkafka
38
36
  #
39
37
  # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of creating the topic
40
38
  def create_topic(topic_name, partition_count, replication_factor, topic_config={})
39
+ closed_admin_check(__method__)
41
40
 
42
41
  # Create a rd_kafka_NewTopic_t representing the new topic
43
42
  error_buffer = FFI::MemoryPointer.from_string(" " * 256)
@@ -68,7 +67,9 @@ module Rdkafka
68
67
  topics_array_ptr.write_array_of_pointer(pointer_array)
69
68
 
70
69
  # Get a pointer to the queue that our request will be enqueued on
71
- queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
70
+ queue_ptr = @native_kafka.with_inner do |inner|
71
+ Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
72
+ end
72
73
  if queue_ptr.null?
73
74
  Rdkafka::Bindings.rd_kafka_NewTopic_destroy(new_topic_ptr)
74
75
  raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -79,17 +80,21 @@ module Rdkafka
79
80
  create_topic_handle[:pending] = true
80
81
  create_topic_handle[:response] = -1
81
82
  CreateTopicHandle.register(create_topic_handle)
82
- admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
83
+ admin_options_ptr = @native_kafka.with_inner do |inner|
84
+ Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
85
+ end
83
86
  Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_topic_handle.to_ptr)
84
87
 
85
88
  begin
86
- Rdkafka::Bindings.rd_kafka_CreateTopics(
87
- @native_kafka,
89
+ @native_kafka.with_inner do |inner|
90
+ Rdkafka::Bindings.rd_kafka_CreateTopics(
91
+ inner,
88
92
  topics_array_ptr,
89
93
  1,
90
94
  admin_options_ptr,
91
95
  queue_ptr
92
- )
96
+ )
97
+ end
93
98
  rescue Exception
94
99
  CreateTopicHandle.remove(create_topic_handle.to_ptr.address)
95
100
  raise
@@ -108,6 +113,7 @@ module Rdkafka
108
113
  #
109
114
  # @return [DeleteTopicHandle] Delete topic handle that can be used to wait for the result of deleting the topic
110
115
  def delete_topic(topic_name)
116
+ closed_admin_check(__method__)
111
117
 
112
118
  # Create a rd_kafka_DeleteTopic_t representing the topic to be deleted
113
119
  delete_topic_ptr = Rdkafka::Bindings.rd_kafka_DeleteTopic_new(FFI::MemoryPointer.from_string(topic_name))
@@ -118,7 +124,9 @@ module Rdkafka
118
124
  topics_array_ptr.write_array_of_pointer(pointer_array)
119
125
 
120
126
  # Get a pointer to the queue that our request will be enqueued on
121
- queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
127
+ queue_ptr = @native_kafka.with_inner do |inner|
128
+ Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
129
+ end
122
130
  if queue_ptr.null?
123
131
  Rdkafka::Bindings.rd_kafka_DeleteTopic_destroy(delete_topic_ptr)
124
132
  raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -129,17 +137,21 @@ module Rdkafka
129
137
  delete_topic_handle[:pending] = true
130
138
  delete_topic_handle[:response] = -1
131
139
  DeleteTopicHandle.register(delete_topic_handle)
132
- admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
140
+ admin_options_ptr = @native_kafka.with_inner do |inner|
141
+ Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
142
+ end
133
143
  Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, delete_topic_handle.to_ptr)
134
144
 
135
145
  begin
136
- Rdkafka::Bindings.rd_kafka_DeleteTopics(
137
- @native_kafka,
146
+ @native_kafka.with_inner do |inner|
147
+ Rdkafka::Bindings.rd_kafka_DeleteTopics(
148
+ inner,
138
149
  topics_array_ptr,
139
150
  1,
140
151
  admin_options_ptr,
141
152
  queue_ptr
142
- )
153
+ )
154
+ end
143
155
  rescue Exception
144
156
  DeleteTopicHandle.remove(delete_topic_handle.to_ptr.address)
145
157
  raise
@@ -163,54 +175,64 @@ module Rdkafka
163
175
  #
164
176
  # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of creating the topic
165
177
  def create_partitions(topic_name, partition_count)
166
- error_buffer = FFI::MemoryPointer.from_string(" " * 256)
167
- new_partitions_ptr = Rdkafka::Bindings.rd_kafka_NewPartitions_new(
168
- FFI::MemoryPointer.from_string(topic_name),
169
- partition_count,
170
- error_buffer,
171
- 256
172
- )
173
- if new_partitions_ptr.null?
174
- raise Rdkafka::Config::ConfigError.new(error_buffer.read_string)
175
- end
178
+ closed_admin_check(__method__)
179
+
180
+ @native_kafka.with_inner do |inner|
181
+ error_buffer = FFI::MemoryPointer.from_string(" " * 256)
182
+ new_partitions_ptr = Rdkafka::Bindings.rd_kafka_NewPartitions_new(
183
+ FFI::MemoryPointer.from_string(topic_name),
184
+ partition_count,
185
+ error_buffer,
186
+ 256
187
+ )
188
+ if new_partitions_ptr.null?
189
+ raise Rdkafka::Config::ConfigError.new(error_buffer.read_string)
190
+ end
176
191
 
177
- pointer_array = [new_partitions_ptr]
178
- topics_array_ptr = FFI::MemoryPointer.new(:pointer)
179
- topics_array_ptr.write_array_of_pointer(pointer_array)
192
+ pointer_array = [new_partitions_ptr]
193
+ topics_array_ptr = FFI::MemoryPointer.new(:pointer)
194
+ topics_array_ptr.write_array_of_pointer(pointer_array)
180
195
 
181
- # Get a pointer to the queue that our request will be enqueued on
182
- queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
183
- if queue_ptr.null?
184
- Rdkafka::Bindings.rd_kafka_NewPartitions_destroy(new_partitions_ptr)
185
- raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
186
- end
196
+ # Get a pointer to the queue that our request will be enqueued on
197
+ queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
198
+ if queue_ptr.null?
199
+ Rdkafka::Bindings.rd_kafka_NewPartitions_destroy(new_partitions_ptr)
200
+ raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
201
+ end
187
202
 
188
- # Create and register the handle we will return to the caller
189
- create_partitions_handle = CreatePartitionsHandle.new
190
- create_partitions_handle[:pending] = true
191
- create_partitions_handle[:response] = -1
192
- CreatePartitionsHandle.register(create_partitions_handle)
193
- admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATEPARTITIONS)
194
- Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_partitions_handle.to_ptr)
203
+ # Create and register the handle we will return to the caller
204
+ create_partitions_handle = CreatePartitionsHandle.new
205
+ create_partitions_handle[:pending] = true
206
+ create_partitions_handle[:response] = -1
207
+ CreatePartitionsHandle.register(create_partitions_handle)
208
+ admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATEPARTITIONS)
209
+ Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_partitions_handle.to_ptr)
210
+
211
+ begin
212
+ Rdkafka::Bindings.rd_kafka_CreatePartitions(
213
+ inner,
214
+ topics_array_ptr,
215
+ 1,
216
+ admin_options_ptr,
217
+ queue_ptr
218
+ )
219
+ rescue Exception
220
+ CreatePartitionsHandle.remove(create_partitions_handle.to_ptr.address)
221
+ raise
222
+ ensure
223
+ Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
224
+ Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
225
+ Rdkafka::Bindings.rd_kafka_NewPartitions_destroy(new_partitions_ptr)
226
+ end
195
227
 
196
- begin
197
- Rdkafka::Bindings.rd_kafka_CreatePartitions(
198
- @native_kafka,
199
- topics_array_ptr,
200
- 1,
201
- admin_options_ptr,
202
- queue_ptr
203
- )
204
- rescue Exception
205
- CreatePartitionsHandle.remove(create_partitions_handle.to_ptr.address)
206
- raise
207
- ensure
208
- Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
209
- Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
210
- Rdkafka::Bindings.rd_kafka_NewPartitions_destroy(new_partitions_ptr)
228
+ create_partitions_handle
211
229
  end
230
+ end
231
+
232
+ private
212
233
 
213
- create_partitions_handle
234
+ def closed_admin_check(method)
235
+ raise Rdkafka::ClosedAdminError.new(method) if closed?
214
236
  end
215
237
  end
216
238
  end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "ffi"
2
4
  require "json"
3
5
  require "logger"
@@ -15,7 +17,7 @@ module Rdkafka
15
17
  end
16
18
  end
17
19
 
18
- ffi_lib File.join(File.dirname(__FILE__), "../../ext/librdkafka.#{lib_extension}")
20
+ ffi_lib File.join(__dir__, "../../ext/librdkafka.#{lib_extension}")
19
21
 
20
22
  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175
21
23
  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174
@@ -33,15 +35,17 @@ module Rdkafka
33
35
 
34
36
  # Polling
35
37
 
38
+ attach_function :rd_kafka_flush, [:pointer, :int], :void, blocking: true
36
39
  attach_function :rd_kafka_poll, [:pointer, :int], :void, blocking: true
37
40
  attach_function :rd_kafka_outq_len, [:pointer], :int, blocking: true
38
41
 
39
42
  # Metadata
40
43
 
41
- attach_function :rd_kafka_memberid, [:pointer], :string
42
- attach_function :rd_kafka_clusterid, [:pointer], :string
43
- attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int
44
- attach_function :rd_kafka_metadata_destroy, [:pointer], :void
44
+ attach_function :rd_kafka_name, [:pointer], :string, blocking: true
45
+ attach_function :rd_kafka_memberid, [:pointer], :string, blocking: true
46
+ attach_function :rd_kafka_clusterid, [:pointer], :string, blocking: true
47
+ attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int, blocking: true
48
+ attach_function :rd_kafka_metadata_destroy, [:pointer], :void, blocking: true
45
49
 
46
50
  # Message struct
47
51
 
@@ -110,6 +114,7 @@ module Rdkafka
110
114
  attach_function :rd_kafka_conf_set_stats_cb, [:pointer, :stats_cb], :void
111
115
  callback :error_cb, [:pointer, :int, :string, :pointer], :void
112
116
  attach_function :rd_kafka_conf_set_error_cb, [:pointer, :error_cb], :void
117
+ attach_function :rd_kafka_rebalance_protocol, [:pointer], :string
113
118
 
114
119
  # Log queue
115
120
  attach_function :rd_kafka_set_log_queue, [:pointer, :pointer], :void
@@ -153,6 +158,7 @@ module Rdkafka
153
158
  ) do |_client_prr, err_code, reason, _opaque|
154
159
  if Rdkafka::Config.error_callback
155
160
  error = Rdkafka::RdkafkaError.new(err_code, broker_message: reason)
161
+ error.set_backtrace(caller)
156
162
  Rdkafka::Config.error_callback.call(error)
157
163
  end
158
164
  end
@@ -165,24 +171,28 @@ module Rdkafka
165
171
  ]
166
172
 
167
173
  attach_function :rd_kafka_new, [:kafka_type, :pointer, :pointer, :int], :pointer
174
+
168
175
  attach_function :rd_kafka_destroy, [:pointer], :void
169
176
 
170
177
  # Consumer
171
178
 
172
- attach_function :rd_kafka_subscribe, [:pointer, :pointer], :int
173
- attach_function :rd_kafka_unsubscribe, [:pointer], :int
174
- attach_function :rd_kafka_subscription, [:pointer, :pointer], :int
175
- attach_function :rd_kafka_assign, [:pointer, :pointer], :int
176
- attach_function :rd_kafka_assignment, [:pointer, :pointer], :int
177
- attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int
179
+ attach_function :rd_kafka_subscribe, [:pointer, :pointer], :int, blocking: true
180
+ attach_function :rd_kafka_unsubscribe, [:pointer], :int, blocking: true
181
+ attach_function :rd_kafka_subscription, [:pointer, :pointer], :int, blocking: true
182
+ attach_function :rd_kafka_assign, [:pointer, :pointer], :int, blocking: true
183
+ attach_function :rd_kafka_incremental_assign, [:pointer, :pointer], :int, blocking: true
184
+ attach_function :rd_kafka_incremental_unassign, [:pointer, :pointer], :int, blocking: true
185
+ attach_function :rd_kafka_assignment, [:pointer, :pointer], :int, blocking: true
186
+ attach_function :rd_kafka_assignment_lost, [:pointer], :int, blocking: true
187
+ attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int, blocking: true
178
188
  attach_function :rd_kafka_commit, [:pointer, :pointer, :bool], :int, blocking: true
179
- attach_function :rd_kafka_poll_set_consumer, [:pointer], :void
189
+ attach_function :rd_kafka_poll_set_consumer, [:pointer], :void, blocking: true
180
190
  attach_function :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: true
181
191
  attach_function :rd_kafka_consumer_close, [:pointer], :void, blocking: true
182
- attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int
183
- attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int
184
- attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int
185
- attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int
192
+ attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int, blocking: true
193
+ attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
194
+ attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
195
+ attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int, blocking: true
186
196
 
187
197
  # Headers
188
198
  attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
@@ -191,30 +201,36 @@ module Rdkafka
191
201
  # Rebalance
192
202
 
193
203
  callback :rebalance_cb_function, [:pointer, :int, :pointer, :pointer], :void
194
- attach_function :rd_kafka_conf_set_rebalance_cb, [:pointer, :rebalance_cb_function], :void
204
+ attach_function :rd_kafka_conf_set_rebalance_cb, [:pointer, :rebalance_cb_function], :void, blocking: true
195
205
 
196
206
  RebalanceCallback = FFI::Function.new(
197
207
  :void, [:pointer, :int, :pointer, :pointer]
198
208
  ) do |client_ptr, code, partitions_ptr, opaque_ptr|
199
209
  case code
200
210
  when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
201
- Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
211
+ if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
212
+ Rdkafka::Bindings.rd_kafka_incremental_assign(client_ptr, partitions_ptr)
213
+ else
214
+ Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
215
+ end
202
216
  else # RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS or errors
203
- Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
217
+ if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
218
+ Rdkafka::Bindings.rd_kafka_incremental_unassign(client_ptr, partitions_ptr)
219
+ else
220
+ Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
221
+ end
204
222
  end
205
223
 
206
224
  opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
207
225
  return unless opaque
208
226
 
209
227
  tpl = Rdkafka::Consumer::TopicPartitionList.from_native_tpl(partitions_ptr).freeze
210
- consumer = Rdkafka::Consumer.new(client_ptr)
211
-
212
228
  begin
213
229
  case code
214
230
  when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
215
- opaque.call_on_partitions_assigned(consumer, tpl)
231
+ opaque.call_on_partitions_assigned(tpl)
216
232
  when RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
217
- opaque.call_on_partitions_revoked(consumer, tpl)
233
+ opaque.call_on_partitions_revoked(tpl)
218
234
  end
219
235
  rescue Exception => err
220
236
  Rdkafka::Config.logger.error("Unhandled exception: #{err.class} - #{err.message}")
@@ -241,7 +257,7 @@ module Rdkafka
241
257
 
242
258
  RD_KAFKA_MSG_F_COPY = 0x2
243
259
 
244
- attach_function :rd_kafka_producev, [:pointer, :varargs], :int
260
+ attach_function :rd_kafka_producev, [:pointer, :varargs], :int, blocking: true
245
261
  callback :delivery_cb, [:pointer, :pointer, :pointer], :void
246
262
  attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void
247
263
 
@@ -256,11 +272,11 @@ module Rdkafka
256
272
  # Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
257
273
  return -1 unless partition_count&.nonzero?
258
274
 
259
- str_ptr = FFI::MemoryPointer.from_string(str)
275
+ str_ptr = str.empty? ? FFI::MemoryPointer::NULL : FFI::MemoryPointer.from_string(str)
260
276
  method_name = PARTITIONERS.fetch(partitioner_name) do
261
277
  raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{partitioner_name}")
262
278
  end
263
- public_send(method_name, nil, str_ptr, str.size, partition_count, nil, nil)
279
+ public_send(method_name, nil, str_ptr, str.size > 0 ? str.size : 1, partition_count, nil, nil)
264
280
  end
265
281
 
266
282
  # Create Topics
@@ -268,23 +284,23 @@ module Rdkafka
268
284
  RD_KAFKA_ADMIN_OP_CREATETOPICS = 1 # rd_kafka_admin_op_t
269
285
  RD_KAFKA_EVENT_CREATETOPICS_RESULT = 100 # rd_kafka_event_type_t
270
286
 
271
- attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void
272
- attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer
273
- attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32
274
- attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void
275
- attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer
276
- attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer
287
+ attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void, blocking: true
288
+ attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer, blocking: true
289
+ attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32, blocking: true
290
+ attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void, blocking: true
291
+ attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer, blocking: true
292
+ attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer, blocking: true
277
293
 
278
294
  # Delete Topics
279
295
 
280
296
  RD_KAFKA_ADMIN_OP_DELETETOPICS = 2 # rd_kafka_admin_op_t
281
297
  RD_KAFKA_EVENT_DELETETOPICS_RESULT = 101 # rd_kafka_event_type_t
282
298
 
283
- attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32
284
- attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer
285
- attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void
286
- attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer
287
- attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer
299
+ attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32, blocking: true
300
+ attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer, blocking: true
301
+ attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void, blocking: true
302
+ attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer, blocking: true
303
+ attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer, blocking: true
288
304
 
289
305
  # Create partitions
290
306
  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS = 3
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  module Callbacks
3
5
 
@@ -109,14 +111,18 @@ module Rdkafka
109
111
  message = Rdkafka::Bindings::Message.new(message_ptr)
110
112
  delivery_handle_ptr_address = message[:_private].address
111
113
  if delivery_handle = Rdkafka::Producer::DeliveryHandle.remove(delivery_handle_ptr_address)
114
+ topic_name = Rdkafka::Bindings.rd_kafka_topic_name(message[:rkt])
115
+
112
116
  # Update delivery handle
113
117
  delivery_handle[:response] = message[:err]
114
118
  delivery_handle[:partition] = message[:partition]
115
119
  delivery_handle[:offset] = message[:offset]
120
+ delivery_handle[:topic_name] = FFI::MemoryPointer.from_string(topic_name)
116
121
  delivery_handle[:pending] = false
122
+
117
123
  # Call delivery callback on opaque
118
124
  if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
119
- opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]), delivery_handle)
125
+ opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], topic_name, message[:err]), delivery_handle)
120
126
  end
121
127
  end
122
128
  end