karafka-rdkafka 0.12.3 → 0.13.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (57) hide show
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/workflows/ci.yml +1 -1
  4. data/CHANGELOG.md +23 -0
  5. data/Gemfile +2 -0
  6. data/README.md +26 -0
  7. data/Rakefile +2 -0
  8. data/ext/Rakefile +2 -0
  9. data/karafka-rdkafka.gemspec +2 -0
  10. data/lib/rdkafka/abstract_handle.rb +2 -0
  11. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  12. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  13. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  14. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  15. data/lib/rdkafka/admin.rb +95 -73
  16. data/lib/rdkafka/bindings.rb +52 -37
  17. data/lib/rdkafka/callbacks.rb +2 -0
  18. data/lib/rdkafka/config.rb +13 -10
  19. data/lib/rdkafka/consumer/headers.rb +24 -7
  20. data/lib/rdkafka/consumer/message.rb +3 -1
  21. data/lib/rdkafka/consumer/partition.rb +2 -0
  22. data/lib/rdkafka/consumer/topic_partition_list.rb +2 -0
  23. data/lib/rdkafka/consumer.rb +100 -44
  24. data/lib/rdkafka/error.rb +9 -0
  25. data/lib/rdkafka/metadata.rb +25 -2
  26. data/lib/rdkafka/native_kafka.rb +81 -0
  27. data/lib/rdkafka/producer/delivery_handle.rb +2 -0
  28. data/lib/rdkafka/producer/delivery_report.rb +3 -1
  29. data/lib/rdkafka/producer.rb +75 -12
  30. data/lib/rdkafka/version.rb +3 -1
  31. data/lib/rdkafka.rb +3 -1
  32. data/spec/rdkafka/abstract_handle_spec.rb +2 -0
  33. data/spec/rdkafka/admin/create_topic_handle_spec.rb +2 -0
  34. data/spec/rdkafka/admin/create_topic_report_spec.rb +2 -0
  35. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +2 -0
  36. data/spec/rdkafka/admin/delete_topic_report_spec.rb +2 -0
  37. data/spec/rdkafka/admin_spec.rb +4 -3
  38. data/spec/rdkafka/bindings_spec.rb +2 -0
  39. data/spec/rdkafka/callbacks_spec.rb +2 -0
  40. data/spec/rdkafka/config_spec.rb +17 -2
  41. data/spec/rdkafka/consumer/headers_spec.rb +62 -0
  42. data/spec/rdkafka/consumer/message_spec.rb +2 -0
  43. data/spec/rdkafka/consumer/partition_spec.rb +2 -0
  44. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +2 -0
  45. data/spec/rdkafka/consumer_spec.rb +124 -22
  46. data/spec/rdkafka/error_spec.rb +2 -0
  47. data/spec/rdkafka/metadata_spec.rb +2 -0
  48. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -34
  49. data/spec/rdkafka/producer/delivery_handle_spec.rb +2 -0
  50. data/spec/rdkafka/producer/delivery_report_spec.rb +4 -2
  51. data/spec/rdkafka/producer_spec.rb +118 -17
  52. data/spec/spec_helper.rb +17 -1
  53. data.tar.gz.sig +0 -0
  54. metadata +9 -9
  55. metadata.gz.sig +0 -0
  56. data/bin/console +0 -11
  57. data/lib/rdkafka/producer/client.rb +0 -47
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 26881045d33d54883dbc4483364b0162e0f380619fcfe4079c892817b04328bf
4
- data.tar.gz: 9db9e87135682ff34cbc699dd20d6dcf43f5d2d82ee82153152b7d91eaf02152
3
+ metadata.gz: 91b097ee3e4b5df5b9deb4009297c84b9d59ffa1ec0aa0f0b0afa230418478aa
4
+ data.tar.gz: b0897c92e746ebe9194b735ebe0b813ae274c0a472c5381e9b097a4d059b2966
5
5
  SHA512:
6
- metadata.gz: 64200c66fb2623c04bb4dd3313173926a508e6a2a0678d5a5beab83dab75b4f597d6fd1d5b7fd2b875aec74db5338fa57bb54b79b1d420d9b04d1a5272c86171
7
- data.tar.gz: ce91a9bd42090b85a13a2ba1d40779894b330aab5f50a074e093386c9137ca926ebf90d402a7a9179e2192251a9c55291f72f16ead9a8680f13ce20f659e68c4
6
+ metadata.gz: e6fb584d20d1958e66133e45806d2d25ba885f413ccb683e1380c65611f068d65198aa459920871b2a15b0d84e434d1824ae5eb10cba04a6a118538b471cd5d8
7
+ data.tar.gz: ac0ff2519d7580018a7d6359f3e7bd7410f6c52aeb0e95a5b4cc749e6becc2863ddea229e11d8ef04f74865e608b4f7ee2e057730a5d837e4e6da5696817b3a8
checksums.yaml.gz.sig CHANGED
Binary file
@@ -47,7 +47,7 @@ jobs:
47
47
  GITHUB_COVERAGE: ${{matrix.coverage}}
48
48
 
49
49
  run: |
50
+ docker-compose up -d --no-recreate
50
51
  bundle install --path vendor/bundle
51
52
  cd ext && bundle exec rake && cd ..
52
- docker-compose up -d --no-recreate
53
53
  bundle exec rspec
data/CHANGELOG.md CHANGED
@@ -1,3 +1,24 @@
1
+ # 0.13.0
2
+ * Support cooperative sticky partition assignment in the rebalance callback (methodmissing)
3
+ * Support both string and symbol header keys (ColinDKelley)
4
+ * Handle tombstone messages properly (kgalieva)
5
+ * Add topic name to delivery report (maeve)
6
+ * Allow string partitioner config (mollyegibson)
7
+ * Fix documented type for DeliveryReport#error (jimmydo)
8
+ * Bump librdkafka to 2.0.2 (lmaia)
9
+ * Use finalizers to cleanly exit producer and admin (thijsc)
10
+ * Lock access to the native kafka client (thijsc)
11
+ * Fix potential race condition in multi-threaded producer (mensfeld)
12
+ * Fix leaking FFI resources in specs (mensfeld)
13
+ * Improve specs stability (mensfeld)
14
+ * Make metadata request timeout configurable (mensfeld)
15
+ * call_on_partitions_assigned and call_on_partitions_revoked only get a tpl passed in (thijsc)
16
+ * Support `#assignment_lost?` on a consumer to check for involuntary assignment revocation (mensfeld)
17
+ * Expose `#name` on the consumer and producer (mensfeld)
18
+ * Introduce producer partitions count metadata cache (mensfeld)
19
+ * Retry metadata fetches on certain errors with a backoff (mensfeld)
20
+ * Do not lock access to underlying native kafka client and rely on Karafka granular locking (mensfeld)
21
+
1
22
  # 0.12.3
2
23
  - Include backtrace in non-raised binded errors.
3
24
  - Include topic name in the delivery reports
@@ -11,6 +32,8 @@
11
32
 
12
33
  # 0.12.0
13
34
  * Bumps librdkafka to 1.9.0
35
+ * Fix crash on empty partition key (mensfeld)
36
+ * Pass the delivery handle to the callback (gvisokinskas)
14
37
 
15
38
  # 0.11.0
16
39
  * Upgrade librdkafka to 1.8.2
data/Gemfile CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  source "https://rubygems.org"
2
4
 
3
5
  gemspec
data/README.md CHANGED
@@ -23,6 +23,19 @@ The most important pieces of a Kafka client are implemented. We're
23
23
  working towards feature completeness, you can track that here:
24
24
  https://github.com/appsignal/rdkafka-ruby/milestone/1
25
25
 
26
+ ## Table of contents
27
+
28
+ - [Installation](#installation)
29
+ - [Usage](#usage)
30
+ * [Consuming messages](#consuming-messages)
31
+ * [Producing messages](#producing-messages)
32
+ - [Higher level libraries](#higher-level-libraries)
33
+ * [Message processing frameworks](#message-processing-frameworks)
34
+ * [Message publishing libraries](#message-publishing-libraries)
35
+ - [Development](#development)
36
+ - [Example](#example)
37
+
38
+
26
39
  ## Installation
27
40
 
28
41
  This gem downloads and compiles librdkafka when it is installed. If you
@@ -77,6 +90,19 @@ Note that creating a producer consumes some resources that will not be
77
90
  released until it `#close` is explicitly called, so be sure to call
78
91
  `Config#producer` only as necessary.
79
92
 
93
+ ## Higher level libraries
94
+
95
+ Currently, there are two actively developed frameworks based on rdkafka-ruby that provide a higher-level API for working with Kafka messages, and one library for publishing messages.
96
+
97
+ ### Message processing frameworks
98
+
99
+ * [Karafka](https://github.com/karafka/karafka) - Ruby and Rails efficient Kafka processing framework.
100
+ * [Racecar](https://github.com/zendesk/racecar) - A simple framework for Kafka consumers in Ruby
101
+
102
+ ### Message publishing libraries
103
+
104
+ * [WaterDrop](https://github.com/karafka/waterdrop) – Standalone Karafka library for producing Kafka messages.
105
+
80
106
  ## Development
81
107
 
82
108
  A Docker Compose file is included to run Kafka and Zookeeper. To run
data/Rakefile CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # Rakefile
2
4
 
3
5
  require 'bundler/gem_tasks'
data/ext/Rakefile CHANGED
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require File.expand_path('../../lib/rdkafka/version', __FILE__)
2
4
  require "mini_portile2"
3
5
  require "fileutils"
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require File.expand_path('lib/rdkafka/version', __dir__)
2
4
 
3
5
  Gem::Specification.new do |gem|
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "ffi"
2
4
 
3
5
  module Rdkafka
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Admin
3
5
  class CreateTopicHandle < AbstractHandle
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Admin
3
5
  class CreateTopicReport
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Admin
3
5
  class DeleteTopicHandle < AbstractHandle
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  class Admin
3
5
  class DeleteTopicReport
data/lib/rdkafka/admin.rb CHANGED
@@ -1,33 +1,31 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "objspace"
4
+
1
5
  module Rdkafka
2
6
  class Admin
3
7
  # @private
4
8
  def initialize(native_kafka)
5
9
  @native_kafka = native_kafka
6
- @closing = false
7
-
8
- # Start thread to poll client for callbacks
9
- @polling_thread = Thread.new do
10
- loop do
11
- Rdkafka::Bindings.rd_kafka_poll(@native_kafka, 250)
12
- # Exit thread if closing and the poll queue is empty
13
- if @closing && Rdkafka::Bindings.rd_kafka_outq_len(@native_kafka) == 0
14
- break
15
- end
16
- end
17
- end
18
- @polling_thread.abort_on_exception = true
10
+
11
+ # Makes sure that the native kafka client gets closed before it gets GCed by Ruby
12
+ ObjectSpace.define_finalizer(self, native_kafka.finalizer)
13
+ end
14
+
15
+ def finalizer
16
+ ->(_) { close }
19
17
  end
20
18
 
21
19
  # Close this admin instance
22
20
  def close
23
- return unless @native_kafka
24
-
25
- # Indicate to polling thread that we're closing
26
- @closing = true
27
- # Wait for the polling thread to finish up
28
- @polling_thread.join
29
- Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
30
- @native_kafka = nil
21
+ return if closed?
22
+ ObjectSpace.undefine_finalizer(self)
23
+ @native_kafka.close
24
+ end
25
+
26
+ # Whether this admin has closed
27
+ def closed?
28
+ @native_kafka.closed?
31
29
  end
32
30
 
33
31
  # Create a topic with the given partition count and replication factor
@@ -38,6 +36,7 @@ module Rdkafka
38
36
  #
39
37
  # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of creating the topic
40
38
  def create_topic(topic_name, partition_count, replication_factor, topic_config={})
39
+ closed_admin_check(__method__)
41
40
 
42
41
  # Create a rd_kafka_NewTopic_t representing the new topic
43
42
  error_buffer = FFI::MemoryPointer.from_string(" " * 256)
@@ -68,7 +67,9 @@ module Rdkafka
68
67
  topics_array_ptr.write_array_of_pointer(pointer_array)
69
68
 
70
69
  # Get a pointer to the queue that our request will be enqueued on
71
- queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
70
+ queue_ptr = @native_kafka.with_inner do |inner|
71
+ Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
72
+ end
72
73
  if queue_ptr.null?
73
74
  Rdkafka::Bindings.rd_kafka_NewTopic_destroy(new_topic_ptr)
74
75
  raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -79,17 +80,21 @@ module Rdkafka
79
80
  create_topic_handle[:pending] = true
80
81
  create_topic_handle[:response] = -1
81
82
  CreateTopicHandle.register(create_topic_handle)
82
- admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
83
+ admin_options_ptr = @native_kafka.with_inner do |inner|
84
+ Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
85
+ end
83
86
  Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_topic_handle.to_ptr)
84
87
 
85
88
  begin
86
- Rdkafka::Bindings.rd_kafka_CreateTopics(
87
- @native_kafka,
89
+ @native_kafka.with_inner do |inner|
90
+ Rdkafka::Bindings.rd_kafka_CreateTopics(
91
+ inner,
88
92
  topics_array_ptr,
89
93
  1,
90
94
  admin_options_ptr,
91
95
  queue_ptr
92
- )
96
+ )
97
+ end
93
98
  rescue Exception
94
99
  CreateTopicHandle.remove(create_topic_handle.to_ptr.address)
95
100
  raise
@@ -108,6 +113,7 @@ module Rdkafka
108
113
  #
109
114
  # @return [DeleteTopicHandle] Delete topic handle that can be used to wait for the result of deleting the topic
110
115
  def delete_topic(topic_name)
116
+ closed_admin_check(__method__)
111
117
 
112
118
  # Create a rd_kafka_DeleteTopic_t representing the topic to be deleted
113
119
  delete_topic_ptr = Rdkafka::Bindings.rd_kafka_DeleteTopic_new(FFI::MemoryPointer.from_string(topic_name))
@@ -118,7 +124,9 @@ module Rdkafka
118
124
  topics_array_ptr.write_array_of_pointer(pointer_array)
119
125
 
120
126
  # Get a pointer to the queue that our request will be enqueued on
121
- queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
127
+ queue_ptr = @native_kafka.with_inner do |inner|
128
+ Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
129
+ end
122
130
  if queue_ptr.null?
123
131
  Rdkafka::Bindings.rd_kafka_DeleteTopic_destroy(delete_topic_ptr)
124
132
  raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -129,17 +137,21 @@ module Rdkafka
129
137
  delete_topic_handle[:pending] = true
130
138
  delete_topic_handle[:response] = -1
131
139
  DeleteTopicHandle.register(delete_topic_handle)
132
- admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
140
+ admin_options_ptr = @native_kafka.with_inner do |inner|
141
+ Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
142
+ end
133
143
  Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, delete_topic_handle.to_ptr)
134
144
 
135
145
  begin
136
- Rdkafka::Bindings.rd_kafka_DeleteTopics(
137
- @native_kafka,
146
+ @native_kafka.with_inner do |inner|
147
+ Rdkafka::Bindings.rd_kafka_DeleteTopics(
148
+ inner,
138
149
  topics_array_ptr,
139
150
  1,
140
151
  admin_options_ptr,
141
152
  queue_ptr
142
- )
153
+ )
154
+ end
143
155
  rescue Exception
144
156
  DeleteTopicHandle.remove(delete_topic_handle.to_ptr.address)
145
157
  raise
@@ -163,54 +175,64 @@ module Rdkafka
163
175
  #
164
176
  # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of creating the topic
165
177
  def create_partitions(topic_name, partition_count)
166
- error_buffer = FFI::MemoryPointer.from_string(" " * 256)
167
- new_partitions_ptr = Rdkafka::Bindings.rd_kafka_NewPartitions_new(
168
- FFI::MemoryPointer.from_string(topic_name),
169
- partition_count,
170
- error_buffer,
171
- 256
172
- )
173
- if new_partitions_ptr.null?
174
- raise Rdkafka::Config::ConfigError.new(error_buffer.read_string)
175
- end
178
+ closed_admin_check(__method__)
179
+
180
+ @native_kafka.with_inner do |inner|
181
+ error_buffer = FFI::MemoryPointer.from_string(" " * 256)
182
+ new_partitions_ptr = Rdkafka::Bindings.rd_kafka_NewPartitions_new(
183
+ FFI::MemoryPointer.from_string(topic_name),
184
+ partition_count,
185
+ error_buffer,
186
+ 256
187
+ )
188
+ if new_partitions_ptr.null?
189
+ raise Rdkafka::Config::ConfigError.new(error_buffer.read_string)
190
+ end
176
191
 
177
- pointer_array = [new_partitions_ptr]
178
- topics_array_ptr = FFI::MemoryPointer.new(:pointer)
179
- topics_array_ptr.write_array_of_pointer(pointer_array)
192
+ pointer_array = [new_partitions_ptr]
193
+ topics_array_ptr = FFI::MemoryPointer.new(:pointer)
194
+ topics_array_ptr.write_array_of_pointer(pointer_array)
180
195
 
181
- # Get a pointer to the queue that our request will be enqueued on
182
- queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
183
- if queue_ptr.null?
184
- Rdkafka::Bindings.rd_kafka_NewPartitions_destroy(new_partitions_ptr)
185
- raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
186
- end
196
+ # Get a pointer to the queue that our request will be enqueued on
197
+ queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
198
+ if queue_ptr.null?
199
+ Rdkafka::Bindings.rd_kafka_NewPartitions_destroy(new_partitions_ptr)
200
+ raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
201
+ end
187
202
 
188
- # Create and register the handle we will return to the caller
189
- create_partitions_handle = CreatePartitionsHandle.new
190
- create_partitions_handle[:pending] = true
191
- create_partitions_handle[:response] = -1
192
- CreatePartitionsHandle.register(create_partitions_handle)
193
- admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATEPARTITIONS)
194
- Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_partitions_handle.to_ptr)
203
+ # Create and register the handle we will return to the caller
204
+ create_partitions_handle = CreatePartitionsHandle.new
205
+ create_partitions_handle[:pending] = true
206
+ create_partitions_handle[:response] = -1
207
+ CreatePartitionsHandle.register(create_partitions_handle)
208
+ admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATEPARTITIONS)
209
+ Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_partitions_handle.to_ptr)
210
+
211
+ begin
212
+ Rdkafka::Bindings.rd_kafka_CreatePartitions(
213
+ inner,
214
+ topics_array_ptr,
215
+ 1,
216
+ admin_options_ptr,
217
+ queue_ptr
218
+ )
219
+ rescue Exception
220
+ CreatePartitionsHandle.remove(create_partitions_handle.to_ptr.address)
221
+ raise
222
+ ensure
223
+ Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
224
+ Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
225
+ Rdkafka::Bindings.rd_kafka_NewPartitions_destroy(new_partitions_ptr)
226
+ end
195
227
 
196
- begin
197
- Rdkafka::Bindings.rd_kafka_CreatePartitions(
198
- @native_kafka,
199
- topics_array_ptr,
200
- 1,
201
- admin_options_ptr,
202
- queue_ptr
203
- )
204
- rescue Exception
205
- CreatePartitionsHandle.remove(create_partitions_handle.to_ptr.address)
206
- raise
207
- ensure
208
- Rdkafka::Bindings.rd_kafka_AdminOptions_destroy(admin_options_ptr)
209
- Rdkafka::Bindings.rd_kafka_queue_destroy(queue_ptr)
210
- Rdkafka::Bindings.rd_kafka_NewPartitions_destroy(new_partitions_ptr)
228
+ create_partitions_handle
211
229
  end
230
+ end
231
+
232
+ private
212
233
 
213
- create_partitions_handle
234
+ def closed_admin_check(method)
235
+ raise Rdkafka::ClosedAdminError.new(method) if closed?
214
236
  end
215
237
  end
216
238
  end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  require "ffi"
2
4
  require "json"
3
5
  require "logger"
@@ -15,7 +17,7 @@ module Rdkafka
15
17
  end
16
18
  end
17
19
 
18
- ffi_lib File.join(File.dirname(__FILE__), "../../ext/librdkafka.#{lib_extension}")
20
+ ffi_lib File.join(__dir__, "../../ext/librdkafka.#{lib_extension}")
19
21
 
20
22
  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175
21
23
  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174
@@ -33,15 +35,17 @@ module Rdkafka
33
35
 
34
36
  # Polling
35
37
 
38
+ attach_function :rd_kafka_flush, [:pointer, :int], :void, blocking: true
36
39
  attach_function :rd_kafka_poll, [:pointer, :int], :void, blocking: true
37
40
  attach_function :rd_kafka_outq_len, [:pointer], :int, blocking: true
38
41
 
39
42
  # Metadata
40
43
 
41
- attach_function :rd_kafka_memberid, [:pointer], :string
42
- attach_function :rd_kafka_clusterid, [:pointer], :string
43
- attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int
44
- attach_function :rd_kafka_metadata_destroy, [:pointer], :void
44
+ attach_function :rd_kafka_name, [:pointer], :string, blocking: true
45
+ attach_function :rd_kafka_memberid, [:pointer], :string, blocking: true
46
+ attach_function :rd_kafka_clusterid, [:pointer], :string, blocking: true
47
+ attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int, blocking: true
48
+ attach_function :rd_kafka_metadata_destroy, [:pointer], :void, blocking: true
45
49
 
46
50
  # Message struct
47
51
 
@@ -110,6 +114,7 @@ module Rdkafka
110
114
  attach_function :rd_kafka_conf_set_stats_cb, [:pointer, :stats_cb], :void
111
115
  callback :error_cb, [:pointer, :int, :string, :pointer], :void
112
116
  attach_function :rd_kafka_conf_set_error_cb, [:pointer, :error_cb], :void
117
+ attach_function :rd_kafka_rebalance_protocol, [:pointer], :string
113
118
 
114
119
  # Log queue
115
120
  attach_function :rd_kafka_set_log_queue, [:pointer, :pointer], :void
@@ -166,24 +171,28 @@ module Rdkafka
166
171
  ]
167
172
 
168
173
  attach_function :rd_kafka_new, [:kafka_type, :pointer, :pointer, :int], :pointer
174
+
169
175
  attach_function :rd_kafka_destroy, [:pointer], :void
170
176
 
171
177
  # Consumer
172
178
 
173
- attach_function :rd_kafka_subscribe, [:pointer, :pointer], :int
174
- attach_function :rd_kafka_unsubscribe, [:pointer], :int
175
- attach_function :rd_kafka_subscription, [:pointer, :pointer], :int
176
- attach_function :rd_kafka_assign, [:pointer, :pointer], :int
177
- attach_function :rd_kafka_assignment, [:pointer, :pointer], :int
178
- attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int
179
+ attach_function :rd_kafka_subscribe, [:pointer, :pointer], :int, blocking: true
180
+ attach_function :rd_kafka_unsubscribe, [:pointer], :int, blocking: true
181
+ attach_function :rd_kafka_subscription, [:pointer, :pointer], :int, blocking: true
182
+ attach_function :rd_kafka_assign, [:pointer, :pointer], :int, blocking: true
183
+ attach_function :rd_kafka_incremental_assign, [:pointer, :pointer], :int, blocking: true
184
+ attach_function :rd_kafka_incremental_unassign, [:pointer, :pointer], :int, blocking: true
185
+ attach_function :rd_kafka_assignment, [:pointer, :pointer], :int, blocking: true
186
+ attach_function :rd_kafka_assignment_lost, [:pointer], :int, blocking: true
187
+ attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int, blocking: true
179
188
  attach_function :rd_kafka_commit, [:pointer, :pointer, :bool], :int, blocking: true
180
- attach_function :rd_kafka_poll_set_consumer, [:pointer], :void
189
+ attach_function :rd_kafka_poll_set_consumer, [:pointer], :void, blocking: true
181
190
  attach_function :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: true
182
191
  attach_function :rd_kafka_consumer_close, [:pointer], :void, blocking: true
183
- attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int
184
- attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int
185
- attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int
186
- attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int
192
+ attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int, blocking: true
193
+ attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
194
+ attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
195
+ attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int, blocking: true
187
196
 
188
197
  # Headers
189
198
  attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
@@ -192,30 +201,36 @@ module Rdkafka
192
201
  # Rebalance
193
202
 
194
203
  callback :rebalance_cb_function, [:pointer, :int, :pointer, :pointer], :void
195
- attach_function :rd_kafka_conf_set_rebalance_cb, [:pointer, :rebalance_cb_function], :void
204
+ attach_function :rd_kafka_conf_set_rebalance_cb, [:pointer, :rebalance_cb_function], :void, blocking: true
196
205
 
197
206
  RebalanceCallback = FFI::Function.new(
198
207
  :void, [:pointer, :int, :pointer, :pointer]
199
208
  ) do |client_ptr, code, partitions_ptr, opaque_ptr|
200
209
  case code
201
210
  when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
202
- Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
211
+ if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
212
+ Rdkafka::Bindings.rd_kafka_incremental_assign(client_ptr, partitions_ptr)
213
+ else
214
+ Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
215
+ end
203
216
  else # RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS or errors
204
- Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
217
+ if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
218
+ Rdkafka::Bindings.rd_kafka_incremental_unassign(client_ptr, partitions_ptr)
219
+ else
220
+ Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
221
+ end
205
222
  end
206
223
 
207
224
  opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
208
225
  return unless opaque
209
226
 
210
227
  tpl = Rdkafka::Consumer::TopicPartitionList.from_native_tpl(partitions_ptr).freeze
211
- consumer = Rdkafka::Consumer.new(client_ptr)
212
-
213
228
  begin
214
229
  case code
215
230
  when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
216
- opaque.call_on_partitions_assigned(consumer, tpl)
231
+ opaque.call_on_partitions_assigned(tpl)
217
232
  when RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
218
- opaque.call_on_partitions_revoked(consumer, tpl)
233
+ opaque.call_on_partitions_revoked(tpl)
219
234
  end
220
235
  rescue Exception => err
221
236
  Rdkafka::Config.logger.error("Unhandled exception: #{err.class} - #{err.message}")
@@ -242,7 +257,7 @@ module Rdkafka
242
257
 
243
258
  RD_KAFKA_MSG_F_COPY = 0x2
244
259
 
245
- attach_function :rd_kafka_producev, [:pointer, :varargs], :int
260
+ attach_function :rd_kafka_producev, [:pointer, :varargs], :int, blocking: true
246
261
  callback :delivery_cb, [:pointer, :pointer, :pointer], :void
247
262
  attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void
248
263
 
@@ -257,11 +272,11 @@ module Rdkafka
257
272
  # Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
258
273
  return -1 unless partition_count&.nonzero?
259
274
 
260
- str_ptr = FFI::MemoryPointer.from_string(str)
275
+ str_ptr = str.empty? ? FFI::MemoryPointer::NULL : FFI::MemoryPointer.from_string(str)
261
276
  method_name = PARTITIONERS.fetch(partitioner_name) do
262
277
  raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{partitioner_name}")
263
278
  end
264
- public_send(method_name, nil, str_ptr, str.size, partition_count, nil, nil)
279
+ public_send(method_name, nil, str_ptr, str.size > 0 ? str.size : 1, partition_count, nil, nil)
265
280
  end
266
281
 
267
282
  # Create Topics
@@ -269,23 +284,23 @@ module Rdkafka
269
284
  RD_KAFKA_ADMIN_OP_CREATETOPICS = 1 # rd_kafka_admin_op_t
270
285
  RD_KAFKA_EVENT_CREATETOPICS_RESULT = 100 # rd_kafka_event_type_t
271
286
 
272
- attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void
273
- attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer
274
- attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32
275
- attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void
276
- attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer
277
- attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer
287
+ attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void, blocking: true
288
+ attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer, blocking: true
289
+ attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32, blocking: true
290
+ attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void, blocking: true
291
+ attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer, blocking: true
292
+ attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer, blocking: true
278
293
 
279
294
  # Delete Topics
280
295
 
281
296
  RD_KAFKA_ADMIN_OP_DELETETOPICS = 2 # rd_kafka_admin_op_t
282
297
  RD_KAFKA_EVENT_DELETETOPICS_RESULT = 101 # rd_kafka_event_type_t
283
298
 
284
- attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32
285
- attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer
286
- attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void
287
- attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer
288
- attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer
299
+ attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32, blocking: true
300
+ attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer, blocking: true
301
+ attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void, blocking: true
302
+ attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer, blocking: true
303
+ attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer, blocking: true
289
304
 
290
305
  # Create partitions
291
306
  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS = 3
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  module Rdkafka
2
4
  module Callbacks
3
5