rdkafka 0.12.0 → 0.13.0

Files changed (54)
  1. checksums.yaml +4 -4
  2. data/.semaphore/semaphore.yml +7 -3
  3. data/CHANGELOG.md +18 -0
  4. data/Gemfile +2 -0
  5. data/README.md +26 -0
  6. data/Rakefile +2 -0
  7. data/ext/Rakefile +2 -0
  8. data/lib/rdkafka/abstract_handle.rb +2 -0
  9. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  10. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  11. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  12. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  13. data/lib/rdkafka/admin.rb +48 -31
  14. data/lib/rdkafka/bindings.rb +50 -37
  15. data/lib/rdkafka/callbacks.rb +7 -1
  16. data/lib/rdkafka/config.rb +13 -10
  17. data/lib/rdkafka/consumer/headers.rb +24 -7
  18. data/lib/rdkafka/consumer/message.rb +3 -1
  19. data/lib/rdkafka/consumer/partition.rb +2 -0
  20. data/lib/rdkafka/consumer/topic_partition_list.rb +2 -0
  21. data/lib/rdkafka/consumer.rb +86 -44
  22. data/lib/rdkafka/error.rb +15 -0
  23. data/lib/rdkafka/metadata.rb +4 -2
  24. data/lib/rdkafka/native_kafka.rb +115 -0
  25. data/lib/rdkafka/producer/delivery_handle.rb +5 -2
  26. data/lib/rdkafka/producer/delivery_report.rb +9 -2
  27. data/lib/rdkafka/producer.rb +35 -13
  28. data/lib/rdkafka/version.rb +5 -3
  29. data/lib/rdkafka.rb +3 -1
  30. data/rdkafka.gemspec +2 -0
  31. data/spec/rdkafka/abstract_handle_spec.rb +2 -0
  32. data/spec/rdkafka/admin/create_topic_handle_spec.rb +2 -0
  33. data/spec/rdkafka/admin/create_topic_report_spec.rb +2 -0
  34. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +2 -0
  35. data/spec/rdkafka/admin/delete_topic_report_spec.rb +2 -0
  36. data/spec/rdkafka/admin_spec.rb +4 -3
  37. data/spec/rdkafka/bindings_spec.rb +2 -0
  38. data/spec/rdkafka/callbacks_spec.rb +2 -0
  39. data/spec/rdkafka/config_spec.rb +17 -2
  40. data/spec/rdkafka/consumer/headers_spec.rb +62 -0
  41. data/spec/rdkafka/consumer/message_spec.rb +2 -0
  42. data/spec/rdkafka/consumer/partition_spec.rb +2 -0
  43. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +2 -0
  44. data/spec/rdkafka/consumer_spec.rb +120 -22
  45. data/spec/rdkafka/error_spec.rb +2 -0
  46. data/spec/rdkafka/metadata_spec.rb +2 -0
  47. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +13 -34
  48. data/spec/rdkafka/producer/delivery_handle_spec.rb +5 -0
  49. data/spec/rdkafka/producer/delivery_report_spec.rb +8 -2
  50. data/spec/rdkafka/producer_spec.rb +51 -19
  51. data/spec/spec_helper.rb +17 -1
  52. metadata +12 -12
  53. data/bin/console +0 -11
  54. data/lib/rdkafka/producer/client.rb +0 -47
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 821523c304fc7a1fbb2c7be2b58d98d56600b645b89fdb4093f976418650035d
-  data.tar.gz: '039b8e345fd8be5f295a293d64466071dbefd77d81b01460abb0fcf343a6bed3'
+  metadata.gz: b4b1881445c53cdd213efd2e7ed3feb94a32a327fdf63ab35455d337f09e7320
+  data.tar.gz: 02276df9702539c951ab392b4c5e086f87b731f7d319aedafb7b1209bc444688
 SHA512:
-  metadata.gz: 2c7ac2199a63aacd3b1420890981ed5d953ae5cdadb874886cc4e396fa1fd8f69333633319beef35a05a002d75d22335a526a126e518cc3fbbb877a1c11ef2f7
-  data.tar.gz: 5d23c6beec3759877013b040018111453e05c41238014b07a27c1a9d8b96e8af3bc037aacd1ebe89f856435cf0afb8e34a9f89443f87cf1a3682736efb79b4bd
+  metadata.gz: 7f0e11a17d03091c8e13308c6998131c142349ce520473de7fec5908f0bcf2dfd59542966c3a850008f0db39e96b763b66f7b937487ba11f2fbb444e00aff25c
+  data.tar.gz: d15c5f7cd5f4b9041da55e7e46bf05936c627517d5067a9449844e2721e752de852006f432e5fbe72549d77d8378545a671a969652b4abada8aa8182f4fa4985
data/.semaphore/semaphore.yml CHANGED
@@ -9,15 +9,19 @@ agent:
 blocks:
   - name: Run specs
     task:
+      prologue:
+        commands:
+          - sudo apt install -y valgrind
       jobs:
       - name: bundle exec rspec
         matrix:
         - env_var: RUBY_VERSION
-          values: [ "2.6.8", "2.7.4", "3.0.2", "jruby-9.3.1.0"]
+          values: [ "2.6.10", "2.7.6", "3.0.4", "3.1.2"]
         commands:
           - sem-version ruby $RUBY_VERSION
           - checkout
+          - docker-compose up -d --no-recreate
           - bundle install --path vendor/bundle
           - cd ext && bundle exec rake && cd ..
-          - docker-compose up -d --no-recreate
-          - bundle exec rspec
+          - ulimit -c unlimited
+          - valgrind -v bundle exec rspec
data/CHANGELOG.md CHANGED
@@ -1,5 +1,23 @@
+# 0.13.0
+* Support cooperative sticky partition assignment in the rebalance callback (methodmissing)
+* Support both string and symbol header keys (ColinDKelley)
+* Handle tombstone messages properly (kgalieva)
+* Add topic name to delivery report (maeve)
+* Allow string partitioner config (mollyegibson)
+* Fix documented type for DeliveryReport#error (jimmydo)
+* Bump librdkafka to 2.0.2 (lmaia)
+* Use finalizers to cleanly exit producer and admin (thijsc)
+* Lock access to the native kafka client (thijsc)
+* Fix potential race condition in multi-threaded producer (mensfeld)
+* Fix leaking FFI resources in specs (mensfeld)
+* Improve specs stability (mensfeld)
+* Make metadata request timeout configurable (mensfeld)
+* call_on_partitions_assigned and call_on_partitions_revoked only get a tpl passed in (thijsc)
+
 # 0.12.0
 * Bumps librdkafka to 1.9.0
+* Fix crash on empty partition key (mensfeld)
+* Pass the delivery handle to the callback (gvisokinskas)

 # 0.11.0
 * Upgrade librdkafka to 1.8.2
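
The last 0.13.0 entry above changes the rebalance listener contract: the callbacks now receive only the topic partition list. A minimal sketch of a listener written against the new signatures (broker address and class name are illustrative):

    class LoggingRebalanceListener
      # As of 0.13.0 these hooks receive only the TopicPartitionList,
      # no longer the consumer instance.
      def on_partitions_assigned(list)
        puts "Assigned: #{list.to_h.keys.inspect}"
      end

      def on_partitions_revoked(list)
        puts "Revoked: #{list.to_h.keys.inspect}"
      end
    end

    config = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092",
      "group.id"          => "demo-group"
    )
    config.consumer_rebalance_listener = LoggingRebalanceListener.new
    consumer = config.consumer
    consumer.subscribe("example-topic")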
data/Gemfile CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 source "https://rubygems.org"

 gemspec
data/README.md CHANGED
@@ -23,6 +23,19 @@ The most important pieces of a Kafka client are implemented. We're
 working towards feature completeness, you can track that here:
 https://github.com/appsignal/rdkafka-ruby/milestone/1

+## Table of content
+
+- [Installation](#installation)
+- [Usage](#usage)
+  * [Consuming messages](#consuming-messages)
+  * [Producing messages](#producing-messages)
+- [Higher level libraries](#higher-level-libraries)
+  * [Message processing frameworks](#message-processing-frameworks)
+  * [Message publishing libraries](#message-publishing-libraries)
+- [Development](#development)
+- [Example](#example)
+
+
 ## Installation

 This gem downloads and compiles librdkafka when it is installed. If you
@@ -77,6 +90,19 @@ Note that creating a producer consumes some resources that will not be
 released until it `#close` is explicitly called, so be sure to call
 `Config#producer` only as necessary.

+## Higher level libraries
+
+Currently, there are two actively developed frameworks based on rdkafka-ruby, that provide higher level API that can be used to work with Kafka messages and one library for publishing messages.
+
+### Message processing frameworks
+
+* [Karafka](https://github.com/karafka/karafka) - Ruby and Rails efficient Kafka processing framework.
+* [Racecar](https://github.com/zendesk/racecar) - A simple framework for Kafka consumers in Ruby
+
+### Message publishing libraries
+
+* [WaterDrop](https://github.com/karafka/waterdrop) – Standalone Karafka library for producing Kafka messages.
+
 ## Development

 A Docker Compose file is included to run Kafka and Zookeeper. To run
data/Rakefile CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 # Rakefile

 require 'bundler/gem_tasks'
data/ext/Rakefile CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require File.expand_path('../../lib/rdkafka/version', __FILE__)
 require "mini_portile2"
 require "fileutils"
data/lib/rdkafka/abstract_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "ffi"

 module Rdkafka
data/lib/rdkafka/admin/create_topic_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class CreateTopicHandle < AbstractHandle
data/lib/rdkafka/admin/create_topic_report.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class CreateTopicReport
data/lib/rdkafka/admin/delete_topic_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class DeleteTopicHandle < AbstractHandle
data/lib/rdkafka/admin/delete_topic_report.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class DeleteTopicReport
data/lib/rdkafka/admin.rb CHANGED
@@ -1,33 +1,31 @@
+# frozen_string_literal: true
+
+require "objspace"
+
 module Rdkafka
   class Admin
     # @private
     def initialize(native_kafka)
       @native_kafka = native_kafka
-      @closing = false
-
-      # Start thread to poll client for callbacks
-      @polling_thread = Thread.new do
-        loop do
-          Rdkafka::Bindings.rd_kafka_poll(@native_kafka, 250)
-          # Exit thread if closing and the poll queue is empty
-          if @closing && Rdkafka::Bindings.rd_kafka_outq_len(@native_kafka) == 0
-            break
-          end
-        end
-      end
-      @polling_thread.abort_on_exception = true
+
+      # Makes sure, that native kafka gets closed before it gets GCed by Ruby
+      ObjectSpace.define_finalizer(self, native_kafka.finalizer)
+    end
+
+    def finalizer
+      ->(_) { close }
     end

     # Close this admin instance
     def close
-      return unless @native_kafka
-
-      # Indicate to polling thread that we're closing
-      @closing = true
-      # Wait for the polling thread to finish up
-      @polling_thread.join
-      Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
-      @native_kafka = nil
+      return if closed?
+      ObjectSpace.undefine_finalizer(self)
+      @native_kafka.close
+    end
+
+    # Whether this admin has closed
+    def closed?
+      @native_kafka.closed?
     end

     # Create a topic with the given partition count and replication factor
@@ -38,6 +36,7 @@ module Rdkafka
     #
     # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of creating the topic
     def create_topic(topic_name, partition_count, replication_factor, topic_config={})
+      closed_admin_check(__method__)

       # Create a rd_kafka_NewTopic_t representing the new topic
       error_buffer = FFI::MemoryPointer.from_string(" " * 256)
@@ -68,7 +67,9 @@ module Rdkafka
       topics_array_ptr.write_array_of_pointer(pointer_array)

       # Get a pointer to the queue that our request will be enqueued on
-      queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
+      queue_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
+      end
       if queue_ptr.null?
         Rdkafka::Bindings.rd_kafka_NewTopic_destroy(new_topic_ptr)
         raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -79,17 +80,21 @@ module Rdkafka
       create_topic_handle[:pending] = true
       create_topic_handle[:response] = -1
       CreateTopicHandle.register(create_topic_handle)
-      admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
+      admin_options_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
+      end
       Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_topic_handle.to_ptr)

       begin
-        Rdkafka::Bindings.rd_kafka_CreateTopics(
-          @native_kafka,
+        @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_CreateTopics(
+            inner,
             topics_array_ptr,
             1,
             admin_options_ptr,
             queue_ptr
-        )
+          )
+        end
       rescue Exception
         CreateTopicHandle.remove(create_topic_handle.to_ptr.address)
         raise
@@ -108,6 +113,7 @@ module Rdkafka
     #
     # @return [DeleteTopicHandle] Delete topic handle that can be used to wait for the result of deleting the topic
     def delete_topic(topic_name)
+      closed_admin_check(__method__)

       # Create a rd_kafka_DeleteTopic_t representing the topic to be deleted
       delete_topic_ptr = Rdkafka::Bindings.rd_kafka_DeleteTopic_new(FFI::MemoryPointer.from_string(topic_name))
@@ -118,7 +124,9 @@ module Rdkafka
       topics_array_ptr.write_array_of_pointer(pointer_array)

       # Get a pointer to the queue that our request will be enqueued on
-      queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
+      queue_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
+      end
       if queue_ptr.null?
         Rdkafka::Bindings.rd_kafka_DeleteTopic_destroy(delete_topic_ptr)
         raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -129,17 +137,21 @@ module Rdkafka
       delete_topic_handle[:pending] = true
       delete_topic_handle[:response] = -1
       DeleteTopicHandle.register(delete_topic_handle)
-      admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
+      admin_options_ptr = @native_kafka.with_inner do |inner|
+        Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
+      end
       Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, delete_topic_handle.to_ptr)

       begin
-        Rdkafka::Bindings.rd_kafka_DeleteTopics(
-          @native_kafka,
+        @native_kafka.with_inner do |inner|
+          Rdkafka::Bindings.rd_kafka_DeleteTopics(
+            inner,
             topics_array_ptr,
             1,
             admin_options_ptr,
             queue_ptr
-        )
+          )
+        end
       rescue Exception
         DeleteTopicHandle.remove(delete_topic_handle.to_ptr.address)
         raise
@@ -151,5 +163,10 @@ module Rdkafka

       delete_topic_handle
     end
+
+    private
+    def closed_admin_check(method)
+      raise Rdkafka::ClosedAdminError.new(method) if closed?
+    end
   end
 end
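
From the caller's side, the finalizer and the closed-state guard behave roughly as sketched below (assuming a reachable broker; topic names are illustrative):

    admin = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").admin

    handle = admin.create_topic("example-topic", 3, 1)
    handle.wait(max_wait_timeout: 15.0)

    admin.close
    admin.closed?                             # => true
    admin.create_topic("other-topic", 1, 1)   # raises Rdkafka::ClosedAdminError

If close is never called, the finalizer registered in initialize closes the underlying native handle when the Admin object is garbage collected, instead of leaking it.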
data/lib/rdkafka/bindings.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "ffi"
 require "json"
 require "logger"
@@ -15,7 +17,7 @@ module Rdkafka
     end
   end

-  ffi_lib File.join(File.dirname(__FILE__), "../../ext/librdkafka.#{lib_extension}")
+  ffi_lib File.join(__dir__, "../../ext/librdkafka.#{lib_extension}")

   RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175
   RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174
@@ -33,15 +35,16 @@ module Rdkafka

   # Polling

+  attach_function :rd_kafka_flush, [:pointer, :int], :void, blocking: true
   attach_function :rd_kafka_poll, [:pointer, :int], :void, blocking: true
   attach_function :rd_kafka_outq_len, [:pointer], :int, blocking: true

   # Metadata

-  attach_function :rd_kafka_memberid, [:pointer], :string
-  attach_function :rd_kafka_clusterid, [:pointer], :string
-  attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int
-  attach_function :rd_kafka_metadata_destroy, [:pointer], :void
+  attach_function :rd_kafka_memberid, [:pointer], :string, blocking: true
+  attach_function :rd_kafka_clusterid, [:pointer], :string, blocking: true
+  attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int, blocking: true
+  attach_function :rd_kafka_metadata_destroy, [:pointer], :void, blocking: true

   # Message struct

@@ -110,6 +113,7 @@ module Rdkafka
   attach_function :rd_kafka_conf_set_stats_cb, [:pointer, :stats_cb], :void
   callback :error_cb, [:pointer, :int, :string, :pointer], :void
   attach_function :rd_kafka_conf_set_error_cb, [:pointer, :error_cb], :void
+  attach_function :rd_kafka_rebalance_protocol, [:pointer], :string

   # Log queue
   attach_function :rd_kafka_set_log_queue, [:pointer, :pointer], :void
@@ -165,24 +169,27 @@ module Rdkafka
   ]

   attach_function :rd_kafka_new, [:kafka_type, :pointer, :pointer, :int], :pointer
+
   attach_function :rd_kafka_destroy, [:pointer], :void

   # Consumer

-  attach_function :rd_kafka_subscribe, [:pointer, :pointer], :int
-  attach_function :rd_kafka_unsubscribe, [:pointer], :int
-  attach_function :rd_kafka_subscription, [:pointer, :pointer], :int
-  attach_function :rd_kafka_assign, [:pointer, :pointer], :int
-  attach_function :rd_kafka_assignment, [:pointer, :pointer], :int
-  attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int
+  attach_function :rd_kafka_subscribe, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_unsubscribe, [:pointer], :int, blocking: true
+  attach_function :rd_kafka_subscription, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_assign, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_incremental_assign, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_incremental_unassign, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_assignment, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int, blocking: true
   attach_function :rd_kafka_commit, [:pointer, :pointer, :bool], :int, blocking: true
-  attach_function :rd_kafka_poll_set_consumer, [:pointer], :void
+  attach_function :rd_kafka_poll_set_consumer, [:pointer], :void, blocking: true
   attach_function :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: true
   attach_function :rd_kafka_consumer_close, [:pointer], :void, blocking: true
-  attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int
-  attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int
-  attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int
-  attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int
+  attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int, blocking: true
+  attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
+  attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int, blocking: true

   # Headers
   attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
@@ -191,30 +198,36 @@ module Rdkafka
   # Rebalance

   callback :rebalance_cb_function, [:pointer, :int, :pointer, :pointer], :void
-  attach_function :rd_kafka_conf_set_rebalance_cb, [:pointer, :rebalance_cb_function], :void
+  attach_function :rd_kafka_conf_set_rebalance_cb, [:pointer, :rebalance_cb_function], :void, blocking: true

   RebalanceCallback = FFI::Function.new(
     :void, [:pointer, :int, :pointer, :pointer]
   ) do |client_ptr, code, partitions_ptr, opaque_ptr|
     case code
     when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
-      Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
+      if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
+        Rdkafka::Bindings.rd_kafka_incremental_assign(client_ptr, partitions_ptr)
+      else
+        Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
+      end
     else # RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS or errors
-      Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+      if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
+        Rdkafka::Bindings.rd_kafka_incremental_unassign(client_ptr, partitions_ptr)
+      else
+        Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+      end
     end

     opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
     return unless opaque

     tpl = Rdkafka::Consumer::TopicPartitionList.from_native_tpl(partitions_ptr).freeze
-    consumer = Rdkafka::Consumer.new(client_ptr)
-
     begin
       case code
       when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
-        opaque.call_on_partitions_assigned(consumer, tpl)
+        opaque.call_on_partitions_assigned(tpl)
       when RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
-        opaque.call_on_partitions_revoked(consumer, tpl)
+        opaque.call_on_partitions_revoked(tpl)
       end
     rescue Exception => err
       Rdkafka::Config.logger.error("Unhandled exception: #{err.class} - #{err.message}")
@@ -241,7 +254,7 @@ module Rdkafka

   RD_KAFKA_MSG_F_COPY = 0x2

-  attach_function :rd_kafka_producev, [:pointer, :varargs], :int
+  attach_function :rd_kafka_producev, [:pointer, :varargs], :int, blocking: true
   callback :delivery_cb, [:pointer, :pointer, :pointer], :void
   attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void

@@ -256,11 +269,11 @@ module Rdkafka
     # Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
     return -1 unless partition_count&.nonzero?

-    str_ptr = FFI::MemoryPointer.from_string(str)
+    str_ptr = str.empty? ? FFI::MemoryPointer::NULL : FFI::MemoryPointer.from_string(str)
     method_name = PARTITIONERS.fetch(partitioner_name) do
       raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{partitioner_name}")
     end
-    public_send(method_name, nil, str_ptr, str.size, partition_count, nil, nil)
+    public_send(method_name, nil, str_ptr, str.size > 0 ? str.size : 1, partition_count, nil, nil)
   end

   # Create Topics
@@ -268,23 +281,23 @@ module Rdkafka
   RD_KAFKA_ADMIN_OP_CREATETOPICS = 1 # rd_kafka_admin_op_t
   RD_KAFKA_EVENT_CREATETOPICS_RESULT = 100 # rd_kafka_event_type_t

-  attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void
-  attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer
-  attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32
-  attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void
-  attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer
-  attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer
+  attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void, blocking: true
+  attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer, blocking: true
+  attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32, blocking: true
+  attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void, blocking: true
+  attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer, blocking: true
+  attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer, blocking: true

   # Delete Topics

   RD_KAFKA_ADMIN_OP_DELETETOPICS = 2 # rd_kafka_admin_op_t
   RD_KAFKA_EVENT_DELETETOPICS_RESULT = 101 # rd_kafka_event_type_t

-  attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32
-  attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer
-  attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void
-  attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer
-  attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer
+  attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32, blocking: true
+  attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer, blocking: true
+  attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void, blocking: true
+  attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer, blocking: true
+  attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer, blocking: true

   # Background Queue and Callback
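Two of the changes above are easiest to see from the Ruby side. The COOPERATIVE branch in RebalanceCallback only comes into play when the consumer opts in to incremental rebalancing via librdkafka's partition.assignment.strategy property; a hedged configuration sketch (broker address and topic are illustrative):

    config = Rdkafka::Config.new(
      "bootstrap.servers" => "localhost:9092",
      "group.id"          => "demo-group",
      # With this strategy librdkafka reports rd_kafka_rebalance_protocol() ==
      # "COOPERATIVE", so the callback takes the incremental assign/unassign path.
      "partition.assignment.strategy" => "cooperative-sticky"
    )
    consumer = config.consumer
    consumer.subscribe("example-topic")

And the partitioner change maps an empty key to a NULL pointer (with a stand-in length of 1) instead of crashing inside librdkafka, so an empty partition key is now safe:

    # Internal API shown for illustration only; 3 is an assumed partition count.
    Rdkafka::Bindings.partitioner("", 3, "consistent_random") # => a valid partition index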
data/lib/rdkafka/callbacks.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   module Callbacks

@@ -90,14 +92,18 @@ module Rdkafka
       message = Rdkafka::Bindings::Message.new(message_ptr)
       delivery_handle_ptr_address = message[:_private].address
       if delivery_handle = Rdkafka::Producer::DeliveryHandle.remove(delivery_handle_ptr_address)
+        topic_name = Rdkafka::Bindings.rd_kafka_topic_name(message[:rkt])
+
         # Update delivery handle
         delivery_handle[:response] = message[:err]
         delivery_handle[:partition] = message[:partition]
         delivery_handle[:offset] = message[:offset]
+        delivery_handle[:topic_name] = FFI::MemoryPointer.from_string(topic_name)
         delivery_handle[:pending] = false
+
         # Call delivery callback on opaque
         if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
-          opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]), delivery_handle)
+          opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], topic_name, message[:err]), delivery_handle)
         end
       end
     end
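
Since the delivery handle and the DeliveryReport now carry the topic name, producer code can log it directly; a minimal sketch (broker address and topic are illustrative):

    producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer
    producer.delivery_callback = lambda do |report|
      # report.topic_name is new in 0.13.0; partition and offset behave as before
      puts "Delivered to #{report.topic_name}[#{report.partition}] at offset #{report.offset}"
    end
    producer.produce(topic: "example-topic", payload: "hello").wait
    producer.close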
data/lib/rdkafka/config.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "logger"

 module Rdkafka
@@ -30,7 +32,6 @@ module Rdkafka
     @@logger
   end

-
   # Returns a queue whose contents will be passed to the configured logger. Each entry
   # should follow the format [Logger::Severity, String]. The benefit over calling the
   # logger directly is that this is safe to use from trap contexts.
@@ -47,7 +48,7 @@ module Rdkafka
   # @return [nil]
   def self.logger=(logger)
     raise NoLoggerError if logger.nil?
-    @@logger=logger
+    @@logger = logger
   end

   # Set a callback that will be called every time the underlying client emits statistics.
@@ -156,13 +157,14 @@ module Rdkafka
       Rdkafka::Bindings.rd_kafka_conf_set_rebalance_cb(config, Rdkafka::Bindings::RebalanceCallback)
     end

+    # Create native client
     kafka = native_kafka(config, :rd_kafka_consumer)

     # Redirect the main queue to the consumer
     Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka)

     # Return consumer with Kafka client
-    Rdkafka::Consumer.new(kafka)
+    Rdkafka::Consumer.new(Rdkafka::NativeKafka.new(kafka, run_polling_thread: false))
   end

   # Create a producer with this configuration.
@@ -179,7 +181,8 @@ module Rdkafka
     # Set callback to receive delivery reports on config
     Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
     # Return producer with Kafka client
-    Rdkafka::Producer.new(Rdkafka::Producer::Client.new(native_kafka(config, :rd_kafka_producer)), self[:partitioner]).tap do |producer|
+    partitioner_name = self[:partitioner] || self["partitioner"]
+    Rdkafka::Producer.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true), partitioner_name).tap do |producer|
       opaque.producer = producer
     end
   end
@@ -194,7 +197,7 @@ module Rdkafka
     opaque = Opaque.new
     config = native_config(opaque)
     Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
-    Rdkafka::Admin.new(native_kafka(config, :rd_kafka_producer))
+    Rdkafka::Admin.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true))
   end

   # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
@@ -210,7 +213,7 @@ module Rdkafka

   # This method is only intended to be used to create a client,
   # using it in another way will leak memory.
-  def native_config(opaque=nil)
+  def native_config(opaque = nil)
     Rdkafka::Bindings.rd_kafka_conf_new.tap do |config|
       # Create config
       @config_hash.merge(REQUIRED_CONFIG).each do |key, value|
@@ -282,18 +285,18 @@ module Rdkafka
       producer.call_delivery_callback(delivery_report, delivery_handle) if producer
     end

-    def call_on_partitions_assigned(consumer, list)
+    def call_on_partitions_assigned(list)
       return unless consumer_rebalance_listener
       return unless consumer_rebalance_listener.respond_to?(:on_partitions_assigned)

-      consumer_rebalance_listener.on_partitions_assigned(consumer, list)
+      consumer_rebalance_listener.on_partitions_assigned(list)
     end

-    def call_on_partitions_revoked(consumer, list)
+    def call_on_partitions_revoked(list)
       return unless consumer_rebalance_listener
       return unless consumer_rebalance_listener.respond_to?(:on_partitions_revoked)

-      consumer_rebalance_listener.on_partitions_revoked(consumer, list)
+      consumer_rebalance_listener.on_partitions_revoked(list)
     end
   end
 end
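
With the producer factory checking both self[:partitioner] and self["partitioner"], the partitioner can now be configured with either key style; a brief sketch (broker address is illustrative):

    # Both resolve to the same partitioner as of 0.13.0:
    Rdkafka::Config.new("bootstrap.servers" => "localhost:9092", "partitioner" => "murmur2").producer
    Rdkafka::Config.new("bootstrap.servers" => "localhost:9092", :partitioner  => "murmur2").producer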
data/lib/rdkafka/consumer/headers.rb CHANGED
@@ -1,10 +1,26 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
-    # A message headers
-    class Headers
-      # Reads a native kafka's message header into ruby's hash
+    # Interface to return headers for a consumer message
+    module Headers
+      class HashWithSymbolKeysTreatedLikeStrings < Hash
+        def [](key)
+          if key.is_a?(Symbol)
+            Kernel.warn("rdkafka deprecation warning: header access with Symbol key #{key.inspect} treated as a String. " \
+              "Please change your code to use String keys to avoid this warning. Symbol keys will break in version 1.")
+            super(key.to_s)
+          else
+            super
+          end
+        end
+      end
+
+      # Reads a librdkafka native message's headers and returns them as a Ruby Hash
+      #
+      # @param [librdkakfa message] native_message
       #
-      # @return [Hash<String, String>] a message headers
+      # @return [Hash<String, String>] headers Hash for the native_message
       #
       # @raise [Rdkafka::RdkafkaError] when fail to read headers
       #
@@ -24,7 +40,8 @@ module Rdkafka
       name_ptrptr = FFI::MemoryPointer.new(:pointer)
       value_ptrptr = FFI::MemoryPointer.new(:pointer)
       size_ptr = Rdkafka::Bindings::SizePtr.new
-      headers = {}
+
+      headers = HashWithSymbolKeysTreatedLikeStrings.new

       idx = 0
       loop do
@@ -51,12 +68,12 @@ module Rdkafka

         value = value_ptr.read_string(size)

-        headers[name.to_sym] = value
+        headers[name] = value

         idx += 1
       end

-      headers
+      headers.freeze
     end
   end
 end
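
Consumer code that still indexes headers with Symbols keeps working for now: the Hash subclass above rewrites Symbol lookups into String lookups and prints the deprecation warning. A minimal sketch (assuming a subscribed consumer with messages available; the header name is illustrative):

    message = consumer.poll(250)
    message.headers["request-id"]   # preferred: String keys
    message.headers[:"request-id"]  # same value, but emits the deprecation warning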