rdkafka 0.12.0 → 0.13.0.beta.2

This diff shows the changes between two publicly released versions of this package, exactly as they appear in their public registry, and is provided for informational purposes only.
Files changed (52)
  1. checksums.yaml +4 -4
  2. data/.semaphore/semaphore.yml +6 -2
  3. data/CHANGELOG.md +12 -0
  4. data/Gemfile +2 -0
  5. data/Rakefile +2 -0
  6. data/ext/Rakefile +2 -0
  7. data/lib/rdkafka/abstract_handle.rb +2 -0
  8. data/lib/rdkafka/admin/create_topic_handle.rb +2 -0
  9. data/lib/rdkafka/admin/create_topic_report.rb +2 -0
  10. data/lib/rdkafka/admin/delete_topic_handle.rb +2 -0
  11. data/lib/rdkafka/admin/delete_topic_report.rb +2 -0
  12. data/lib/rdkafka/admin.rb +35 -35
  13. data/lib/rdkafka/bindings.rb +19 -5
  14. data/lib/rdkafka/callbacks.rb +7 -1
  15. data/lib/rdkafka/config.rb +7 -5
  16. data/lib/rdkafka/consumer/headers.rb +24 -7
  17. data/lib/rdkafka/consumer/message.rb +3 -1
  18. data/lib/rdkafka/consumer/partition.rb +2 -0
  19. data/lib/rdkafka/consumer/topic_partition_list.rb +2 -0
  20. data/lib/rdkafka/consumer.rb +19 -10
  21. data/lib/rdkafka/error.rb +9 -0
  22. data/lib/rdkafka/metadata.rb +2 -0
  23. data/lib/rdkafka/native_kafka.rb +52 -0
  24. data/lib/rdkafka/producer/delivery_handle.rb +5 -2
  25. data/lib/rdkafka/producer/delivery_report.rb +9 -2
  26. data/lib/rdkafka/producer.rb +11 -10
  27. data/lib/rdkafka/version.rb +5 -3
  28. data/lib/rdkafka.rb +3 -1
  29. data/rdkafka.gemspec +2 -0
  30. data/spec/rdkafka/abstract_handle_spec.rb +2 -0
  31. data/spec/rdkafka/admin/create_topic_handle_spec.rb +2 -0
  32. data/spec/rdkafka/admin/create_topic_report_spec.rb +2 -0
  33. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +2 -0
  34. data/spec/rdkafka/admin/delete_topic_report_spec.rb +2 -0
  35. data/spec/rdkafka/admin_spec.rb +4 -3
  36. data/spec/rdkafka/bindings_spec.rb +2 -0
  37. data/spec/rdkafka/callbacks_spec.rb +2 -0
  38. data/spec/rdkafka/config_spec.rb +14 -0
  39. data/spec/rdkafka/consumer/headers_spec.rb +62 -0
  40. data/spec/rdkafka/consumer/message_spec.rb +2 -0
  41. data/spec/rdkafka/consumer/partition_spec.rb +2 -0
  42. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +2 -0
  43. data/spec/rdkafka/consumer_spec.rb +84 -15
  44. data/spec/rdkafka/error_spec.rb +2 -0
  45. data/spec/rdkafka/metadata_spec.rb +2 -0
  46. data/spec/rdkafka/{producer/client_spec.rb → native_kafka_spec.rb} +8 -6
  47. data/spec/rdkafka/producer/delivery_handle_spec.rb +5 -0
  48. data/spec/rdkafka/producer/delivery_report_spec.rb +8 -2
  49. data/spec/rdkafka/producer_spec.rb +45 -16
  50. data/spec/spec_helper.rb +16 -0
  51. metadata +13 -11
  52. data/lib/rdkafka/producer/client.rb +0 -47
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 821523c304fc7a1fbb2c7be2b58d98d56600b645b89fdb4093f976418650035d
-  data.tar.gz: '039b8e345fd8be5f295a293d64466071dbefd77d81b01460abb0fcf343a6bed3'
+  metadata.gz: eac016dc712d56e178c77a62f8af4e422319a783e80acbd7528453bfca6ca260
+  data.tar.gz: 8a4ed8df1339e0e74c96b259431012465c499ae9e9c8c6db8ff751497e1e5c43
 SHA512:
-  metadata.gz: 2c7ac2199a63aacd3b1420890981ed5d953ae5cdadb874886cc4e396fa1fd8f69333633319beef35a05a002d75d22335a526a126e518cc3fbbb877a1c11ef2f7
-  data.tar.gz: 5d23c6beec3759877013b040018111453e05c41238014b07a27c1a9d8b96e8af3bc037aacd1ebe89f856435cf0afb8e34a9f89443f87cf1a3682736efb79b4bd
+  metadata.gz: e3167de231aaabdfa512cabef13a4ffaabcc94757a2b7d0f360ed27a9005a40da5bbb262dffc49bda03d11557eef63dcf6b9c010fb944cf41fcb06bb7531c3ee
+  data.tar.gz: 8db04d46686505a9b1fef79c8544f3c27056ae5ff9e9eede9343d4f5cef4c63c844b9798011a5339ebbdf2cab883d1c3fd5df61842ea90c4862fa0d03dd88610
data/.semaphore/semaphore.yml CHANGED
@@ -9,15 +9,19 @@ agent:
 blocks:
   - name: Run specs
     task:
+      prologue:
+        commands:
+          - sudo apt install -y valgrind
       jobs:
       - name: bundle exec rspec
         matrix:
           - env_var: RUBY_VERSION
-            values: [ "2.6.8", "2.7.4", "3.0.2", "jruby-9.3.1.0"]
+            values: [ "2.6.10", "2.7.6", "3.0.4", "3.1.2"]
         commands:
           - sem-version ruby $RUBY_VERSION
           - checkout
           - bundle install --path vendor/bundle
           - cd ext && bundle exec rake && cd ..
           - docker-compose up -d --no-recreate
-          - bundle exec rspec
+          - ulimit -c unlimited
+          - valgrind -v bundle exec rspec
data/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
+# 0.13.0
+* Support cooperative sticky partition assignment in the rebalance callback (methodmissing)
+* Support both string and symbol header keys (ColinDKelley)
+* Handle tombstone messages properly (kgalieva)
+* Add topic name to delivery report (maeve)
+* Allow string partitioner config (mollyegibson)
+* Fix documented type for DeliveryReport#error (jimmydo)
+* Bump librdkafka to 1.9.2 (thijsc)
+* Use finalizers to cleanly exit producer and admin (thijsc)
+
 # 0.12.0
 * Bumps librdkafka to 1.9.0
+* Fix crash on empty partition key (mensfeld)
+* Pass the delivery handle to the callback (gvisokinskas)
 
 # 0.11.0
 * Upgrade librdkafka to 1.8.2
data/Gemfile CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 source "https://rubygems.org"
 
 gemspec
data/Rakefile CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 # Rakefile
 
 require 'bundler/gem_tasks'
data/ext/Rakefile CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require File.expand_path('../../lib/rdkafka/version', __FILE__)
 require "mini_portile2"
 require "fileutils"
data/lib/rdkafka/abstract_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "ffi"
 
 module Rdkafka
data/lib/rdkafka/admin/create_topic_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class CreateTopicHandle < AbstractHandle
data/lib/rdkafka/admin/create_topic_report.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class CreateTopicReport
data/lib/rdkafka/admin/delete_topic_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class DeleteTopicHandle < AbstractHandle
data/lib/rdkafka/admin/delete_topic_report.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Admin
     class DeleteTopicReport
data/lib/rdkafka/admin.rb CHANGED
@@ -1,33 +1,26 @@
+# frozen_string_literal: true
+
+require "objspace"
+
 module Rdkafka
   class Admin
     # @private
     def initialize(native_kafka)
       @native_kafka = native_kafka
-      @closing = false
-
-      # Start thread to poll client for callbacks
-      @polling_thread = Thread.new do
-        loop do
-          Rdkafka::Bindings.rd_kafka_poll(@native_kafka, 250)
-          # Exit thread if closing and the poll queue is empty
-          if @closing && Rdkafka::Bindings.rd_kafka_outq_len(@native_kafka) == 0
-            break
-          end
-        end
-      end
-      @polling_thread.abort_on_exception = true
+
+      # Makes sure, that native kafka gets closed before it gets GCed by Ruby
+      ObjectSpace.define_finalizer(self, native_kafka.finalizer)
+    end
+
+    def finalizer
+      ->(_) { close }
     end
 
     # Close this admin instance
     def close
-      return unless @native_kafka
-
-      # Indicate to polling thread that we're closing
-      @closing = true
-      # Wait for the polling thread to finish up
-      @polling_thread.join
-      Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
-      @native_kafka = nil
+      ObjectSpace.undefine_finalizer(self)
+
+      @native_kafka.close
     end
 
     # Create a topic with the given partition count and replication factor
@@ -38,6 +31,7 @@ module Rdkafka
     #
     # @return [CreateTopicHandle] Create topic handle that can be used to wait for the result of creating the topic
    def create_topic(topic_name, partition_count, replication_factor, topic_config={})
+      closed_admin_check(__method__)
 
       # Create a rd_kafka_NewTopic_t representing the new topic
       error_buffer = FFI::MemoryPointer.from_string(" " * 256)
@@ -68,7 +62,7 @@ module Rdkafka
       topics_array_ptr.write_array_of_pointer(pointer_array)
 
       # Get a pointer to the queue that our request will be enqueued on
-      queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
+      queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka.inner)
       if queue_ptr.null?
         Rdkafka::Bindings.rd_kafka_NewTopic_destroy(new_topic_ptr)
         raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -79,16 +73,16 @@ module Rdkafka
       create_topic_handle[:pending] = true
       create_topic_handle[:response] = -1
       CreateTopicHandle.register(create_topic_handle)
-      admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
+      admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka.inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
       Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_topic_handle.to_ptr)
 
       begin
         Rdkafka::Bindings.rd_kafka_CreateTopics(
-          @native_kafka,
-          topics_array_ptr,
-          1,
-          admin_options_ptr,
-          queue_ptr
+          @native_kafka.inner,
+          topics_array_ptr,
+          1,
+          admin_options_ptr,
+          queue_ptr
         )
       rescue Exception
         CreateTopicHandle.remove(create_topic_handle.to_ptr.address)
@@ -108,6 +102,7 @@ module Rdkafka
     #
     # @return [DeleteTopicHandle] Delete topic handle that can be used to wait for the result of deleting the topic
     def delete_topic(topic_name)
+      closed_admin_check(__method__)
 
       # Create a rd_kafka_DeleteTopic_t representing the topic to be deleted
       delete_topic_ptr = Rdkafka::Bindings.rd_kafka_DeleteTopic_new(FFI::MemoryPointer.from_string(topic_name))
@@ -118,7 +113,7 @@ module Rdkafka
       topics_array_ptr.write_array_of_pointer(pointer_array)
 
       # Get a pointer to the queue that our request will be enqueued on
-      queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka)
+      queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka.inner)
       if queue_ptr.null?
         Rdkafka::Bindings.rd_kafka_DeleteTopic_destroy(delete_topic_ptr)
         raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -129,16 +124,16 @@ module Rdkafka
       delete_topic_handle[:pending] = true
       delete_topic_handle[:response] = -1
       DeleteTopicHandle.register(delete_topic_handle)
-      admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
+      admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka.inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
       Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, delete_topic_handle.to_ptr)
 
       begin
         Rdkafka::Bindings.rd_kafka_DeleteTopics(
-          @native_kafka,
-          topics_array_ptr,
-          1,
-          admin_options_ptr,
-          queue_ptr
+          @native_kafka.inner,
+          topics_array_ptr,
+          1,
+          admin_options_ptr,
+          queue_ptr
         )
       rescue Exception
         DeleteTopicHandle.remove(delete_topic_handle.to_ptr.address)
@@ -151,5 +146,10 @@ module Rdkafka
 
       delete_topic_handle
    end
+
+    private
+    def closed_admin_check(method)
+      raise Rdkafka::ClosedAdminError.new(method) if @native_kafka.closed?
+    end
   end
 end
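From the caller's side, the new guard turns use-after-close into a clear error instead of a crash on a freed native handle. A minimal sketch, assuming a local broker; the addresses and topic names are illustrative:

```ruby
require "rdkafka"

admin = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").admin
admin.create_topic("example-topic", 3, 1).wait  # blocks until the broker responds

admin.close
admin.create_topic("another-topic", 1, 1)
# => raises Rdkafka::ClosedAdminError: Illegal call to create_topic on a closed admin
```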
data/lib/rdkafka/bindings.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "ffi"
 require "json"
 require "logger"
@@ -15,7 +17,7 @@ module Rdkafka
       end
     end
 
-    ffi_lib File.join(File.dirname(__FILE__), "../../ext/librdkafka.#{lib_extension}")
+    ffi_lib File.join(__dir__, "../../ext/librdkafka.#{lib_extension}")
 
     RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175
     RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174
@@ -33,6 +35,7 @@ module Rdkafka
 
     # Polling
 
+    attach_function :rd_kafka_flush, [:pointer, :int], :void, blocking: true
     attach_function :rd_kafka_poll, [:pointer, :int], :void, blocking: true
     attach_function :rd_kafka_outq_len, [:pointer], :int, blocking: true
 
@@ -110,6 +113,7 @@ module Rdkafka
     attach_function :rd_kafka_conf_set_stats_cb, [:pointer, :stats_cb], :void
     callback :error_cb, [:pointer, :int, :string, :pointer], :void
     attach_function :rd_kafka_conf_set_error_cb, [:pointer, :error_cb], :void
+    attach_function :rd_kafka_rebalance_protocol, [:pointer], :string
 
     # Log queue
     attach_function :rd_kafka_set_log_queue, [:pointer, :pointer], :void
@@ -173,6 +177,8 @@ module Rdkafka
     attach_function :rd_kafka_unsubscribe, [:pointer], :int
     attach_function :rd_kafka_subscription, [:pointer, :pointer], :int
     attach_function :rd_kafka_assign, [:pointer, :pointer], :int
+    attach_function :rd_kafka_incremental_assign, [:pointer, :pointer], :int
+    attach_function :rd_kafka_incremental_unassign, [:pointer, :pointer], :int
     attach_function :rd_kafka_assignment, [:pointer, :pointer], :int
     attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int
     attach_function :rd_kafka_commit, [:pointer, :pointer, :bool], :int, blocking: true
@@ -198,9 +204,17 @@ module Rdkafka
     ) do |client_ptr, code, partitions_ptr, opaque_ptr|
       case code
       when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
-        Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
+        if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
+          Rdkafka::Bindings.rd_kafka_incremental_assign(client_ptr, partitions_ptr)
+        else
+          Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
+        end
       else # RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS or errors
-        Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+        if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
+          Rdkafka::Bindings.rd_kafka_incremental_unassign(client_ptr, partitions_ptr)
+        else
+          Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+        end
       end
 
       opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
@@ -256,11 +270,11 @@ module Rdkafka
       # Return RD_KAFKA_PARTITION_UA(unassigned partition) when partition count is nil/zero.
       return -1 unless partition_count&.nonzero?
 
-      str_ptr = FFI::MemoryPointer.from_string(str)
+      str_ptr = str.empty? ? FFI::MemoryPointer::NULL : FFI::MemoryPointer.from_string(str)
       method_name = PARTITIONERS.fetch(partitioner_name) do
         raise Rdkafka::Config::ConfigError.new("Unknown partitioner: #{partitioner_name}")
       end
-      public_send(method_name, nil, str_ptr, str.size, partition_count, nil, nil)
+      public_send(method_name, nil, str_ptr, str.size > 0 ? str.size : 1, partition_count, nil, nil)
     end
 
     # Create Topics
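The rebalance callback above now dispatches on rd_kafka_rebalance_protocol, so a consumer can opt into librdkafka's incremental protocol purely through configuration. A minimal sketch, assuming a local broker; names are illustrative:

```ruby
require "rdkafka"

consumer = Rdkafka::Config.new(
  "bootstrap.servers"             => "localhost:9092",
  "group.id"                      => "example-group",
  # With cooperative-sticky, the callback routes through
  # rd_kafka_incremental_assign/unassign instead of a full reassignment.
  "partition.assignment.strategy" => "cooperative-sticky"
).consumer

consumer.subscribe("example-topic")
consumer.each { |message| puts message.payload }
```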
data/lib/rdkafka/callbacks.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   module Callbacks
 
@@ -90,14 +92,18 @@ module Rdkafka
         message = Rdkafka::Bindings::Message.new(message_ptr)
         delivery_handle_ptr_address = message[:_private].address
         if delivery_handle = Rdkafka::Producer::DeliveryHandle.remove(delivery_handle_ptr_address)
+          topic_name = Rdkafka::Bindings.rd_kafka_topic_name(message[:rkt])
+
           # Update delivery handle
           delivery_handle[:response] = message[:err]
           delivery_handle[:partition] = message[:partition]
           delivery_handle[:offset] = message[:offset]
+          delivery_handle[:topic_name] = FFI::MemoryPointer.from_string(topic_name)
           delivery_handle[:pending] = false
+
           # Call delivery callback on opaque
           if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
-            opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]), delivery_handle)
+            opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], topic_name, message[:err]), delivery_handle)
          end
        end
      end
data/lib/rdkafka/config.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "logger"
 
 module Rdkafka
@@ -30,7 +32,6 @@ module Rdkafka
       @@logger
     end
 
-
     # Returns a queue whose contents will be passed to the configured logger. Each entry
     # should follow the format [Logger::Severity, String]. The benefit over calling the
     # logger directly is that this is safe to use from trap contexts.
@@ -47,7 +48,7 @@ module Rdkafka
     # @return [nil]
     def self.logger=(logger)
       raise NoLoggerError if logger.nil?
-      @@logger=logger
+      @@logger = logger
     end
 
     # Set a callback that will be called every time the underlying client emits statistics.
@@ -179,7 +180,8 @@ module Rdkafka
       # Set callback to receive delivery reports on config
       Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
       # Return producer with Kafka client
-      Rdkafka::Producer.new(Rdkafka::Producer::Client.new(native_kafka(config, :rd_kafka_producer)), self[:partitioner]).tap do |producer|
+      partitioner_name = self[:partitioner] || self["partitioner"]
+      Rdkafka::Producer.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer)), partitioner_name).tap do |producer|
         opaque.producer = producer
       end
     end
@@ -194,7 +196,7 @@ module Rdkafka
       opaque = Opaque.new
       config = native_config(opaque)
       Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
-      Rdkafka::Admin.new(native_kafka(config, :rd_kafka_producer))
+      Rdkafka::Admin.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer)))
     end
 
     # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
@@ -210,7 +212,7 @@ module Rdkafka
 
     # This method is only intended to be used to create a client,
     # using it in another way will leak memory.
-    def native_config(opaque=nil)
+    def native_config(opaque = nil)
       Rdkafka::Bindings.rd_kafka_conf_new.tap do |config|
         # Create config
         @config_hash.merge(REQUIRED_CONFIG).each do |key, value|
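With the `self[:partitioner] || self["partitioner"]` lookup, the partitioner can be configured with either key style. A minimal sketch; the broker address is illustrative:

```ruby
require "rdkafka"

# Symbol key, as before:
Rdkafka::Config.new("bootstrap.servers" => "localhost:9092", :partitioner => "murmur2").producer

# String key, newly honored by this change:
Rdkafka::Config.new("bootstrap.servers" => "localhost:9092", "partitioner" => "murmur2").producer
```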
data/lib/rdkafka/consumer/headers.rb CHANGED
@@ -1,10 +1,26 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
-    # A message headers
-    class Headers
-      # Reads a native kafka's message header into ruby's hash
+    # Interface to return headers for a consumer message
+    module Headers
+      class HashWithSymbolKeysTreatedLikeStrings < Hash
+        def [](key)
+          if key.is_a?(Symbol)
+            Kernel.warn("rdkafka deprecation warning: header access with Symbol key #{key.inspect} treated as a String. " \
+              "Please change your code to use String keys to avoid this warning. Symbol keys will break in version 1.")
+            super(key.to_s)
+          else
+            super
+          end
+        end
+      end
+
+      # Reads a librdkafka native message's headers and returns them as a Ruby Hash
+      #
+      # @param [librdkakfa message] native_message
       #
-      # @return [Hash<String, String>] a message headers
+      # @return [Hash<String, String>] headers Hash for the native_message
       #
       # @raise [Rdkafka::RdkafkaError] when fail to read headers
       #
@@ -24,7 +40,8 @@ module Rdkafka
        name_ptrptr = FFI::MemoryPointer.new(:pointer)
        value_ptrptr = FFI::MemoryPointer.new(:pointer)
        size_ptr = Rdkafka::Bindings::SizePtr.new
-        headers = {}
+
+        headers = HashWithSymbolKeysTreatedLikeStrings.new
 
        idx = 0
        loop do
@@ -51,12 +68,12 @@ module Rdkafka
 
          value = value_ptr.read_string(size)
 
-          headers[name.to_sym] = value
+          headers[name] = value
 
          idx += 1
        end
 
-        headers
+        headers.freeze
      end
    end
  end
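For consumers this means headers now come back string-keyed and frozen, with a deprecation shim for old symbol access. A sketch, assuming `message` is an Rdkafka::Consumer::Message delivered with a "version" header:

```ruby
message.headers["version"]  # => "2.1.3"
message.headers.frozen?     # => true

# Still works, but warns on stderr and is slated to break in version 1:
message.headers[:version]   # => "2.1.3"
```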
data/lib/rdkafka/consumer/message.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
     # A message that was consumed from a topic.
@@ -18,7 +20,7 @@ module Rdkafka
       # @return [String, nil]
       attr_reader :key
 
-      # This message's offset in it's partition
+      # This message's offset in its partition
       # @return [Integer]
       attr_reader :offset
 
data/lib/rdkafka/consumer/partition.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
     # Information about a partition, used in {TopicPartitionList}.
data/lib/rdkafka/consumer/topic_partition_list.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Consumer
     # A list of topics with their partition information
data/lib/rdkafka/consumer.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   # A consumer of Kafka messages. It uses the high-level consumer approach where the Kafka
   # brokers automatically assign partitions and load balance partitions over consumers that
@@ -14,20 +16,27 @@ module Rdkafka
     # @private
     def initialize(native_kafka)
       @native_kafka = native_kafka
-      @closing = false
+    end
+
+    def finalizer
+      ->(_) { close }
     end
 
     # Close this consumer
     # @return [nil]
     def close
-      return unless @native_kafka
+      return if closed?
 
-      @closing = true
       Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
       Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
       @native_kafka = nil
     end
 
+    # Whether this consumer has closed
+    def closed?
+      @native_kafka.nil?
+    end
+
     # Subscribe to one or more topics letting Kafka handle partition assignments.
     #
     # @param topics [Array<String>] One or more topic names
@@ -459,7 +468,7 @@ module Rdkafka
         if message
           yield(message)
         else
-          if @closing
+          if closed?
             break
           else
             next
@@ -468,10 +477,6 @@ module Rdkafka
       end
     end
 
-    def closed_consumer_check(method)
-      raise Rdkafka::ClosedConsumerError.new(method) if @native_kafka.nil?
-    end
-
     # Poll for new messages and yield them in batches that may contain
     # messages from more than one partition.
     #
@@ -527,7 +532,7 @@ module Rdkafka
       bytes = 0
       end_time = monotonic_now + timeout_ms / 1000.0
       loop do
-        break if @closing
+        break if closed?
         max_wait = end_time - monotonic_now
         max_wait_ms = if max_wait <= 0
           0 # should not block, but may retrieve a message
@@ -545,7 +550,7 @@ module Rdkafka
         end
         if message
           slice << message
-          bytes += message.payload.bytesize
+          bytes += message.payload.bytesize if message.payload
         end
         if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
           yield slice.dup, nil
@@ -561,5 +566,9 @@ module Rdkafka
       # needed because Time.now can go backwards
       Process.clock_gettime(Process::CLOCK_MONOTONIC)
     end
+
+    def closed_consumer_check(method)
+      raise Rdkafka::ClosedConsumerError.new(method) if closed?
+    end
   end
 end
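The `if message.payload` guard matters for tombstones: a nil payload marking a key as deleted in a compacted topic previously crashed the batch byte counter. A sketch of tombstone-aware batch consumption; topic and handling are illustrative:

```ruby
consumer.each_batch(max_items: 100, timeout_ms: 250) do |messages|
  messages.each do |message|
    if message.payload.nil?
      puts "tombstone: key #{message.key} deleted"  # contributes no bytes to the threshold
    else
      puts "upsert #{message.key} => #{message.payload}"
    end
  end
end
```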
data/lib/rdkafka/error.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   # Base error class.
   class BaseError < RuntimeError; end
@@ -83,4 +85,11 @@ module Rdkafka
       super("Illegal call to #{method.to_s} on a closed producer")
     end
   end
+
+  # Error class for public consumer method calls on a closed admin.
+  class ClosedAdminError < BaseError
+    def initialize(method)
+      super("Illegal call to #{method.to_s} on a closed admin")
+    end
+  end
 end
data/lib/rdkafka/metadata.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Metadata
     attr_reader :brokers, :topics
data/lib/rdkafka/native_kafka.rb ADDED
@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+module Rdkafka
+  # @private
+  # A wrapper around a native kafka that polls and cleanly exits
+  class NativeKafka
+    def initialize(inner)
+      @inner = inner
+
+      # Start thread to poll client for delivery callbacks
+      @polling_thread = Thread.new do
+        loop do
+          Rdkafka::Bindings.rd_kafka_poll(inner, 250)
+          # Exit thread if closing and the poll queue is empty
+          if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(inner) == 0
+            break
+          end
+        end
+      end
+      @polling_thread.abort_on_exception = true
+      @polling_thread[:closing] = false
+    end
+
+    def inner
+      @inner
+    end
+
+    def finalizer
+      ->(_) { close }
+    end
+
+    def closed?
+      @inner.nil?
+    end
+
+    def close(object_id=nil)
+      return if closed?
+
+      # Flush outstanding activity
+      Rdkafka::Bindings.rd_kafka_flush(@inner, 30 * 1000)
+
+      # Indicate to polling thread that we're closing
+      @polling_thread[:closing] = true
+      # Wait for the polling thread to finish up
+      @polling_thread.join
+
+      Rdkafka::Bindings.rd_kafka_destroy(@inner)
+
+      @inner = nil
+    end
+  end
+end
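Producers and admins built by Rdkafka::Config now wrap their native client in this class, so an explicit close (or garbage collection, via the finalizer) flushes outstanding deliveries, stops the polling thread, and destroys the handle instead of leaking it. A minimal sketch; the broker address is illustrative:

```ruby
producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

producer.produce(topic: "example-topic", payload: "hello").wait

# Flushes for up to 30s, joins the polling thread, destroys the native client.
# Runs automatically on GC if you forget to call it.
producer.close
```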
data/lib/rdkafka/producer/delivery_handle.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Rdkafka
   class Producer
     # Handle to wait for a delivery report which is returned when
@@ -6,7 +8,8 @@ module Rdkafka
       layout :pending, :bool,
              :response, :int,
              :partition, :int,
-             :offset, :int64
+             :offset, :int64,
+             :topic_name, :pointer
 
       # @return [String] the name of the operation (e.g. "delivery")
       def operation_name
@@ -15,7 +18,7 @@
 
       # @return [DeliveryReport] a report on the delivery of the message
       def create_result
-        DeliveryReport.new(self[:partition], self[:offset])
+        DeliveryReport.new(self[:partition], self[:offset], self[:topic_name].read_string)
      end
    end
  end
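With :topic_name in the struct layout, waiting on a delivery handle now reports which topic the message landed on. A sketch; names and values are illustrative:

```ruby
report = producer.produce(topic: "example-topic", payload: "hello").wait

report.partition   # => 0
report.offset      # => 42
report.topic_name  # => "example-topic", new in 0.13.0
```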