rdkafka 0.13.0.beta.1 → 0.13.0.beta.3

This diff compares the content of publicly available package versions as published to their public registries. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: c3c5b37efae2485950c8a7627e09dfca50bd7c1264e18ae811b85c3f9ae7d09a
- data.tar.gz: fb106016aae053f18f53885ae176cdb9e07078c80139146b165231cdd7e490ab
+ metadata.gz: 9db414f84847b884bded4eb7643ead52ee664256acdde47dcf7bb5e8421b5882
+ data.tar.gz: d696bce3413d5a591542e6bb1b7191acabd5b331e4115fb7a78b6d65320874d4
  SHA512:
- metadata.gz: 2aaaf70e222ad813ec88ce49078f0e643c1860ff8ce93289134d52ac8a7104681c3f66fad48083c3d78223f7f3157dd7019bd41355933141316078bb1c5fd3aa
- data.tar.gz: a1503635d8e51589db14db327176cbae4f8be9454938e63c96f2bfdcbd258cef1f392479955797e742463cd44ab1951f7596d23adb2b5c06ebf6ff6ab1963442
+ metadata.gz: cd209306c840710661108a357adbeec6a24cc1aa1a828959041a0810c25b19ddbb9b52442a69c389995361c96c075a9a572b532cc6ba97c70160d99db46f5d8d
+ data.tar.gz: 5cb50b3717ab7904d4d8afb46233f0435cf519511755ee5137ccdf638775947f23db3ff40ac1efcd0562a767e23f5a65224e8d59f3a753b33206253f9db4dfc6
data/CHANGELOG.md CHANGED
@@ -1,4 +1,7 @@
  # 0.13.0
+ * Support cooperative sticky partition assignment in the rebalance callback (methodmissing)
+ * Support both string and symbol header keys (ColinDKelley)
+ * Handle tombstone messages properly (kgalieva)
  * Add topic name to delivery report (maeve)
  * Allow string partitioner config (mollyegibson)
  * Fix documented type for DeliveryReport#error (jimmydo)
@@ -6,7 +9,7 @@
  * Use finalizers to cleanly exit producer and admin (thijsc)

  # 0.12.0
- * Bump librdkafka to 1.9.0
+ * Bumps librdkafka to 1.9.0
  * Fix crash on empty partition key (mensfeld)
  * Pass the delivery handle to the callback (gvisokinskas)

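The three headline 0.13.0 additions are easiest to see from the consumer side. A minimal sketch of reading a compacted topic where deletes arrive as tombstones (messages whose payload is nil); the broker address and topic name are illustrative assumptions:

```ruby
require "rdkafka"

consumer = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092",  # assumption: local broker
  "group.id"          => "tombstone-example"
).consumer

consumer.subscribe("compacted_topic")       # assumption: a compacted topic

consumer.each do |message|
  if message.payload.nil?
    # A tombstone: the record for this key was deleted upstream.
    puts "tombstone for key #{message.key.inspect}"
  else
    puts "#{message.key}: #{message.payload}"
  end
end
```
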
data/lib/rdkafka/admin.rb CHANGED
@@ -18,11 +18,16 @@ module Rdkafka

  # Close this admin instance
  def close
+ return if closed?
  ObjectSpace.undefine_finalizer(self)
-
  @native_kafka.close
  end

+ # Whether this admin has closed
+ def closed?
+ @native_kafka.closed?
+ end
+
  # Create a topic with the given partition count and replication factor
  #
  # @raise [ConfigError] When the partition count or replication factor are out of valid range
@@ -149,7 +154,7 @@ module Rdkafka

  private
  def closed_admin_check(method)
- raise Rdkafka::ClosedAdminError.new(method) if @native_kafka.closed?
+ raise Rdkafka::ClosedAdminError.new(method) if closed?
  end
  end
  end
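With close now idempotent and closed? public on the admin client (mirroring producer and consumer), shutdown code can be written defensively. A short sketch; the broker address and topic are assumptions:

```ruby
require "rdkafka"

admin = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").admin

# Create a topic with 3 partitions and replication factor 1, then wait for it.
admin.create_topic("example_topic", 3, 1).wait

admin.close
admin.close     # safe: close returns early once already closed
admin.closed?   # => true
```
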
data/lib/rdkafka/bindings.rb CHANGED
@@ -17,7 +17,7 @@ module Rdkafka
  end
  end

- ffi_lib File.join(File.dirname(__FILE__), "../../ext/librdkafka.#{lib_extension}")
+ ffi_lib File.join(__dir__, "../../ext/librdkafka.#{lib_extension}")

  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175
  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174
@@ -113,6 +113,7 @@ module Rdkafka
  attach_function :rd_kafka_conf_set_stats_cb, [:pointer, :stats_cb], :void
  callback :error_cb, [:pointer, :int, :string, :pointer], :void
  attach_function :rd_kafka_conf_set_error_cb, [:pointer, :error_cb], :void
+ attach_function :rd_kafka_rebalance_protocol, [:pointer], :string

  # Log queue
  attach_function :rd_kafka_set_log_queue, [:pointer, :pointer], :void
@@ -168,7 +169,9 @@ module Rdkafka
  ]

  attach_function :rd_kafka_new, [:kafka_type, :pointer, :pointer, :int], :pointer
- attach_function :rd_kafka_destroy, [:pointer], :void
+
+ RD_KAFKA_DESTROY_F_IMMEDIATE = 0x4
+ attach_function :rd_kafka_destroy_flags, [:pointer, :int], :void

  # Consumer

@@ -176,6 +179,8 @@ module Rdkafka
  attach_function :rd_kafka_unsubscribe, [:pointer], :int
  attach_function :rd_kafka_subscription, [:pointer, :pointer], :int
  attach_function :rd_kafka_assign, [:pointer, :pointer], :int
+ attach_function :rd_kafka_incremental_assign, [:pointer, :pointer], :int
+ attach_function :rd_kafka_incremental_unassign, [:pointer, :pointer], :int
  attach_function :rd_kafka_assignment, [:pointer, :pointer], :int
  attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int
  attach_function :rd_kafka_commit, [:pointer, :pointer, :bool], :int, blocking: true
@@ -201,9 +206,17 @@ module Rdkafka
  ) do |client_ptr, code, partitions_ptr, opaque_ptr|
  case code
  when RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
- Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
+ if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
+ Rdkafka::Bindings.rd_kafka_incremental_assign(client_ptr, partitions_ptr)
+ else
+ Rdkafka::Bindings.rd_kafka_assign(client_ptr, partitions_ptr)
+ end
  else # RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS or errors
- Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+ if Rdkafka::Bindings.rd_kafka_rebalance_protocol(client_ptr) == "COOPERATIVE"
+ Rdkafka::Bindings.rd_kafka_incremental_unassign(client_ptr, partitions_ptr)
+ else
+ Rdkafka::Bindings.rd_kafka_assign(client_ptr, FFI::Pointer::NULL)
+ end
+ end

  opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
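The rebalance callback now asks librdkafka which protocol is in force via rd_kafka_rebalance_protocol and uses the incremental assign/unassign calls under "COOPERATIVE", falling back to eager assignment otherwise. From the application side, opting in is a configuration choice; a minimal sketch (broker address and topic are assumptions):

```ruby
require "rdkafka"

consumer = Rdkafka::Config.new(
  "bootstrap.servers"             => "localhost:9092",
  "group.id"                      => "cooperative-example",
  # With the cooperative-sticky assignor, librdkafka reports the rebalance
  # protocol as "COOPERATIVE", so the callback above takes the
  # rd_kafka_incremental_assign / rd_kafka_incremental_unassign path.
  "partition.assignment.strategy" => "cooperative-sticky"
).consumer

consumer.subscribe("consume_test_topic")
```
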
data/lib/rdkafka/config.rb CHANGED
@@ -157,13 +157,14 @@ module Rdkafka
  Rdkafka::Bindings.rd_kafka_conf_set_rebalance_cb(config, Rdkafka::Bindings::RebalanceCallback)
  end

+ # Create native client
  kafka = native_kafka(config, :rd_kafka_consumer)

  # Redirect the main queue to the consumer
  Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka)

  # Return consumer with Kafka client
- Rdkafka::Consumer.new(kafka)
+ Rdkafka::Consumer.new(Rdkafka::NativeKafka.new(kafka, run_polling_thread: false))
  end

  # Create a producer with this configuration.
@@ -181,7 +182,7 @@ module Rdkafka
  Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
  # Return producer with Kafka client
  partitioner_name = self[:partitioner] || self["partitioner"]
- Rdkafka::Producer.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer)), partitioner_name).tap do |producer|
+ Rdkafka::Producer.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true), partitioner_name).tap do |producer|
  opaque.producer = producer
  end
  end
@@ -196,7 +197,7 @@ module Rdkafka
  opaque = Opaque.new
  config = native_config(opaque)
  Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
- Rdkafka::Admin.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer)))
+ Rdkafka::Admin.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true))
  end

  # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
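All three factory methods now hand their raw librdkafka handle to a NativeKafka wrapper; only producers and admins get the background polling thread, since consumers poll inline. Construction through the public API is unchanged; a sketch with an assumed local broker:

```ruby
config = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092",  # assumption: local broker
  "group.id"          => "example-group"    # required for the consumer
)

consumer = config.consumer   # NativeKafka with run_polling_thread: false
producer = config.producer   # NativeKafka with run_polling_thread: true
admin    = config.admin      # NativeKafka with run_polling_thread: true
```
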
data/lib/rdkafka/consumer/headers.rb CHANGED
@@ -2,11 +2,25 @@

  module Rdkafka
  class Consumer
- # A message headers
- class Headers
- # Reads a native kafka's message header into ruby's hash
+ # Interface to return headers for a consumer message
+ module Headers
+ class HashWithSymbolKeysTreatedLikeStrings < Hash
+ def [](key)
+ if key.is_a?(Symbol)
+ Kernel.warn("rdkafka deprecation warning: header access with Symbol key #{key.inspect} treated as a String. " \
+ "Please change your code to use String keys to avoid this warning. Symbol keys will break in version 1.")
+ super(key.to_s)
+ else
+ super
+ end
+ end
+ end
+
+ # Reads a librdkafka native message's headers and returns them as a Ruby Hash
+ #
+ # @param [librdkakfa message] native_message
  #
- # @return [Hash<String, String>] a message headers
+ # @return [Hash<String, String>] headers Hash for the native_message
  #
  # @raise [Rdkafka::RdkafkaError] when fail to read headers
  #
@@ -26,7 +40,8 @@ module Rdkafka
  name_ptrptr = FFI::MemoryPointer.new(:pointer)
  value_ptrptr = FFI::MemoryPointer.new(:pointer)
  size_ptr = Rdkafka::Bindings::SizePtr.new
- headers = {}
+
+ headers = HashWithSymbolKeysTreatedLikeStrings.new

  idx = 0
  loop do
@@ -53,12 +68,12 @@ module Rdkafka

  value = value_ptr.read_string(size)

- headers[name.to_sym] = value
+ headers[name] = value

  idx += 1
  end

- headers
+ headers.freeze
  end
  end
  end
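Headers now come back as a frozen Hash with String keys; Symbol access still works through the deprecation shim above, but warns. A sketch of the resulting behavior, assuming a subscribed consumer and a message carrying a "foo" header:

```ruby
message = consumer.poll(250)   # assumption: data is available

message.headers["foo"]         # => "bar"
message.headers[:foo]          # => "bar", but prints the deprecation warning
message.headers.frozen?        # => true

# Since the Hash is frozen, mutate a copy instead:
enriched = message.headers.dup.merge("trace-id" => "abc123")
```
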
data/lib/rdkafka/consumer/message.rb CHANGED
@@ -20,7 +20,7 @@ module Rdkafka
  # @return [String, nil]
  attr_reader :key

- # This message's offset in it's partition
+ # This message's offset in its partition
  # @return [Integer]
  attr_reader :offset

data/lib/rdkafka/consumer.rb CHANGED
@@ -26,15 +26,14 @@ module Rdkafka
  # @return [nil]
  def close
  return if closed?
-
- Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
- Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
- @native_kafka = nil
+ ObjectSpace.undefine_finalizer(self)
+ Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka.inner)
+ @native_kafka.close
  end

  # Whether this consumer has closed
  def closed?
- @native_kafka.nil?
+ @native_kafka.closed?
  end

  # Subscribe to one or more topics letting Kafka handle partition assignments.
@@ -55,7 +54,7 @@ module Rdkafka
  end

  # Subscribe to topic partition list and check this was successful
- response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
+ response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka.inner, tpl)
  if response != 0
  raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
  end
@@ -71,7 +70,7 @@ module Rdkafka
  def unsubscribe
  closed_consumer_check(__method__)

- response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
+ response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka.inner)
  if response != 0
  raise Rdkafka::RdkafkaError.new(response)
  end
@@ -94,7 +93,7 @@ module Rdkafka
  tpl = list.to_native_tpl

  begin
- response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
+ response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka.inner, tpl)

  if response != 0
  list = TopicPartitionList.from_native_tpl(tpl)
@@ -122,7 +121,7 @@ module Rdkafka
  tpl = list.to_native_tpl

  begin
- response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
+ response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka.inner, tpl)
  if response != 0
  raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
  end
@@ -140,7 +139,7 @@ module Rdkafka
  closed_consumer_check(__method__)

  ptr = FFI::MemoryPointer.new(:pointer)
- response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
+ response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka.inner, ptr)

  if response != 0
  raise Rdkafka::RdkafkaError.new(response)
@@ -170,7 +169,7 @@ module Rdkafka
  tpl = list.to_native_tpl

  begin
- response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
+ response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka.inner, tpl)
  if response != 0
  raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
  end
@@ -188,7 +187,7 @@ module Rdkafka
  closed_consumer_check(__method__)

  ptr = FFI::MemoryPointer.new(:pointer)
- response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
+ response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka.inner, ptr)
  if response != 0
  raise Rdkafka::RdkafkaError.new(response)
  end
@@ -227,7 +226,7 @@ module Rdkafka
  tpl = list.to_native_tpl

  begin
- response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
+ response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka.inner, tpl, timeout_ms)
  if response != 0
  raise Rdkafka::RdkafkaError.new(response)
  end
@@ -253,7 +252,7 @@ module Rdkafka
  high = FFI::MemoryPointer.new(:int64, 1)

  response = Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
- @native_kafka,
+ @native_kafka.inner,
  topic,
  partition,
  low,
@@ -307,7 +306,7 @@ module Rdkafka
  # @return [String, nil]
  def cluster_id
  closed_consumer_check(__method__)
- Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
+ Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka.inner)
  end

  # Returns this client's broker-assigned group member id
@@ -317,7 +316,7 @@ module Rdkafka
  # @return [String, nil]
  def member_id
  closed_consumer_check(__method__)
- Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
+ Rdkafka::Bindings.rd_kafka_memberid(@native_kafka.inner)
  end

  # Store offset of a message to be used in the next commit of this consumer
@@ -335,7 +334,7 @@ module Rdkafka
  # rd_kafka_offset_store is one of the few calls that does not support
  # a string as the topic, so create a native topic for it.
  native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
- @native_kafka,
+ @native_kafka.inner,
  message.topic,
  nil
  )
@@ -367,7 +366,7 @@ module Rdkafka
  # rd_kafka_offset_store is one of the few calls that does not support
  # a string as the topic, so create a native topic for it.
  native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
- @native_kafka,
+ @native_kafka.inner,
  message.topic,
  nil
  )
@@ -411,7 +410,7 @@ module Rdkafka
  tpl = list ? list.to_native_tpl : nil

  begin
- response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
+ response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka.inner, tpl, async)
  if response != 0
  raise Rdkafka::RdkafkaError.new(response)
  end
@@ -430,7 +429,7 @@ module Rdkafka
  def poll(timeout_ms)
  closed_consumer_check(__method__)

- message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
+ message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka.inner, timeout_ms)
  if message_ptr.null?
  nil
  else
@@ -445,7 +444,7 @@ module Rdkafka
  end
  ensure
  # Clean up rdkafka message if there is one
- if !message_ptr.nil? && !message_ptr.null?
+ if message_ptr && !message_ptr.null?
  Rdkafka::Bindings.rd_kafka_message_destroy(message_ptr)
  end
  end
@@ -550,7 +549,7 @@ module Rdkafka
  end
  if message
  slice << message
- bytes += message.payload.bytesize
+ bytes += message.payload.bytesize if message.payload
  end
  if slice.size == max_items || bytes >= bytes_threshold || monotonic_now >= end_time - 0.001
  yield slice.dup, nil
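The byte-count guard matters for batch consumption: a tombstone's payload is nil and has no bytesize, so the counter now simply skips it. A sketch of batch reading that is now safe on compacted topics (topic name and batch sizes are illustrative assumptions):

```ruby
consumer.subscribe("compacted_topic")   # assumption: topic containing tombstones

consumer.each_batch(max_items: 100, timeout_ms: 250) do |messages, _error|
  messages.each do |message|
    if message.payload.nil?
      puts "tombstone: #{message.key}"
    else
      puts "#{message.key}: #{message.payload.bytesize} bytes"
    end
  end
end
```
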
data/lib/rdkafka/native_kafka.rb CHANGED
@@ -4,21 +4,26 @@ module Rdkafka
  # @private
  # A wrapper around a native kafka that polls and cleanly exits
  class NativeKafka
- def initialize(inner)
+ def initialize(inner, run_polling_thread:)
  @inner = inner

- # Start thread to poll client for delivery callbacks
- @polling_thread = Thread.new do
- loop do
- Rdkafka::Bindings.rd_kafka_poll(inner, 250)
- # Exit thread if closing and the poll queue is empty
- if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(inner) == 0
- break
+ if run_polling_thread
+ # Start thread to poll client for delivery callbacks,
+ # not used in consumer.
+ @polling_thread = Thread.new do
+ loop do
+ Rdkafka::Bindings.rd_kafka_poll(inner, 250)
+ # Exit thread if closing and the poll queue is empty
+ if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(inner) == 0
+ break
+ end
  end
  end
+ @polling_thread.abort_on_exception = true
+ @polling_thread[:closing] = false
  end
- @polling_thread.abort_on_exception = true
- @polling_thread[:closing] = false
+
+ @closing = false
  end

  def inner
@@ -30,22 +35,27 @@ module Rdkafka
  end

  def closed?
- @inner.nil?
+ @closing || @inner.nil?
  end

  def close(object_id=nil)
  return if closed?

- # Flush outstanding activity
- Rdkafka::Bindings.rd_kafka_flush(@inner, 30 * 1000)
-
- # Indicate to polling thread that we're closing
- @polling_thread[:closing] = true
- # Wait for the polling thread to finish up
- @polling_thread.join
+ # Indicate to the outside world that we are closing
+ @closing = true

- Rdkafka::Bindings.rd_kafka_destroy(@inner)
+ if @polling_thread
+ # Indicate to polling thread that we're closing
+ @polling_thread[:closing] = true
+ # Wait for the polling thread to finish up
+ @polling_thread.join
+ end

+ # Destroy the client
+ Rdkafka::Bindings.rd_kafka_destroy_flags(
+ @inner,
+ Rdkafka::Bindings::RD_KAFKA_DESTROY_F_IMMEDIATE
+ )
  @inner = nil
  end
  end
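Two behavioral notes fall out of this rewrite: the producer/admin background thread still services delivery callbacks, and close no longer flushes implicitly; it marks the wrapper closed, stops the poll loop, and destroys the handle with RD_KAFKA_DESTROY_F_IMMEDIATE. A small producer sketch relying on the polling thread for callbacks (broker address and topic are assumptions):

```ruby
producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

# Delivery reports are serviced by NativeKafka's background polling thread;
# no explicit poll call is needed here.
producer.delivery_callback = ->(report) do
  puts "delivered to partition #{report.partition} at offset #{report.offset}"
end

producer.produce(topic: "produce_test_topic", payload: "hello").wait
producer.close
```
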
data/lib/rdkafka/producer.rb CHANGED
@@ -40,11 +40,23 @@ module Rdkafka

  # Close this producer and wait for the internal poll queue to empty.
  def close
+ return if closed?
  ObjectSpace.undefine_finalizer(self)
-
  @native_kafka.close
  end

+ # Whether this producer has closed
+ def closed?
+ @native_kafka.closed?
+ end
+
+ # Wait until all outstanding producer requests are completed, with the given timeout
+ # in seconds. Call this before closing a producer to ensure delivery of all messages.
+ def flush(timeout_ms=5_000)
+ closed_producer_check(__method__)
+ Rdkafka::Bindings.rd_kafka_flush(@native_kafka.inner, timeout_ms)
+ end
+
  # Partition count for a given topic.
  # NOTE: If 'allow.auto.create.topics' is set to true in the broker, the topic will be auto-created after returning nil.
  #
@@ -173,7 +185,7 @@ module Rdkafka

  private
  def closed_producer_check(method)
- raise Rdkafka::ClosedProducerError.new(method) if @native_kafka.closed?
+ raise Rdkafka::ClosedProducerError.new(method) if closed?
  end
  end
  end
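Because flush-on-close moved out of NativeKafka, producers that need every queued message delivered should call the new flush before close, as the updated specs below do. A usage sketch (topic and key are illustrative):

```ruby
handle = producer.produce(
  topic:   "produce_test_topic",
  payload: "important",
  key:     "k1"
)

producer.flush                           # waits up to 5_000 ms by default
report = handle.wait(max_wait_timeout: 5)
producer.close
```
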
data/lib/rdkafka/version.rb CHANGED
@@ -1,7 +1,7 @@
  # frozen_string_literal: true

  module Rdkafka
- VERSION = "0.13.0.beta.1"
+ VERSION = "0.13.0.beta.3"
  LIBRDKAFKA_VERSION = "1.9.2"
  LIBRDKAFKA_SOURCE_SHA256 = "3fba157a9f80a0889c982acdd44608be8a46142270a389008b22d921be1198ad"
  end
data/spec/rdkafka/consumer/headers_spec.rb ADDED
@@ -0,0 +1,62 @@
+ # frozen_string_literal: true
+
+ require "spec_helper"
+
+ describe Rdkafka::Consumer::Headers do
+ let(:headers) do
+ { # Note String keys!
+ "version" => "2.1.3",
+ "type" => "String"
+ }
+ end
+ let(:native_message) { double('native message') }
+ let(:headers_ptr) { double('headers pointer') }
+
+ describe '.from_native' do
+ before do
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_message_headers).with(native_message, anything) do |_, headers_ptrptr|
+ expect(headers_ptrptr).to receive(:read_pointer).and_return(headers_ptr)
+ Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+ end
+
+ expect(Rdkafka::Bindings).to \
+ receive(:rd_kafka_header_get_all)
+ .with(headers_ptr, 0, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
+ expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 0", read_string_to_null: headers.keys[0]))
+ expect(size_ptr).to receive(:[]).with(:value).and_return(headers.keys[0].size)
+ expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 0", read_string: headers.values[0]))
+ Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+ end
+
+ expect(Rdkafka::Bindings).to \
+ receive(:rd_kafka_header_get_all)
+ .with(headers_ptr, 1, anything, anything, anything) do |_, _, name_ptrptr, value_ptrptr, size_ptr|
+ expect(name_ptrptr).to receive(:read_pointer).and_return(double("pointer 1", read_string_to_null: headers.keys[1]))
+ expect(size_ptr).to receive(:[]).with(:value).and_return(headers.keys[1].size)
+ expect(value_ptrptr).to receive(:read_pointer).and_return(double("value pointer 1", read_string: headers.values[1]))
+ Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
+ end
+
+ expect(Rdkafka::Bindings).to \
+ receive(:rd_kafka_header_get_all)
+ .with(headers_ptr, 2, anything, anything, anything)
+ .and_return(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__NOENT)
+ end
+
+ subject { described_class.from_native(native_message) }
+
+ it { is_expected.to eq(headers) }
+ it { is_expected.to be_frozen }
+
+ it 'allows String key' do
+ expect(subject['version']).to eq("2.1.3")
+ end
+
+ it 'allows Symbol key, but warns' do
+ expect(Kernel).to \
+ receive(:warn).with("rdkafka deprecation warning: header access with Symbol key :version treated as a String. " \
+ "Please change your code to use String keys to avoid this warning. Symbol keys will break in version 1.")
+ expect(subject[:version]).to eq("2.1.3")
+ end
+ end
+ end
data/spec/rdkafka/consumer/message_spec.rb CHANGED
@@ -28,7 +28,7 @@ describe Rdkafka::Consumer::Message do
  end

  after(:each) do
- Rdkafka::Bindings.rd_kafka_destroy(native_client)
+ Rdkafka::Bindings.rd_kafka_destroy_flags(native_client, Rdkafka::Bindings::RD_KAFKA_DESTROY_F_IMMEDIATE)
  end

  subject { Rdkafka::Consumer::Message.new(native_message) }
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -595,7 +595,7 @@ describe Rdkafka::Consumer do
  end

  describe "#poll with headers" do
- it "should return message with headers" do
+ it "should return message with headers using string keys (when produced with symbol keys)" do
  report = producer.produce(
  topic: "consume_test_topic",
  key: "key headers",
@@ -605,7 +605,20 @@ describe Rdkafka::Consumer do
  message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
  expect(message).to be
  expect(message.key).to eq('key headers')
- expect(message.headers).to include(foo: 'bar')
+ expect(message.headers).to include('foo' => 'bar')
+ end
+
+ it "should return message with headers using string keys (when produced with string keys)" do
+ report = producer.produce(
+ topic: "consume_test_topic",
+ key: "key headers",
+ headers: { 'foo' => 'bar' }
+ ).wait
+
+ message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+ expect(message).to be
+ expect(message.key).to eq('key headers')
+ expect(message.headers).to include('foo' => 'bar')
  end

  it "should return message with no headers" do
@@ -700,7 +713,7 @@ describe Rdkafka::Consumer do
  n.times do |i|
  handles << producer.produce(
  topic: topic_name,
- payload: Time.new.to_f.to_s,
+ payload: i % 10 == 0 ? nil : Time.new.to_f.to_s,
  key: i.to_s,
  partition: 0
  )
@@ -964,18 +977,6 @@ describe Rdkafka::Consumer do
  expect(listener.queue).to eq([:assigned, :revoked])
  end
  end
-
- def notify_listener(listener)
- # 1. subscribe and poll
- consumer.subscribe("consume_test_topic")
- wait_for_assignment(consumer)
- consumer.poll(100)
-
- # 2. unsubscribe
- consumer.unsubscribe
- wait_for_unassignment(consumer)
- consumer.close
- end
  end

  context "methods that should not be called after a consumer has been closed" do
@@ -1015,4 +1016,62 @@ describe Rdkafka::Consumer do

  expect(consumer.closed?).to eq(true)
  end
+
+ context "when the rebalance protocol is cooperative" do
+ let(:consumer) do
+ config = rdkafka_consumer_config(
+ {
+ :"partition.assignment.strategy" => "cooperative-sticky",
+ :"debug" => "consumer",
+ }
+ )
+ config.consumer_rebalance_listener = listener
+ config.consumer
+ end
+
+ let(:listener) do
+ Struct.new(:queue) do
+ def on_partitions_assigned(consumer, list)
+ collect(:assign, list)
+ end
+
+ def on_partitions_revoked(consumer, list)
+ collect(:revoke, list)
+ end
+
+ def collect(name, list)
+ partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
+ queue << ([name] + partitions)
+ end
+ end.new([])
+ end
+
+ it "should be able to assign and unassign partitions using the cooperative partition assignment APIs" do
+ notify_listener(listener) do
+ handles = []
+ 10.times do
+ handles << producer.produce(
+ topic: "consume_test_topic",
+ payload: "payload 1",
+ key: "key 1",
+ partition: 0
+ )
+ end
+ handles.each(&:wait)
+
+ consumer.subscribe("consume_test_topic")
+ # Check the first 10 messages. Then close the consumer, which
+ # should break the each loop.
+ consumer.each_with_index do |message, i|
+ expect(message).to be_a Rdkafka::Consumer::Message
+ break if i == 10
+ end
+ end
+
+ expect(listener.queue).to eq([
+ [:assign, "consume_test_topic", 0, 1, 2],
+ [:revoke, "consume_test_topic", 0, 1, 2]
+ ])
+ end
+ end
  end
data/spec/rdkafka/metadata_spec.rb CHANGED
@@ -10,7 +10,7 @@ describe Rdkafka::Metadata do

  after do
  Rdkafka::Bindings.rd_kafka_consumer_close(native_kafka)
- Rdkafka::Bindings.rd_kafka_destroy(native_kafka)
+ Rdkafka::Bindings.rd_kafka_destroy_flags(native_kafka, Rdkafka::Bindings::RD_KAFKA_DESTROY_F_IMMEDIATE)
  end

  context "passing in a topic name" do
data/spec/rdkafka/native_kafka_spec.rb CHANGED
@@ -8,12 +8,12 @@ describe Rdkafka::NativeKafka do
  let(:closing) { false }
  let(:thread) { double(Thread) }

- subject(:client) { described_class.new(native) }
+ subject(:client) { described_class.new(native, run_polling_thread: true) }

  before do
  allow(Rdkafka::Bindings).to receive(:rd_kafka_poll).with(instance_of(FFI::Pointer), 250).and_call_original
  allow(Rdkafka::Bindings).to receive(:rd_kafka_outq_len).with(instance_of(FFI::Pointer)).and_return(0).and_call_original
- allow(Rdkafka::Bindings).to receive(:rd_kafka_destroy)
+ allow(Rdkafka::Bindings).to receive(:rd_kafka_destroy_flags)
  allow(Thread).to receive(:new).and_return(thread)

  allow(thread).to receive(:[]=).with(:closing, anything)
@@ -53,6 +53,16 @@ describe Rdkafka::NativeKafka do
  expect(Rdkafka::Bindings).to receive(:rd_kafka_outq_len).with(native).at_least(:once)
  end
  end
+
+ context "if not enabled" do
+ subject(:client) { described_class.new(native, run_polling_thread: false) }
+
+ it "is not created" do
+ expect(Thread).not_to receive(:new)
+
+ client
+ end
+ end
  end

  def polling_loop_expects(&block)
@@ -76,7 +86,7 @@ describe Rdkafka::NativeKafka do

  context "and attempt to close" do
  it "calls the `destroy` binding" do
- expect(Rdkafka::Bindings).to receive(:rd_kafka_destroy).with(native)
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_destroy_flags).with(native, Rdkafka::Bindings::RD_KAFKA_DESTROY_F_IMMEDIATE)

  client.close
  end
@@ -111,7 +121,7 @@ describe Rdkafka::NativeKafka do

  context "and attempt to close again" do
  it "does not call the `destroy` binding" do
- expect(Rdkafka::Bindings).not_to receive(:rd_kafka_destroy)
+ expect(Rdkafka::Bindings).not_to receive(:rd_kafka_destroy_flags)

  client.close
  end
data/spec/rdkafka/producer_spec.rb CHANGED
@@ -9,7 +9,8 @@ describe Rdkafka::Producer do

  after do
  # Registry should always end up being empty
- expect(Rdkafka::Producer::DeliveryHandle::REGISTRY).to eq({})
+ registry = Rdkafka::Producer::DeliveryHandle::REGISTRY
+ expect(registry).to be_empty, registry.inspect
  producer.close
  consumer.close
  end
@@ -184,10 +185,11 @@ describe Rdkafka::Producer do
  expect(report.partition).to eq 1
  expect(report.offset).to be >= 0

- # Close producer
+ # Flush and close producer
+ producer.flush
  producer.close

- # Consume message and verify it's content
+ # Consume message and verify its content
  message = wait_for_message(
  topic: "produce_test_topic",
  delivery_report: report,
@@ -211,7 +213,7 @@ describe Rdkafka::Producer do
  )
  report = handle.wait(max_wait_timeout: 5)

- # Consume message and verify it's content
+ # Consume message and verify its content
  message = wait_for_message(
  topic: "produce_test_topic",
  delivery_report: report,
@@ -285,7 +287,7 @@ describe Rdkafka::Producer do
  )
  report = handle.wait(max_wait_timeout: 5)

- # Consume message and verify it's content
+ # Consume message and verify its content
  message = wait_for_message(
  topic: "produce_test_topic",
  delivery_report: report,
@@ -318,7 +320,7 @@ describe Rdkafka::Producer do
  )
  report = handle.wait(max_wait_timeout: 5)

- # Consume message and verify it's content
+ # Consume message and verify its content
  message = wait_for_message(
  topic: "produce_test_topic",
  delivery_report: report,
@@ -339,7 +341,7 @@ describe Rdkafka::Producer do
  )
  report = handle.wait(max_wait_timeout: 5)

- # Consume message and verify it's content
+ # Consume message and verify its content
  message = wait_for_message(
  topic: "produce_test_topic",
  delivery_report: report,
@@ -359,7 +361,7 @@ describe Rdkafka::Producer do
  )
  report = handle.wait(max_wait_timeout: 5)

- # Consume message and verify it's content
+ # Consume message and verify its content
  message = wait_for_message(
  topic: "produce_test_topic",
  delivery_report: report,
@@ -377,7 +379,7 @@ describe Rdkafka::Producer do
  )
  report = handle.wait(max_wait_timeout: 5)

- # Consume message and verify it's content
+ # Consume message and verify its content
  message = wait_for_message(
  topic: "produce_test_topic",
  delivery_report: report,
@@ -397,7 +399,7 @@ describe Rdkafka::Producer do
  )
  report = handle.wait(max_wait_timeout: 5)

- # Consume message and verify it's content
+ # Consume message and verify its content
  message = wait_for_message(
  topic: "produce_test_topic",
  delivery_report: report,
@@ -406,9 +408,9 @@ describe Rdkafka::Producer do

  expect(message.payload).to eq "payload headers"
  expect(message.key).to eq "key headers"
- expect(message.headers[:foo]).to eq "bar"
- expect(message.headers[:baz]).to eq "foobar"
- expect(message.headers[:foobar]).to be_nil
+ expect(message.headers["foo"]).to eq "bar"
+ expect(message.headers["baz"]).to eq "foobar"
+ expect(message.headers["foobar"]).to be_nil
  end

  it "should produce a message with empty headers" do
@@ -420,7 +422,7 @@ describe Rdkafka::Producer do
  )
  report = handle.wait(max_wait_timeout: 5)

- # Consume message and verify it's content
+ # Consume message and verify its content
  message = wait_for_message(
  topic: "produce_test_topic",
  delivery_report: report,
@@ -458,10 +460,10 @@ describe Rdkafka::Producer do
  # wait for and check the message in the main process.
  reader, writer = IO.pipe

- fork do
+ pid = fork do
  reader.close

- # Avoids sharing the socket between processes.
+ # Avoid sharing the client between processes.
  producer = rdkafka_producer_config.producer

  handle = producer.produce(
@@ -480,8 +482,10 @@ describe Rdkafka::Producer do

  writer.write(report_json)
  writer.close
+ producer.flush
  producer.close
  end
+ Process.wait(pid)

  writer.close
  report_hash = JSON.parse(reader.read)
@@ -493,7 +497,7 @@ describe Rdkafka::Producer do

  reader.close

- # Consume message and verify it's content
+ # Consume message and verify its content
  message = wait_for_message(
  topic: "produce_test_topic",
  delivery_report: report,
data/spec/spec_helper.rb CHANGED
@@ -106,6 +106,20 @@ def wait_for_unassignment(consumer)
  end
  end

+ def notify_listener(listener, &block)
+ # 1. subscribe and poll
+ consumer.subscribe("consume_test_topic")
+ wait_for_assignment(consumer)
+ consumer.poll(100)
+
+ block.call if block
+
+ # 2. unsubscribe
+ consumer.unsubscribe
+ wait_for_unassignment(consumer)
+ consumer.close
+ end
+
  RSpec.configure do |config|
  config.filter_run focus: true
  config.run_all_when_everything_filtered = true
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: rdkafka
  version: !ruby/object:Gem::Version
- version: 0.13.0.beta.1
+ version: 0.13.0.beta.3
  platform: ruby
  authors:
  - Thijs Cadier
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-10-11 00:00:00.000000000 Z
+ date: 2022-10-25 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: ffi
@@ -191,6 +191,7 @@ files:
  - spec/rdkafka/bindings_spec.rb
  - spec/rdkafka/callbacks_spec.rb
  - spec/rdkafka/config_spec.rb
+ - spec/rdkafka/consumer/headers_spec.rb
  - spec/rdkafka/consumer/message_spec.rb
  - spec/rdkafka/consumer/partition_spec.rb
  - spec/rdkafka/consumer/topic_partition_list_spec.rb
@@ -221,7 +222,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: 1.3.1
  requirements: []
- rubygems_version: 3.3.7
+ rubygems_version: 3.3.13
  signing_key:
  specification_version: 4
  summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
@@ -237,6 +238,7 @@ test_files:
  - spec/rdkafka/bindings_spec.rb
  - spec/rdkafka/callbacks_spec.rb
  - spec/rdkafka/config_spec.rb
+ - spec/rdkafka/consumer/headers_spec.rb
  - spec/rdkafka/consumer/message_spec.rb
  - spec/rdkafka/consumer/partition_spec.rb
  - spec/rdkafka/consumer/topic_partition_list_spec.rb