rdkafka 0.13.0.beta.2 → 0.13.0.beta.5

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: eac016dc712d56e178c77a62f8af4e422319a783e80acbd7528453bfca6ca260
-   data.tar.gz: 8a4ed8df1339e0e74c96b259431012465c499ae9e9c8c6db8ff751497e1e5c43
+   metadata.gz: 5b5ba8c133537fc31e3fb5f00d230816b92d8c8f63c8d94b39f80d5d7fb22918
+   data.tar.gz: df7679de1c6724585ac5c0e636b6a239ae9d6e859496a855b2596c45e07099ca
  SHA512:
-   metadata.gz: e3167de231aaabdfa512cabef13a4ffaabcc94757a2b7d0f360ed27a9005a40da5bbb262dffc49bda03d11557eef63dcf6b9c010fb944cf41fcb06bb7531c3ee
-   data.tar.gz: 8db04d46686505a9b1fef79c8544f3c27056ae5ff9e9eede9343d4f5cef4c63c844b9798011a5339ebbdf2cab883d1c3fd5df61842ea90c4862fa0d03dd88610
+   metadata.gz: 6d1b685394205c9beaf388e40165c028dda9ef3a48ed430e9ccb24e320facadf462215b36e82a62f82cfaf61b1d6040936c352db9bb21371cb4f855d977f9c70
+   data.tar.gz: 65822e65cf3ac6297fec721756cba360f68d3533e69d76ec7208d563d39f0c659a5ac34ef21f633ad0053b9db0cc1cba887d9b4ad75b1083f2ceaf564258c722
data/.semaphore/semaphore.yml CHANGED
@@ -20,8 +20,8 @@ blocks:
      commands:
        - sem-version ruby $RUBY_VERSION
        - checkout
+       - docker-compose up -d --no-recreate
        - bundle install --path vendor/bundle
        - cd ext && bundle exec rake && cd ..
-       - docker-compose up -d --no-recreate
        - ulimit -c unlimited
        - valgrind -v bundle exec rspec
data/CHANGELOG.md CHANGED
@@ -7,6 +7,10 @@
  * Fix documented type for DeliveryReport#error (jimmydo)
  * Bump librdkafka to 1.9.2 (thijsc)
  * Use finalizers to cleanly exit producer and admin (thijsc)
+ * Lock access to the native kafka client (thijsc)
+ * Fix potential race condition in multi-threaded producer (mensfeld)
+ * Fix leaking FFI resources in specs (mensfeld)
+ * Improve specs stability (mensfeld)
 
  # 0.12.0
  * Bumps librdkafka to 1.9.0
data/README.md CHANGED
@@ -23,6 +23,19 @@ The most important pieces of a Kafka client are implemented. We're
  working towards feature completeness, you can track that here:
  https://github.com/appsignal/rdkafka-ruby/milestone/1
 
+ ## Table of content
+
+ - [Installation](#installation)
+ - [Usage](#usage)
+   * [Consuming messages](#consuming-messages)
+   * [Producing messages](#producing-messages)
+ - [Higher level libraries](#higher-level-libraries)
+   * [Message processing frameworks](#message-processing-frameworks)
+   * [Message publishing libraries](#message-publishing-libraries)
+ - [Development](#development)
+ - [Example](#example)
+
+
  ## Installation
 
  This gem downloads and compiles librdkafka when it is installed. If you
@@ -77,6 +90,19 @@ Note that creating a producer consumes some resources that will not be
  released until it `#close` is explicitly called, so be sure to call
  `Config#producer` only as necessary.
 
+ ## Higher level libraries
+
+ Currently, there are two actively developed frameworks based on rdkafka-ruby, that provide higher level API that can be used to work with Kafka messages and one library for publishing messages.
+
+ ### Message processing frameworks
+
+ * [Karafka](https://github.com/karafka/karafka) - Ruby and Rails efficient Kafka processing framework.
+ * [Racecar](https://github.com/zendesk/racecar) - A simple framework for Kafka consumers in Ruby
+
+ ### Message publishing libraries
+
+ * [WaterDrop](https://github.com/karafka/waterdrop) – Standalone Karafka library for producing Kafka messages.
+
  ## Development
 
  A Docker Compose file is included to run Kafka and Zookeeper. To run
data/lib/rdkafka/admin.rb CHANGED
@@ -18,11 +18,16 @@ module Rdkafka
 
    # Close this admin instance
    def close
+     return if closed?
      ObjectSpace.undefine_finalizer(self)
-
      @native_kafka.close
    end
 
+   # Whether this admin has closed
+   def closed?
+     @native_kafka.closed?
+   end
+
    # Create a topic with the given partition count and replication factor
    #
    # @raise [ConfigError] When the partition count or replication factor are out of valid range
@@ -62,7 +67,9 @@ module Rdkafka
      topics_array_ptr.write_array_of_pointer(pointer_array)
 
      # Get a pointer to the queue that our request will be enqueued on
-     queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka.inner)
+     queue_ptr = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
+     end
      if queue_ptr.null?
        Rdkafka::Bindings.rd_kafka_NewTopic_destroy(new_topic_ptr)
        raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -73,17 +80,21 @@ module Rdkafka
      create_topic_handle[:pending] = true
      create_topic_handle[:response] = -1
      CreateTopicHandle.register(create_topic_handle)
-     admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka.inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
+     admin_options_ptr = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_CREATETOPICS)
+     end
      Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, create_topic_handle.to_ptr)
 
      begin
-       Rdkafka::Bindings.rd_kafka_CreateTopics(
-         @native_kafka.inner,
-         topics_array_ptr,
-         1,
-         admin_options_ptr,
-         queue_ptr
-       )
+       @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_CreateTopics(
+           inner,
+           topics_array_ptr,
+           1,
+           admin_options_ptr,
+           queue_ptr
+         )
+       end
      rescue Exception
        CreateTopicHandle.remove(create_topic_handle.to_ptr.address)
        raise
@@ -113,7 +124,9 @@ module Rdkafka
      topics_array_ptr.write_array_of_pointer(pointer_array)
 
      # Get a pointer to the queue that our request will be enqueued on
-     queue_ptr = Rdkafka::Bindings.rd_kafka_queue_get_background(@native_kafka.inner)
+     queue_ptr = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_queue_get_background(inner)
+     end
      if queue_ptr.null?
        Rdkafka::Bindings.rd_kafka_DeleteTopic_destroy(delete_topic_ptr)
        raise Rdkafka::Config::ConfigError.new("rd_kafka_queue_get_background was NULL")
@@ -124,17 +137,21 @@ module Rdkafka
      delete_topic_handle[:pending] = true
      delete_topic_handle[:response] = -1
      DeleteTopicHandle.register(delete_topic_handle)
-     admin_options_ptr = Rdkafka::Bindings.rd_kafka_AdminOptions_new(@native_kafka.inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
+     admin_options_ptr = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_AdminOptions_new(inner, Rdkafka::Bindings::RD_KAFKA_ADMIN_OP_DELETETOPICS)
+     end
      Rdkafka::Bindings.rd_kafka_AdminOptions_set_opaque(admin_options_ptr, delete_topic_handle.to_ptr)
 
      begin
-       Rdkafka::Bindings.rd_kafka_DeleteTopics(
-         @native_kafka.inner,
-         topics_array_ptr,
-         1,
-         admin_options_ptr,
-         queue_ptr
-       )
+       @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_DeleteTopics(
+           inner,
+           topics_array_ptr,
+           1,
+           admin_options_ptr,
+           queue_ptr
+         )
+       end
      rescue Exception
        DeleteTopicHandle.remove(delete_topic_handle.to_ptr.address)
        raise
@@ -149,7 +166,7 @@ module Rdkafka
 
    private
    def closed_admin_check(method)
-     raise Rdkafka::ClosedAdminError.new(method) if @native_kafka.closed?
+     raise Rdkafka::ClosedAdminError.new(method) if closed?
    end
  end
  end
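Every direct `@native_kafka.inner` dereference in the admin client becomes a `with_inner` block, so the lock inside `NativeKafka` (diffed further down) is held for the full duration of each FFI call and `close` can never destroy the handle mid-call. A minimal sketch of that contract, with a hypothetical class name standing in for `NativeKafka`:

```ruby
# Illustrative only -- a simplified stand-in for NativeKafka's locking.
class GuardedClient
  def initialize(inner)
    @inner = inner   # the raw librdkafka handle in the real code
    @mutex = Mutex.new
  end

  # Yield the handle under the lock; yield nothing once closed.
  def with_inner
    return if @inner.nil?
    @mutex.synchronize { yield @inner }
  end

  def close
    @mutex.synchronize do  # waits for any in-flight block to finish
      @inner = nil         # stands in for rd_kafka_destroy
    end
  end
end
```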
data/lib/rdkafka/bindings.rb CHANGED
@@ -41,10 +41,10 @@ module Rdkafka
 
    # Metadata
 
-   attach_function :rd_kafka_memberid, [:pointer], :string
-   attach_function :rd_kafka_clusterid, [:pointer], :string
-   attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int
-   attach_function :rd_kafka_metadata_destroy, [:pointer], :void
+   attach_function :rd_kafka_memberid, [:pointer], :string, blocking: true
+   attach_function :rd_kafka_clusterid, [:pointer], :string, blocking: true
+   attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int, blocking: true
+   attach_function :rd_kafka_metadata_destroy, [:pointer], :void, blocking: true
 
    # Message struct
 
@@ -169,26 +169,27 @@ module Rdkafka
    ]
 
    attach_function :rd_kafka_new, [:kafka_type, :pointer, :pointer, :int], :pointer
+
    attach_function :rd_kafka_destroy, [:pointer], :void
 
    # Consumer
 
-   attach_function :rd_kafka_subscribe, [:pointer, :pointer], :int
-   attach_function :rd_kafka_unsubscribe, [:pointer], :int
-   attach_function :rd_kafka_subscription, [:pointer, :pointer], :int
-   attach_function :rd_kafka_assign, [:pointer, :pointer], :int
-   attach_function :rd_kafka_incremental_assign, [:pointer, :pointer], :int
-   attach_function :rd_kafka_incremental_unassign, [:pointer, :pointer], :int
-   attach_function :rd_kafka_assignment, [:pointer, :pointer], :int
-   attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int
+   attach_function :rd_kafka_subscribe, [:pointer, :pointer], :int, blocking: true
+   attach_function :rd_kafka_unsubscribe, [:pointer], :int, blocking: true
+   attach_function :rd_kafka_subscription, [:pointer, :pointer], :int, blocking: true
+   attach_function :rd_kafka_assign, [:pointer, :pointer], :int, blocking: true
+   attach_function :rd_kafka_incremental_assign, [:pointer, :pointer], :int, blocking: true
+   attach_function :rd_kafka_incremental_unassign, [:pointer, :pointer], :int, blocking: true
+   attach_function :rd_kafka_assignment, [:pointer, :pointer], :int, blocking: true
+   attach_function :rd_kafka_committed, [:pointer, :pointer, :int], :int, blocking: true
    attach_function :rd_kafka_commit, [:pointer, :pointer, :bool], :int, blocking: true
-   attach_function :rd_kafka_poll_set_consumer, [:pointer], :void
+   attach_function :rd_kafka_poll_set_consumer, [:pointer], :void, blocking: true
    attach_function :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: true
    attach_function :rd_kafka_consumer_close, [:pointer], :void, blocking: true
-   attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int
-   attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int
-   attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int
-   attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int
+   attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int, blocking: true
+   attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
+   attach_function :rd_kafka_resume_partitions, [:pointer, :pointer], :int, blocking: true
+   attach_function :rd_kafka_seek, [:pointer, :int32, :int64, :int], :int, blocking: true
 
    # Headers
    attach_function :rd_kafka_header_get_all, [:pointer, :size_t, :pointer, :pointer, SizePtr], :int
@@ -197,7 +198,7 @@ module Rdkafka
    # Rebalance
 
    callback :rebalance_cb_function, [:pointer, :int, :pointer, :pointer], :void
-   attach_function :rd_kafka_conf_set_rebalance_cb, [:pointer, :rebalance_cb_function], :void
+   attach_function :rd_kafka_conf_set_rebalance_cb, [:pointer, :rebalance_cb_function], :void, blocking: true
 
    RebalanceCallback = FFI::Function.new(
      :void, [:pointer, :int, :pointer, :pointer]
@@ -255,7 +256,7 @@ module Rdkafka
 
    RD_KAFKA_MSG_F_COPY = 0x2
 
-   attach_function :rd_kafka_producev, [:pointer, :varargs], :int
+   attach_function :rd_kafka_producev, [:pointer, :varargs], :int, blocking: true
    callback :delivery_cb, [:pointer, :pointer, :pointer], :void
    attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void
 
@@ -282,23 +283,23 @@ module Rdkafka
    RD_KAFKA_ADMIN_OP_CREATETOPICS = 1 # rd_kafka_admin_op_t
    RD_KAFKA_EVENT_CREATETOPICS_RESULT = 100 # rd_kafka_event_type_t
 
-   attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void
-   attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer
-   attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32
-   attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void
-   attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer
-   attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer
+   attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void, blocking: true
+   attach_function :rd_kafka_NewTopic_new, [:pointer, :size_t, :size_t, :pointer, :size_t], :pointer, blocking: true
+   attach_function :rd_kafka_NewTopic_set_config, [:pointer, :string, :string], :int32, blocking: true
+   attach_function :rd_kafka_NewTopic_destroy, [:pointer], :void, blocking: true
+   attach_function :rd_kafka_event_CreateTopics_result, [:pointer], :pointer, blocking: true
+   attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer, blocking: true
 
    # Delete Topics
 
    RD_KAFKA_ADMIN_OP_DELETETOPICS = 2 # rd_kafka_admin_op_t
    RD_KAFKA_EVENT_DELETETOPICS_RESULT = 101 # rd_kafka_event_type_t
 
-   attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32
-   attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer
-   attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void
-   attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer
-   attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer
+   attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32, blocking: true
+   attach_function :rd_kafka_DeleteTopic_new, [:pointer], :pointer, blocking: true
+   attach_function :rd_kafka_DeleteTopic_destroy, [:pointer], :void, blocking: true
+   attach_function :rd_kafka_event_DeleteTopics_result, [:pointer], :pointer, blocking: true
+   attach_function :rd_kafka_DeleteTopics_result_topics, [:pointer, :pointer], :pointer, blocking: true
 
    # Background Queue and Callback
 
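Almost every binding here gains FFI's `blocking: true`. That flag tells Ruby-FFI to release the GVL around the C call, so a librdkafka call that blocks on a broker round-trip no longer freezes every other Ruby thread, and cannot deadlock against the new mutex-guarded polling thread. A self-contained illustration against libc (hypothetical binding, not gem code):

```ruby
require "ffi"

module LibC
  extend FFI::Library
  ffi_lib FFI::Library::LIBC
  # blocking: true releases Ruby's GVL while the C call runs, so other
  # Ruby threads keep executing while this one blocks inside C.
  attach_function :sleep, [:uint], :uint, blocking: true
end

ticker = Thread.new { 5.times { puts "still responsive"; sleep 0.2 } }
LibC.sleep(1) # without blocking: true, the ticker would stall here
ticker.join
```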
data/lib/rdkafka/config.rb CHANGED
@@ -157,13 +157,14 @@ module Rdkafka
      Rdkafka::Bindings.rd_kafka_conf_set_rebalance_cb(config, Rdkafka::Bindings::RebalanceCallback)
    end
 
+   # Create native client
    kafka = native_kafka(config, :rd_kafka_consumer)
 
    # Redirect the main queue to the consumer
    Rdkafka::Bindings.rd_kafka_poll_set_consumer(kafka)
 
    # Return consumer with Kafka client
-   Rdkafka::Consumer.new(kafka)
+   Rdkafka::Consumer.new(Rdkafka::NativeKafka.new(kafka, run_polling_thread: false))
  end
 
  # Create a producer with this configuration.
@@ -181,7 +182,7 @@ module Rdkafka
    Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Callbacks::DeliveryCallbackFunction)
    # Return producer with Kafka client
    partitioner_name = self[:partitioner] || self["partitioner"]
-   Rdkafka::Producer.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer)), partitioner_name).tap do |producer|
+   Rdkafka::Producer.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true), partitioner_name).tap do |producer|
      opaque.producer = producer
    end
  end
@@ -196,7 +197,7 @@ module Rdkafka
    opaque = Opaque.new
    config = native_config(opaque)
    Rdkafka::Bindings.rd_kafka_conf_set_background_event_cb(config, Rdkafka::Callbacks::BackgroundEventCallbackFunction)
-   Rdkafka::Admin.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer)))
+   Rdkafka::Admin.new(Rdkafka::NativeKafka.new(native_kafka(config, :rd_kafka_producer), run_polling_thread: true))
  end
 
  # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
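The new `run_polling_thread:` flag encodes the split: producers and admin clients need a background `rd_kafka_poll` loop so delivery and admin-event callbacks fire, while a consumer's callbacks are serviced by `rd_kafka_consumer_poll` on the calling thread. Construction is unchanged from the caller's side (sketch, assuming a broker on localhost):

```ruby
require "rdkafka"

config = Rdkafka::Config.new(
  "bootstrap.servers" => "localhost:9092", # assumed local broker
  "group.id"          => "example-group"   # only the consumer uses this
)

producer = config.producer # NativeKafka built with run_polling_thread: true
admin    = config.admin    # likewise polled from the background thread
consumer = config.consumer # run_polling_thread: false; polls inline

[producer, admin, consumer].each(&:close)
```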
data/lib/rdkafka/consumer.rb CHANGED
@@ -26,15 +26,16 @@ module Rdkafka
    # @return [nil]
    def close
      return if closed?
-
-     Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
-     Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
-     @native_kafka = nil
+     ObjectSpace.undefine_finalizer(self)
+     @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_consumer_close(inner)
+     end
+     @native_kafka.close
    end
 
    # Whether this consumer has closed
    def closed?
-     @native_kafka.nil?
+     @native_kafka.closed?
    end
 
    # Subscribe to one or more topics letting Kafka handle partition assignments.
@@ -55,7 +56,9 @@ module Rdkafka
    end
 
    # Subscribe to topic partition list and check this was successful
-   response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
+   response = @native_kafka.with_inner do |inner|
+     Rdkafka::Bindings.rd_kafka_subscribe(inner, tpl)
+   end
    if response != 0
      raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
    end
@@ -71,7 +74,9 @@ module Rdkafka
    def unsubscribe
      closed_consumer_check(__method__)
 
-     response = Rdkafka::Bindings.rd_kafka_unsubscribe(@native_kafka)
+     response = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_unsubscribe(inner)
+     end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
@@ -94,7 +99,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-       response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_pause_partitions(inner, tpl)
+       end
 
        if response != 0
          list = TopicPartitionList.from_native_tpl(tpl)
@@ -122,7 +129,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-       response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_resume_partitions(inner, tpl)
+       end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
        end
@@ -140,7 +149,9 @@ module Rdkafka
      closed_consumer_check(__method__)
 
      ptr = FFI::MemoryPointer.new(:pointer)
-     response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
+     response = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_subscription(inner, ptr)
+     end
 
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
@@ -170,7 +181,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-       response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_assign(inner, tpl)
+       end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
        end
@@ -188,7 +201,9 @@ module Rdkafka
      closed_consumer_check(__method__)
 
      ptr = FFI::MemoryPointer.new(:pointer)
-     response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
+     response = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_assignment(inner, ptr)
+     end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
@@ -227,7 +242,9 @@ module Rdkafka
      tpl = list.to_native_tpl
 
      begin
-       response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_committed(inner, tpl, timeout_ms)
+       end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response)
        end
@@ -252,14 +269,16 @@ module Rdkafka
      low = FFI::MemoryPointer.new(:int64, 1)
      high = FFI::MemoryPointer.new(:int64, 1)
 
-     response = Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
-       @native_kafka,
-       topic,
-       partition,
-       low,
-       high,
-       timeout_ms,
-     )
+     response = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_query_watermark_offsets(
+         inner,
+         topic,
+         partition,
+         low,
+         high,
+         timeout_ms,
+       )
+     end
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
      end
@@ -307,7 +326,9 @@ module Rdkafka
    # @return [String, nil]
    def cluster_id
      closed_consumer_check(__method__)
-     Rdkafka::Bindings.rd_kafka_clusterid(@native_kafka)
+     @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_clusterid(inner)
+     end
    end
 
    # Returns this client's broker-assigned group member id
@@ -317,7 +338,9 @@ module Rdkafka
    # @return [String, nil]
    def member_id
      closed_consumer_check(__method__)
-     Rdkafka::Bindings.rd_kafka_memberid(@native_kafka)
+     @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_memberid(inner)
+     end
    end
 
    # Store offset of a message to be used in the next commit of this consumer
@@ -334,11 +357,13 @@ module Rdkafka
 
      # rd_kafka_offset_store is one of the few calls that does not support
      # a string as the topic, so create a native topic for it.
-     native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
-       @native_kafka,
-       message.topic,
-       nil
-     )
+     native_topic = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_topic_new(
+         inner,
+         message.topic,
+         nil
+       )
+     end
      response = Rdkafka::Bindings.rd_kafka_offset_store(
        native_topic,
        message.partition,
@@ -366,11 +391,13 @@ module Rdkafka
 
      # rd_kafka_offset_store is one of the few calls that does not support
      # a string as the topic, so create a native topic for it.
-     native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
-       @native_kafka,
-       message.topic,
-       nil
-     )
+     native_topic = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_topic_new(
+         inner,
+         message.topic,
+         nil
+       )
+     end
      response = Rdkafka::Bindings.rd_kafka_seek(
        native_topic,
        message.partition,
@@ -411,7 +438,9 @@ module Rdkafka
      tpl = list ? list.to_native_tpl : nil
 
      begin
-       response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_commit(inner, tpl, async)
+       end
        if response != 0
          raise Rdkafka::RdkafkaError.new(response)
        end
@@ -430,7 +459,9 @@ module Rdkafka
    def poll(timeout_ms)
      closed_consumer_check(__method__)
 
-     message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
+     message_ptr = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_consumer_poll(inner, timeout_ms)
+     end
      if message_ptr.null?
        nil
      else
@@ -445,7 +476,7 @@ module Rdkafka
      end
    ensure
      # Clean up rdkafka message if there is one
-     if !message_ptr.nil? && !message_ptr.null?
+     if message_ptr && !message_ptr.null?
        Rdkafka::Bindings.rd_kafka_message_destroy(message_ptr)
      end
    end
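Each consumer method now reaches the handle only through `with_inner`, closing the window in which `#close` from one thread could destroy the client while another thread is inside an FFI call. A self-contained model of that race in plain Ruby (no gem code):

```ruby
handle = "fake native handle"
lock   = Mutex.new

caller_thread = Thread.new do
  # Stand-in for e.g. rd_kafka_subscribe(inner, tpl): under the lock the
  # handle is either valid for the whole call or already nil.
  lock.synchronize { handle&.size }
end

closer_thread = Thread.new do
  # Stand-in for NativeKafka#close / rd_kafka_destroy.
  lock.synchronize { handle = nil }
end

[caller_thread, closer_thread].each(&:join)
```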
data/lib/rdkafka/native_kafka.rb CHANGED
@@ -4,25 +4,42 @@ module Rdkafka
  # @private
  # A wrapper around a native kafka that polls and cleanly exits
  class NativeKafka
-   def initialize(inner)
+   def initialize(inner, run_polling_thread:)
      @inner = inner
+     # Lock around external access
+     @access_mutex = Mutex.new
+     # Lock around internal polling
+     @poll_mutex = Mutex.new
 
-     # Start thread to poll client for delivery callbacks
-     @polling_thread = Thread.new do
-       loop do
-         Rdkafka::Bindings.rd_kafka_poll(inner, 250)
-         # Exit thread if closing and the poll queue is empty
-         if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(inner) == 0
-           break
+     if run_polling_thread
+       # Start thread to poll client for delivery callbacks,
+       # not used in consumer.
+       @polling_thread = Thread.new do
+         loop do
+           @poll_mutex.synchronize do
+             Rdkafka::Bindings.rd_kafka_poll(inner, 100)
+           end
+
+           # Exit thread if closing and the poll queue is empty
+           if Thread.current[:closing] && Rdkafka::Bindings.rd_kafka_outq_len(inner) == 0
+             break
+           end
          end
        end
+
+       @polling_thread.abort_on_exception = true
+       @polling_thread[:closing] = false
      end
-     @polling_thread.abort_on_exception = true
-     @polling_thread[:closing] = false
+
+     @closing = false
    end
 
-   def inner
-     @inner
+   def with_inner
+     return if @inner.nil?
+
+     @access_mutex.synchronize do
+       yield @inner
+     end
    end
 
    def finalizer
@@ -30,22 +47,36 @@ module Rdkafka
    end
 
    def closed?
-     @inner.nil?
+     @closing || @inner.nil?
    end
 
    def close(object_id=nil)
      return if closed?
 
-     # Flush outstanding activity
-     Rdkafka::Bindings.rd_kafka_flush(@inner, 30 * 1000)
+     @access_mutex.lock
 
-     # Indicate to polling thread that we're closing
-     @polling_thread[:closing] = true
-     # Wait for the polling thread to finish up
-     @polling_thread.join
+     # Indicate to the outside world that we are closing
+     @closing = true
 
-     Rdkafka::Bindings.rd_kafka_destroy(@inner)
+     if @polling_thread
+       # Indicate to polling thread that we're closing
+       @polling_thread[:closing] = true
 
+       # Wait for the polling thread to finish up,
+       # this can be aborted in practice if this
+       # code runs from a finalizer.
+       @polling_thread.join
+     end
+
+     # Destroy the client after locking both mutexes
+     @poll_mutex.lock
+
+     # This check prevents a race condition, where we would enter the close in two threads
+     # and after unlocking the primary one that hold the lock but finished, ours would be unlocked
+     # and would continue to run, trying to destroy inner twice
+     return unless @inner
+
+     Rdkafka::Bindings.rd_kafka_destroy(@inner)
      @inner = nil
    end
  end
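The shutdown ordering above is the core of the fix: `close` takes `@access_mutex` so no new external call can start, flips `@closing` so `closed?` answers true immediately, joins the polling thread, and only then takes `@poll_mutex` before `rd_kafka_destroy`, so destruction can never overlap an in-flight `rd_kafka_poll`. The `finalizer` hook is how the wrapper classes make this run even without an explicit `#close`; a simplified sketch of that wiring, assuming `finalizer` returns a callable that invokes `close` (its `close(object_id=nil)` signature fits a GC finalizer):

```ruby
class ExampleClient # hypothetical; mirrors how Producer/Admin wire it up
  def initialize(native_kafka)
    @native_kafka = native_kafka
    # If this object is GC'd while still open, the native client is
    # closed cleanly instead of leaking the librdkafka handle.
    ObjectSpace.define_finalizer(self, native_kafka.finalizer)
  end

  def close
    return if @native_kafka.closed?
    ObjectSpace.undefine_finalizer(self) # closed manually; GC hook not needed
    @native_kafka.close
  end
end
```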
data/lib/rdkafka/producer.rb CHANGED
@@ -40,11 +40,28 @@ module Rdkafka
 
    # Close this producer and wait for the internal poll queue to empty.
    def close
+     return if closed?
      ObjectSpace.undefine_finalizer(self)
-
      @native_kafka.close
    end
 
+   # Whether this producer has closed
+   def closed?
+     @native_kafka.closed?
+   end
+
+   # Wait until all outstanding producer requests are completed, with the given timeout
+   # in seconds. Call this before closing a producer to ensure delivery of all messages.
+   #
+   # @param timeout_ms [Integer] how long should we wait for flush of all messages
+   def flush(timeout_ms=5_000)
+     closed_producer_check(__method__)
+
+     @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
+     end
+   end
+
    # Partition count for a given topic.
    # NOTE: If 'allow.auto.create.topics' is set to true in the broker, the topic will be auto-created after returning nil.
    #
@@ -53,7 +70,9 @@ module Rdkafka
    # @return partition count [Integer,nil]
    def partition_count(topic)
      closed_producer_check(__method__)
-     Rdkafka::Metadata.new(@native_kafka.inner, topic).topics&.first[:partition_count]
+     @native_kafka.with_inner do |inner|
+       Rdkafka::Metadata.new(inner, topic).topics&.first[:partition_count]
+     end
    end
 
    # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
@@ -144,10 +163,12 @@ module Rdkafka
      args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_END
 
      # Produce the message
-     response = Rdkafka::Bindings.rd_kafka_producev(
-       @native_kafka.inner,
-       *args
-     )
+     response = @native_kafka.with_inner do |inner|
+       Rdkafka::Bindings.rd_kafka_producev(
+         inner,
+         *args
+       )
+     end
 
      # Raise error if the produce call was not successful
      if response != 0
@@ -173,7 +194,7 @@ module Rdkafka
 
    private
    def closed_producer_check(method)
-     raise Rdkafka::ClosedProducerError.new(method) if @native_kafka.closed?
+     raise Rdkafka::ClosedProducerError.new(method) if closed?
    end
  end
  end
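One nit in the new `#flush` docstring: it says the timeout is "in seconds", but the parameter is milliseconds (`timeout_ms`, default `5_000`, handed straight to `rd_kafka_flush`). A typical shutdown with the new method (sketch; broker address and topic assumed):

```ruby
producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

handle = producer.produce(topic: "example_topic", payload: "hello")
handle.wait(max_wait_timeout: 5) # DeliveryHandle#wait takes seconds

producer.flush(2_000) # milliseconds; waits for outstanding deliveries
producer.close        # then tears down the native client
```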
data/lib/rdkafka/version.rb CHANGED
@@ -1,7 +1,7 @@
  # frozen_string_literal: true
 
  module Rdkafka
-   VERSION = "0.13.0.beta.2"
+   VERSION = "0.13.0.beta.5"
    LIBRDKAFKA_VERSION = "1.9.2"
    LIBRDKAFKA_SOURCE_SHA256 = "3fba157a9f80a0889c982acdd44608be8a46142270a389008b22d921be1198ad"
  end
data/spec/rdkafka/config_spec.rb CHANGED
@@ -151,22 +151,23 @@ describe Rdkafka::Config do
  end
 
  it "allows string partitioner key" do
-   expect(Rdkafka::Producer).to receive(:new).with(kind_of(Rdkafka::NativeKafka), "murmur2")
+   expect(Rdkafka::Producer).to receive(:new).with(kind_of(Rdkafka::NativeKafka), "murmur2").and_call_original
    config = Rdkafka::Config.new("partitioner" => "murmur2")
-   config.producer
+   config.producer.close
  end
 
  it "allows symbol partitioner key" do
-   expect(Rdkafka::Producer).to receive(:new).with(kind_of(Rdkafka::NativeKafka), "murmur2")
+   expect(Rdkafka::Producer).to receive(:new).with(kind_of(Rdkafka::NativeKafka), "murmur2").and_call_original
    config = Rdkafka::Config.new(:partitioner => "murmur2")
-   config.producer
+   config.producer.close
  end
 
  it "should allow configuring zstd compression" do
    config = Rdkafka::Config.new('compression.codec' => 'zstd')
    begin
-     expect(config.producer).to be_a Rdkafka::Producer
-     config.producer.close
+     producer = config.producer
+     expect(producer).to be_a Rdkafka::Producer
+     producer.close
    rescue Rdkafka::Config::ConfigError => ex
      pending "Zstd compression not supported on this machine"
      raise ex
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -55,7 +55,7 @@ describe Rdkafka::Consumer do
 
  describe "#pause and #resume" do
    context "subscription" do
-     let(:timeout) { 1000 }
+     let(:timeout) { 2000 }
 
      before { consumer.subscribe("consume_test_topic") }
      after { consumer.unsubscribe }
@@ -738,7 +738,8 @@ describe Rdkafka::Consumer do
      #
      # This is, in effect, an integration test and the subsequent specs are
      # unit tests.
-     create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
+     admin = rdkafka_config.admin
+     create_topic_handle = admin.create_topic(topic_name, 1, 1)
      create_topic_handle.wait(max_wait_timeout: 15.0)
      consumer.subscribe(topic_name)
      produce_n 42
@@ -751,6 +752,7 @@ describe Rdkafka::Consumer do
      expect(all_yields.flatten.size).to eq 42
      expect(all_yields.size).to be > 4
      expect(all_yields.flatten.map(&:key)).to eq (0..41).map { |x| x.to_s }
+     admin.close
    end
 
    it "should batch poll results and yield arrays of messages" do
@@ -793,13 +795,15 @@ describe Rdkafka::Consumer do
    end
 
    it "should yield [] if nothing is received before the timeout" do
-     create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
+     admin = rdkafka_config.admin
+     create_topic_handle = admin.create_topic(topic_name, 1, 1)
      create_topic_handle.wait(max_wait_timeout: 15.0)
      consumer.subscribe(topic_name)
      consumer.each_batch do |batch|
        expect(batch).to eq([])
        break
      end
+     admin.close
    end
 
    it "should yield batchs of max_items in size if messages are already fetched" do
@@ -876,6 +880,7 @@ describe Rdkafka::Consumer do
      expect(batches_yielded.first.size).to eq 2
      expect(exceptions_yielded.flatten.size).to eq 1
      expect(exceptions_yielded.flatten.first).to be_instance_of(Rdkafka::RdkafkaError)
+     consumer.close
    end
  end
 
@@ -917,6 +922,7 @@ describe Rdkafka::Consumer do
      expect(each_batch_iterations).to eq 0
      expect(batches_yielded.size).to eq 0
      expect(exceptions_yielded.size).to eq 0
+     consumer.close
    end
  end
  end
data/spec/rdkafka/native_kafka_spec.rb CHANGED
@@ -8,12 +8,9 @@ describe Rdkafka::NativeKafka do
  let(:closing) { false }
  let(:thread) { double(Thread) }
 
- subject(:client) { described_class.new(native) }
+ subject(:client) { described_class.new(native, run_polling_thread: true) }
 
  before do
-   allow(Rdkafka::Bindings).to receive(:rd_kafka_poll).with(instance_of(FFI::Pointer), 250).and_call_original
-   allow(Rdkafka::Bindings).to receive(:rd_kafka_outq_len).with(instance_of(FFI::Pointer)).and_return(0).and_call_original
-   allow(Rdkafka::Bindings).to receive(:rd_kafka_destroy)
    allow(Thread).to receive(:new).and_return(thread)
 
    allow(thread).to receive(:[]=).with(:closing, anything)
@@ -21,6 +18,8 @@ describe Rdkafka::NativeKafka do
    allow(thread).to receive(:abort_on_exception=).with(anything)
  end
 
+ after { client.close }
+
  context "defaults" do
    it "sets the thread to abort on exception" do
      expect(thread).to receive(:abort_on_exception=).with(true)
@@ -41,32 +40,12 @@ describe Rdkafka::NativeKafka do
 
      client
    end
-
-   it "polls the native with default 250ms timeout" do
-     polling_loop_expects do
-       expect(Rdkafka::Bindings).to receive(:rd_kafka_poll).with(instance_of(FFI::Pointer), 250).at_least(:once)
-     end
-   end
-
-   it "check the out queue of native client" do
-     polling_loop_expects do
-       expect(Rdkafka::Bindings).to receive(:rd_kafka_outq_len).with(native).at_least(:once)
-     end
-   end
- end
-
- def polling_loop_expects(&block)
-   Thread.current[:closing] = true # this forces the loop break with line #12
-
-   allow(Thread).to receive(:new).and_yield do |_|
-     block.call
-   end.and_return(thread)
-
-   client
  end
 
- it "exposes inner client" do
-   expect(client.inner).to eq(native)
+ it "exposes the inner client" do
+   client.with_inner do |inner|
+     expect(inner).to eq(native)
+   end
  end
 
  context "when client was not yet closed (`nil`)" do
@@ -76,7 +55,7 @@ describe Rdkafka::NativeKafka do
 
    context "and attempt to close" do
      it "calls the `destroy` binding" do
-       expect(Rdkafka::Bindings).to receive(:rd_kafka_destroy).with(native)
+       expect(Rdkafka::Bindings).to receive(:rd_kafka_destroy).with(native).and_call_original
 
        client.close
      end
@@ -96,7 +75,6 @@ describe Rdkafka::NativeKafka do
    it "closes and unassign the native client" do
      client.close
 
-     expect(client.inner).to eq(nil)
      expect(client.closed?).to eq(true)
    end
  end
@@ -111,7 +89,7 @@ describe Rdkafka::NativeKafka do
 
    context "and attempt to close again" do
      it "does not call the `destroy` binding" do
-       expect(Rdkafka::Bindings).not_to receive(:rd_kafka_destroy)
+       expect(Rdkafka::Bindings).not_to receive(:rd_kafka_destroy_flags)
 
        client.close
      end
@@ -131,7 +109,6 @@ describe Rdkafka::NativeKafka do
    it "does not close and unassign the native client again" do
      client.close
 
-     expect(client.inner).to eq(nil)
      expect(client.closed?).to eq(true)
    end
  end
data/spec/rdkafka/producer_spec.rb CHANGED
@@ -185,7 +185,8 @@ describe Rdkafka::Producer do
    expect(report.partition).to eq 1
    expect(report.offset).to be >= 0
 
-   # Close producer
+   # Flush and close producer
+   producer.flush
    producer.close
 
    # Consume message and verify its content
@@ -459,10 +460,10 @@ describe Rdkafka::Producer do
    # wait for and check the message in the main process.
    reader, writer = IO.pipe
 
-   fork do
+   pid = fork do
      reader.close
 
-     # Avoids sharing the socket between processes.
+     # Avoid sharing the client between processes.
      producer = rdkafka_producer_config.producer
 
      handle = producer.produce(
@@ -481,8 +482,10 @@ describe Rdkafka::Producer do
 
      writer.write(report_json)
      writer.close
+     producer.flush
      producer.close
    end
+   Process.wait(pid)
 
    writer.close
    report_hash = JSON.parse(reader.read)
data/spec/spec_helper.rb CHANGED
@@ -73,7 +73,7 @@ def new_native_topic(topic_name="topic_name", native_client: )
  end
 
  def wait_for_message(topic:, delivery_report:, timeout_in_seconds: 30, consumer: nil)
-   new_consumer = !!consumer
+   new_consumer = consumer.nil?
    consumer ||= rdkafka_consumer_config.consumer
    consumer.subscribe(topic)
    timeout = Time.now.to_i + timeout_in_seconds
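The `wait_for_message` change fixes an inverted flag: `new_consumer` should mean "this helper created, and therefore owns, the consumer", which is true precisely when no consumer was passed in; `!!consumer` computed the opposite, so the ownership test was backwards. A sketch of the intended pattern (hypothetical helper built on the gem's spec helpers):

```ruby
def with_owned_consumer(consumer: nil)
  owns_consumer = consumer.nil?  # fixed logic: nil means we create our own
  consumer ||= rdkafka_consumer_config.consumer
  yield consumer
ensure
  consumer.close if owns_consumer # close only what this helper created
end
```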
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: rdkafka
  version: !ruby/object:Gem::Version
-   version: 0.13.0.beta.2
+   version: 0.13.0.beta.5
  platform: ruby
  authors:
  - Thijs Cadier
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-10-12 00:00:00.000000000 Z
+ date: 2023-04-24 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: ffi
@@ -139,8 +139,7 @@ dependencies:
  description: Modern Kafka client library for Ruby based on librdkafka
  email:
  - thijs@appsignal.com
- executables:
- - console
+ executables: []
  extensions:
  - ext/Rakefile
  extra_rdoc_files: []
@@ -155,7 +154,6 @@ files:
  - LICENSE
  - README.md
  - Rakefile
- - bin/console
  - docker-compose.yml
  - ext/README.md
  - ext/Rakefile
@@ -222,7 +220,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
    - !ruby/object:Gem::Version
      version: 1.3.1
  requirements: []
- rubygems_version: 3.3.13
+ rubygems_version: 3.4.1
  signing_key:
  specification_version: 4
  summary: The rdkafka gem is a modern Kafka client library for Ruby based on librdkafka.
data/bin/console DELETED
@@ -1,11 +0,0 @@
- #!/usr/bin/env ruby
-
- # frozen_string_literal: true
-
- ENV["IRBRC"] = File.join(File.dirname(__FILE__), ".irbrc")
-
- require "bundler/setup"
- require "rdkafka"
-
- require "irb"
- IRB.start(__FILE__)