rdkafka 0.7.0 → 0.8.0.beta.1

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 38ccc62319eff24c7def2b8cebced2a8551b28a1651e77348c868ce189a7d418
-   data.tar.gz: e23e2cad27a95e2bf51c2428e4d2246deae903f8e0d7800b086b35c5b262fc65
+   metadata.gz: 8fd9f55f469403bfcec14203f75fa0bb61ebef1a1aaad9f0a4f58b11f02b16cb
+   data.tar.gz: e0e07afcaca58f06ae35d5eaad8074ab725d90dc5be8393cb5faf37148f52f91
  SHA512:
-   metadata.gz: 5faa270613e25d64f2b5d955a637b97b8315e7e365843b0e5a10d34c6340a795ebcf8ed2cb9dba2ae3a7d6839e464271d979760d351a6a8777224c31cdb262b6
-   data.tar.gz: d209464e6fe089945638063caa4ec6ab80d7c0d171678c43e320314cdc2ff0f980806b92941d0529b8ec93d598e5a637673fe3d97df4755503d2b484cc8f670b
+   metadata.gz: 84509541f5c16afc20cc89720a0d784ae7b89d49b1b72eab160b31c600e3c9fc167bee43bc123782cca002cad06aa370fc902838514411eab9dd4b40b32a7e4f
+   data.tar.gz: 8fe627c353b195df6424c3979816eabca7e6ce1dbec2f7f1edc92d7d3f111346fb6935620cf0c2ec5815c2bcd39c5b16e2618cd80b84b346fa024fab35842cf3
data/.travis.yml CHANGED
@@ -14,9 +14,21 @@ rvm:
  - 2.4
  - 2.5
  - 2.6
+ - jruby-9.2.9.0
 
  before_install:
  - gem update --system
+ - |
+   r_eng="$(ruby -e 'STDOUT.write RUBY_ENGINE')";
+   if [ "$r_eng" == "jruby" ]; then
+     sudo apt-get update && \
+     sudo apt-get install -y git && \
+     sudo apt-get install -y libpthread-stubs0-dev && \
+     sudo apt-get install -y build-essential && \
+     sudo apt-get install -y zlib1g-dev && \
+     sudo apt-get install -y libssl-dev && \
+     sudo apt-get install -y libsasl2-dev
+   fi
 
  before_script:
  - docker-compose up -d
@@ -26,7 +38,7 @@ before_script:
  - ./cc-test-reporter before-build
 
  script:
- - bundle exec rspec
+ - bundle exec rspec --format documentation
 
  after_script:
  - docker-compose stop
data/CHANGELOG.md CHANGED
@@ -1,6 +1,14 @@
- # 0.6.0
+ # 0.8.0
+ * Upgrade librdkafka to 1.4.0
+ * Integrate librdkafka metadata API and add partition_key (by Adithya-copart)
+ * Ruby 2.7 compatibility fix (by Geoff Thé)
+ * Add error to delivery report (by Alex Stanovsky)
+ * Don't override CPPFLAGS and LDFLAGS if already set on Mac (by Hiroshi Hatake)
+ * Allow use of Rake 13.x and up (by Tomasz Pajor)
+
+ # 0.7.0
  * Bump librdkafka to 1.2.0 (by rob-as)
- * Allow customizing the wait time for delivery report availability (by mensfeld )
+ * Allow customizing the wait time for delivery report availability (by mensfeld)
 
  # 0.6.0
  * Bump librdkafka to 1.1.0 (by Chris Gaffney)
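The two producer-facing 0.8.0 additions (partition_key and the delivery-report error) can be sketched together. This example is not part of the diff; the broker address and topic name are placeholders:

```ruby
require "rdkafka"

config = Rdkafka::Config.new("bootstrap.servers": "localhost:9092")
producer = config.producer

# partition_key routes the message through librdkafka's consistent-random
# partitioner instead of naming an explicit partition.
handle = producer.produce(
  topic:         "produce_test_topic",
  payload:       "some payload",
  partition_key: "user-42"
)

# The delivery report now also exposes the delivery error from librdkafka.
report = handle.wait
puts "partition=#{report.partition} offset=#{report.offset} error=#{report.error.inspect}"
```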
data/docker-compose.yml CHANGED
@@ -1,18 +1,22 @@
+
  version: '2'
  services:
    zookeeper:
-     image: wurstmeister/zookeeper
-     ports:
-       - "2181:2181"
+     image: confluentinc/cp-zookeeper:latest
+     environment:
+       ZOOKEEPER_CLIENT_PORT: 2181
+       ZOOKEEPER_TICK_TIME: 2000
+
    kafka:
-     image: wurstmeister/kafka:1.0.1
+     image: confluentinc/cp-kafka:latest
+     depends_on:
+       - zookeeper
      ports:
-       - "9092:9092"
+       - 9092:9092
      environment:
-       KAFKA_ADVERTISED_HOST_NAME: localhost
-       KAFKA_ADVERTISED_PORT: 9092
+       KAFKA_BROKER_ID: 1
        KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
-       KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-       KAFKA_CREATE_TOPICS: "consume_test_topic:3:1,empty_test_topic:3:1,load_test_topic:3:1,produce_test_topic:3:1,rake_test_topic:3:1,watermarks_test_topic:3:1"
-     volumes:
-       - /var/run/docker.sock:/var/run/docker.sock
+       KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
+       KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+       KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
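With the two listeners declared above, the bootstrap address a client should use depends on where it runs. A brief sketch (service names as defined in this compose file):

```ruby
require "rdkafka"

# From the host machine: use the PLAINTEXT_HOST listener published on port 9092.
from_host = Rdkafka::Config.new("bootstrap.servers": "localhost:9092")

# From another container on the same compose network: use the internal listener.
from_container = Rdkafka::Config.new("bootstrap.servers": "kafka:29092")
```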
data/ext/README.md CHANGED
@@ -5,19 +5,7 @@ this gem is installed.
 
  To update the `librdkafka` version follow the following steps:
 
- * Download the new version `tar.gz` from https://github.com/edenhill/librdkafka/
- * Generate a `sha256` with (if using MacOS) `shasum -a 256 <file>`
- * Change the `sha256` in `lib/rdkafka/version.rb`
+ * Go to https://github.com/edenhill/librdkafka/releases to get the new
+   version number and asset checksum for `tar.gz`.
  * Change the version in `lib/rdkafka/version.rb`
-
- ## Disclaimer
-
- Currently the `librdkafka` project does not provide
- checksums of releases. The checksum provided here is generated on a best
- effort basis. If the CDN would be compromised at the time of download the
- checksum could be incorrect.
-
- Do your own verification if you rely on this behaviour.
-
- Once https://github.com/appsignal/rdkafka-ruby/issues/44 is implemented
- we will change this process.
+ * Change the `sha256` in `lib/rdkafka/version.rb`
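For reference, a sketch of the constants these steps refer to in `lib/rdkafka/version.rb` (values are illustrative, and the exact name of the checksum constant is an assumption):

```ruby
module Rdkafka
  VERSION = "0.8.0.beta.1"
  LIBRDKAFKA_VERSION = "1.4.0"
  # sha256 of the librdkafka release tar.gz fetched at install time
  LIBRDKAFKA_SOURCE_SHA256 = "..."
end
```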
data/ext/Rakefile CHANGED
@@ -23,9 +23,10 @@ task :default => :clean do
  recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)
 
  # Use default homebrew openssl if we're on mac and the directory exists
- if recipe.host.include?("darwin") && Dir.exists?("/usr/local/opt/openssl")
-   ENV["CPPFLAGS"] = "-I/usr/local/opt/openssl/include"
-   ENV["LDFLAGS"] = "-L/usr/local/opt/openssl/lib"
+ # and each of the flags is not already set
+ if recipe.host&.include?("darwin") && Dir.exist?("/usr/local/opt/openssl")
+   ENV["CPPFLAGS"] = "-I/usr/local/opt/openssl/include" unless ENV["CPPFLAGS"]
+   ENV["LDFLAGS"] = "-L/usr/local/opt/openssl/lib" unless ENV["LDFLAGS"]
  end
 
  recipe.files << {
@@ -55,3 +56,22 @@ task :clean do
  FileUtils.rm_rf File.join(File.dirname(__FILE__), "ports")
  FileUtils.rm_rf File.join(File.dirname(__FILE__), "tmp")
 end
+
+ namespace :build do
+   desc "Build librdkafka at the given git sha or tag"
+   task :git, [:ref] do |task, args|
+     ref = args[:ref]
+     version = "git-#{ref}"
+
+     recipe = MiniPortile.new("librdkafka", version)
+     recipe.files << "https://github.com/edenhill/librdkafka/archive/#{ref}.tar.gz"
+     recipe.configure_options = ["--host=#{recipe.host}"]
+     recipe.cook
+
+     ext = recipe.host.include?("darwin") ? "dylib" : "so"
+     lib = File.expand_path("ports/#{recipe.host}/librdkafka/#{version}/lib/librdkafka.#{ext}", __dir__)
+
+     # Copy will copy the content, following any symlinks
+     FileUtils.cp(lib, __dir__)
+   end
+ end
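A hedged sketch of invoking the new task (the tag is only an example; on the command line this would be `rake "build:git[v1.4.0]"` run against this Rakefile):

```ruby
require "rake"

# Load the Rakefile shown above, then invoke the task programmatically.
load "ext/Rakefile"
Rake::Task["build:git"].invoke("v1.4.0")
# Afterwards librdkafka.so (librdkafka.dylib on Mac) sits next to the Rakefile.
```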
data/lib/rdkafka.rb CHANGED
@@ -8,6 +8,7 @@ require "rdkafka/consumer/message"
  require "rdkafka/consumer/partition"
  require "rdkafka/consumer/topic_partition_list"
  require "rdkafka/error"
+ require "rdkafka/metadata"
  require "rdkafka/producer"
  require "rdkafka/producer/delivery_handle"
  require "rdkafka/producer/delivery_report"
data/lib/rdkafka/bindings.rb CHANGED
@@ -8,7 +8,7 @@ module Rdkafka
    extend FFI::Library
 
    def self.lib_extension
-     if Gem::Platform.local.os.include?("darwin")
+     if RbConfig::CONFIG['host_os'] =~ /darwin/
        'dylib'
      else
        'so'
@@ -22,6 +22,11 @@ module Rdkafka
    RD_KAFKA_RESP_ERR__NOENT = -156
    RD_KAFKA_RESP_ERR_NO_ERROR = 0
 
+   RD_KAFKA_OFFSET_END = -1
+   RD_KAFKA_OFFSET_BEGINNING = -2
+   RD_KAFKA_OFFSET_STORED = -1000
+   RD_KAFKA_OFFSET_INVALID = -1001
+
    class SizePtr < FFI::Struct
      layout :value, :size_t
    end
@@ -35,6 +40,8 @@ module Rdkafka
 
    attach_function :rd_kafka_memberid, [:pointer], :string
    attach_function :rd_kafka_clusterid, [:pointer], :string
+   attach_function :rd_kafka_metadata, [:pointer, :int, :pointer, :pointer, :int], :int
+   attach_function :rd_kafka_metadata_destroy, [:pointer], :void
 
    # Message struct
 
@@ -227,6 +234,17 @@ module Rdkafka
    callback :delivery_cb, [:pointer, :pointer, :pointer], :void
    attach_function :rd_kafka_conf_set_dr_msg_cb, [:pointer, :delivery_cb], :void
 
+   # Partitioner
+   attach_function :rd_kafka_msg_partitioner_consistent_random, [:pointer, :pointer, :size_t, :int32, :pointer, :pointer], :int32
+
+   def self.partitioner(str, partition_count)
+     # Return RD_KAFKA_PARTITION_UA (unassigned partition) when partition count is nil/zero.
+     return -1 unless partition_count&.nonzero?
+
+     str_ptr = FFI::MemoryPointer.from_string(str)
+     rd_kafka_msg_partitioner_consistent_random(nil, str_ptr, str.size, partition_count, nil, nil)
+   end
+
    DeliveryCallback = FFI::Function.new(
      :void, [:pointer, :pointer, :pointer]
    ) do |client_ptr, message_ptr, opaque_ptr|
@@ -240,7 +258,7 @@ module Rdkafka
      delivery_handle[:offset] = message[:offset]
      # Call delivery callback on opaque
      if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
-       opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset]))
+       opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset], message[:err]))
      end
    end
  end
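A small sketch of what the new partitioner helper returns (assuming the bindings above are loaded):

```ruby
require "rdkafka"

# Consistent-random partitioning: the same key always maps to the same
# partition for a given partition count.
p Rdkafka::Bindings.partitioner("user-42", 3)   # => an Integer between 0 and 2
p Rdkafka::Bindings.partitioner("user-42", 3)   # => the same Integer again

# A nil or zero partition count yields -1, librdkafka's RD_KAFKA_PARTITION_UA
# (unassigned), leaving the partition choice to librdkafka.
p Rdkafka::Bindings.partitioner("user-42", nil) # => -1
```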
data/lib/rdkafka/config.rb CHANGED
@@ -212,10 +212,8 @@ module Rdkafka
        Rdkafka::Bindings.rd_kafka_queue_get_main(handle)
      )
 
-     FFI::AutoPointer.new(
-       handle,
-       Rdkafka::Bindings.method(:rd_kafka_destroy)
-     )
+     # Return handle which should be closed using rd_kafka_destroy after usage.
+     handle
    end
  end
 
data/lib/rdkafka/consumer.rb CHANGED
@@ -5,6 +5,9 @@ module Rdkafka
    #
    # To create a consumer set up a {Config} and call {Config#consumer consumer} on that. It is
    # mandatory to set `:"group.id"` in the configuration.
+   #
+   # Consumer implements `Enumerable`, so you can use `each` to consume messages, or for example
+   # `each_slice` to consume batches of messages.
    class Consumer
      include Enumerable
 
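In code, the batch consumption the new comment mentions looks roughly like this (`each_slice` comes for free from `Enumerable`; the consumer setup is assumed):

```ruby
consumer = config.consumer
consumer.subscribe("consume_test_topic")

# each yields messages one at a time; each_slice groups them into batches.
consumer.each_slice(100) do |messages|
  process_batch(messages) # hypothetical batch handler
end
```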
@@ -17,8 +20,12 @@
    # Close this consumer
    # @return [nil]
    def close
+     return unless @native_kafka
+
      @closing = true
      Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
+     Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
+     @native_kafka = nil
    end
 
    # Subscribe to one or more topics letting Kafka handle partition assignments.
@@ -30,20 +37,19 @@
    # @return [nil]
    def subscribe(*topics)
      # Create topic partition list with topics and no partition set
-     tpl = TopicPartitionList.new_native_tpl(topics.length)
+     tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(topics.length)
 
      topics.each do |topic|
-       Rdkafka::Bindings.rd_kafka_topic_partition_list_add(
-         tpl,
-         topic,
-         -1
-       )
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_add(tpl, topic, -1)
      end
+
      # Subscribe to topic partition list and check this was successful
      response = Rdkafka::Bindings.rd_kafka_subscribe(@native_kafka, tpl)
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error subscribing to '#{topics.join(', ')}'")
      end
+   ensure
+     Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
    end
 
    # Unsubscribe from all subscribed topics.
@@ -69,12 +75,18 @@
      unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end
+
      tpl = list.to_native_tpl
-     response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
 
-     if response != 0
-       list = TopicPartitionList.from_native_tpl(tpl)
-       raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
+     begin
+       response = Rdkafka::Bindings.rd_kafka_pause_partitions(@native_kafka, tpl)
+
+       if response != 0
+         list = TopicPartitionList.from_native_tpl(tpl)
+         raise Rdkafka::RdkafkaTopicPartitionListError.new(response, list, "Error pausing '#{list.to_h}'")
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end
 
@@ -89,10 +101,16 @@
      unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end
+
      tpl = list.to_native_tpl
-     response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_resume_partitions(@native_kafka, tpl)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response, "Error resume '#{list.to_h}'")
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end
 
@@ -102,17 +120,19 @@
    #
    # @return [TopicPartitionList]
    def subscription
-     tpl = FFI::MemoryPointer.new(:pointer)
-     response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, tpl)
+     ptr = FFI::MemoryPointer.new(:pointer)
+     response = Rdkafka::Bindings.rd_kafka_subscription(@native_kafka, ptr)
+
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
-     tpl = tpl.read(:pointer).tap { |it| it.autorelease = false }
+
+     native = ptr.read_pointer
 
      begin
-       Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+       Rdkafka::Consumer::TopicPartitionList.from_native_tpl(native)
      ensure
-       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(native)
      end
    end
 
@@ -125,10 +145,16 @@
      unless list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be a TopicPartitionList")
      end
+
      tpl = list.to_native_tpl
-     response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_assign(@native_kafka, tpl)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response, "Error assigning '#{list.to_h}'")
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
    end
 
@@ -138,19 +164,23 @@
    #
    # @return [TopicPartitionList]
    def assignment
-     tpl = FFI::MemoryPointer.new(:pointer)
-     response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, tpl)
+     ptr = FFI::MemoryPointer.new(:pointer)
+     response = Rdkafka::Bindings.rd_kafka_assignment(@native_kafka, ptr)
      if response != 0
        raise Rdkafka::RdkafkaError.new(response)
      end
 
-     tpl = tpl.read(:pointer).tap { |it| it.autorelease = false }
+     tpl = ptr.read_pointer
 
-     begin
-       Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
-     ensure
-       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy tpl
+     if !tpl.null?
+       begin
+         Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
+       ensure
+         Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy tpl
+       end
      end
+   ensure
+     ptr.free
    end
 
    # Return the current committed offset per partition for this consumer group.
@@ -168,12 +198,18 @@
      elsif !list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be nil or a TopicPartitionList")
      end
+
      tpl = list.to_native_tpl
-     response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response)
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_committed(@native_kafka, tpl, timeout_ms)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response)
+       end
+       TopicPartitionList.from_native_tpl(tpl)
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl)
      end
-     TopicPartitionList.from_native_tpl(tpl)
    end
 
    # Query broker for low (oldest/beginning) and high (newest/end) offsets for a partition.
@@ -195,13 +231,16 @@
        partition,
        low,
        high,
-       timeout_ms
+       timeout_ms,
      )
      if response != 0
        raise Rdkafka::RdkafkaError.new(response, "Error querying watermark offsets for partition #{partition} of #{topic}")
      end
 
-     return low.read_int64, high.read_int64
+     return low.read_array_of_int64(1).first, high.read_array_of_int64(1).first
+   ensure
+     low.free
+     high.free
    end
 
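Reading the results via `read_array_of_int64(1).first` presumably keeps this working on JRuby's FFI as well as MRI's, given that this release adds JRuby to CI; caller-facing behavior is unchanged. A usage sketch with a placeholder topic:

```ruby
low, high = consumer.query_watermark_offsets("consume_test_topic", 0)
puts "partition 0 spans offsets #{low}..#{high}"
```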
    # Calculate the consumer lag per partition for the provided topic partition list.
@@ -217,6 +256,7 @@
    # @return [Hash<String, Hash<Integer, Integer>>] A hash containing all topics with the lag per partition
    def lag(topic_partition_list, watermark_timeout_ms=100)
      out = {}
+
      topic_partition_list.to_h.each do |topic, partitions|
        # Query high watermarks for this topic's partitions
        # and compare to the offset in the list.
@@ -313,7 +353,14 @@
      end
    end
 
-   # Commit the current offsets of this consumer
+   # Manually commit the current offsets of this consumer.
+   #
+   # To use this, set `enable.auto.commit` to `false` to disable automatic triggering
+   # of commits.
+   #
+   # If `enable.auto.offset.store` is set to `true` the offset of the last consumed
+   # message for every partition is used. If set to `false` you can use {store_offset} to
+   # indicate when a message has been fully processed.
    #
    # @param list [TopicPartitionList,nil] The topic with partitions to commit
    # @param async [Boolean] Whether to commit async or wait for the commit to finish
@@ -325,14 +372,16 @@
      if !list.nil? && !list.is_a?(TopicPartitionList)
        raise TypeError.new("list has to be nil or a TopicPartitionList")
      end
-     tpl = if list
-       list.to_native_tpl
-     else
-       nil
-     end
-     response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
-     if response != 0
-       raise Rdkafka::RdkafkaError.new(response)
+
+     tpl = list ? list.to_native_tpl : nil
+
+     begin
+       response = Rdkafka::Bindings.rd_kafka_commit(@native_kafka, tpl, async)
+       if response != 0
+         raise Rdkafka::RdkafkaError.new(response)
+       end
+     ensure
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(tpl) if tpl
      end
    end
 
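Put together, the manual-commit flow described by the new doc comment looks roughly like this (broker address, group id, and topic are placeholders):

```ruby
config = Rdkafka::Config.new(
  "bootstrap.servers":  "localhost:9092",
  "group.id":           "example-group",
  "enable.auto.commit": false
)
consumer = config.consumer
consumer.subscribe("consume_test_topic")

consumer.each do |message|
  handle_message(message) # hypothetical processing step
  consumer.commit         # commit once the message is fully processed
end
```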
@@ -344,6 +393,8 @@
    #
    # @return [Message, nil] A message or nil if there was no new message within the timeout
    def poll(timeout_ms)
+     return unless @native_kafka
+
      message_ptr = Rdkafka::Bindings.rd_kafka_consumer_poll(@native_kafka, timeout_ms)
      if message_ptr.null?
        nil
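Together with the `close` change above, this guard makes shutdown safe; a sketch of the resulting behavior:

```ruby
consumer.close     # now also destroys the native librdkafka handle
consumer.poll(100) # => nil instead of touching a freed handle
```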