rdkafka 0.4.1 → 0.4.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: a5e34bbb283ceedce620aa0533d2f45887a260e2c57fadd7cfac48a95a9fc62c
-  data.tar.gz: 5ea6f9e7d2b50b6e1a34f28e012034b84853555d312e64e0bf5ac567e1b28f37
+  metadata.gz: 277ff64f82622eddd6c31265d1b4b91b6b60bed802590bf9b4d015ea7a32ce0a
+  data.tar.gz: 027fc22c22349729bb04288f9010eda156a9d04cf9a616bb237b1fc56eb9aed5
 SHA512:
-  metadata.gz: 380b0b88c89b6926ed88fe667751779b61f1d1d6a8a495fb854c887f8049ed4197025d5cb2d5394d477096dacb4a805528bf619a55b412815859b71f1d903464
-  data.tar.gz: a316eaf6e438164c2b3ba8f095d769d48410c2271cf3653017e328531dd5c6c009bd94f3e193395ce1f84fdb2fa7b73ff0c131d4ca55a353e85f6f07aaa1c9e8
+  metadata.gz: 0cb6c97fa7ac62ead42bcd8da1d22f56b02f56e54a7671e7e2722d029839d24c7390c0fe10890530afb4e9a002e9f5fcc7e213777cf956e762d443edf466715e
+  data.tar.gz: 54865cb4b0772cffa1c91fd4e971a3e23f9f9b24959c83402460bca8b2d268e15bef8df8ea011848a57d75c4088198d02f77713ff9123647f62b7b5ece38a787
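
These are the standard RubyGems package digests, taken over the two members of the `.gem` archive. As a quick local sanity check, the SHA256 values can be recomputed with Ruby's stdlib; a minimal sketch, assuming the raw `.gem` file has been untarred (a `.gem` is a tar archive containing `metadata.gz` and `data.tar.gz`) into the current directory:

    require "digest"

    # Recompute the SHA256 digests published in checksums.yaml.
    # The member names are the standard gem package files; the
    # working-directory layout is an assumption for this sketch.
    %w[metadata.gz data.tar.gz].each do |member|
      puts "#{member}: #{Digest::SHA256.file(member).hexdigest}"
    end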
data/CHANGELOG.md CHANGED
@@ -1,3 +1,12 @@
+# 0.4.2
+* Delivery callback for producer
+* Document list param of commit method
+* Use default Homebrew openssl location if present
+* Consumer lag handles empty topics
+* End iteration in consumer when it is closed
+* Add support for storing message offsets
+* Add missing runtime dependency to rake
+
 # 0.4.1
 * Bump librdkafka to 0.11.6
 
data/README.md CHANGED
@@ -25,7 +25,7 @@ have any problems installing the gem please open an issue.
 
 ## Usage
 
-See the [documentation](http://www.rubydoc.info/github/thijsc/rdkafka-ruby/master) for full details on how to use this gem. Two quick examples:
+See the [documentation](https://www.rubydoc.info/github/appsignal/rdkafka-ruby) for full details on how to use this gem. Two quick examples:
 
 ### Consuming messages
 
data/docker-compose.yml CHANGED
@@ -13,6 +13,6 @@ services:
       KAFKA_ADVERTISED_PORT: 9092
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-      KAFKA_CREATE_TOPICS: "consume_test_topic:3:1,load_test_topic:3:1,produce_test_topic:3:1,rake_test_topic:3:1"
+      KAFKA_CREATE_TOPICS: "consume_test_topic:3:1,load_test_topic:3:1,produce_test_topic:3:1,rake_test_topic:3:1,empty_test_topic:3:1"
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock
data/ext/Rakefile CHANGED
@@ -21,6 +21,13 @@ task :default => :clean do
 
   # Download and compile librdkafka
   recipe = MiniPortile.new("librdkafka", Rdkafka::LIBRDKAFKA_VERSION)
+
+  # Use default homebrew openssl if we're on mac and the directory exists
+  if recipe.host.include?("darwin") && Dir.exists?("/usr/local/opt/openssl")
+    ENV["CPPFLAGS"] = "-I/usr/local/opt/openssl/include"
+    ENV["LDFLAGS"] = "-L/usr/local/opt/openssl/lib"
+  end
+
   recipe.files << {
     :url => "https://codeload.github.com/edenhill/librdkafka/tar.gz/v#{Rdkafka::LIBRDKAFKA_VERSION}",
     :sha256 => Rdkafka::LIBRDKAFKA_SOURCE_SHA256
data/lib/rdkafka/bindings.rb CHANGED
@@ -39,6 +39,7 @@ module Rdkafka
    attach_function :rd_kafka_message_destroy, [:pointer], :void
    attach_function :rd_kafka_message_timestamp, [:pointer, :pointer], :int64
    attach_function :rd_kafka_topic_new, [:pointer, :string, :pointer], :pointer
+    attach_function :rd_kafka_topic_destroy, [:pointer], :pointer
    attach_function :rd_kafka_topic_name, [:pointer], :string
 
    # TopicPartition and TopicPartitionList structs
@@ -83,6 +84,7 @@ module Rdkafka
    attach_function :rd_kafka_conf_set, [:pointer, :string, :string, :pointer, :int], :kafka_config_response
    callback :log_cb, [:pointer, :int, :string, :string], :void
    attach_function :rd_kafka_conf_set_log_cb, [:pointer, :log_cb], :void
+    attach_function :rd_kafka_conf_set_opaque, [:pointer, :pointer], :void
    callback :stats_cb, [:pointer, :string, :int, :pointer], :int
    attach_function :rd_kafka_conf_set_stats_cb, [:pointer, :stats_cb], :void
 
@@ -145,6 +147,7 @@ module Rdkafka
    attach_function :rd_kafka_poll_set_consumer, [:pointer], :void
    attach_function :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: true
    attach_function :rd_kafka_consumer_close, [:pointer], :void, blocking: true
+    attach_function :rd_kafka_offset_store, [:pointer, :int32, :int64], :int
 
    # Stats
 
@@ -174,10 +177,15 @@ module Rdkafka
      message = Message.new(message_ptr)
      delivery_handle_ptr_address = message[:_private].address
      if delivery_handle = Rdkafka::Producer::DeliveryHandle.remove(delivery_handle_ptr_address)
+        # Update delivery handle
        delivery_handle[:pending] = false
        delivery_handle[:response] = message[:err]
        delivery_handle[:partition] = message[:partition]
        delivery_handle[:offset] = message[:offset]
+        # Call delivery callback on opaque
+        if opaque = Rdkafka::Config.opaques[opaque_ptr.to_i]
+          opaque.call_delivery_callback(Rdkafka::Producer::DeliveryReport.new(message[:partition], message[:offset]))
+        end
      end
    end
  end
data/lib/rdkafka/config.rb CHANGED
@@ -7,7 +7,10 @@ module Rdkafka
  class Config
    # @private
    @@logger = Logger.new(STDOUT)
+    # @private
    @@statistics_callback = nil
+    # @private
+    @@opaques = {}
 
    # Returns the current logger, by default this is a logger to stdout.
    #
@@ -45,6 +48,11 @@ module Rdkafka
      @@statistics_callback
    end
 
+    # @private
+    def self.opaques
+      @@opaques
+    end
+
    # Default config that can be overwritten.
    DEFAULT_CONFIG = {
      # Request api version so advanced features work
@@ -106,12 +114,16 @@ module Rdkafka
    #
    # @return [Producer] The created producer
    def producer
+      # Create opaque
+      opaque = Opaque.new
      # Create Kafka config
-      config = native_config
+      config = native_config(opaque)
      # Set callback to receive delivery reports on config
      Rdkafka::Bindings.rd_kafka_conf_set_dr_msg_cb(config, Rdkafka::Bindings::DeliveryCallback)
      # Return producer with Kafka client
-      Rdkafka::Producer.new(native_kafka(config, :rd_kafka_producer))
+      Rdkafka::Producer.new(native_kafka(config, :rd_kafka_producer)).tap do |producer|
+        opaque.producer = producer
+      end
    end
 
    # Error that is returned by the underlying rdkafka error if an invalid configuration option is present.
@@ -127,8 +139,9 @@ module Rdkafka
 
    # This method is only intended to be used to create a client,
    # using it in another way will leak memory.
-    def native_config
+    def native_config(opaque=nil)
      Rdkafka::Bindings.rd_kafka_conf_new.tap do |config|
+        # Create config
        @config_hash.merge(REQUIRED_CONFIG).each do |key, value|
          error_buffer = FFI::MemoryPointer.from_string(" " * 256)
          result = Rdkafka::Bindings.rd_kafka_conf_set(
@@ -142,10 +155,21 @@ module Rdkafka
          raise ConfigError.new(error_buffer.read_string)
        end
      end
-      # Set opaque pointer back to this config
-      #Rdkafka::Bindings.rd_kafka_conf_set_opaque(config, self)
+
+      # Set opaque pointer that's used as a proxy for callbacks
+      if opaque
+        pointer = ::FFI::Pointer.new(:pointer, opaque.object_id)
+        Rdkafka::Bindings.rd_kafka_conf_set_opaque(config, pointer)
+
+        # Store opaque with the pointer as key. We use this approach instead
+        # of trying to convert the pointer to a Ruby object because there is
+        # no risk of a segfault this way.
+        Rdkafka::Config.opaques[pointer.to_i] = opaque
+      end
+
      # Set log callback
      Rdkafka::Bindings.rd_kafka_conf_set_log_cb(config, Rdkafka::Bindings::LogCallback)
+
      # Set stats callback
      Rdkafka::Bindings.rd_kafka_conf_set_stats_cb(config, Rdkafka::Bindings::StatsCallback)
    end
@@ -176,4 +200,13 @@ module Rdkafka
      )
    end
  end
+
+  # @private
+  class Opaque
+    attr_accessor :producer
+
+    def call_delivery_callback(delivery_handle)
+      producer.call_delivery_callback(delivery_handle) if producer
+    end
+  end
 end
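
The net effect of the config.rb changes above: librdkafka's opaque pointer is used only as a lookup key, never dereferenced back into a Ruby object. A minimal sketch of that registry pattern in isolation (the `Registry` module and its method names are illustrative, not part of the gem):

    require "ffi"

    # Sketch of the opaque-registry pattern used above. Instead of casting
    # the C opaque pointer back into a Ruby object (which can segfault if
    # the object has moved or been collected), the pointer's integer value
    # is used as a hash key into a Ruby-side registry.
    module Registry
      OPAQUES = {}

      def self.register(obj)
        # Tag the native handle with a pointer derived from the object_id...
        pointer = ::FFI::Pointer.new(:pointer, obj.object_id)
        # ...and keep the Ruby object reachable under that integer key.
        OPAQUES[pointer.to_i] = obj
        pointer
      end

      def self.lookup(pointer)
        # When the pointer comes back from C in a callback, resolve it
        # safely with a plain hash lookup.
        OPAQUES[pointer.to_i]
      end
    end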
data/lib/rdkafka/consumer.rb CHANGED
@@ -11,11 +11,13 @@ module Rdkafka
    # @private
    def initialize(native_kafka)
      @native_kafka = native_kafka
+      @closing = false
    end
 
    # Close this consumer
    # @return [nil]
    def close
+      @closing = true
      Rdkafka::Bindings.rd_kafka_consumer_close(@native_kafka)
    end
 
@@ -175,6 +177,7 @@ module Rdkafka
      # and compare to the offset in the list.
      topic_out = {}
      partitions.each do |p|
+        next if p.offset.nil?
        low, high = query_watermark_offsets(
          topic,
          p.partition,
@@ -187,8 +190,40 @@ module Rdkafka
      out
    end
 
+    # Store offset of a message to be used in the next commit of this consumer
+    #
+    # When using this `enable.auto.offset.store` should be set to `false` in the config.
+    #
+    # @param message [Rdkafka::Consumer::Message] The message whose offset will be stored
+    #
+    # @raise [RdkafkaError] When storing the offset fails
+    #
+    # @return [nil]
+    def store_offset(message)
+      # rd_kafka_offset_store is one of the few calls that does not support
+      # a string as the topic, so create a native topic for it.
+      native_topic = Rdkafka::Bindings.rd_kafka_topic_new(
+        @native_kafka,
+        message.topic,
+        nil
+      )
+      response = Rdkafka::Bindings.rd_kafka_offset_store(
+        native_topic,
+        message.partition,
+        message.offset
+      )
+      if response != 0
+        raise Rdkafka::RdkafkaError.new(response)
+      end
+    ensure
+      if native_topic && !native_topic.null?
+        Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic)
+      end
+    end
+
    # Commit the current offsets of this consumer
    #
+    # @param list [TopicPartitionList,nil] The topic with partitions to commit
    # @param async [Boolean] Whether to commit async or wait for the commit to finish
    #
    # @raise [RdkafkaError] When committing fails
@@ -239,7 +274,8 @@ module Rdkafka
      end
    end
 
-    # Poll for new messages and yield for each received one
+    # Poll for new messages and yield for each received one. Iteration
+    # will end when the consumer is closed.
    #
    # @raise [RdkafkaError] When polling fails
    #
@@ -252,7 +288,11 @@ module Rdkafka
        if message
          block.call(message)
        else
-          next
+          if @closing
+            break
+          else
+            next
+          end
        end
      end
    end
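
Putting the two consumer additions together, manual offset management now looks roughly like this; a sketch assuming a reachable broker, where `process` and `done?` stand in for hypothetical application logic:

    require "rdkafka"

    config = Rdkafka::Config.new(
      :"bootstrap.servers"        => "localhost:9092",
      :"group.id"                 => "example-group",
      # store_offset requires manual offset storage:
      :"enable.auto.offset.store" => false
    )
    consumer = config.consumer
    consumer.subscribe("example_topic")

    # The each loop now terminates on its own once the consumer is closed.
    consumer.each do |message|
      process(message)                # hypothetical application logic
      consumer.store_offset(message)  # picked up by the next commit
      consumer.close if done?         # hypothetical stop condition
    end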
data/lib/rdkafka/producer.rb CHANGED
@@ -1,6 +1,9 @@
 module Rdkafka
  # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
  class Producer
+    # @private
+    @delivery_callback = nil
+
    # @private
    def initialize(native_kafka)
      @closing = false
@@ -18,6 +21,24 @@ module Rdkafka
      @polling_thread.abort_on_exception = true
    end
 
+    # Set a callback that will be called every time a message is successfully produced.
+    # The callback is called with a {DeliveryReport}
+    #
+    # @param callback [Proc] The callback
+    #
+    # @return [nil]
+    def delivery_callback=(callback)
+      raise TypeError.new("Callback has to be a proc or lambda") unless callback.is_a? Proc
+      @delivery_callback = callback
+    end
+
+    # Returns the current delivery callback, by default this is nil.
+    #
+    # @return [Proc, nil]
+    def delivery_callback
+      @delivery_callback
+    end
+
    # Close this producer and wait for the internal poll queue to empty.
    def close
      # Indicate to polling thread that we're closing
@@ -101,5 +122,10 @@ module Rdkafka
 
      delivery_handle
    end
+
+    # @private
+    def call_delivery_callback(delivery_handle)
+      @delivery_callback.call(delivery_handle) if @delivery_callback
+    end
  end
 end
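
For reference, the new producer API is used roughly like this (a sketch; the broker address and topic name are placeholders):

    require "rdkafka"

    producer = Rdkafka::Config.new(
      :"bootstrap.servers" => "localhost:9092"
    ).producer

    # Invoked from the producer's polling thread with a DeliveryReport
    # carrying the partition and offset of the delivered message.
    producer.delivery_callback = lambda do |report|
      puts "Delivered to partition #{report.partition} at offset #{report.offset}"
    end

    handle = producer.produce(topic: "example_topic", payload: "hello")
    handle.wait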
data/lib/rdkafka/version.rb CHANGED
@@ -1,5 +1,5 @@
 module Rdkafka
-  VERSION = "0.4.1"
+  VERSION = "0.4.2"
  LIBRDKAFKA_VERSION = "0.11.6"
  LIBRDKAFKA_SOURCE_SHA256 = '9c0afb8b53779d968225edf1e79da48a162895ad557900f75e7978f65e642032'
 end
data/rdkafka.gemspec CHANGED
@@ -19,6 +19,7 @@ Gem::Specification.new do |gem|
 
  gem.add_dependency 'ffi', '~> 1.9'
  gem.add_dependency 'mini_portile2', '~> 2.1'
+  gem.add_dependency 'rake', '~> 12.3'
 
  gem.add_development_dependency 'pry', '~> 0.10'
  gem.add_development_dependency 'rspec', '~> 3.5'
data/spec/rdkafka/config_spec.rb CHANGED
@@ -27,6 +27,7 @@ describe Rdkafka::Config do
        puts stats
      end
    }.not_to raise_error
+    expect(Rdkafka::Config.statistics_callback).to be_a Proc
  end
 
  it "should not accept a callback that's not a proc" do
@@ -59,6 +60,21 @@ describe Rdkafka::Config do
    }.to raise_error(Rdkafka::Config::ConfigError, "No such configuration property: \"invalid.key\"")
  end
 
+  it "should raise an error when creating a consumer with a nil key in the config" do
+    config = Rdkafka::Config.new(nil => 'value')
+    expect {
+      config.consumer
+    }.to raise_error(Rdkafka::Config::ConfigError, "No such configuration property: \"\"")
+  end
+
+  it "should treat a nil value as blank" do
+    config = Rdkafka::Config.new('security.protocol' => nil)
+    expect {
+      config.consumer
+      config.producer
+    }.to raise_error(Rdkafka::Config::ConfigError, "Configuration property \"security.protocol\" cannot be set to empty value")
+  end
+
  it "should create a producer with valid config" do
    expect(rdkafka_config.producer).to be_a Rdkafka::Producer
  end
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -112,21 +112,22 @@ describe Rdkafka::Consumer do
    end
  end
 
-  describe "#commit and #committed" do
-    before do
-      # Make sure there's a stored offset
+  describe "#commit, #committed and #store_offset" do
+    # Make sure there's a stored offset
+    let!(:report) do
      report = producer.produce(
        topic: "consume_test_topic",
        payload: "payload 1",
        key: "key 1",
        partition: 0
      ).wait
-      # Wait for message commits the current state,
-      # commit is therefore tested here.
-      message = wait_for_message(
+    end
+
+    let(:message) do
+      wait_for_message(
        topic: "consume_test_topic",
        delivery_report: report,
-        config: config
+        consumer: consumer
      )
    end
 
@@ -148,70 +149,107 @@ describe Rdkafka::Consumer do
      }.to raise_error TypeError
    end
 
-    it "should commit a specific topic partition list" do
-      # Make sure there are some messages
-      3.times do |i|
-        producer.produce(
-          topic: "consume_test_topic",
-          payload: "payload 1",
-          key: "key 1",
-          partition: i
-        ).wait
+    context "with a committed consumer" do
+      before :all do
+        # Make sure there are some messages
+        producer = rdkafka_config.producer
+        handles = []
+        10.times do
+          (0..2).each do |i|
+            handles << producer.produce(
+              topic: "consume_test_topic",
+              payload: "payload 1",
+              key: "key 1",
+              partition: i
+            )
+          end
+        end
+        handles.each(&:wait)
      end
 
-      list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
-        list.add_topic_and_partitions_with_offsets("consume_test_topic", {0 => 1, 1 => 1, 2 => 1})
+      before do
+        consumer.subscribe("consume_test_topic")
+        wait_for_assignment(consumer)
+        list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+          list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => 1, 1 => 1, 2 => 1)
+        end
+        consumer.commit(list)
      end
-      consumer.commit(list)
 
-      partitions = consumer.committed(list).to_h["consume_test_topic"]
-      expect(partitions[0].offset).to eq 1
-      expect(partitions[1].offset).to eq 1
-      expect(partitions[2].offset).to eq 1
-    end
+      it "should commit a specific topic partition list" do
+        list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+          list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => 1, 1 => 2, 2 => 3)
+        end
+        consumer.commit(list)
 
-    it "should raise an error when committing fails" do
-      expect(Rdkafka::Bindings).to receive(:rd_kafka_commit).and_return(20)
+        partitions = consumer.committed(list).to_h["consume_test_topic"]
+        expect(partitions[0].offset).to eq 1
+        expect(partitions[1].offset).to eq 2
+        expect(partitions[2].offset).to eq 3
+      end
 
-      expect {
-        consumer.commit
-      }.to raise_error(Rdkafka::RdkafkaError)
-    end
+      it "should raise an error when committing fails" do
+        expect(Rdkafka::Bindings).to receive(:rd_kafka_commit).and_return(20)
 
-    it "should fetch the committed offsets for the current assignment" do
-      consumer.subscribe("consume_test_topic")
-      # Wait for the assignment to be made
-      10.times do
-        break if !consumer.assignment.empty?
-        sleep 1
+        expect {
+          consumer.commit
+        }.to raise_error(Rdkafka::RdkafkaError)
      end
 
-      partitions = consumer.committed.to_h["consume_test_topic"]
-      expect(partitions).not_to be_nil
-      expect(partitions[0].offset).to be > 0
-      expect(partitions[1].offset).to be nil
-      expect(partitions[2].offset).to be nil
-    end
+      it "should fetch the committed offsets for the current assignment" do
+        partitions = consumer.committed.to_h["consume_test_topic"]
+        expect(partitions).not_to be_nil
+        expect(partitions[0].offset).to eq 1
+      end
 
-    it "should fetch the committed offsets for a specified topic partition list" do
-      list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
-        list.add_topic("consume_test_topic", [0, 1, 2])
+      it "should fetch the committed offsets for a specified topic partition list" do
+        list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+          list.add_topic("consume_test_topic", [0, 1, 2])
+        end
+        partitions = consumer.committed(list).to_h["consume_test_topic"]
+        expect(partitions).not_to be_nil
+        expect(partitions[0].offset).to eq 1
+        expect(partitions[1].offset).to eq 1
+        expect(partitions[2].offset).to eq 1
      end
-      partitions = consumer.committed(list).to_h["consume_test_topic"]
-      expect(partitions).not_to be_nil
-      expect(partitions[0].offset).to be > 0
-      expect(partitions[1].offset).to be nil
-      expect(partitions[2].offset).to be nil
-    end
 
-    it "should raise an error when getting committed fails" do
-      expect(Rdkafka::Bindings).to receive(:rd_kafka_committed).and_return(20)
-      list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
-        list.add_topic("consume_test_topic", [0, 1, 2])
+      it "should raise an error when getting committed fails" do
+        expect(Rdkafka::Bindings).to receive(:rd_kafka_committed).and_return(20)
+        list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+          list.add_topic("consume_test_topic", [0, 1, 2])
+        end
+        expect {
+          consumer.committed(list)
+        }.to raise_error Rdkafka::RdkafkaError
+      end
+
+      describe "#store_offset" do
+        before do
+          config[:'enable.auto.offset.store'] = false
+          config[:'enable.auto.commit'] = false
+          consumer.subscribe("consume_test_topic")
+          wait_for_assignment(consumer)
+        end
+
+        it "should store the offset for a message" do
+          consumer.store_offset(message)
+          consumer.commit
+
+          list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+            list.add_topic("consume_test_topic", [0, 1, 2])
+          end
+          partitions = consumer.committed(list).to_h["consume_test_topic"]
+          expect(partitions).not_to be_nil
+          expect(partitions[message.partition].offset).to eq(message.offset + 1)
+        end
+
+        it "should raise an error with invalid input" do
+          allow(message).to receive(:partition).and_return(9999)
+          expect {
+            consumer.store_offset(message)
+          }.to raise_error Rdkafka::RdkafkaError
+        end
      end
-      expect {
-        consumer.committed(list)
-      }.to raise_error Rdkafka::RdkafkaError
    end
  end
 
@@ -306,6 +344,18 @@ describe Rdkafka::Consumer do
      }
      expect(lag).to eq(expected_lag)
    end
+
+    it "returns nil if there are no messages on the topic" do
+      list = consumer.committed(Rdkafka::Consumer::TopicPartitionList.new.tap do |l|
+        l.add_topic("consume_test_topic", (0..2))
+      end)
+
+      lag = consumer.lag(list)
+      expected_lag = {
+        "consume_test_topic" => {}
+      }
+      expect(lag).to eq(expected_lag)
+    end
  end
 
  describe "#poll" do
@@ -347,22 +397,23 @@ describe Rdkafka::Consumer do
 
  describe "#each" do
    it "should yield messages" do
+      handles = []
      10.times do
-        producer.produce(
+        handles << producer.produce(
          topic: "consume_test_topic",
          payload: "payload 1",
          key: "key 1",
          partition: 0
-        ).wait
+        )
      end
+      handles.each(&:wait)
 
      consumer.subscribe("consume_test_topic")
-      count = 0
-      # Check the first 10 messages
-      consumer.each do |message|
+      # Check the first 10 messages. Then close the consumer, which
+      # should break the each loop.
+      consumer.each_with_index do |message, i|
        expect(message).to be_a Rdkafka::Consumer::Message
-        count += 1
-        break if count == 10
+        consumer.close if i == 10
      end
    end
  end
data/spec/rdkafka/producer_spec.rb CHANGED
@@ -8,6 +8,47 @@ describe Rdkafka::Producer do
    expect(Rdkafka::Producer::DeliveryHandle::REGISTRY).to be_empty
  end
 
+  context "delivery callback" do
+    it "should set the callback" do
+      expect {
+        producer.delivery_callback = lambda do |delivery_handle|
+          puts stats
+        end
+      }.not_to raise_error
+      expect(producer.delivery_callback).to be_a Proc
+    end
+
+    it "should not accept a callback that's not a proc" do
+      expect {
+        producer.delivery_callback = 'a string'
+      }.to raise_error(TypeError)
+    end
+
+    it "should call the callback when a message is delivered" do
+      @callback_called = false
+
+      producer.delivery_callback = lambda do |report|
+        expect(report).not_to be_nil
+        expect(report.partition).to eq 1
+        expect(report.offset).to be >= 0
+        @callback_called = true
+      end
+
+      # Produce a message
+      handle = producer.produce(
+        topic: "produce_test_topic",
+        payload: "payload",
+        key: "key"
+      )
+
+      # Wait for it to be delivered
+      handle.wait(5)
+
+      # Callback should have been called
+      expect(@callback_called).to be true
+    end
+  end
+
  it "should require a topic" do
    expect {
      producer.produce(
data/spec/spec_helper.rb CHANGED
@@ -9,6 +9,8 @@ require "rdkafka"
 
 def rdkafka_config(config_overrides={})
  config = {
+    :"api.version.request" => false,
+    :"broker.version.fallback" => "1.0",
    :"bootstrap.servers" => "localhost:9092",
    :"group.id" => "ruby-test-#{Random.new.rand(0..1_000_000)}",
    :"auto.offset.reset" => "earliest",
@@ -36,9 +38,8 @@ def new_native_topic(topic_name="topic_name")
  )
 end
 
-def wait_for_message(topic:, delivery_report:, timeout_in_seconds: 30, config: nil)
-  config = rdkafka_config if config.nil?
-  consumer = config.consumer
+def wait_for_message(topic:, delivery_report:, timeout_in_seconds: 30, consumer: nil)
+  consumer = rdkafka_config.consumer if consumer.nil?
  consumer.subscribe(topic)
  timeout = Time.now.to_i + timeout_in_seconds
  loop do
@@ -52,7 +53,11 @@ def wait_for_message(topic:, delivery_report:, timeout_in_seconds: 30, config: nil)
      return message
    end
  end
-ensure
-  consumer.commit
-  consumer.close
+end
+
+def wait_for_assignment(consumer)
+  10.times do
+    break if !consumer.assignment.empty?
+    sleep 1
+  end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: rdkafka
 version: !ruby/object:Gem::Version
-  version: 0.4.1
+  version: 0.4.2
 platform: ruby
 authors:
 - Thijs Cadier
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-10-19 00:00:00.000000000 Z
+date: 2019-01-12 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
  name: ffi
@@ -38,6 +38,20 @@ dependencies:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '2.1'
+- !ruby/object:Gem::Dependency
+  name: rake
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '12.3'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '12.3'
 - !ruby/object:Gem::Dependency
  name: pry
  requirement: !ruby/object:Gem::Requirement