karafka-rdkafka 0.12.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- checksums.yaml.gz.sig +2 -0
- data/.gitignore +8 -0
- data/.rspec +1 -0
- data/.semaphore/semaphore.yml +23 -0
- data/.yardopts +2 -0
- data/CHANGELOG.md +104 -0
- data/Gemfile +3 -0
- data/Guardfile +19 -0
- data/LICENSE +21 -0
- data/README.md +114 -0
- data/Rakefile +96 -0
- data/bin/console +11 -0
- data/docker-compose.yml +24 -0
- data/ext/README.md +18 -0
- data/ext/Rakefile +62 -0
- data/lib/rdkafka/abstract_handle.rb +82 -0
- data/lib/rdkafka/admin/create_topic_handle.rb +27 -0
- data/lib/rdkafka/admin/create_topic_report.rb +22 -0
- data/lib/rdkafka/admin/delete_topic_handle.rb +27 -0
- data/lib/rdkafka/admin/delete_topic_report.rb +22 -0
- data/lib/rdkafka/admin.rb +155 -0
- data/lib/rdkafka/bindings.rb +312 -0
- data/lib/rdkafka/callbacks.rb +106 -0
- data/lib/rdkafka/config.rb +299 -0
- data/lib/rdkafka/consumer/headers.rb +63 -0
- data/lib/rdkafka/consumer/message.rb +84 -0
- data/lib/rdkafka/consumer/partition.rb +49 -0
- data/lib/rdkafka/consumer/topic_partition_list.rb +164 -0
- data/lib/rdkafka/consumer.rb +565 -0
- data/lib/rdkafka/error.rb +86 -0
- data/lib/rdkafka/metadata.rb +92 -0
- data/lib/rdkafka/producer/client.rb +47 -0
- data/lib/rdkafka/producer/delivery_handle.rb +22 -0
- data/lib/rdkafka/producer/delivery_report.rb +26 -0
- data/lib/rdkafka/producer.rb +178 -0
- data/lib/rdkafka/version.rb +5 -0
- data/lib/rdkafka.rb +22 -0
- data/rdkafka.gemspec +36 -0
- data/spec/rdkafka/abstract_handle_spec.rb +113 -0
- data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
- data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
- data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
- data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
- data/spec/rdkafka/admin_spec.rb +203 -0
- data/spec/rdkafka/bindings_spec.rb +134 -0
- data/spec/rdkafka/callbacks_spec.rb +20 -0
- data/spec/rdkafka/config_spec.rb +182 -0
- data/spec/rdkafka/consumer/message_spec.rb +139 -0
- data/spec/rdkafka/consumer/partition_spec.rb +57 -0
- data/spec/rdkafka/consumer/topic_partition_list_spec.rb +223 -0
- data/spec/rdkafka/consumer_spec.rb +1008 -0
- data/spec/rdkafka/error_spec.rb +89 -0
- data/spec/rdkafka/metadata_spec.rb +78 -0
- data/spec/rdkafka/producer/client_spec.rb +145 -0
- data/spec/rdkafka/producer/delivery_handle_spec.rb +42 -0
- data/spec/rdkafka/producer/delivery_report_spec.rb +17 -0
- data/spec/rdkafka/producer_spec.rb +525 -0
- data/spec/spec_helper.rb +139 -0
- data.tar.gz.sig +0 -0
- metadata +277 -0
- metadata.gz.sig +0 -0
@@ -0,0 +1,1008 @@
|
|
1
|
+
require "spec_helper"
|
2
|
+
require "ostruct"
|
3
|
+
require 'securerandom'
|
4
|
+
|
5
|
+
describe Rdkafka::Consumer do
|
6
|
+
let(:consumer) { rdkafka_consumer_config.consumer }
|
7
|
+
let(:producer) { rdkafka_producer_config.producer }
|
8
|
+
|
9
|
+
after { consumer.close }
|
10
|
+
after { producer.close }
|
11
|
+
|
12
|
+
# Specs for subscribing to a topic, unsubscribing, and reading back the
# current subscription, including error paths where the librdkafka binding
# returns a non-zero error code (stubbed as 20).
describe "#subscribe, #unsubscribe and #subscription" do
  it "should subscribe, unsubscribe and return the subscription" do
    expect(consumer.subscription).to be_empty

    consumer.subscribe("consume_test_topic")

    expect(consumer.subscription).not_to be_empty
    expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
      list.add_topic("consume_test_topic")
    end
    expect(consumer.subscription).to eq expected_subscription

    consumer.unsubscribe

    expect(consumer.subscription).to be_empty
  end

  it "should raise an error when subscribing fails" do
    expect(Rdkafka::Bindings).to receive(:rd_kafka_subscribe).and_return(20)

    expect {
      consumer.subscribe("consume_test_topic")
    }.to raise_error(Rdkafka::RdkafkaError)
  end

  it "should raise an error when unsubscribing fails" do
    expect(Rdkafka::Bindings).to receive(:rd_kafka_unsubscribe).and_return(20)

    expect {
      consumer.unsubscribe
    }.to raise_error(Rdkafka::RdkafkaError)
  end

  it "should raise an error when fetching the subscription fails" do
    expect(Rdkafka::Bindings).to receive(:rd_kafka_subscription).and_return(20)

    expect {
      consumer.subscription
    }.to raise_error(Rdkafka::RdkafkaError)
  end
end
|
53
|
+
|
54
|
+
# Specs for pausing and resuming partitions: a full integration round-trip
# (message visible -> pause -> no message -> resume -> message visible again),
# type checking of the argument, and error paths from the stubbed bindings.
describe "#pause and #resume" do
  context "subscription" do
    let(:timeout) { 1000 }

    before { consumer.subscribe("consume_test_topic") }
    after { consumer.unsubscribe }

    it "should pause and then resume" do
      # 1. partitions are assigned
      wait_for_assignment(consumer)
      expect(consumer.assignment).not_to be_empty

      # 2. send a first message
      send_one_message

      # 3. ensure that message is successfully consumed
      records = consumer.poll(timeout)
      expect(records).not_to be_nil
      consumer.commit

      # 4. send a second message
      send_one_message

      # 5. pause the subscription
      tpl = Rdkafka::Consumer::TopicPartitionList.new
      tpl.add_topic("consume_test_topic", (0..2))
      consumer.pause(tpl)

      # 6. ensure that messages are not available
      records = consumer.poll(timeout)
      expect(records).to be_nil

      # 7. resume the subscription
      tpl = Rdkafka::Consumer::TopicPartitionList.new
      tpl.add_topic("consume_test_topic", (0..2))
      consumer.resume(tpl)

      # 8. ensure that message is successfully consumed
      records = consumer.poll(timeout)
      expect(records).not_to be_nil
    end
  end

  it "should raise when not TopicPartitionList" do
    expect { consumer.pause(true) }.to raise_error(TypeError)
    expect { consumer.resume(true) }.to raise_error(TypeError)
  end

  it "should raise an error when pausing fails" do
    list = Rdkafka::Consumer::TopicPartitionList.new.tap { |tpl| tpl.add_topic('topic', (0..1)) }

    expect(Rdkafka::Bindings).to receive(:rd_kafka_pause_partitions).and_return(20)
    expect {
      consumer.pause(list)
    }.to raise_error do |err|
      # Pause failures carry the offending list on the error object.
      expect(err).to be_instance_of(Rdkafka::RdkafkaTopicPartitionListError)
      expect(err.topic_partition_list).to be
    end
  end

  it "should raise an error when resume fails" do
    expect(Rdkafka::Bindings).to receive(:rd_kafka_resume_partitions).and_return(20)
    expect {
      consumer.resume(Rdkafka::Consumer::TopicPartitionList.new)
    }.to raise_error Rdkafka::RdkafkaError
  end

  # Produces a single message to the test topic and waits for delivery.
  def send_one_message
    producer.produce(
      topic: "consume_test_topic",
      payload: "payload 1",
      key: "key 1"
    ).wait
  end
end
|
129
|
+
|
130
|
+
# Specs for seeking within a partition: error path from the stubbed binding,
# re-reading a message after seeking back, and skipping ahead past messages.
describe "#seek" do
  it "should raise an error when seeking fails" do
    fake_msg = OpenStruct.new(topic: "consume_test_topic", partition: 0, offset: 0)

    expect(Rdkafka::Bindings).to receive(:rd_kafka_seek).and_return(20)
    expect {
      consumer.seek(fake_msg)
    }.to raise_error Rdkafka::RdkafkaError
  end

  context "subscription" do
    let(:timeout) { 1000 }

    before do
      consumer.subscribe("consume_test_topic")

      # 1. partitions are assigned
      wait_for_assignment(consumer)
      expect(consumer.assignment).not_to be_empty

      # 2. eat unrelated messages
      while(consumer.poll(timeout)) do; end
    end
    after { consumer.unsubscribe }

    # Produces one message (payload tagged with +val+) to partition 0 and
    # waits for delivery.
    def send_one_message(val)
      producer.produce(
        topic: "consume_test_topic",
        payload: "payload #{val}",
        key: "key 1",
        partition: 0
      ).wait
    end

    it "works when a partition is paused" do
      # 3. get reference message
      send_one_message(:a)
      message1 = consumer.poll(timeout)
      expect(message1&.payload).to eq "payload a"

      # 4. pause the subscription
      tpl = Rdkafka::Consumer::TopicPartitionList.new
      tpl.add_topic("consume_test_topic", 1)
      consumer.pause(tpl)

      # 5. seek to previous message
      consumer.seek(message1)

      # 6. resume the subscription
      tpl = Rdkafka::Consumer::TopicPartitionList.new
      tpl.add_topic("consume_test_topic", 1)
      consumer.resume(tpl)

      # 7. ensure same message is read again
      message2 = consumer.poll(timeout)
      consumer.commit
      expect(message1.offset).to eq message2.offset
      expect(message1.payload).to eq message2.payload
    end

    it "allows skipping messages" do
      # 3. send messages
      send_one_message(:a)
      send_one_message(:b)
      send_one_message(:c)

      # 4. get reference message
      message = consumer.poll(timeout)
      expect(message&.payload).to eq "payload a"

      # 5. seek over one message
      fake_msg = message.dup
      fake_msg.instance_variable_set(:@offset, fake_msg.offset + 2)
      consumer.seek(fake_msg)

      # 6. ensure that only one message is available
      records = consumer.poll(timeout)
      expect(records&.payload).to eq "payload c"
      records = consumer.poll(timeout)
      expect(records).to be_nil
    end
  end
end
|
213
|
+
|
214
|
+
# Specs for manual partition assignment and reading back the current
# assignment, including type checking and stubbed-binding error paths.
describe "#assign and #assignment" do
  it "should return an empty assignment if nothing is assigned" do
    expect(consumer.assignment).to be_empty
  end

  it "should only accept a topic partition list in assign" do
    expect {
      consumer.assign("list")
    }.to raise_error TypeError
  end

  it "should raise an error when assigning fails" do
    expect(Rdkafka::Bindings).to receive(:rd_kafka_assign).and_return(20)
    expect {
      consumer.assign(Rdkafka::Consumer::TopicPartitionList.new)
    }.to raise_error Rdkafka::RdkafkaError
  end

  it "should assign specific topic/partitions and return that assignment" do
    tpl = Rdkafka::Consumer::TopicPartitionList.new
    tpl.add_topic("consume_test_topic", (0..2))
    consumer.assign(tpl)

    assignment = consumer.assignment
    expect(assignment).not_to be_empty
    expect(assignment.to_h["consume_test_topic"].length).to eq 3
  end

  it "should return the assignment when subscribed" do
    # Make sure there's a message
    producer.produce(
      topic: "consume_test_topic",
      payload: "payload 1",
      key: "key 1",
      partition: 0
    ).wait

    # Subscribe and poll until partitions are assigned
    consumer.subscribe("consume_test_topic")
    100.times do
      consumer.poll(100)
      break unless consumer.assignment.empty?
    end

    assignment = consumer.assignment
    expect(assignment).not_to be_empty
    expect(assignment.to_h["consume_test_topic"].length).to eq 3
  end

  it "should raise an error when getting assignment fails" do
    expect(Rdkafka::Bindings).to receive(:rd_kafka_assignment).and_return(20)
    expect {
      consumer.assignment
    }.to raise_error Rdkafka::RdkafkaError
  end
end
|
270
|
+
|
271
|
+
# Specs for closing a consumer: once closed, further calls such as #poll
# must raise Rdkafka::ClosedConsumerError naming the attempted method.
describe "#close" do
  it "should close a consumer" do
    consumer.subscribe("consume_test_topic")
    100.times do |i|
      producer.produce(
        topic: "consume_test_topic",
        payload: "payload #{i}",
        key: "key #{i}",
        partition: 0
      ).wait
    end
    consumer.close
    expect {
      consumer.poll(100)
    }.to raise_error(Rdkafka::ClosedConsumerError, /poll/)
  end
end
|
288
|
+
|
289
|
+
# Specs for committing offsets (sync and with explicit lists), fetching
# committed offsets, and manually storing offsets with auto-store disabled.
describe "#commit, #committed and #store_offset" do
  # Make sure there's a stored offset
  let!(:report) do
    producer.produce(
      topic: "consume_test_topic",
      payload: "payload 1",
      key: "key 1",
      partition: 0
    ).wait
  end

  let(:message) do
    wait_for_message(
      topic: "consume_test_topic",
      delivery_report: report,
      consumer: consumer
    )
  end

  it "should only accept a topic partition list in committed" do
    expect {
      consumer.committed("list")
    }.to raise_error TypeError
  end

  it "should commit in sync mode" do
    expect {
      consumer.commit(nil, true)
    }.not_to raise_error
  end

  it "should only accept a topic partition list in commit if not nil" do
    expect {
      consumer.commit("list")
    }.to raise_error TypeError
  end

  context "with a committed consumer" do
    before :all do
      # Make sure there are some messages.
      handles = []
      producer = rdkafka_producer_config.producer
      10.times do
        (0..2).each do |i|
          handles << producer.produce(
            topic: "consume_test_topic",
            payload: "payload 1",
            key: "key 1",
            partition: i
          )
        end
      end
      handles.each(&:wait)
      producer.close
    end

    before do
      consumer.subscribe("consume_test_topic")
      wait_for_assignment(consumer)
      # Seed every partition with a committed offset of 1.
      list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
        list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => 1, 1 => 1, 2 => 1)
      end
      consumer.commit(list)
    end

    it "should commit a specific topic partition list" do
      list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
        list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => 1, 1 => 2, 2 => 3)
      end
      consumer.commit(list)

      partitions = consumer.committed(list).to_h["consume_test_topic"]
      expect(partitions[0].offset).to eq 1
      expect(partitions[1].offset).to eq 2
      expect(partitions[2].offset).to eq 3
    end

    it "should raise an error when committing fails" do
      expect(Rdkafka::Bindings).to receive(:rd_kafka_commit).and_return(20)

      expect {
        consumer.commit
      }.to raise_error(Rdkafka::RdkafkaError)
    end

    it "should fetch the committed offsets for the current assignment" do
      partitions = consumer.committed.to_h["consume_test_topic"]
      expect(partitions).not_to be_nil
      expect(partitions[0].offset).to eq 1
    end

    it "should fetch the committed offsets for a specified topic partition list" do
      list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
        list.add_topic("consume_test_topic", [0, 1, 2])
      end
      partitions = consumer.committed(list).to_h["consume_test_topic"]
      expect(partitions).not_to be_nil
      expect(partitions[0].offset).to eq 1
      expect(partitions[1].offset).to eq 1
      expect(partitions[2].offset).to eq 1
    end

    it "should raise an error when getting committed fails" do
      expect(Rdkafka::Bindings).to receive(:rd_kafka_committed).and_return(20)
      list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
        list.add_topic("consume_test_topic", [0, 1, 2])
      end
      expect {
        consumer.committed(list)
      }.to raise_error Rdkafka::RdkafkaError
    end

    describe "#store_offset" do
      before do
        # Auto offset store/commit must be off so store_offset is observable.
        config = {}
        config[:'enable.auto.offset.store'] = false
        config[:'enable.auto.commit'] = false
        @new_consumer = rdkafka_consumer_config(config).consumer
        @new_consumer.subscribe("consume_test_topic")
        wait_for_assignment(@new_consumer)
      end

      after do
        @new_consumer.close
      end

      it "should store the offset for a message" do
        @new_consumer.store_offset(message)
        @new_consumer.commit

        list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
          list.add_topic("consume_test_topic", [0, 1, 2])
        end
        partitions = @new_consumer.committed(list).to_h["consume_test_topic"]
        expect(partitions).not_to be_nil
        # Committed offset is the next offset to consume, hence +1.
        expect(partitions[message.partition].offset).to eq(message.offset + 1)
      end

      it "should raise an error with invalid input" do
        allow(message).to receive(:partition).and_return(9999)
        expect {
          @new_consumer.store_offset(message)
        }.to raise_error Rdkafka::RdkafkaError
      end
    end
  end
end
|
436
|
+
|
437
|
+
# Specs for querying low/high watermark offsets for a topic partition.
describe "#query_watermark_offsets" do
  it "should return the watermark offsets" do
    # Make sure there's a message
    producer.produce(
      topic: "watermarks_test_topic",
      payload: "payload 1",
      key: "key 1",
      partition: 0
    ).wait

    low, high = consumer.query_watermark_offsets("watermarks_test_topic", 0, 5000)
    expect(low).to eq 0
    expect(high).to be > 0
  end

  it "should raise an error when querying offsets fails" do
    expect(Rdkafka::Bindings).to receive(:rd_kafka_query_watermark_offsets).and_return(20)
    expect {
      consumer.query_watermark_offsets("consume_test_topic", 0, 5000)
    }.to raise_error Rdkafka::RdkafkaError
  end
end
|
459
|
+
|
460
|
+
# Specs for computing consumer lag per topic/partition. Uses
# enable.partition.eof so the consumer can detect it reached the end of
# every partition before committing.
describe "#lag" do
  let(:consumer) { rdkafka_consumer_config(:"enable.partition.eof" => true).consumer }

  it "should calculate the consumer lag" do
    # Make sure there's a message in every partition and
    # wait for the message to make sure everything is committed.
    (0..2).each do |i|
      producer.produce(
        topic: "consume_test_topic",
        key: "key lag #{i}",
        partition: i
      ).wait
    end

    # Consume to the end
    consumer.subscribe("consume_test_topic")
    eof_count = 0
    loop do
      begin
        consumer.poll(100)
      rescue Rdkafka::RdkafkaError => error
        if error.is_partition_eof?
          eof_count += 1
        end
        break if eof_count == 3
      end
    end

    # Commit
    consumer.commit

    # Create list to fetch lag for. TODO creating the list will not be necessary
    # after committed uses the subscription.
    list = consumer.committed(Rdkafka::Consumer::TopicPartitionList.new.tap do |l|
      l.add_topic("consume_test_topic", (0..2))
    end)

    # Lag should be 0 now
    lag = consumer.lag(list)
    expected_lag = {
      "consume_test_topic" => {
        0 => 0,
        1 => 0,
        2 => 0
      }
    }
    expect(lag).to eq(expected_lag)

    # Produce message on every topic again
    (0..2).each do |i|
      producer.produce(
        topic: "consume_test_topic",
        key: "key lag #{i}",
        partition: i
      ).wait
    end

    # Lag should be 1 now
    lag = consumer.lag(list)
    expected_lag = {
      "consume_test_topic" => {
        0 => 1,
        1 => 1,
        2 => 1
      }
    }
    expect(lag).to eq(expected_lag)
  end

  it "returns nil if there are no messages on the topic" do
    list = consumer.committed(Rdkafka::Consumer::TopicPartitionList.new.tap do |l|
      l.add_topic("consume_test_topic", (0..2))
    end)

    lag = consumer.lag(list)
    expected_lag = {
      "consume_test_topic" => {}
    }
    expect(lag).to eq(expected_lag)
  end
end
|
541
|
+
|
542
|
+
# Spec for fetching the broker cluster id once partitions are assigned.
describe "#cluster_id" do
  it 'should return the current ClusterId' do
    consumer.subscribe("consume_test_topic")
    wait_for_assignment(consumer)
    expect(consumer.cluster_id).not_to be_empty
  end
end
|
549
|
+
|
550
|
+
# Spec for fetching this consumer's group member id (librdkafka prefixes
# generated member ids with "rdkafka-").
describe "#member_id" do
  it 'should return the current MemberId' do
    consumer.subscribe("consume_test_topic")
    wait_for_assignment(consumer)
    expect(consumer.member_id).to start_with('rdkafka-')
  end
end
|
557
|
+
|
558
|
+
# Specs for #poll: nil without subscription/messages, returning a consumed
# message, and raising (while still destroying the native message) when the
# polled message carries an error code.
describe "#poll" do
  it "should return nil if there is no subscription" do
    expect(consumer.poll(1000)).to be_nil
  end

  it "should return nil if there are no messages" do
    consumer.subscribe("empty_test_topic")
    expect(consumer.poll(1000)).to be_nil
  end

  it "should return a message if there is one" do
    producer.produce(
      topic: "consume_test_topic",
      payload: "payload 1",
      key: "key 1"
    ).wait
    consumer.subscribe("consume_test_topic")
    message = consumer.each {|m| break m}

    expect(message).to be_a Rdkafka::Consumer::Message
    expect(message.payload).to eq('payload 1')
    expect(message.key).to eq('key 1')
  end

  it "should raise an error when polling fails" do
    message = Rdkafka::Bindings::Message.new.tap do |message|
      message[:err] = 20
    end
    message_pointer = message.to_ptr
    expect(Rdkafka::Bindings).to receive(:rd_kafka_consumer_poll).and_return(message_pointer)
    # The native message must still be freed even on the error path.
    expect(Rdkafka::Bindings).to receive(:rd_kafka_message_destroy).with(message_pointer)
    expect {
      consumer.poll(100)
    }.to raise_error Rdkafka::RdkafkaError
  end
end
|
594
|
+
|
595
|
+
# Specs for message headers on polled messages: present, absent, and the
# two error paths when the header bindings return a non-zero code.
describe "#poll with headers" do
  it "should return message with headers" do
    report = producer.produce(
      topic: "consume_test_topic",
      key: "key headers",
      headers: { foo: 'bar' }
    ).wait

    message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
    expect(message).to be
    expect(message.key).to eq('key headers')
    expect(message.headers).to include(foo: 'bar')
  end

  it "should return message with no headers" do
    report = producer.produce(
      topic: "consume_test_topic",
      key: "key no headers",
      headers: nil
    ).wait

    message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
    expect(message).to be
    expect(message.key).to eq('key no headers')
    expect(message.headers).to be_empty
  end

  it "should raise an error when message headers aren't readable" do
    expect(Rdkafka::Bindings).to receive(:rd_kafka_message_headers).with(any_args) { 1 }

    report = producer.produce(
      topic: "consume_test_topic",
      key: "key err headers",
      headers: nil
    ).wait

    expect {
      wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
    }.to raise_error do |err|
      expect(err).to be_instance_of(Rdkafka::RdkafkaError)
      expect(err.message).to start_with("Error reading message headers")
    end
  end

  it "should raise an error when the first message header aren't readable" do
    expect(Rdkafka::Bindings).to receive(:rd_kafka_header_get_all).with(any_args) { 1 }

    report = producer.produce(
      topic: "consume_test_topic",
      key: "key err headers",
      headers: { foo: 'bar' }
    ).wait

    expect {
      wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
    }.to raise_error do |err|
      expect(err).to be_instance_of(Rdkafka::RdkafkaError)
      expect(err.message).to start_with("Error reading a message header at index 0")
    end
  end
end
|
656
|
+
|
657
|
+
# Spec for the Enumerable-style #each: yields consumed messages until the
# loop is broken out of / the consumer is closed.
describe "#each" do
  it "should yield messages" do
    handles = []
    10.times do
      handles << producer.produce(
        topic: "consume_test_topic",
        payload: "payload 1",
        key: "key 1",
        partition: 0
      )
    end
    handles.each(&:wait)

    consumer.subscribe("consume_test_topic")
    # Check the first 10 messages. Then close the consumer, which
    # should break the each loop.
    consumer.each_with_index do |message, i|
      expect(message).to be_a Rdkafka::Consumer::Message
      break if i == 10
    end
    consumer.close
  end
end
|
680
|
+
|
681
|
+
# Specs for #each_batch: one real integration test plus unit tests that
# stub #poll to exercise batching by max_items, timeout_ms, bytes_threshold,
# and the yield_on_error behavior when poll raises mid-batch.
describe "#each_batch" do
  let(:message_payload) { 'a' * 10 }

  before do
    # Fresh random topic per example so batches are isolated.
    @topic = SecureRandom.base64(10).tr('+=/', '')
  end

  after do
    @topic = nil
  end

  def topic_name
    @topic
  end

  # Produces +n+ messages to partition 0 of the example topic and waits for
  # all deliveries.
  def produce_n(n)
    handles = []
    n.times do |i|
      handles << producer.produce(
        topic: topic_name,
        payload: Time.new.to_f.to_s,
        key: i.to_s,
        partition: 0
      )
    end
    handles.each(&:wait)
  end

  # A stub message whose payload is +message_payload+ (used by the poll stubs).
  def new_message
    instance_double("Rdkafka::Consumer::Message").tap do |message|
      allow(message).to receive(:payload).and_return(message_payload)
    end
  end

  it "retrieves messages produced into a topic" do
    # This is the only each_batch test that actually produces real messages
    # into a topic in the real kafka of the container.
    #
    # The other tests stub 'poll' which makes them faster and more reliable,
    # but it makes sense to keep a single test with a fully integrated flow.
    # This will help to catch breaking changes in the behavior of 'poll',
    # librdkafka, or Kafka.
    #
    # This is, in effect, an integration test and the subsequent specs are
    # unit tests.
    create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
    create_topic_handle.wait(max_wait_timeout: 15.0)
    consumer.subscribe(topic_name)
    produce_n 42
    all_yields = []
    consumer.each_batch(max_items: 10) do |batch|
      all_yields << batch
      break if all_yields.flatten.size >= 42
    end
    expect(all_yields.flatten.first).to be_a Rdkafka::Consumer::Message
    expect(all_yields.flatten.size).to eq 42
    expect(all_yields.size).to be > 4
    expect(all_yields.flatten.map(&:key)).to eq (0..41).map { |x| x.to_s }
  end

  it "should batch poll results and yield arrays of messages" do
    consumer.subscribe(topic_name)
    all_yields = []
    expect(consumer)
      .to receive(:poll)
      .exactly(10).times
      .and_return(new_message)
    consumer.each_batch(max_items: 10) do |batch|
      all_yields << batch
      break if all_yields.flatten.size >= 10
    end
    expect(all_yields.first).to be_instance_of(Array)
    expect(all_yields.flatten.size).to eq 10
    non_empty_yields = all_yields.reject { |batch| batch.empty? }
    expect(non_empty_yields.size).to be < 10
  end

  it "should yield a partial batch if the timeout is hit with some messages" do
    consumer.subscribe(topic_name)
    poll_count = 0
    expect(consumer)
      .to receive(:poll)
      .at_least(3).times do
        poll_count = poll_count + 1
        if poll_count > 2
          # Simulate the broker going quiet so the batch timeout is hit.
          sleep 0.1
          nil
        else
          new_message
        end
      end
    all_yields = []
    consumer.each_batch(max_items: 10) do |batch|
      all_yields << batch
      break if all_yields.flatten.size >= 2
    end
    expect(all_yields.flatten.size).to eq 2
  end

  it "should yield [] if nothing is received before the timeout" do
    create_topic_handle = rdkafka_config.admin.create_topic(topic_name, 1, 1)
    create_topic_handle.wait(max_wait_timeout: 15.0)
    consumer.subscribe(topic_name)
    consumer.each_batch do |batch|
      expect(batch).to eq([])
      break
    end
  end

  it "should yield batches of max_items in size if messages are already fetched" do
    yielded_batches = []
    expect(consumer)
      .to receive(:poll)
      .with(anything)
      .exactly(20).times
      .and_return(new_message)

    consumer.each_batch(max_items: 10, timeout_ms: 500) do |batch|
      yielded_batches << batch
      break if yielded_batches.flatten.size >= 20
      break if yielded_batches.size >= 20 # so failure doesn't hang
    end
    expect(yielded_batches.size).to eq 2
    expect(yielded_batches.map(&:size)).to eq 2.times.map { 10 }
  end

  it "should yield batches as soon as bytes_threshold is hit" do
    yielded_batches = []
    expect(consumer)
      .to receive(:poll)
      .with(anything)
      .exactly(20).times
      .and_return(new_message)

    consumer.each_batch(bytes_threshold: message_payload.size * 4, timeout_ms: 500) do |batch|
      yielded_batches << batch
      break if yielded_batches.flatten.size >= 20
      break if yielded_batches.size >= 20 # so failure doesn't hang
    end
    expect(yielded_batches.size).to eq 5
    expect(yielded_batches.map(&:size)).to eq 5.times.map { 4 }
  end

  context "error raised from poll and yield_on_error is true" do
    it "should yield buffered exceptions on rebalance, then break" do
      config = rdkafka_consumer_config(
        {
          :"enable.auto.commit" => false,
          :"enable.auto.offset.store" => false
        }
      )
      consumer = config.consumer
      consumer.subscribe(topic_name)
      batches_yielded = []
      exceptions_yielded = []
      each_batch_iterations = 0
      poll_count = 0
      expect(consumer)
        .to receive(:poll)
        .with(anything)
        .exactly(3).times
        .and_wrap_original do |method, *args|
          poll_count = poll_count + 1
          if poll_count == 3
            raise Rdkafka::RdkafkaError.new(27,
              "partitions ... too ... heavy ... must ... rebalance")
          else
            new_message
          end
        end
      expect {
        consumer.each_batch(max_items: 30, yield_on_error: true) do |batch, pending_error|
          batches_yielded << batch
          exceptions_yielded << pending_error
          each_batch_iterations = each_batch_iterations + 1
        end
      }.to raise_error(Rdkafka::RdkafkaError)
      expect(poll_count).to eq 3
      expect(each_batch_iterations).to eq 1
      expect(batches_yielded.size).to eq 1
      expect(batches_yielded.first.size).to eq 2
      expect(exceptions_yielded.flatten.size).to eq 1
      expect(exceptions_yielded.flatten.first).to be_instance_of(Rdkafka::RdkafkaError)
    end
  end

  context "error raised from poll and yield_on_error is false" do
    it "should yield buffered exceptions on rebalance, then break" do
      config = rdkafka_consumer_config(
        {
          :"enable.auto.commit" => false,
          :"enable.auto.offset.store" => false
        }
      )
      consumer = config.consumer
      consumer.subscribe(topic_name)
      batches_yielded = []
      exceptions_yielded = []
      each_batch_iterations = 0
      poll_count = 0
      expect(consumer)
        .to receive(:poll)
        .with(anything)
        .exactly(3).times
        .and_wrap_original do |method, *args|
          poll_count = poll_count + 1
          if poll_count == 3
            raise Rdkafka::RdkafkaError.new(27,
              "partitions ... too ... heavy ... must ... rebalance")
          else
            new_message
          end
        end
      expect {
        consumer.each_batch(max_items: 30, yield_on_error: false) do |batch, pending_error|
          batches_yielded << batch
          exceptions_yielded << pending_error
          each_batch_iterations = each_batch_iterations + 1
        end
      }.to raise_error(Rdkafka::RdkafkaError)
      expect(poll_count).to eq 3
      expect(each_batch_iterations).to eq 0
      expect(batches_yielded.size).to eq 0
      expect(exceptions_yielded.size).to eq 0
    end
  end
end
|
908
|
+
|
909
|
+
# Specs for the consumer_rebalance_listener hook: assignment/revocation
# callbacks fire with the partition list, and exceptions raised inside the
# callbacks are handled without aborting the rebalance.
describe "a rebalance listener" do
  let(:consumer) do
    config = rdkafka_consumer_config
    config.consumer_rebalance_listener = listener
    config.consumer
  end

  context "with a working listener" do
    let(:listener) do
      Struct.new(:queue) do
        def on_partitions_assigned(consumer, list)
          collect(:assign, list)
        end

        def on_partitions_revoked(consumer, list)
          collect(:revoke, list)
        end

        # Records the event name followed by the topic and its partition ids.
        def collect(name, list)
          partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
          queue << ([name] + partitions)
        end
      end.new([])
    end

    it "should get notifications" do
      notify_listener(listener)

      expect(listener.queue).to eq([
        [:assign, "consume_test_topic", 0, 1, 2],
        [:revoke, "consume_test_topic", 0, 1, 2]
      ])
    end
  end

  context "with a broken listener" do
    let(:listener) do
      Struct.new(:queue) do
        def on_partitions_assigned(consumer, list)
          queue << :assigned
          raise 'boom'
        end

        def on_partitions_revoked(consumer, list)
          queue << :revoked
          raise 'boom'
        end
      end.new([])
    end

    it 'should handle callback exceptions' do
      notify_listener(listener)

      expect(listener.queue).to eq([:assigned, :revoked])
    end
  end

  # Drives a full assign/revoke cycle so the listener's callbacks fire.
  def notify_listener(listener)
    # 1. subscribe and poll
    consumer.subscribe("consume_test_topic")
    wait_for_assignment(consumer)
    consumer.poll(100)

    # 2. unsubscribe
    consumer.unsubscribe
    wait_for_unassignment(consumer)
    consumer.close
  end
end
|
978
|
+
|
979
|
+
# Verifies that every consumer operation raises ClosedConsumerError (with the
# offending method's name in the message) once the consumer has been closed.
context "methods that should not be called after a consumer has been closed" do
  before do
    consumer.close
  end

  # Each affected method mapped to arguments that satisfy its arity:
  # nil means "call with no arguments", an array is splatted into the call
  # (its values may be nil — the guard fires before arguments are inspected).
  {
    :subscribe => [ nil ],
    :unsubscribe => nil,
    :each_batch => nil,
    :pause => [ nil ],
    :resume => [ nil ],
    :subscription => nil,
    :assign => [ nil ],
    :assignment => nil,
    :committed => [],
    :query_watermark_offsets => [ nil, nil ],
  }.each do |method, args|
    it "raises an exception if #{method} is called" do
      expect {
        if args.nil?
          consumer.public_send(method)
        else
          consumer.public_send(method, *args)
        end
        # `raise_error` (not the `raise_exception` alias) for consistency with
        # the rest of this spec; interpolation calls #to_s implicitly.
      }.to raise_error(Rdkafka::ClosedConsumerError, /#{method}/)
    end
  end
end
|
1008
|
+
end
|