karafka-rdkafka 0.20.0.rc3-x86_64-linux-gnu
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.github/CODEOWNERS +3 -0
- data/.github/FUNDING.yml +1 -0
- data/.github/workflows/ci_linux_x86_64_gnu.yml +248 -0
- data/.github/workflows/ci_macos_arm64.yml +301 -0
- data/.github/workflows/push_linux_x86_64_gnu.yml +60 -0
- data/.github/workflows/push_ruby.yml +37 -0
- data/.github/workflows/verify-action-pins.yml +16 -0
- data/.gitignore +15 -0
- data/.rspec +2 -0
- data/.ruby-gemset +1 -0
- data/.ruby-version +1 -0
- data/.yardopts +2 -0
- data/CHANGELOG.md +323 -0
- data/Gemfile +5 -0
- data/MIT-LICENSE +22 -0
- data/README.md +177 -0
- data/Rakefile +96 -0
- data/docker-compose.yml +25 -0
- data/ext/README.md +19 -0
- data/ext/Rakefile +131 -0
- data/ext/build_common.sh +361 -0
- data/ext/build_linux_x86_64_gnu.sh +306 -0
- data/ext/build_macos_arm64.sh +550 -0
- data/ext/librdkafka.so +0 -0
- data/karafka-rdkafka.gemspec +61 -0
- data/lib/rdkafka/abstract_handle.rb +116 -0
- data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
- data/lib/rdkafka/admin/config_binding_result.rb +30 -0
- data/lib/rdkafka/admin/config_resource_binding_result.rb +18 -0
- data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
- data/lib/rdkafka/admin/create_acl_report.rb +24 -0
- data/lib/rdkafka/admin/create_partitions_handle.rb +30 -0
- data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
- data/lib/rdkafka/admin/create_topic_handle.rb +32 -0
- data/lib/rdkafka/admin/create_topic_report.rb +24 -0
- data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
- data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
- data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
- data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
- data/lib/rdkafka/admin/delete_topic_handle.rb +32 -0
- data/lib/rdkafka/admin/delete_topic_report.rb +24 -0
- data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
- data/lib/rdkafka/admin/describe_acl_report.rb +24 -0
- data/lib/rdkafka/admin/describe_configs_handle.rb +33 -0
- data/lib/rdkafka/admin/describe_configs_report.rb +48 -0
- data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +33 -0
- data/lib/rdkafka/admin/incremental_alter_configs_report.rb +48 -0
- data/lib/rdkafka/admin.rb +832 -0
- data/lib/rdkafka/bindings.rb +582 -0
- data/lib/rdkafka/callbacks.rb +415 -0
- data/lib/rdkafka/config.rb +398 -0
- data/lib/rdkafka/consumer/headers.rb +79 -0
- data/lib/rdkafka/consumer/message.rb +86 -0
- data/lib/rdkafka/consumer/partition.rb +57 -0
- data/lib/rdkafka/consumer/topic_partition_list.rb +190 -0
- data/lib/rdkafka/consumer.rb +663 -0
- data/lib/rdkafka/error.rb +201 -0
- data/lib/rdkafka/helpers/oauth.rb +58 -0
- data/lib/rdkafka/helpers/time.rb +14 -0
- data/lib/rdkafka/metadata.rb +115 -0
- data/lib/rdkafka/native_kafka.rb +139 -0
- data/lib/rdkafka/producer/delivery_handle.rb +48 -0
- data/lib/rdkafka/producer/delivery_report.rb +45 -0
- data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
- data/lib/rdkafka/producer.rb +492 -0
- data/lib/rdkafka/version.rb +7 -0
- data/lib/rdkafka.rb +54 -0
- data/renovate.json +92 -0
- data/spec/rdkafka/abstract_handle_spec.rb +117 -0
- data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
- data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
- data/spec/rdkafka/admin/create_topic_handle_spec.rb +54 -0
- data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
- data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
- data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
- data/spec/rdkafka/admin/delete_topic_handle_spec.rb +54 -0
- data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
- data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
- data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
- data/spec/rdkafka/admin_spec.rb +769 -0
- data/spec/rdkafka/bindings_spec.rb +222 -0
- data/spec/rdkafka/callbacks_spec.rb +20 -0
- data/spec/rdkafka/config_spec.rb +258 -0
- data/spec/rdkafka/consumer/headers_spec.rb +73 -0
- data/spec/rdkafka/consumer/message_spec.rb +139 -0
- data/spec/rdkafka/consumer/partition_spec.rb +57 -0
- data/spec/rdkafka/consumer/topic_partition_list_spec.rb +248 -0
- data/spec/rdkafka/consumer_spec.rb +1299 -0
- data/spec/rdkafka/error_spec.rb +95 -0
- data/spec/rdkafka/metadata_spec.rb +79 -0
- data/spec/rdkafka/native_kafka_spec.rb +130 -0
- data/spec/rdkafka/producer/delivery_handle_spec.rb +60 -0
- data/spec/rdkafka/producer/delivery_report_spec.rb +25 -0
- data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
- data/spec/rdkafka/producer/partitions_count_spec.rb +359 -0
- data/spec/rdkafka/producer_spec.rb +1234 -0
- data/spec/spec_helper.rb +181 -0
- metadata +244 -0
@@ -0,0 +1,1299 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "ostruct"
|
4
|
+
require 'securerandom'
|
5
|
+
|
6
|
+
describe Rdkafka::Consumer do
|
7
|
+
let(:consumer) { rdkafka_consumer_config.consumer }
|
8
|
+
let(:producer) { rdkafka_producer_config.producer }
|
9
|
+
|
10
|
+
after { consumer.close }
|
11
|
+
after { producer.close }
|
12
|
+
|
13
|
+
describe '#name' do
|
14
|
+
it { expect(consumer.name).to include('rdkafka#consumer-') }
|
15
|
+
end
|
16
|
+
|
17
|
+
describe 'consumer without auto-start' do
|
18
|
+
let(:consumer) { rdkafka_consumer_config.consumer(native_kafka_auto_start: false) }
|
19
|
+
|
20
|
+
it 'expect to be able to start it later and close' do
|
21
|
+
consumer.start
|
22
|
+
consumer.close
|
23
|
+
end
|
24
|
+
|
25
|
+
it 'expect to be able to close it without starting' do
|
26
|
+
consumer.close
|
27
|
+
end
|
28
|
+
end
|
29
|
+
|
30
|
+
describe "#subscribe, #unsubscribe and #subscription" do
|
31
|
+
it "should subscribe, unsubscribe and return the subscription" do
|
32
|
+
expect(consumer.subscription).to be_empty
|
33
|
+
|
34
|
+
consumer.subscribe("consume_test_topic")
|
35
|
+
|
36
|
+
expect(consumer.subscription).not_to be_empty
|
37
|
+
expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
|
38
|
+
list.add_topic("consume_test_topic")
|
39
|
+
end
|
40
|
+
expect(consumer.subscription).to eq expected_subscription
|
41
|
+
|
42
|
+
consumer.unsubscribe
|
43
|
+
|
44
|
+
expect(consumer.subscription).to be_empty
|
45
|
+
end
|
46
|
+
|
47
|
+
it "should raise an error when subscribing fails" do
|
48
|
+
expect(Rdkafka::Bindings).to receive(:rd_kafka_subscribe).and_return(20)
|
49
|
+
|
50
|
+
expect {
|
51
|
+
consumer.subscribe("consume_test_topic")
|
52
|
+
}.to raise_error(Rdkafka::RdkafkaError)
|
53
|
+
end
|
54
|
+
|
55
|
+
it "should raise an error when unsubscribing fails" do
|
56
|
+
expect(Rdkafka::Bindings).to receive(:rd_kafka_unsubscribe).and_return(20)
|
57
|
+
|
58
|
+
expect {
|
59
|
+
consumer.unsubscribe
|
60
|
+
}.to raise_error(Rdkafka::RdkafkaError)
|
61
|
+
end
|
62
|
+
|
63
|
+
it "should raise an error when fetching the subscription fails" do
|
64
|
+
expect(Rdkafka::Bindings).to receive(:rd_kafka_subscription).and_return(20)
|
65
|
+
|
66
|
+
expect {
|
67
|
+
consumer.subscription
|
68
|
+
}.to raise_error(Rdkafka::RdkafkaError)
|
69
|
+
end
|
70
|
+
|
71
|
+
context "when using consumer without the poll set" do
|
72
|
+
let(:consumer) do
|
73
|
+
config = rdkafka_consumer_config
|
74
|
+
config.consumer_poll_set = false
|
75
|
+
config.consumer
|
76
|
+
end
|
77
|
+
|
78
|
+
it "should subscribe, unsubscribe and return the subscription" do
|
79
|
+
expect(consumer.subscription).to be_empty
|
80
|
+
|
81
|
+
consumer.subscribe("consume_test_topic")
|
82
|
+
|
83
|
+
expect(consumer.subscription).not_to be_empty
|
84
|
+
expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
|
85
|
+
list.add_topic("consume_test_topic")
|
86
|
+
end
|
87
|
+
expect(consumer.subscription).to eq expected_subscription
|
88
|
+
|
89
|
+
consumer.unsubscribe
|
90
|
+
|
91
|
+
expect(consumer.subscription).to be_empty
|
92
|
+
end
|
93
|
+
end
|
94
|
+
end
|
95
|
+
|
96
|
+
describe "#pause and #resume" do
|
97
|
+
context "subscription" do
|
98
|
+
let(:timeout) { 2000 }
|
99
|
+
|
100
|
+
before { consumer.subscribe("consume_test_topic") }
|
101
|
+
after { consumer.unsubscribe }
|
102
|
+
|
103
|
+
it "should pause and then resume" do
|
104
|
+
# 1. partitions are assigned
|
105
|
+
wait_for_assignment(consumer)
|
106
|
+
expect(consumer.assignment).not_to be_empty
|
107
|
+
|
108
|
+
# 2. send a first message
|
109
|
+
send_one_message
|
110
|
+
|
111
|
+
# 3. ensure that message is successfully consumed
|
112
|
+
records = consumer.poll(timeout)
|
113
|
+
expect(records).not_to be_nil
|
114
|
+
consumer.commit
|
115
|
+
|
116
|
+
# 4. send a second message
|
117
|
+
send_one_message
|
118
|
+
|
119
|
+
# 5. pause the subscription
|
120
|
+
tpl = Rdkafka::Consumer::TopicPartitionList.new
|
121
|
+
tpl.add_topic("consume_test_topic", (0..2))
|
122
|
+
consumer.pause(tpl)
|
123
|
+
|
124
|
+
# 6. ensure that messages are not available
|
125
|
+
records = consumer.poll(timeout)
|
126
|
+
expect(records).to be_nil
|
127
|
+
|
128
|
+
# 7. resume the subscription
|
129
|
+
tpl = Rdkafka::Consumer::TopicPartitionList.new
|
130
|
+
tpl.add_topic("consume_test_topic", (0..2))
|
131
|
+
consumer.resume(tpl)
|
132
|
+
|
133
|
+
# 8. ensure that message is successfully consumed
|
134
|
+
records = consumer.poll(timeout)
|
135
|
+
expect(records).not_to be_nil
|
136
|
+
end
|
137
|
+
end
|
138
|
+
|
139
|
+
it "should raise when not TopicPartitionList" do
|
140
|
+
expect { consumer.pause(true) }.to raise_error(TypeError)
|
141
|
+
expect { consumer.resume(true) }.to raise_error(TypeError)
|
142
|
+
end
|
143
|
+
|
144
|
+
it "should raise an error when pausing fails" do
|
145
|
+
list = Rdkafka::Consumer::TopicPartitionList.new.tap { |tpl| tpl.add_topic('topic', (0..1)) }
|
146
|
+
|
147
|
+
expect(Rdkafka::Bindings).to receive(:rd_kafka_pause_partitions).and_return(20)
|
148
|
+
expect {
|
149
|
+
consumer.pause(list)
|
150
|
+
}.to raise_error do |err|
|
151
|
+
expect(err).to be_instance_of(Rdkafka::RdkafkaTopicPartitionListError)
|
152
|
+
expect(err.topic_partition_list).to be
|
153
|
+
end
|
154
|
+
end
|
155
|
+
|
156
|
+
it "should raise an error when resume fails" do
|
157
|
+
expect(Rdkafka::Bindings).to receive(:rd_kafka_resume_partitions).and_return(20)
|
158
|
+
expect {
|
159
|
+
consumer.resume(Rdkafka::Consumer::TopicPartitionList.new)
|
160
|
+
}.to raise_error Rdkafka::RdkafkaError
|
161
|
+
end
|
162
|
+
|
163
|
+
def send_one_message
|
164
|
+
producer.produce(
|
165
|
+
topic: "consume_test_topic",
|
166
|
+
payload: "payload 1",
|
167
|
+
key: "key 1"
|
168
|
+
).wait
|
169
|
+
end
|
170
|
+
end
|
171
|
+
|
172
|
+
describe "#seek" do
|
173
|
+
let(:topic) { "it-#{SecureRandom.uuid}" }
|
174
|
+
|
175
|
+
before do
|
176
|
+
admin = rdkafka_producer_config.admin
|
177
|
+
admin.create_topic(topic, 1, 1).wait
|
178
|
+
admin.close
|
179
|
+
end
|
180
|
+
|
181
|
+
it "should raise an error when seeking fails" do
|
182
|
+
fake_msg = OpenStruct.new(topic: topic, partition: 0, offset: 0)
|
183
|
+
|
184
|
+
expect(Rdkafka::Bindings).to receive(:rd_kafka_seek).and_return(20)
|
185
|
+
expect {
|
186
|
+
consumer.seek(fake_msg)
|
187
|
+
}.to raise_error Rdkafka::RdkafkaError
|
188
|
+
end
|
189
|
+
|
190
|
+
context "subscription" do
|
191
|
+
let(:timeout) { 1000 }
|
192
|
+
# Some specs here test the manual offset commit hence we want to ensure, that we have some
|
193
|
+
# offsets in-memory that we can manually commit
|
194
|
+
let(:consumer) { rdkafka_consumer_config('auto.commit.interval.ms': 60_000).consumer }
|
195
|
+
|
196
|
+
before do
|
197
|
+
consumer.subscribe(topic)
|
198
|
+
|
199
|
+
# 1. partitions are assigned
|
200
|
+
wait_for_assignment(consumer)
|
201
|
+
expect(consumer.assignment).not_to be_empty
|
202
|
+
|
203
|
+
# 2. eat unrelated messages
|
204
|
+
while(consumer.poll(timeout)) do; end
|
205
|
+
end
|
206
|
+
after { consumer.unsubscribe }
|
207
|
+
|
208
|
+
def send_one_message(val)
|
209
|
+
producer.produce(
|
210
|
+
topic: topic,
|
211
|
+
payload: "payload #{val}",
|
212
|
+
key: "key 1",
|
213
|
+
partition: 0
|
214
|
+
).wait
|
215
|
+
end
|
216
|
+
|
217
|
+
it "works when a partition is paused" do
|
218
|
+
# 3. get reference message
|
219
|
+
send_one_message(:a)
|
220
|
+
message1 = consumer.poll(timeout)
|
221
|
+
expect(message1&.payload).to eq "payload a"
|
222
|
+
|
223
|
+
# 4. pause the subscription
|
224
|
+
tpl = Rdkafka::Consumer::TopicPartitionList.new
|
225
|
+
tpl.add_topic(topic, 1)
|
226
|
+
consumer.pause(tpl)
|
227
|
+
|
228
|
+
# 5. seek to previous message
|
229
|
+
consumer.seek(message1)
|
230
|
+
|
231
|
+
# 6. resume the subscription
|
232
|
+
tpl = Rdkafka::Consumer::TopicPartitionList.new
|
233
|
+
tpl.add_topic(topic, 1)
|
234
|
+
consumer.resume(tpl)
|
235
|
+
|
236
|
+
# 7. ensure same message is read again
|
237
|
+
message2 = consumer.poll(timeout)
|
238
|
+
|
239
|
+
# This is needed because `enable.auto.offset.store` is true but when running in CI that
|
240
|
+
# is overloaded, offset store lags
|
241
|
+
sleep(1)
|
242
|
+
|
243
|
+
consumer.commit
|
244
|
+
expect(message1.offset).to eq message2.offset
|
245
|
+
expect(message1.payload).to eq message2.payload
|
246
|
+
end
|
247
|
+
|
248
|
+
it "allows skipping messages" do
|
249
|
+
# 3. send messages
|
250
|
+
send_one_message(:a)
|
251
|
+
send_one_message(:b)
|
252
|
+
send_one_message(:c)
|
253
|
+
|
254
|
+
# 4. get reference message
|
255
|
+
message = consumer.poll(timeout)
|
256
|
+
expect(message&.payload).to eq "payload a"
|
257
|
+
|
258
|
+
# 5. seek over one message
|
259
|
+
fake_msg = message.dup
|
260
|
+
fake_msg.instance_variable_set(:@offset, fake_msg.offset + 2)
|
261
|
+
consumer.seek(fake_msg)
|
262
|
+
|
263
|
+
# 6. ensure that only one message is available
|
264
|
+
records = consumer.poll(timeout)
|
265
|
+
expect(records&.payload).to eq "payload c"
|
266
|
+
records = consumer.poll(timeout)
|
267
|
+
expect(records).to be_nil
|
268
|
+
end
|
269
|
+
end
|
270
|
+
end
|
271
|
+
|
272
|
+
describe "#seek_by" do
|
273
|
+
let(:consumer) { rdkafka_consumer_config('auto.commit.interval.ms': 60_000).consumer }
|
274
|
+
let(:topic) { "it-#{SecureRandom.uuid}" }
|
275
|
+
let(:partition) { 0 }
|
276
|
+
let(:offset) { 0 }
|
277
|
+
|
278
|
+
before do
|
279
|
+
admin = rdkafka_producer_config.admin
|
280
|
+
admin.create_topic(topic, 1, 1).wait
|
281
|
+
admin.close
|
282
|
+
end
|
283
|
+
|
284
|
+
it "should raise an error when seeking fails" do
|
285
|
+
expect(Rdkafka::Bindings).to receive(:rd_kafka_seek).and_return(20)
|
286
|
+
expect {
|
287
|
+
consumer.seek_by(topic, partition, offset)
|
288
|
+
}.to raise_error Rdkafka::RdkafkaError
|
289
|
+
end
|
290
|
+
|
291
|
+
context "subscription" do
|
292
|
+
let(:timeout) { 1000 }
|
293
|
+
|
294
|
+
before do
|
295
|
+
consumer.subscribe(topic)
|
296
|
+
|
297
|
+
# 1. partitions are assigned
|
298
|
+
wait_for_assignment(consumer)
|
299
|
+
expect(consumer.assignment).not_to be_empty
|
300
|
+
|
301
|
+
# 2. eat unrelated messages
|
302
|
+
while(consumer.poll(timeout)) do; end
|
303
|
+
end
|
304
|
+
|
305
|
+
after { consumer.unsubscribe }
|
306
|
+
|
307
|
+
def send_one_message(val)
|
308
|
+
producer.produce(
|
309
|
+
topic: topic,
|
310
|
+
payload: "payload #{val}",
|
311
|
+
key: "key 1",
|
312
|
+
partition: 0
|
313
|
+
).wait
|
314
|
+
end
|
315
|
+
|
316
|
+
it "works when a partition is paused" do
|
317
|
+
# 3. get reference message
|
318
|
+
send_one_message(:a)
|
319
|
+
message1 = consumer.poll(timeout)
|
320
|
+
expect(message1&.payload).to eq "payload a"
|
321
|
+
|
322
|
+
# 4. pause the subscription
|
323
|
+
tpl = Rdkafka::Consumer::TopicPartitionList.new
|
324
|
+
tpl.add_topic(topic, 1)
|
325
|
+
consumer.pause(tpl)
|
326
|
+
|
327
|
+
# 5. seek by the previous message fields
|
328
|
+
consumer.seek_by(message1.topic, message1.partition, message1.offset)
|
329
|
+
|
330
|
+
# 6. resume the subscription
|
331
|
+
tpl = Rdkafka::Consumer::TopicPartitionList.new
|
332
|
+
tpl.add_topic(topic, 1)
|
333
|
+
consumer.resume(tpl)
|
334
|
+
|
335
|
+
# 7. ensure same message is read again
|
336
|
+
message2 = consumer.poll(timeout)
|
337
|
+
|
338
|
+
# This is needed because `enable.auto.offset.store` is true but when running in CI that
|
339
|
+
# is overloaded, offset store lags
|
340
|
+
sleep(2)
|
341
|
+
|
342
|
+
consumer.commit
|
343
|
+
expect(message1.offset).to eq message2.offset
|
344
|
+
expect(message1.payload).to eq message2.payload
|
345
|
+
end
|
346
|
+
|
347
|
+
it "allows skipping messages" do
|
348
|
+
# 3. send messages
|
349
|
+
send_one_message(:a)
|
350
|
+
send_one_message(:b)
|
351
|
+
send_one_message(:c)
|
352
|
+
|
353
|
+
# 4. get reference message
|
354
|
+
message = consumer.poll(timeout)
|
355
|
+
expect(message&.payload).to eq "payload a"
|
356
|
+
|
357
|
+
# 5. seek over one message
|
358
|
+
consumer.seek_by(message.topic, message.partition, message.offset + 2)
|
359
|
+
|
360
|
+
# 6. ensure that only one message is available
|
361
|
+
records = consumer.poll(timeout)
|
362
|
+
expect(records&.payload).to eq "payload c"
|
363
|
+
records = consumer.poll(timeout)
|
364
|
+
expect(records).to be_nil
|
365
|
+
end
|
366
|
+
end
|
367
|
+
end
|
368
|
+
|
369
|
+
describe "#assign and #assignment" do
|
370
|
+
it "should return an empty assignment if nothing is assigned" do
|
371
|
+
expect(consumer.assignment).to be_empty
|
372
|
+
end
|
373
|
+
|
374
|
+
it "should only accept a topic partition list in assign" do
|
375
|
+
expect {
|
376
|
+
consumer.assign("list")
|
377
|
+
}.to raise_error TypeError
|
378
|
+
end
|
379
|
+
|
380
|
+
it "should raise an error when assigning fails" do
|
381
|
+
expect(Rdkafka::Bindings).to receive(:rd_kafka_assign).and_return(20)
|
382
|
+
expect {
|
383
|
+
consumer.assign(Rdkafka::Consumer::TopicPartitionList.new)
|
384
|
+
}.to raise_error Rdkafka::RdkafkaError
|
385
|
+
end
|
386
|
+
|
387
|
+
it "should assign specific topic/partitions and return that assignment" do
|
388
|
+
tpl = Rdkafka::Consumer::TopicPartitionList.new
|
389
|
+
tpl.add_topic("consume_test_topic", (0..2))
|
390
|
+
consumer.assign(tpl)
|
391
|
+
|
392
|
+
assignment = consumer.assignment
|
393
|
+
expect(assignment).not_to be_empty
|
394
|
+
expect(assignment.to_h["consume_test_topic"].length).to eq 3
|
395
|
+
end
|
396
|
+
|
397
|
+
it "should return the assignment when subscribed" do
|
398
|
+
# Make sure there's a message
|
399
|
+
producer.produce(
|
400
|
+
topic: "consume_test_topic",
|
401
|
+
payload: "payload 1",
|
402
|
+
key: "key 1",
|
403
|
+
partition: 0
|
404
|
+
).wait
|
405
|
+
|
406
|
+
# Subscribe and poll until partitions are assigned
|
407
|
+
consumer.subscribe("consume_test_topic")
|
408
|
+
100.times do
|
409
|
+
consumer.poll(100)
|
410
|
+
break unless consumer.assignment.empty?
|
411
|
+
end
|
412
|
+
|
413
|
+
assignment = consumer.assignment
|
414
|
+
expect(assignment).not_to be_empty
|
415
|
+
expect(assignment.to_h["consume_test_topic"].length).to eq 3
|
416
|
+
end
|
417
|
+
|
418
|
+
it "should raise an error when getting assignment fails" do
|
419
|
+
expect(Rdkafka::Bindings).to receive(:rd_kafka_assignment).and_return(20)
|
420
|
+
expect {
|
421
|
+
consumer.assignment
|
422
|
+
}.to raise_error Rdkafka::RdkafkaError
|
423
|
+
end
|
424
|
+
end
|
425
|
+
|
426
|
+
describe '#assignment_lost?' do
|
427
|
+
it "should not return true as we do have an assignment" do
|
428
|
+
consumer.subscribe("consume_test_topic")
|
429
|
+
expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
|
430
|
+
list.add_topic("consume_test_topic")
|
431
|
+
end
|
432
|
+
|
433
|
+
expect(consumer.assignment_lost?).to eq false
|
434
|
+
consumer.unsubscribe
|
435
|
+
end
|
436
|
+
|
437
|
+
it "should not return true after voluntary unsubscribing" do
|
438
|
+
consumer.subscribe("consume_test_topic")
|
439
|
+
expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
|
440
|
+
list.add_topic("consume_test_topic")
|
441
|
+
end
|
442
|
+
|
443
|
+
consumer.unsubscribe
|
444
|
+
expect(consumer.assignment_lost?).to eq false
|
445
|
+
end
|
446
|
+
end
|
447
|
+
|
448
|
+
describe "#close" do
|
449
|
+
it "should close a consumer" do
|
450
|
+
consumer.subscribe("consume_test_topic")
|
451
|
+
100.times do |i|
|
452
|
+
producer.produce(
|
453
|
+
topic: "consume_test_topic",
|
454
|
+
payload: "payload #{i}",
|
455
|
+
key: "key #{i}",
|
456
|
+
partition: 0
|
457
|
+
).wait
|
458
|
+
end
|
459
|
+
consumer.close
|
460
|
+
expect {
|
461
|
+
consumer.poll(100)
|
462
|
+
}.to raise_error(Rdkafka::ClosedConsumerError, /poll/)
|
463
|
+
end
|
464
|
+
|
465
|
+
context 'when there are outgoing operations in other threads' do
|
466
|
+
it 'should wait and not crash' do
|
467
|
+
times = []
|
468
|
+
|
469
|
+
# Run a long running poll
|
470
|
+
thread = Thread.new do
|
471
|
+
times << Time.now
|
472
|
+
consumer.subscribe("empty_test_topic")
|
473
|
+
times << Time.now
|
474
|
+
consumer.poll(1_000)
|
475
|
+
times << Time.now
|
476
|
+
end
|
477
|
+
|
478
|
+
# Make sure it starts before we close
|
479
|
+
sleep(0.1)
|
480
|
+
consumer.close
|
481
|
+
close_time = Time.now
|
482
|
+
thread.join
|
483
|
+
|
484
|
+
times.each { |op_time| expect(op_time).to be < close_time }
|
485
|
+
end
|
486
|
+
end
|
487
|
+
end
|
488
|
+
|
489
|
+
describe "#position, #commit, #committed and #store_offset" do
|
490
|
+
# Make sure there are messages to work with
|
491
|
+
let!(:report) do
|
492
|
+
producer.produce(
|
493
|
+
topic: "consume_test_topic",
|
494
|
+
payload: "payload 1",
|
495
|
+
key: "key 1",
|
496
|
+
partition: 0
|
497
|
+
).wait
|
498
|
+
end
|
499
|
+
|
500
|
+
let(:message) do
|
501
|
+
wait_for_message(
|
502
|
+
topic: "consume_test_topic",
|
503
|
+
delivery_report: report,
|
504
|
+
consumer: consumer
|
505
|
+
)
|
506
|
+
end
|
507
|
+
|
508
|
+
describe "#position" do
|
509
|
+
it "should only accept a topic partition list in position if not nil" do
|
510
|
+
expect {
|
511
|
+
consumer.position("list")
|
512
|
+
}.to raise_error TypeError
|
513
|
+
end
|
514
|
+
end
|
515
|
+
|
516
|
+
describe "#committed" do
|
517
|
+
it "should only accept a topic partition list in commit if not nil" do
|
518
|
+
expect {
|
519
|
+
consumer.commit("list")
|
520
|
+
}.to raise_error TypeError
|
521
|
+
end
|
522
|
+
|
523
|
+
it "should commit in sync mode" do
|
524
|
+
expect {
|
525
|
+
consumer.commit(nil, true)
|
526
|
+
}.not_to raise_error
|
527
|
+
end
|
528
|
+
end
|
529
|
+
|
530
|
+
context "with a committed consumer" do
|
531
|
+
before :all do
|
532
|
+
# Make sure there are some messages.
|
533
|
+
handles = []
|
534
|
+
producer = rdkafka_config.producer
|
535
|
+
10.times do
|
536
|
+
(0..2).each do |i|
|
537
|
+
handles << producer.produce(
|
538
|
+
topic: "consume_test_topic",
|
539
|
+
payload: "payload 1",
|
540
|
+
key: "key 1",
|
541
|
+
partition: i
|
542
|
+
)
|
543
|
+
end
|
544
|
+
end
|
545
|
+
handles.each(&:wait)
|
546
|
+
producer.close
|
547
|
+
end
|
548
|
+
|
549
|
+
before do
|
550
|
+
consumer.subscribe("consume_test_topic")
|
551
|
+
wait_for_assignment(consumer)
|
552
|
+
list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
|
553
|
+
list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => 1, 1 => 1, 2 => 1)
|
554
|
+
end
|
555
|
+
consumer.commit(list)
|
556
|
+
end
|
557
|
+
|
558
|
+
it "should commit a specific topic partion list" do
|
559
|
+
list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
|
560
|
+
list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => 1, 1 => 2, 2 => 3)
|
561
|
+
end
|
562
|
+
consumer.commit(list)
|
563
|
+
|
564
|
+
partitions = consumer.committed(list).to_h["consume_test_topic"]
|
565
|
+
expect(partitions[0].offset).to eq 1
|
566
|
+
expect(partitions[1].offset).to eq 2
|
567
|
+
expect(partitions[2].offset).to eq 3
|
568
|
+
end
|
569
|
+
|
570
|
+
it "should raise an error when committing fails" do
|
571
|
+
expect(Rdkafka::Bindings).to receive(:rd_kafka_commit).and_return(20)
|
572
|
+
|
573
|
+
expect {
|
574
|
+
consumer.commit
|
575
|
+
}.to raise_error(Rdkafka::RdkafkaError)
|
576
|
+
end
|
577
|
+
|
578
|
+
describe "#committed" do
|
579
|
+
it "should fetch the committed offsets for the current assignment" do
|
580
|
+
partitions = consumer.committed.to_h["consume_test_topic"]
|
581
|
+
expect(partitions).not_to be_nil
|
582
|
+
expect(partitions[0].offset).to eq 1
|
583
|
+
end
|
584
|
+
|
585
|
+
it "should fetch the committed offsets for a specified topic partition list" do
|
586
|
+
list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
|
587
|
+
list.add_topic("consume_test_topic", [0, 1, 2])
|
588
|
+
end
|
589
|
+
partitions = consumer.committed(list).to_h["consume_test_topic"]
|
590
|
+
expect(partitions).not_to be_nil
|
591
|
+
expect(partitions[0].offset).to eq 1
|
592
|
+
expect(partitions[1].offset).to eq 1
|
593
|
+
expect(partitions[2].offset).to eq 1
|
594
|
+
end
|
595
|
+
|
596
|
+
it "should raise an error when getting committed fails" do
|
597
|
+
expect(Rdkafka::Bindings).to receive(:rd_kafka_committed).and_return(20)
|
598
|
+
list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
|
599
|
+
list.add_topic("consume_test_topic", [0, 1, 2])
|
600
|
+
end
|
601
|
+
expect {
|
602
|
+
consumer.committed(list)
|
603
|
+
}.to raise_error Rdkafka::RdkafkaError
|
604
|
+
end
|
605
|
+
end
|
606
|
+
|
607
|
+
describe "#store_offset" do
  let(:consumer) { rdkafka_consumer_config('enable.auto.offset.store': false).consumer }
  let(:metadata) { SecureRandom.uuid }
  let(:group_id) { SecureRandom.uuid }
  let(:base_config) do
    {
      'group.id': group_id,
      'enable.auto.offset.store': false,
      'enable.auto.commit': false
    }
  end

  before do
    # Fresh consumer group per example so committed offsets never leak between runs
    @new_consumer = rdkafka_consumer_config(base_config).consumer
    @new_consumer.subscribe("consume_test_topic")
    wait_for_assignment(@new_consumer)
  end

  after do
    @new_consumer.close
  end

  it "should store the offset for a message" do
    @new_consumer.store_offset(message)
    @new_consumer.commit

    # TODO use position here, should be at offset

    tpl = Rdkafka::Consumer::TopicPartitionList.new
    tpl.add_topic("consume_test_topic", [0, 1, 2])
    partitions = @new_consumer.committed(tpl).to_h["consume_test_topic"]
    expect(partitions).not_to be_nil
    expect(partitions[message.partition].offset).to eq(message.offset + 1)
  end

  it "should store the offset for a message with metadata" do
    @new_consumer.store_offset(message, metadata)
    @new_consumer.commit
    @new_consumer.close

    # Read the commit back through a second consumer in the same group to
    # verify the metadata round-tripped through the broker.
    meta_consumer = rdkafka_consumer_config(base_config).consumer
    meta_consumer.subscribe("consume_test_topic")
    wait_for_assignment(meta_consumer)
    meta_consumer.poll(1_000)
    expect(meta_consumer.committed.to_h[message.topic][message.partition].metadata).to eq(metadata)
    meta_consumer.close
  end

  it "should raise an error with invalid input" do
    # Partition 9999 does not exist on the test topic
    allow(message).to receive(:partition).and_return(9999)
    expect {
      @new_consumer.store_offset(message)
    }.to raise_error Rdkafka::RdkafkaError
  end

  describe "#position" do
    it "should fetch the positions for the current assignment" do
      consumer.store_offset(message)

      partitions = consumer.position.to_h["consume_test_topic"]
      expect(partitions).not_to be_nil
      expect(partitions[0].offset).to eq message.offset + 1
    end

    it "should fetch the positions for a specified assignment" do
      consumer.store_offset(message)

      tpl = Rdkafka::Consumer::TopicPartitionList.new
      tpl.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => nil, 1 => nil, 2 => nil)
      partitions = consumer.position(tpl).to_h["consume_test_topic"]
      expect(partitions).not_to be_nil
      expect(partitions[0].offset).to eq message.offset + 1
    end

    it "should raise an error when getting the position fails" do
      # Stub the native call to return a non-zero response so position raises
      expect(Rdkafka::Bindings).to receive(:rd_kafka_position).and_return(20)

      expect {
        consumer.position
      }.to raise_error(Rdkafka::RdkafkaError)
    end
  end

  context "when trying to use with enable.auto.offset.store set to true" do
    let(:consumer) { rdkafka_consumer_config('enable.auto.offset.store': true).consumer }

    it "expect to raise invalid configuration error" do
      expect { consumer.store_offset(message) }.to raise_error(Rdkafka::RdkafkaError, /invalid_arg/)
    end
  end
end
|
700
|
+
end
|
701
|
+
end
|
702
|
+
|
703
|
+
describe "#query_watermark_offsets" do
  it "should return the watermark offsets" do
    # Make sure there's a message
    producer.produce(
      topic: "watermarks_test_topic",
      payload: "payload 1",
      key: "key 1",
      partition: 0
    ).wait

    low, high = consumer.query_watermark_offsets("watermarks_test_topic", 0, 5000)
    expect(low).to eq 0
    expect(high).to be > 0
  end

  it "should raise an error when querying offsets fails" do
    # Stub the native call to return a non-zero response code
    expect(Rdkafka::Bindings).to receive(:rd_kafka_query_watermark_offsets).and_return(20)
    expect do
      consumer.query_watermark_offsets("consume_test_topic", 0, 5000)
    end.to raise_error(Rdkafka::RdkafkaError)
  end
end
|
725
|
+
|
726
|
+
describe "#lag" do
  let(:consumer) { rdkafka_consumer_config(:"enable.partition.eof" => true).consumer }

  it "should calculate the consumer lag" do
    # Make sure there's a message in every partition and
    # wait for the message to make sure everything is committed.
    (0..2).each do |i|
      producer.produce(
        topic: "consume_test_topic",
        key: "key lag #{i}",
        partition: i
      ).wait
    end

    # Consume to the end: count one EOF notification per partition
    consumer.subscribe("consume_test_topic")
    eof_count = 0
    loop do
      begin
        consumer.poll(100)
      rescue Rdkafka::RdkafkaError => error
        eof_count += 1 if error.is_partition_eof?
        break if eof_count == 3
      end
    end

    # Commit
    consumer.commit

    # Create list to fetch lag for. TODO creating the list will not be necessary
    # after committed uses the subscription.
    tpl = Rdkafka::Consumer::TopicPartitionList.new
    tpl.add_topic("consume_test_topic", (0..2))
    list = consumer.committed(tpl)

    # Lag should be 0 now
    lag = consumer.lag(list)
    expected_lag = {
      "consume_test_topic" => { 0 => 0, 1 => 0, 2 => 0 }
    }
    expect(lag).to eq(expected_lag)

    # Produce message on every topic again
    (0..2).each do |i|
      producer.produce(
        topic: "consume_test_topic",
        key: "key lag #{i}",
        partition: i
      ).wait
    end

    # Lag should be 1 now
    lag = consumer.lag(list)
    expected_lag = {
      "consume_test_topic" => { 0 => 1, 1 => 1, 2 => 1 }
    }
    expect(lag).to eq(expected_lag)
  end

  it "returns nil if there are no messages on the topic" do
    tpl = Rdkafka::Consumer::TopicPartitionList.new
    tpl.add_topic("consume_test_topic", (0..2))
    list = consumer.committed(tpl)

    lag = consumer.lag(list)
    expected_lag = {
      "consume_test_topic" => {}
    }
    expect(lag).to eq(expected_lag)
  end
end
|
807
|
+
|
808
|
+
describe "#cluster_id" do
  it 'should return the current ClusterId' do
    # A cluster id is only available once the consumer has joined the group
    consumer.subscribe("consume_test_topic")
    wait_for_assignment(consumer)
    expect(consumer.cluster_id).not_to be_empty
  end
end
|
815
|
+
|
816
|
+
describe "#member_id" do
  it 'should return the current MemberId' do
    consumer.subscribe("consume_test_topic")
    wait_for_assignment(consumer)
    # librdkafka-assigned member ids carry an 'rdkafka-' prefix
    expect(consumer.member_id).to start_with('rdkafka-')
  end
end
|
823
|
+
|
824
|
+
describe "#poll" do
  it "should return nil if there is no subscription" do
    expect(consumer.poll(1000)).to be_nil
  end

  it "should return nil if there are no messages" do
    consumer.subscribe("empty_test_topic")
    expect(consumer.poll(1000)).to be_nil
  end

  it "should return a message if there is one" do
    # Unique topic per run so no earlier message can satisfy the expectation
    topic = "it-#{SecureRandom.uuid}"

    producer.produce(
      topic: topic,
      payload: "payload 1",
      key: "key 1"
    ).wait
    consumer.subscribe(topic)
    message = consumer.each { |m| break m }

    expect(message).to be_a Rdkafka::Consumer::Message
    expect(message.payload).to eq('payload 1')
    expect(message.key).to eq('key 1')
  end

  it "should raise an error when polling fails" do
    # Hand the consumer a native message struct carrying an error code and
    # verify the pointer is destroyed even on the error path.
    message = Rdkafka::Bindings::Message.new
    message[:err] = 20
    message_pointer = message.to_ptr
    expect(Rdkafka::Bindings).to receive(:rd_kafka_consumer_poll).and_return(message_pointer)
    expect(Rdkafka::Bindings).to receive(:rd_kafka_message_destroy).with(message_pointer)
    expect do
      consumer.poll(100)
    end.to raise_error(Rdkafka::RdkafkaError)
  end

  it "expect to raise error when polling non-existing topic" do
    missing_topic = SecureRandom.uuid
    consumer.subscribe(missing_topic)

    expect do
      consumer.poll(1_000)
    end.to raise_error(Rdkafka::RdkafkaError, /Subscribed topic not available: #{missing_topic}/)
  end
end
|
871
|
+
|
872
|
+
describe "#poll with headers" do
  it "should return message with headers using string keys (when produced with symbol keys)" do
    report = producer.produce(
      topic: "consume_test_topic",
      key: "key headers",
      headers: { foo: 'bar' }
    ).wait

    message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
    expect(message).to be
    expect(message.key).to eq('key headers')
    # Symbol header keys come back stringified
    expect(message.headers).to include('foo' => 'bar')
  end

  it "should return message with headers using string keys (when produced with string keys)" do
    report = producer.produce(
      topic: "consume_test_topic",
      key: "key headers",
      headers: { 'foo' => 'bar' }
    ).wait

    message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
    expect(message).to be
    expect(message.key).to eq('key headers')
    expect(message.headers).to include('foo' => 'bar')
  end

  it "should return message with no headers" do
    report = producer.produce(
      topic: "consume_test_topic",
      key: "key no headers",
      headers: nil
    ).wait

    message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
    expect(message).to be
    expect(message.key).to eq('key no headers')
    expect(message.headers).to be_empty
  end

  it "should raise an error when message headers aren't readable" do
    # Force the native header lookup to report a non-zero response
    expect(Rdkafka::Bindings).to receive(:rd_kafka_message_headers).with(any_args) { 1 }

    report = producer.produce(
      topic: "consume_test_topic",
      key: "key err headers",
      headers: nil
    ).wait

    expect do
      wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
    end.to raise_error do |err|
      expect(err).to be_instance_of(Rdkafka::RdkafkaError)
      expect(err.message).to start_with("Error reading message headers")
    end
  end

  it "should raise an error when the first message header aren't readable" do
    # Force reading of an individual header to report a non-zero response
    expect(Rdkafka::Bindings).to receive(:rd_kafka_header_get_all).with(any_args) { 1 }

    report = producer.produce(
      topic: "consume_test_topic",
      key: "key err headers",
      headers: { foo: 'bar' }
    ).wait

    expect do
      wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
    end.to raise_error do |err|
      expect(err).to be_instance_of(Rdkafka::RdkafkaError)
      expect(err.message).to start_with("Error reading a message header at index 0")
    end
  end
end
|
946
|
+
|
947
|
+
describe "#each" do
  it "should yield messages" do
    delivery_handles = []
    10.times do
      delivery_handles << producer.produce(
        topic: "consume_test_topic",
        payload: "payload 1",
        key: "key 1",
        partition: 0
      )
    end
    delivery_handles.each(&:wait)

    consumer.subscribe("consume_test_topic")
    # Check the first 10 messages. Then close the consumer, which
    # should break the each loop.
    consumer.each_with_index do |message, i|
      expect(message).to be_a Rdkafka::Consumer::Message
      break if i == 10
    end
    consumer.close
  end
end
|
970
|
+
|
971
|
+
describe "#each_batch" do
  it 'expect to raise an error' do
    # each_batch is intentionally unsupported in this API
    expect do
      consumer.each_batch {}
    end.to raise_error(NotImplementedError)
  end
end
|
978
|
+
|
979
|
+
describe "#offsets_for_times" do
  it "should raise when not TopicPartitionList" do
    expect { consumer.offsets_for_times([]) }.to raise_error(TypeError)
  end

  it "should raise an error when offsets_for_times fails" do
    tpl = Rdkafka::Consumer::TopicPartitionList.new

    # Stub the native call to return a non-zero response code
    expect(Rdkafka::Bindings).to receive(:rd_kafka_offsets_for_times).and_return(7)

    expect { consumer.offsets_for_times(tpl) }.to raise_error(Rdkafka::RdkafkaError)
  end

  context "when subscribed" do
    let(:timeout) { 1000 }

    before do
      consumer.subscribe("consume_test_topic")

      # 1. partitions are assigned
      wait_for_assignment(consumer)
      expect(consumer.assignment).not_to be_empty

      # 2. eat unrelated messages
      while (consumer.poll(timeout)) do; end
    end

    after { consumer.unsubscribe }

    def send_one_message(val)
      producer.produce(
        topic: "consume_test_topic",
        payload: "payload #{val}",
        key: "key 0",
        partition: 0
      ).wait
    end

    it "returns a TopicParticionList with updated offsets" do
      send_one_message("a")
      send_one_message("b")
      send_one_message("c")

      # Use the middle message's timestamp as the lookup point
      consumer.poll(timeout)
      message = consumer.poll(timeout)
      consumer.poll(timeout)

      tpl = Rdkafka::Consumer::TopicPartitionList.new
      tpl.add_topic_and_partitions_with_offsets(
        "consume_test_topic",
        [
          [0, message.timestamp]
        ]
      )

      tpl_response = consumer.offsets_for_times(tpl)

      expect(tpl_response.to_h["consume_test_topic"][0].offset).to eq message.offset
    end
  end
end
|
1041
|
+
|
1042
|
+
# Only relevant in case of a consumer with separate queues
describe '#events_poll' do
  let(:stats) { [] }

  before { Rdkafka::Config.statistics_callback = ->(published) { stats << published } }

  after { Rdkafka::Config.statistics_callback = nil }

  let(:consumer) do
    # Detach the events queue from the message poll queue so statistics
    # callbacks only fire via events_poll
    config = rdkafka_consumer_config('statistics.interval.ms': 500)
    config.consumer_poll_set = false
    config.consumer
  end

  it "expect to run events_poll, operate and propagate stats on events_poll and not poll" do
    consumer.subscribe("consume_test_topic")
    consumer.poll(1_000)
    expect(stats).to be_empty
    consumer.events_poll(-1)
    expect(stats).not_to be_empty
  end
end
|
1064
|
+
|
1065
|
+
describe '#consumer_group_metadata_pointer' do
  let(:pointer) { consumer.consumer_group_metadata_pointer }

  # The caller owns the native struct, so release it after each example
  after { Rdkafka::Bindings.rd_kafka_consumer_group_metadata_destroy(pointer) }

  it 'expect to return a pointer' do
    expect(pointer).to be_a(FFI::Pointer)
  end
end
|
1074
|
+
|
1075
|
+
describe "a rebalance listener" do
  let(:consumer) do
    config = rdkafka_consumer_config
    config.consumer_rebalance_listener = listener
    config.consumer
  end

  context "with a working listener" do
    let(:listener) do
      # Records each rebalance event as [event_name, topic, *partitions]
      Struct.new(:queue) do
        def on_partitions_assigned(list)
          collect(:assign, list)
        end

        def on_partitions_revoked(list)
          collect(:revoke, list)
        end

        def collect(name, list)
          partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
          queue << ([name] + partitions)
        end
      end.new([])
    end

    it "should get notifications" do
      notify_listener(listener)

      expect(listener.queue).to eq([
        [:assign, "consume_test_topic", 0, 1, 2],
        [:revoke, "consume_test_topic", 0, 1, 2]
      ])
    end
  end

  context "with a broken listener" do
    let(:listener) do
      # Raises from every callback to prove exceptions don't kill the rebalance
      Struct.new(:queue) do
        def on_partitions_assigned(list)
          queue << :assigned
          raise 'boom'
        end

        def on_partitions_revoked(list)
          queue << :revoked
          raise 'boom'
        end
      end.new([])
    end

    it 'should handle callback exceptions' do
      notify_listener(listener)

      expect(listener.queue).to eq([:assigned, :revoked])
    end
  end
end
|
1132
|
+
|
1133
|
+
context "methods that should not be called after a consumer has been closed" do
  before do
    consumer.close
  end

  # Affected methods and a non-invalid set of parameters for the method
  {
    subscribe: [nil],
    unsubscribe: nil,
    pause: [nil],
    resume: [nil],
    subscription: nil,
    assign: [nil],
    assignment: nil,
    committed: [],
    query_watermark_offsets: [nil, nil],
    assignment_lost?: []
  }.each do |method, args|
    it "raises an exception if #{method} is called" do
      expect do
        if args.nil?
          consumer.public_send(method)
        else
          consumer.public_send(method, *args)
        end
      end.to raise_exception(Rdkafka::ClosedConsumerError, /#{method}/)
    end
  end
end
|
1162
|
+
|
1163
|
+
it "provides a finalizer that closes the native kafka client" do
  expect(consumer.closed?).to eq(false)

  # GC finalizers receive the object id of the collected object; the value
  # itself is irrelevant to the close behavior
  consumer.finalizer.call("some-ignored-object-id")

  expect(consumer.closed?).to eq(true)
end
|
1170
|
+
|
1171
|
+
context "when the rebalance protocol is cooperative" do
  let(:consumer) do
    config = rdkafka_consumer_config(
      {
        :"partition.assignment.strategy" => "cooperative-sticky",
        :"debug" => "consumer"
      }
    )
    config.consumer_rebalance_listener = listener
    config.consumer
  end

  let(:listener) do
    # Records each rebalance event as [event_name, topic, *partitions]
    Struct.new(:queue) do
      def on_partitions_assigned(list)
        collect(:assign, list)
      end

      def on_partitions_revoked(list)
        collect(:revoke, list)
      end

      def collect(name, list)
        partitions = list.to_h.map { |key, values| [key, values.map(&:partition)] }.flatten
        queue << ([name] + partitions)
      end
    end.new([])
  end

  it "should be able to assign and unassign partitions using the cooperative partition assignment APIs" do
    notify_listener(listener) do
      delivery_handles = []
      10.times do
        delivery_handles << producer.produce(
          topic: "consume_test_topic",
          payload: "payload 1",
          key: "key 1",
          partition: 0
        )
      end
      delivery_handles.each(&:wait)

      consumer.subscribe("consume_test_topic")
      # Check the first 10 messages. Then close the consumer, which
      # should break the each loop.
      consumer.each_with_index do |message, i|
        expect(message).to be_a Rdkafka::Consumer::Message
        break if i == 10
      end
    end

    expect(listener.queue).to eq([
      [:assign, "consume_test_topic", 0, 1, 2],
      [:revoke, "consume_test_topic", 0, 1, 2]
    ])
  end
end
|
1228
|
+
|
1229
|
+
describe '#oauthbearer_set_token' do
  context 'when sasl not configured' do
    it 'should return RD_KAFKA_RESP_ERR__STATE' do
      # Without an OAUTHBEARER-enabled config, setting a token is rejected
      response = consumer.oauthbearer_set_token(
        token: "foo",
        lifetime_ms: Time.now.to_i * 1000 + 900 * 1000,
        principal_name: "kafka-cluster"
      )
      expect(response).to eq(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE)
    end
  end

  context 'when sasl configured' do
    # Instance variable instead of a global: before/it/after already share the
    # example instance, and a global would leak state across the process.
    before do
      @consumer_sasl = rdkafka_producer_config(
        "security.protocol": "sasl_ssl",
        "sasl.mechanisms": 'OAUTHBEARER'
      ).consumer
    end

    after do
      @consumer_sasl.close
    end

    it 'should succeed' do
      response = @consumer_sasl.oauthbearer_set_token(
        token: "foo",
        lifetime_ms: Time.now.to_i * 1000 + 900 * 1000,
        principal_name: "kafka-cluster"
      )
      expect(response).to eq(0)
    end
  end
end
|
1264
|
+
|
1265
|
+
describe "when reaching eof on a topic and eof reporting enabled" do
  let(:consumer) { rdkafka_consumer_config(:"enable.partition.eof" => true).consumer }

  it "should return proper details" do
    # Put one message in each partition so every partition can reach EOF
    (0..2).each do |i|
      producer.produce(
        topic: "consume_test_topic",
        key: "key lag #{i}",
        partition: i
      ).wait
    end

    # Consume to the end; capture the first partition-EOF error raised.
    # (Removed an unused `eof_count = 0` local left over from the #lag spec.)
    consumer.subscribe("consume_test_topic")
    eof_error = nil

    loop do
      begin
        consumer.poll(100)
      rescue Rdkafka::RdkafkaError => error
        eof_error = error if error.is_partition_eof?
        break if eof_error
      end
    end

    expect(eof_error.code).to eq(:partition_eof)
    expect(eof_error.details[:topic]).to eq('consume_test_topic')
    expect(eof_error.details[:partition]).to be_a(Integer)
    expect(eof_error.details[:offset]).to be_a(Integer)
  end
end
|
1299
|
+
end
|