rdkafka 0.13.0 → 0.15.1

Files changed (75)
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +0 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci.yml +57 -0
  5. data/.gitignore +4 -0
  6. data/.rspec +1 -0
  7. data/.ruby-gemset +1 -0
  8. data/.ruby-version +1 -0
  9. data/CHANGELOG.md +155 -111
  10. data/{LICENSE → MIT-LICENSE} +2 -1
  11. data/README.md +60 -39
  12. data/certs/cert_chain.pem +26 -0
  13. data/docker-compose.yml +18 -15
  14. data/ext/README.md +1 -1
  15. data/ext/Rakefile +43 -26
  16. data/lib/rdkafka/abstract_handle.rb +40 -26
  17. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  18. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  19. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  20. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  21. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  22. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  23. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  24. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  25. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  26. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  27. data/lib/rdkafka/admin/describe_acl_report.rb +23 -0
  28. data/lib/rdkafka/admin.rb +449 -7
  29. data/lib/rdkafka/bindings.rb +133 -7
  30. data/lib/rdkafka/callbacks.rb +196 -1
  31. data/lib/rdkafka/config.rb +53 -19
  32. data/lib/rdkafka/consumer/headers.rb +2 -4
  33. data/lib/rdkafka/consumer/topic_partition_list.rb +11 -8
  34. data/lib/rdkafka/consumer.rb +164 -74
  35. data/lib/rdkafka/helpers/time.rb +14 -0
  36. data/lib/rdkafka/metadata.rb +22 -1
  37. data/lib/rdkafka/native_kafka.rb +6 -1
  38. data/lib/rdkafka/producer/delivery_handle.rb +12 -1
  39. data/lib/rdkafka/producer/delivery_report.rb +16 -3
  40. data/lib/rdkafka/producer.rb +121 -13
  41. data/lib/rdkafka/version.rb +3 -3
  42. data/lib/rdkafka.rb +21 -1
  43. data/rdkafka.gemspec +19 -5
  44. data/renovate.json +6 -0
  45. data/spec/rdkafka/abstract_handle_spec.rb +0 -2
  46. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  47. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  48. data/spec/rdkafka/admin/create_topic_handle_spec.rb +0 -2
  49. data/spec/rdkafka/admin/create_topic_report_spec.rb +0 -2
  50. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  51. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  52. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +0 -2
  53. data/spec/rdkafka/admin/delete_topic_report_spec.rb +0 -2
  54. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  55. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  56. data/spec/rdkafka/admin_spec.rb +205 -2
  57. data/spec/rdkafka/bindings_spec.rb +0 -1
  58. data/spec/rdkafka/callbacks_spec.rb +0 -2
  59. data/spec/rdkafka/config_spec.rb +8 -2
  60. data/spec/rdkafka/consumer/headers_spec.rb +0 -2
  61. data/spec/rdkafka/consumer/message_spec.rb +0 -2
  62. data/spec/rdkafka/consumer/partition_spec.rb +0 -2
  63. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +19 -2
  64. data/spec/rdkafka/consumer_spec.rb +232 -39
  65. data/spec/rdkafka/error_spec.rb +0 -2
  66. data/spec/rdkafka/metadata_spec.rb +2 -3
  67. data/spec/rdkafka/native_kafka_spec.rb +2 -3
  68. data/spec/rdkafka/producer/delivery_handle_spec.rb +0 -2
  69. data/spec/rdkafka/producer/delivery_report_spec.rb +4 -2
  70. data/spec/rdkafka/producer_spec.rb +183 -3
  71. data/spec/spec_helper.rb +3 -1
  72. data.tar.gz.sig +0 -0
  73. metadata +78 -14
  74. metadata.gz.sig +0 -0
  75. data/.semaphore/semaphore.yml +0 -27
data/spec/rdkafka/consumer_spec.rb CHANGED
@@ -1,6 +1,5 @@
  # frozen_string_literal: true

- require "spec_helper"
  require "ostruct"
  require 'securerandom'

@@ -11,6 +10,10 @@ describe Rdkafka::Consumer do
  after { consumer.close }
  after { producer.close }

+ describe '#name' do
+   it { expect(consumer.name).to include('rdkafka#consumer-') }
+ end
+
  describe "#subscribe, #unsubscribe and #subscription" do
    it "should subscribe, unsubscribe and return the subscription" do
      expect(consumer.subscription).to be_empty
@@ -51,6 +54,30 @@ describe Rdkafka::Consumer do
        consumer.subscription
      }.to raise_error(Rdkafka::RdkafkaError)
    end
+
+   context "when using consumer without the poll set" do
+     let(:consumer) do
+       config = rdkafka_consumer_config
+       config.consumer_poll_set = false
+       config.consumer
+     end
+
+     it "should subscribe, unsubscribe and return the subscription" do
+       expect(consumer.subscription).to be_empty
+
+       consumer.subscribe("consume_test_topic")
+
+       expect(consumer.subscription).not_to be_empty
+       expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+         list.add_topic("consume_test_topic")
+       end
+       expect(consumer.subscription).to eq expected_subscription
+
+       consumer.unsubscribe
+
+       expect(consumer.subscription).to be_empty
+     end
+   end
  end

  describe "#pause and #resume" do
@@ -270,6 +297,28 @@ describe Rdkafka::Consumer do
    end
  end

+ describe '#assignment_lost?' do
+   it "should not return true as we do have an assignment" do
+     consumer.subscribe("consume_test_topic")
+     expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+       list.add_topic("consume_test_topic")
+     end
+
+     expect(consumer.assignment_lost?).to eq false
+     consumer.unsubscribe
+   end
+
+   it "should not return true after voluntary unsubscribing" do
+     consumer.subscribe("consume_test_topic")
+     expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+       list.add_topic("consume_test_topic")
+     end
+
+     consumer.unsubscribe
+     expect(consumer.assignment_lost?).to eq false
+   end
+ end
+
  describe "#close" do
    it "should close a consumer" do
      consumer.subscribe("consume_test_topic")
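Note: as the spec above shows, `#assignment_lost?` stays false after a voluntary unsubscribe; it is meant to flag involuntary loss (for example after exceeding max.poll.interval.ms). A sketch of one possible use; the commit strategy here is illustrative, not taken from the diff:

    # Inside revoke handling: treat a lost assignment differently
    # from a voluntary one.
    if consumer.assignment_lost?
      # Partitions may already be owned by another group member; skip committing.
    else
      consumer.commit(nil, true) # synchronous commit before giving up partitions
    end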
@@ -311,8 +360,9 @@ describe Rdkafka::Consumer do
    end
  end

- describe "#commit, #committed and #store_offset" do
-   # Make sure there's a stored offset
+
+ describe "#position, #commit, #committed and #store_offset" do
+   # Make sure there are messages to work with
    let!(:report) do
      producer.produce(
        topic: "consume_test_topic",
@@ -330,29 +380,33 @@ describe Rdkafka::Consumer do
      )
    end

-   it "should only accept a topic partition list in committed" do
-     expect {
-       consumer.committed("list")
-     }.to raise_error TypeError
+   describe "#position" do
+     it "should only accept a topic partition list in position if not nil" do
+       expect {
+         consumer.position("list")
+       }.to raise_error TypeError
+     end
    end

-   it "should commit in sync mode" do
-     expect {
-       consumer.commit(nil, true)
-     }.not_to raise_error
-   end
+   describe "#committed" do
+     it "should only accept a topic partition list in commit if not nil" do
+       expect {
+         consumer.commit("list")
+       }.to raise_error TypeError
+     end

-   it "should only accept a topic partition list in commit if not nil" do
-     expect {
-       consumer.commit("list")
-     }.to raise_error TypeError
+     it "should commit in sync mode" do
+       expect {
+         consumer.commit(nil, true)
+       }.not_to raise_error
+     end
    end

    context "with a committed consumer" do
      before :all do
        # Make sure there are some messages.
        handles = []
-       producer = rdkafka_producer_config.producer
+       producer = rdkafka_config.producer
        10.times do
          (0..2).each do |i|
            handles << producer.produce(
@@ -396,34 +450,38 @@ describe Rdkafka::Consumer do
      }.to raise_error(Rdkafka::RdkafkaError)
    end

-   it "should fetch the committed offsets for the current assignment" do
-     partitions = consumer.committed.to_h["consume_test_topic"]
-     expect(partitions).not_to be_nil
-     expect(partitions[0].offset).to eq 1
-   end
+   describe "#committed" do
+     it "should fetch the committed offsets for the current assignment" do
+       partitions = consumer.committed.to_h["consume_test_topic"]
+       expect(partitions).not_to be_nil
+       expect(partitions[0].offset).to eq 1
+     end

-   it "should fetch the committed offsets for a specified topic partition list" do
-     list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
-       list.add_topic("consume_test_topic", [0, 1, 2])
+     it "should fetch the committed offsets for a specified topic partition list" do
+       list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+         list.add_topic("consume_test_topic", [0, 1, 2])
+       end
+       partitions = consumer.committed(list).to_h["consume_test_topic"]
+       expect(partitions).not_to be_nil
+       expect(partitions[0].offset).to eq 1
+       expect(partitions[1].offset).to eq 1
+       expect(partitions[2].offset).to eq 1
      end
-     partitions = consumer.committed(list).to_h["consume_test_topic"]
-     expect(partitions).not_to be_nil
-     expect(partitions[0].offset).to eq 1
-     expect(partitions[1].offset).to eq 1
-     expect(partitions[2].offset).to eq 1
-   end

-   it "should raise an error when getting committed fails" do
-     expect(Rdkafka::Bindings).to receive(:rd_kafka_committed).and_return(20)
-     list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
-       list.add_topic("consume_test_topic", [0, 1, 2])
+     it "should raise an error when getting committed fails" do
+       expect(Rdkafka::Bindings).to receive(:rd_kafka_committed).and_return(20)
+       list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+         list.add_topic("consume_test_topic", [0, 1, 2])
+       end
+       expect {
+         consumer.committed(list)
+       }.to raise_error Rdkafka::RdkafkaError
      end
-     expect {
-       consumer.committed(list)
-     }.to raise_error Rdkafka::RdkafkaError
    end

    describe "#store_offset" do
+     let(:consumer) { rdkafka_consumer_config('enable.auto.offset.store': false).consumer }
+
      before do
        config = {}
        config[:'enable.auto.offset.store'] = false
@@ -441,6 +499,8 @@ describe Rdkafka::Consumer do
        @new_consumer.store_offset(message)
        @new_consumer.commit

+       # TODO use position here, should be at offset
+
        list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
          list.add_topic("consume_test_topic", [0, 1, 2])
        end
@@ -455,6 +515,43 @@ describe Rdkafka::Consumer do
        @new_consumer.store_offset(message)
      }.to raise_error Rdkafka::RdkafkaError
    end
+
+   describe "#position" do
+     it "should fetch the positions for the current assignment" do
+       consumer.store_offset(message)
+
+       partitions = consumer.position.to_h["consume_test_topic"]
+       expect(partitions).not_to be_nil
+       expect(partitions[0].offset).to eq message.offset + 1
+     end
+
+     it "should fetch the positions for a specified assignment" do
+       consumer.store_offset(message)
+
+       list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+         list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => nil, 1 => nil, 2 => nil)
+       end
+       partitions = consumer.position(list).to_h["consume_test_topic"]
+       expect(partitions).not_to be_nil
+       expect(partitions[0].offset).to eq message.offset + 1
+     end
+
+     it "should raise an error when getting the position fails" do
+       expect(Rdkafka::Bindings).to receive(:rd_kafka_position).and_return(20)
+
+       expect {
+         consumer.position
+       }.to raise_error(Rdkafka::RdkafkaError)
+     end
+   end
+
+   context "when trying to use with enable.auto.offset.store set to true" do
+     let(:consumer) { rdkafka_consumer_config('enable.auto.offset.store': true).consumer }
+
+     it "expect to raise invalid configuration error" do
+       expect { consumer.store_offset(message) }.to raise_error(Rdkafka::RdkafkaError, /invalid_arg/)
+     end
+   end
      end
    end
  end
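Note: `#position` is new in this release and complements manual offset storage. Per the specs above, `store_offset` raises invalid_arg unless 'enable.auto.offset.store' is false, and the position is the next offset to fetch, i.e. the stored offset plus one. A minimal sketch under those assumptions, on partition 0 of a placeholder topic:

    message = consumer.poll(1_000)
    consumer.store_offset(message)  # mark as processed; needs auto offset store disabled
    consumer.commit(nil, true)      # synchronously commit the stored offsets
    partitions = consumer.position.to_h["consume_test_topic"]
    partitions[0].offset            # => message.offset + 1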
@@ -950,6 +1047,102 @@ describe Rdkafka::Consumer do
    end
  end

+ describe "#offsets_for_times" do
+   it "should raise when not TopicPartitionList" do
+     expect { consumer.offsets_for_times([]) }.to raise_error(TypeError)
+   end
+
+   it "should raise an error when offsets_for_times fails" do
+     tpl = Rdkafka::Consumer::TopicPartitionList.new
+
+     expect(Rdkafka::Bindings).to receive(:rd_kafka_offsets_for_times).and_return(7)
+
+     expect { consumer.offsets_for_times(tpl) }.to raise_error(Rdkafka::RdkafkaError)
+   end
+
+   context "when subscribed" do
+     let(:timeout) { 1000 }
+
+     before do
+       consumer.subscribe("consume_test_topic")
+
+       # 1. partitions are assigned
+       wait_for_assignment(consumer)
+       expect(consumer.assignment).not_to be_empty
+
+       # 2. eat unrelated messages
+       while(consumer.poll(timeout)) do; end
+     end
+
+     after { consumer.unsubscribe }
+
+     def send_one_message(val)
+       producer.produce(
+         topic: "consume_test_topic",
+         payload: "payload #{val}",
+         key: "key 0",
+         partition: 0
+       ).wait
+     end
+
+     it "returns a TopicParticionList with updated offsets" do
+       send_one_message("a")
+       send_one_message("b")
+       send_one_message("c")
+
+       consumer.poll(timeout)
+       message = consumer.poll(timeout)
+       consumer.poll(timeout)
+
+       tpl = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+         list.add_topic_and_partitions_with_offsets(
+           "consume_test_topic",
+           [
+             [0, message.timestamp]
+           ]
+         )
+       end
+
+       tpl_response = consumer.offsets_for_times(tpl)
+
+       expect(tpl_response.to_h["consume_test_topic"][0].offset).to eq message.offset
+     end
+   end
+ end
+
+ # Only relevant in case of a consumer with separate queues
+ describe '#events_poll' do
+   let(:stats) { [] }
+
+   before { Rdkafka::Config.statistics_callback = ->(published) { stats << published } }
+
+   after { Rdkafka::Config.statistics_callback = nil }
+
+   let(:consumer) do
+     config = rdkafka_consumer_config('statistics.interval.ms': 100)
+     config.consumer_poll_set = false
+     config.consumer
+   end
+
+   it "expect to run events_poll, operate and propagate stats on events_poll and not poll" do
+     consumer.subscribe("consume_test_topic")
+     consumer.poll(1_000)
+     expect(stats).to be_empty
+     consumer.events_poll(-1)
+     expect(stats).not_to be_empty
+   end
+ end
+
+ describe '#consumer_group_metadata_pointer' do
+   let(:pointer) { consumer.consumer_group_metadata_pointer }
+
+   after { Rdkafka::Bindings.rd_kafka_consumer_group_metadata_destroy(pointer) }
+
+   it 'expect to return a pointer' do
+     expect(pointer).to be_a(FFI::Pointer)
+   end
+ end
+
  describe "a rebalance listener" do
    let(:consumer) do
      config = rdkafka_consumer_config
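Note: `#offsets_for_times` resolves, per partition, the earliest offset whose message timestamp is at or after the given time. A sketch mirroring the spec above; the topic name and the one-hour lookback are placeholders:

    tpl = Rdkafka::Consumer::TopicPartitionList.new
    # partition 0, starting from messages produced within the last hour
    tpl.add_topic_and_partitions_with_offsets("consume_test_topic", [[0, Time.now - 3600]])
    result = consumer.offsets_for_times(tpl)
    first_offset = result.to_h["consume_test_topic"][0].offset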
@@ -1024,7 +1217,7 @@ describe Rdkafka::Consumer do
    :assign => [ nil ],
    :assignment => nil,
    :committed => [],
-   :query_watermark_offsets => [ nil, nil ],
+   :query_watermark_offsets => [ nil, nil ]
  }.each do |method, args|
    it "raises an exception if #{method} is called" do
      expect {
data/spec/rdkafka/error_spec.rb CHANGED
@@ -1,7 +1,5 @@
  # frozen_string_literal: true

- require "spec_helper"
-
  describe Rdkafka::RdkafkaError do
    it "should raise a type error for a nil response" do
      expect {
data/spec/rdkafka/metadata_spec.rb CHANGED
@@ -1,6 +1,5 @@
  # frozen_string_literal: true

- require "spec_helper"
  require "securerandom"

  describe Rdkafka::Metadata do
@@ -31,7 +30,7 @@ describe Rdkafka::Metadata do
  it "#brokers returns our single broker" do
    expect(subject.brokers.length).to eq(1)
    expect(subject.brokers[0][:broker_id]).to eq(1)
-   expect(subject.brokers[0][:broker_name]).to eq("localhost")
+   expect(subject.brokers[0][:broker_name]).to eq("127.0.0.1")
    expect(subject.brokers[0][:broker_port]).to eq(9092)
  end

@@ -54,7 +53,7 @@ describe Rdkafka::Metadata do
  it "#brokers returns our single broker" do
    expect(subject.brokers.length).to eq(1)
    expect(subject.brokers[0][:broker_id]).to eq(1)
-   expect(subject.brokers[0][:broker_name]).to eq("localhost")
+   expect(subject.brokers[0][:broker_name]).to eq("127.0.0.1")
    expect(subject.brokers[0][:broker_port]).to eq(9092)
  end

data/spec/rdkafka/native_kafka_spec.rb CHANGED
@@ -1,14 +1,13 @@
  # frozen_string_literal: true

- require "spec_helper"
-
  describe Rdkafka::NativeKafka do
    let(:config) { rdkafka_producer_config }
    let(:native) { config.send(:native_kafka, config.send(:native_config), :rd_kafka_producer) }
    let(:closing) { false }
    let(:thread) { double(Thread) }
+   let(:opaque) { Rdkafka::Opaque.new }

-   subject(:client) { described_class.new(native, run_polling_thread: true) }
+   subject(:client) { described_class.new(native, run_polling_thread: true, opaque: opaque) }

    before do
      allow(Thread).to receive(:new).and_return(thread)
data/spec/rdkafka/producer/delivery_handle_spec.rb CHANGED
@@ -1,7 +1,5 @@
  # frozen_string_literal: true

- require "spec_helper"
-
  describe Rdkafka::Producer::DeliveryHandle do
    let(:response) { 0 }

data/spec/rdkafka/producer/delivery_report_spec.rb CHANGED
@@ -1,7 +1,5 @@
  # frozen_string_literal: true

- require "spec_helper"
-
  describe Rdkafka::Producer::DeliveryReport do
    subject { Rdkafka::Producer::DeliveryReport.new(2, 100, "topic", -1) }

@@ -17,6 +15,10 @@ describe Rdkafka::Producer::DeliveryReport do
    expect(subject.topic_name).to eq "topic"
  end

+ it "should get the same topic name under topic alias" do
+   expect(subject.topic).to eq "topic"
+ end
+
  it "should get the error" do
    expect(subject.error).to eq -1
  end
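Note: per the spec above, `DeliveryReport#topic` is simply an alias of `#topic_name` (constructor argument order: partition, offset, topic name, error):

    report = Rdkafka::Producer::DeliveryReport.new(2, 100, "topic", -1)
    report.topic_name # => "topic"
    report.topic      # => "topic"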
data/spec/rdkafka/producer_spec.rb CHANGED
@@ -1,6 +1,5 @@
  # frozen_string_literal: true

- require "spec_helper"
  require "zlib"

  describe Rdkafka::Producer do
@@ -15,6 +14,10 @@ describe Rdkafka::Producer do
    consumer.close
  end

+ describe '#name' do
+   it { expect(producer.name).to include('rdkafka#producer-') }
+ end
+
  context "delivery callback" do
    context "with a proc/lambda" do
      it "should set the callback" do
@@ -31,6 +34,7 @@ describe Rdkafka::Producer do
        producer.delivery_callback = lambda do |report|
          expect(report).not_to be_nil
+         expect(report.label).to eq "label"
          expect(report.partition).to eq 1
          expect(report.offset).to be >= 0
          expect(report.topic_name).to eq "produce_test_topic"
@@ -41,9 +45,12 @@ describe Rdkafka::Producer do
        handle = producer.produce(
          topic: "produce_test_topic",
          payload: "payload",
-         key: "key"
+         key: "key",
+         label: "label"
        )

+       expect(handle.label).to eq "label"
+
        # Wait for it to be delivered
        handle.wait(max_wait_timeout: 15)

@@ -172,11 +179,13 @@ describe Rdkafka::Producer do
      handle = producer.produce(
        topic: "produce_test_topic",
        payload: "payload",
-       key: "key"
+       key: "key",
+       label: "label"
      )

      # Should be pending at first
      expect(handle.pending?).to be true
+     expect(handle.label).to eq "label"

      # Check delivery handle and report
      report = handle.wait(max_wait_timeout: 5)
@@ -184,6 +193,7 @@ describe Rdkafka::Producer do
      expect(report).not_to be_nil
      expect(report.partition).to eq 1
      expect(report.offset).to be >= 0
+     expect(report.label).to eq "label"

      # Flush and close producer
      producer.flush
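Note: the new `label:` argument to `produce` is carried through to both the delivery handle and the delivery report, which makes it easy to correlate asynchronous acknowledgements with the original dispatch. A sketch based on the specs above; the topic name and label value are placeholders:

    handle = producer.produce(topic: "example_topic", payload: "data", label: "order-42")
    handle.label    # => "order-42" while the delivery is still in flight
    report = handle.wait(max_wait_timeout: 5)
    report.label    # => "order-42" once delivered (also visible in delivery_callback)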
@@ -554,4 +564,174 @@ describe Rdkafka::Producer do
      end
    end
  end
+
+ context "when not being able to deliver the message" do
+   let(:producer) do
+     rdkafka_producer_config(
+       "bootstrap.servers": "localhost:9093",
+       "message.timeout.ms": 100
+     ).producer
+   end
+
+   it "should contain the error in the response when not deliverable" do
+     handler = producer.produce(topic: 'produce_test_topic', payload: nil, label: 'na')
+     # Wait for the async callbacks and delivery registry to update
+     sleep(2)
+     expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
+     expect(handler.create_result.label).to eq('na')
+   end
+ end
+
+ describe '#partition_count' do
+   it { expect(producer.partition_count('consume_test_topic')).to eq(3) }
+
+   context 'when the partition count value is already cached' do
+     before do
+       producer.partition_count('consume_test_topic')
+       allow(::Rdkafka::Metadata).to receive(:new).and_call_original
+     end
+
+     it 'expect not to query it again' do
+       producer.partition_count('consume_test_topic')
+       expect(::Rdkafka::Metadata).not_to have_received(:new)
+     end
+   end
+
+   context 'when the partition count value was cached but time expired' do
+     before do
+       allow(::Process).to receive(:clock_gettime).and_return(0, 30.02)
+       producer.partition_count('consume_test_topic')
+       allow(::Rdkafka::Metadata).to receive(:new).and_call_original
+     end
+
+     it 'expect to query it again' do
+       producer.partition_count('consume_test_topic')
+       expect(::Rdkafka::Metadata).to have_received(:new)
+     end
+   end
+
+   context 'when the partition count value was cached and time did not expire' do
+     before do
+       allow(::Process).to receive(:clock_gettime).and_return(0, 29.001)
+       producer.partition_count('consume_test_topic')
+       allow(::Rdkafka::Metadata).to receive(:new).and_call_original
+     end
+
+     it 'expect not to query it again' do
+       producer.partition_count('consume_test_topic')
+       expect(::Rdkafka::Metadata).not_to have_received(:new)
+     end
+   end
+ end
+
+ describe '#flush' do
+   it "should return flush when it can flush all outstanding messages or when no messages" do
+     producer.produce(
+       topic: "produce_test_topic",
+       payload: "payload headers",
+       key: "key headers",
+       headers: {}
+     )
+
+     expect(producer.flush(5_000)).to eq(true)
+   end
+
+   context 'when it cannot flush due to a timeout' do
+     let(:producer) do
+       rdkafka_producer_config(
+         "bootstrap.servers": "localhost:9093",
+         "message.timeout.ms": 2_000
+       ).producer
+     end
+
+     after do
+       # Allow rdkafka to evict message preventing memory-leak
+       sleep(2)
+     end
+
+     it "should return false on flush when cannot deliver and beyond timeout" do
+       producer.produce(
+         topic: "produce_test_topic",
+         payload: "payload headers",
+         key: "key headers",
+         headers: {}
+       )
+
+       expect(producer.flush(1_000)).to eq(false)
+     end
+   end
+
+   context 'when there is a different error' do
+     before { allow(Rdkafka::Bindings).to receive(:rd_kafka_flush).and_return(-199) }
+
+     it 'should raise it' do
+       expect { producer.flush }.to raise_error(Rdkafka::RdkafkaError)
+     end
+   end
+ end
+
+ describe '#purge' do
+   context 'when no outgoing messages' do
+     it { expect(producer.purge).to eq(true) }
+   end
+
+   context 'when librdkafka purge returns an error' do
+     before { expect(Rdkafka::Bindings).to receive(:rd_kafka_purge).and_return(-153) }
+
+     it 'expect to raise an error' do
+       expect { producer.purge }.to raise_error(Rdkafka::RdkafkaError, /retry/)
+     end
+   end
+
+   context 'when there are outgoing things in the queue' do
+     let(:producer) do
+       rdkafka_producer_config(
+         "bootstrap.servers": "localhost:9093",
+         "message.timeout.ms": 2_000
+       ).producer
+     end
+
+     it "should purge and move forward" do
+       producer.produce(
+         topic: "produce_test_topic",
+         payload: "payload headers"
+       )
+
+       expect(producer.purge).to eq(true)
+       expect(producer.flush(1_000)).to eq(true)
+     end
+
+     it "should materialize the delivery handles" do
+       handle = producer.produce(
+         topic: "produce_test_topic",
+         payload: "payload headers"
+       )
+
+       expect(producer.purge).to eq(true)
+
+       expect { handle.wait }.to raise_error(Rdkafka::RdkafkaError, /purge_queue/)
+     end
+
+     context "when using delivery_callback" do
+       let(:delivery_reports) { [] }
+
+       let(:delivery_callback) do
+         ->(delivery_report) { delivery_reports << delivery_report }
+       end
+
+       before { producer.delivery_callback = delivery_callback }
+
+       it "should run the callback" do
+         handle = producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload headers"
+         )
+
+         expect(producer.purge).to eq(true)
+         # queue purge
+         expect(delivery_reports[0].error).to eq(-152)
+       end
+     end
+   end
+ end
  end
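Note: per the specs above, `#flush` returns a boolean (false when messages are still outstanding after the timeout) rather than raising, and `#purge` drops whatever is still queued, failing the pending delivery handles with a purge_queue error. A shutdown sketch under those assumptions, with a placeholder topic:

    producer.produce(topic: "example_topic", payload: "data")

    unless producer.flush(1_000)  # false => undelivered messages remain after 1s
      producer.purge              # pending handles now fail with /purge_queue/
    end
    producer.close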
data/spec/spec_helper.rb CHANGED
@@ -11,6 +11,7 @@ require "pry"
  require "rspec"
  require "rdkafka"
  require "timeout"
+ require "securerandom"

  def rdkafka_base_config
    {
@@ -35,7 +36,7 @@ def rdkafka_consumer_config(config_overrides={})
  # Add consumer specific fields to it
  config[:"auto.offset.reset"] = "earliest"
  config[:"enable.partition.eof"] = false
- config[:"group.id"] = "ruby-test-#{Random.new.rand(0..1_000_000)}"
+ config[:"group.id"] = "ruby-test-#{SecureRandom.uuid}"
  # Enable debug mode if required
  if ENV["DEBUG_CONSUMER"]
    config[:debug] = "cgrp,topic,fetch"
@@ -134,6 +135,7 @@ RSpec.configure do |config|
    rake_test_topic: 3,
    watermarks_test_topic: 3,
    partitioner_test_topic: 25,
+   example_topic: 1
  }.each do |topic, partitions|
    create_topic_handle = admin.create_topic(topic.to_s, partitions, 1)
    begin