rdkafka 0.13.0 → 0.15.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (73) hide show
  1. checksums.yaml +4 -4
  2. checksums.yaml.gz.sig +2 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci.yml +58 -0
  5. data/.gitignore +4 -0
  6. data/.rspec +1 -0
  7. data/.ruby-gemset +1 -0
  8. data/.ruby-version +1 -0
  9. data/CHANGELOG.md +141 -111
  10. data/{LICENSE → MIT-LICENSE} +2 -1
  11. data/README.md +48 -39
  12. data/certs/cert_chain.pem +26 -0
  13. data/docker-compose.yml +18 -15
  14. data/ext/README.md +1 -1
  15. data/ext/Rakefile +1 -1
  16. data/lib/rdkafka/abstract_handle.rb +40 -26
  17. data/lib/rdkafka/admin/acl_binding_result.rb +37 -0
  18. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  19. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  20. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  21. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  22. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  23. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  24. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  25. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  26. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  27. data/lib/rdkafka/admin/describe_acl_report.rb +23 -0
  28. data/lib/rdkafka/admin.rb +449 -7
  29. data/lib/rdkafka/bindings.rb +127 -5
  30. data/lib/rdkafka/callbacks.rb +187 -0
  31. data/lib/rdkafka/config.rb +53 -19
  32. data/lib/rdkafka/consumer/headers.rb +2 -4
  33. data/lib/rdkafka/consumer/topic_partition_list.rb +11 -8
  34. data/lib/rdkafka/consumer.rb +134 -59
  35. data/lib/rdkafka/helpers/time.rb +14 -0
  36. data/lib/rdkafka/metadata.rb +22 -1
  37. data/lib/rdkafka/native_kafka.rb +6 -1
  38. data/lib/rdkafka/producer.rb +87 -9
  39. data/lib/rdkafka/version.rb +3 -3
  40. data/lib/rdkafka.rb +21 -1
  41. data/rdkafka.gemspec +17 -3
  42. data/renovate.json +6 -0
  43. data/spec/rdkafka/abstract_handle_spec.rb +0 -2
  44. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  45. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  46. data/spec/rdkafka/admin/create_topic_handle_spec.rb +0 -2
  47. data/spec/rdkafka/admin/create_topic_report_spec.rb +0 -2
  48. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  49. data/spec/rdkafka/admin/delete_acl_report_spec.rb +71 -0
  50. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +0 -2
  51. data/spec/rdkafka/admin/delete_topic_report_spec.rb +0 -2
  52. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  53. data/spec/rdkafka/admin/describe_acl_report_spec.rb +72 -0
  54. data/spec/rdkafka/admin_spec.rb +205 -2
  55. data/spec/rdkafka/bindings_spec.rb +0 -1
  56. data/spec/rdkafka/callbacks_spec.rb +0 -2
  57. data/spec/rdkafka/config_spec.rb +8 -2
  58. data/spec/rdkafka/consumer/headers_spec.rb +0 -2
  59. data/spec/rdkafka/consumer/message_spec.rb +0 -2
  60. data/spec/rdkafka/consumer/partition_spec.rb +0 -2
  61. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +19 -2
  62. data/spec/rdkafka/consumer_spec.rb +212 -39
  63. data/spec/rdkafka/error_spec.rb +0 -2
  64. data/spec/rdkafka/metadata_spec.rb +2 -3
  65. data/spec/rdkafka/native_kafka_spec.rb +2 -3
  66. data/spec/rdkafka/producer/delivery_handle_spec.rb +0 -2
  67. data/spec/rdkafka/producer/delivery_report_spec.rb +0 -2
  68. data/spec/rdkafka/producer_spec.rb +157 -1
  69. data/spec/spec_helper.rb +3 -1
  70. data.tar.gz.sig +3 -0
  71. metadata +76 -13
  72. metadata.gz.sig +3 -0
  73. data/.semaphore/semaphore.yml +0 -27
@@ -1,7 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
-
5
3
  describe Rdkafka::Callbacks do
6
4
 
7
5
  # The code in the call back functions is 100% covered by other specs. Due to
@@ -1,7 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
-
5
3
  describe Rdkafka::Config do
6
4
  context "logger" do
7
5
  it "should have a default logger" do
@@ -115,6 +113,14 @@ describe Rdkafka::Config do
115
113
  consumer.close
116
114
  end
117
115
 
116
+ it "should create a consumer with consumer_poll_set set to false" do
117
+ config = rdkafka_consumer_config
118
+ config.consumer_poll_set = false
119
+ consumer = config.consumer
120
+ expect(consumer).to be_a Rdkafka::Consumer
121
+ consumer.close
122
+ end
123
+
118
124
  it "should raise an error when creating a consumer with invalid config" do
119
125
  config = Rdkafka::Config.new('invalid.key' => 'value')
120
126
  expect {
@@ -1,7 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
-
5
3
  describe Rdkafka::Consumer::Headers do
6
4
  let(:headers) do
7
5
  { # Note String keys!
@@ -1,7 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
-
5
3
  describe Rdkafka::Consumer::Message do
6
4
  let(:native_client) { new_native_client }
7
5
  let(:native_topic) { new_native_topic(native_client: native_client) }
@@ -1,7 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
-
5
3
  describe Rdkafka::Consumer::Partition do
6
4
  let(:offset) { 100 }
7
5
  let(:err) { 0 }
@@ -1,7 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
-
5
3
  describe Rdkafka::Consumer::TopicPartitionList do
6
4
  it "should create a new list and add unassigned topics" do
7
5
  list = Rdkafka::Consumer::TopicPartitionList.new
@@ -221,5 +219,24 @@ describe Rdkafka::Consumer::TopicPartitionList do
221
219
 
222
220
  expect(list).to eq other
223
221
  end
222
+
223
+ it "should create a native list with timestamp offsets if offsets are Time" do
224
+ list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
225
+ list.add_topic_and_partitions_with_offsets("topic", 0 => Time.at(1505069646, 250_000))
226
+ end
227
+
228
+ tpl = list.to_native_tpl
229
+
230
+ compare_list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
231
+ list.add_topic_and_partitions_with_offsets(
232
+ "topic",
233
+ 0 => (Time.at(1505069646, 250_000).to_f * 1000).floor
234
+ )
235
+ end
236
+
237
+ native_list = Rdkafka::Consumer::TopicPartitionList.from_native_tpl(tpl)
238
+
239
+ expect(native_list).to eq compare_list
240
+ end
224
241
  end
225
242
  end
@@ -1,6 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
3
  require "ostruct"
5
4
  require 'securerandom'
6
5
 
@@ -11,6 +10,10 @@ describe Rdkafka::Consumer do
11
10
  after { consumer.close }
12
11
  after { producer.close }
13
12
 
13
+ describe '#name' do
14
+ it { expect(consumer.name).to include('rdkafka#consumer-') }
15
+ end
16
+
14
17
  describe "#subscribe, #unsubscribe and #subscription" do
15
18
  it "should subscribe, unsubscribe and return the subscription" do
16
19
  expect(consumer.subscription).to be_empty
@@ -51,6 +54,30 @@ describe Rdkafka::Consumer do
51
54
  consumer.subscription
52
55
  }.to raise_error(Rdkafka::RdkafkaError)
53
56
  end
57
+
58
+ context "when using consumer without the poll set" do
59
+ let(:consumer) do
60
+ config = rdkafka_consumer_config
61
+ config.consumer_poll_set = false
62
+ config.consumer
63
+ end
64
+
65
+ it "should subscribe, unsubscribe and return the subscription" do
66
+ expect(consumer.subscription).to be_empty
67
+
68
+ consumer.subscribe("consume_test_topic")
69
+
70
+ expect(consumer.subscription).not_to be_empty
71
+ expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
72
+ list.add_topic("consume_test_topic")
73
+ end
74
+ expect(consumer.subscription).to eq expected_subscription
75
+
76
+ consumer.unsubscribe
77
+
78
+ expect(consumer.subscription).to be_empty
79
+ end
80
+ end
54
81
  end
55
82
 
56
83
  describe "#pause and #resume" do
@@ -270,6 +297,28 @@ describe Rdkafka::Consumer do
270
297
  end
271
298
  end
272
299
 
300
+ describe '#assignment_lost?' do
301
+ it "should not return true as we do have an assignment" do
302
+ consumer.subscribe("consume_test_topic")
303
+ expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
304
+ list.add_topic("consume_test_topic")
305
+ end
306
+
307
+ expect(consumer.assignment_lost?).to eq false
308
+ consumer.unsubscribe
309
+ end
310
+
311
+ it "should not return true after voluntary unsubscribing" do
312
+ consumer.subscribe("consume_test_topic")
313
+ expected_subscription = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
314
+ list.add_topic("consume_test_topic")
315
+ end
316
+
317
+ consumer.unsubscribe
318
+ expect(consumer.assignment_lost?).to eq false
319
+ end
320
+ end
321
+
273
322
  describe "#close" do
274
323
  it "should close a consumer" do
275
324
  consumer.subscribe("consume_test_topic")
@@ -311,8 +360,9 @@ describe Rdkafka::Consumer do
311
360
  end
312
361
  end
313
362
 
314
- describe "#commit, #committed and #store_offset" do
315
- # Make sure there's a stored offset
363
+
364
+ describe "#position, #commit, #committed and #store_offset" do
365
+ # Make sure there are messages to work with
316
366
  let!(:report) do
317
367
  producer.produce(
318
368
  topic: "consume_test_topic",
@@ -330,29 +380,33 @@ describe Rdkafka::Consumer do
330
380
  )
331
381
  end
332
382
 
333
- it "should only accept a topic partition list in committed" do
334
- expect {
335
- consumer.committed("list")
336
- }.to raise_error TypeError
383
+ describe "#position" do
384
+ it "should only accept a topic partition list in position if not nil" do
385
+ expect {
386
+ consumer.position("list")
387
+ }.to raise_error TypeError
388
+ end
337
389
  end
338
390
 
339
- it "should commit in sync mode" do
340
- expect {
341
- consumer.commit(nil, true)
342
- }.not_to raise_error
343
- end
391
+ describe "#committed" do
392
+ it "should only accept a topic partition list in commit if not nil" do
393
+ expect {
394
+ consumer.commit("list")
395
+ }.to raise_error TypeError
396
+ end
344
397
 
345
- it "should only accept a topic partition list in commit if not nil" do
346
- expect {
347
- consumer.commit("list")
348
- }.to raise_error TypeError
398
+ it "should commit in sync mode" do
399
+ expect {
400
+ consumer.commit(nil, true)
401
+ }.not_to raise_error
402
+ end
349
403
  end
350
404
 
351
405
  context "with a committed consumer" do
352
406
  before :all do
353
407
  # Make sure there are some messages.
354
408
  handles = []
355
- producer = rdkafka_producer_config.producer
409
+ producer = rdkafka_config.producer
356
410
  10.times do
357
411
  (0..2).each do |i|
358
412
  handles << producer.produce(
@@ -396,31 +450,33 @@ describe Rdkafka::Consumer do
396
450
  }.to raise_error(Rdkafka::RdkafkaError)
397
451
  end
398
452
 
399
- it "should fetch the committed offsets for the current assignment" do
400
- partitions = consumer.committed.to_h["consume_test_topic"]
401
- expect(partitions).not_to be_nil
402
- expect(partitions[0].offset).to eq 1
403
- end
453
+ describe "#committed" do
454
+ it "should fetch the committed offsets for the current assignment" do
455
+ partitions = consumer.committed.to_h["consume_test_topic"]
456
+ expect(partitions).not_to be_nil
457
+ expect(partitions[0].offset).to eq 1
458
+ end
404
459
 
405
- it "should fetch the committed offsets for a specified topic partition list" do
406
- list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
407
- list.add_topic("consume_test_topic", [0, 1, 2])
460
+ it "should fetch the committed offsets for a specified topic partition list" do
461
+ list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
462
+ list.add_topic("consume_test_topic", [0, 1, 2])
463
+ end
464
+ partitions = consumer.committed(list).to_h["consume_test_topic"]
465
+ expect(partitions).not_to be_nil
466
+ expect(partitions[0].offset).to eq 1
467
+ expect(partitions[1].offset).to eq 1
468
+ expect(partitions[2].offset).to eq 1
408
469
  end
409
- partitions = consumer.committed(list).to_h["consume_test_topic"]
410
- expect(partitions).not_to be_nil
411
- expect(partitions[0].offset).to eq 1
412
- expect(partitions[1].offset).to eq 1
413
- expect(partitions[2].offset).to eq 1
414
- end
415
470
 
416
- it "should raise an error when getting committed fails" do
417
- expect(Rdkafka::Bindings).to receive(:rd_kafka_committed).and_return(20)
418
- list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
419
- list.add_topic("consume_test_topic", [0, 1, 2])
471
+ it "should raise an error when getting committed fails" do
472
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_committed).and_return(20)
473
+ list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
474
+ list.add_topic("consume_test_topic", [0, 1, 2])
475
+ end
476
+ expect {
477
+ consumer.committed(list)
478
+ }.to raise_error Rdkafka::RdkafkaError
420
479
  end
421
- expect {
422
- consumer.committed(list)
423
- }.to raise_error Rdkafka::RdkafkaError
424
480
  end
425
481
 
426
482
  describe "#store_offset" do
@@ -441,6 +497,8 @@ describe Rdkafka::Consumer do
441
497
  @new_consumer.store_offset(message)
442
498
  @new_consumer.commit
443
499
 
500
+ # TODO use position here, should be at offset
501
+
444
502
  list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
445
503
  list.add_topic("consume_test_topic", [0, 1, 2])
446
504
  end
@@ -455,6 +513,35 @@ describe Rdkafka::Consumer do
455
513
  @new_consumer.store_offset(message)
456
514
  }.to raise_error Rdkafka::RdkafkaError
457
515
  end
516
+
517
+ describe "#position" do
518
+ it "should fetch the positions for the current assignment" do
519
+ consumer.store_offset(message)
520
+
521
+ partitions = consumer.position.to_h["consume_test_topic"]
522
+ expect(partitions).not_to be_nil
523
+ expect(partitions[0].offset).to eq message.offset + 1
524
+ end
525
+
526
+ it "should fetch the positions for a specified assignment" do
527
+ consumer.store_offset(message)
528
+
529
+ list = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
530
+ list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => nil, 1 => nil, 2 => nil)
531
+ end
532
+ partitions = consumer.position(list).to_h["consume_test_topic"]
533
+ expect(partitions).not_to be_nil
534
+ expect(partitions[0].offset).to eq message.offset + 1
535
+ end
536
+
537
+ it "should raise an error when getting the position fails" do
538
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_position).and_return(20)
539
+
540
+ expect {
541
+ consumer.position
542
+ }.to raise_error(Rdkafka::RdkafkaError)
543
+ end
544
+ end
458
545
  end
459
546
  end
460
547
  end
@@ -950,6 +1037,92 @@ describe Rdkafka::Consumer do
950
1037
  end
951
1038
  end
952
1039
 
1040
+ describe "#offsets_for_times" do
1041
+ it "should raise when not TopicPartitionList" do
1042
+ expect { consumer.offsets_for_times([]) }.to raise_error(TypeError)
1043
+ end
1044
+
1045
+ it "should raise an error when offsets_for_times fails" do
1046
+ tpl = Rdkafka::Consumer::TopicPartitionList.new
1047
+
1048
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_offsets_for_times).and_return(7)
1049
+
1050
+ expect { consumer.offsets_for_times(tpl) }.to raise_error(Rdkafka::RdkafkaError)
1051
+ end
1052
+
1053
+ context "when subscribed" do
1054
+ let(:timeout) { 1000 }
1055
+
1056
+ before do
1057
+ consumer.subscribe("consume_test_topic")
1058
+
1059
+ # 1. partitions are assigned
1060
+ wait_for_assignment(consumer)
1061
+ expect(consumer.assignment).not_to be_empty
1062
+
1063
+ # 2. eat unrelated messages
1064
+ while(consumer.poll(timeout)) do; end
1065
+ end
1066
+
1067
+ after { consumer.unsubscribe }
1068
+
1069
+ def send_one_message(val)
1070
+ producer.produce(
1071
+ topic: "consume_test_topic",
1072
+ payload: "payload #{val}",
1073
+ key: "key 0",
1074
+ partition: 0
1075
+ ).wait
1076
+ end
1077
+
1078
+ it "returns a TopicPartitionList with updated offsets" do
1079
+ send_one_message("a")
1080
+ send_one_message("b")
1081
+ send_one_message("c")
1082
+
1083
+ consumer.poll(timeout)
1084
+ message = consumer.poll(timeout)
1085
+ consumer.poll(timeout)
1086
+
1087
+ tpl = Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
1088
+ list.add_topic_and_partitions_with_offsets(
1089
+ "consume_test_topic",
1090
+ [
1091
+ [0, message.timestamp]
1092
+ ]
1093
+ )
1094
+ end
1095
+
1096
+ tpl_response = consumer.offsets_for_times(tpl)
1097
+
1098
+ expect(tpl_response.to_h["consume_test_topic"][0].offset).to eq message.offset
1099
+ end
1100
+ end
1101
+ end
1102
+
1103
+ # Only relevant in case of a consumer with separate queues
1104
+ describe '#events_poll' do
1105
+ let(:stats) { [] }
1106
+
1107
+ before { Rdkafka::Config.statistics_callback = ->(published) { stats << published } }
1108
+
1109
+ after { Rdkafka::Config.statistics_callback = nil }
1110
+
1111
+ let(:consumer) do
1112
+ config = rdkafka_consumer_config('statistics.interval.ms': 100)
1113
+ config.consumer_poll_set = false
1114
+ config.consumer
1115
+ end
1116
+
1117
+ it "expect to run events_poll, operate and propagate stats on events_poll and not poll" do
1118
+ consumer.subscribe("consume_test_topic")
1119
+ consumer.poll(1_000)
1120
+ expect(stats).to be_empty
1121
+ consumer.events_poll(-1)
1122
+ expect(stats).not_to be_empty
1123
+ end
1124
+ end
1125
+
953
1126
  describe "a rebalance listener" do
954
1127
  let(:consumer) do
955
1128
  config = rdkafka_consumer_config
@@ -1024,7 +1197,7 @@ describe Rdkafka::Consumer do
1024
1197
  :assign => [ nil ],
1025
1198
  :assignment => nil,
1026
1199
  :committed => [],
1027
- :query_watermark_offsets => [ nil, nil ],
1200
+ :query_watermark_offsets => [ nil, nil ]
1028
1201
  }.each do |method, args|
1029
1202
  it "raises an exception if #{method} is called" do
1030
1203
  expect {
@@ -1,7 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
-
5
3
  describe Rdkafka::RdkafkaError do
6
4
  it "should raise a type error for a nil response" do
7
5
  expect {
@@ -1,6 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
3
  require "securerandom"
5
4
 
6
5
  describe Rdkafka::Metadata do
@@ -31,7 +30,7 @@ describe Rdkafka::Metadata do
31
30
  it "#brokers returns our single broker" do
32
31
  expect(subject.brokers.length).to eq(1)
33
32
  expect(subject.brokers[0][:broker_id]).to eq(1)
34
- expect(subject.brokers[0][:broker_name]).to eq("localhost")
33
+ expect(subject.brokers[0][:broker_name]).to eq("127.0.0.1")
35
34
  expect(subject.brokers[0][:broker_port]).to eq(9092)
36
35
  end
37
36
 
@@ -54,7 +53,7 @@ describe Rdkafka::Metadata do
54
53
  it "#brokers returns our single broker" do
55
54
  expect(subject.brokers.length).to eq(1)
56
55
  expect(subject.brokers[0][:broker_id]).to eq(1)
57
- expect(subject.brokers[0][:broker_name]).to eq("localhost")
56
+ expect(subject.brokers[0][:broker_name]).to eq("127.0.0.1")
58
57
  expect(subject.brokers[0][:broker_port]).to eq(9092)
59
58
  end
60
59
 
@@ -1,14 +1,13 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
-
5
3
  describe Rdkafka::NativeKafka do
6
4
  let(:config) { rdkafka_producer_config }
7
5
  let(:native) { config.send(:native_kafka, config.send(:native_config), :rd_kafka_producer) }
8
6
  let(:closing) { false }
9
7
  let(:thread) { double(Thread) }
8
+ let(:opaque) { Rdkafka::Opaque.new }
10
9
 
11
- subject(:client) { described_class.new(native, run_polling_thread: true) }
10
+ subject(:client) { described_class.new(native, run_polling_thread: true, opaque: opaque) }
12
11
 
13
12
  before do
14
13
  allow(Thread).to receive(:new).and_return(thread)
@@ -1,7 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
-
5
3
  describe Rdkafka::Producer::DeliveryHandle do
6
4
  let(:response) { 0 }
7
5
 
@@ -1,7 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
-
5
3
  describe Rdkafka::Producer::DeliveryReport do
6
4
  subject { Rdkafka::Producer::DeliveryReport.new(2, 100, "topic", -1) }
7
5
 
@@ -1,6 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require "spec_helper"
4
3
  require "zlib"
5
4
 
6
5
  describe Rdkafka::Producer do
@@ -15,6 +14,10 @@ describe Rdkafka::Producer do
15
14
  consumer.close
16
15
  end
17
16
 
17
+ describe '#name' do
18
+ it { expect(producer.name).to include('rdkafka#producer-') }
19
+ end
20
+
18
21
  context "delivery callback" do
19
22
  context "with a proc/lambda" do
20
23
  it "should set the callback" do
@@ -554,4 +557,157 @@ describe Rdkafka::Producer do
554
557
  end
555
558
  end
556
559
  end
560
+
561
+ describe '#partition_count' do
562
+ it { expect(producer.partition_count('consume_test_topic')).to eq(3) }
563
+
564
+ context 'when the partition count value is already cached' do
565
+ before do
566
+ producer.partition_count('consume_test_topic')
567
+ allow(::Rdkafka::Metadata).to receive(:new).and_call_original
568
+ end
569
+
570
+ it 'expect not to query it again' do
571
+ producer.partition_count('consume_test_topic')
572
+ expect(::Rdkafka::Metadata).not_to have_received(:new)
573
+ end
574
+ end
575
+
576
+ context 'when the partition count value was cached but time expired' do
577
+ before do
578
+ allow(::Process).to receive(:clock_gettime).and_return(0, 30.02)
579
+ producer.partition_count('consume_test_topic')
580
+ allow(::Rdkafka::Metadata).to receive(:new).and_call_original
581
+ end
582
+
583
+ it 'expect to query it again' do
584
+ producer.partition_count('consume_test_topic')
585
+ expect(::Rdkafka::Metadata).to have_received(:new)
586
+ end
587
+ end
588
+
589
+ context 'when the partition count value was cached and time did not expire' do
590
+ before do
591
+ allow(::Process).to receive(:clock_gettime).and_return(0, 29.001)
592
+ producer.partition_count('consume_test_topic')
593
+ allow(::Rdkafka::Metadata).to receive(:new).and_call_original
594
+ end
595
+
596
+ it 'expect not to query it again' do
597
+ producer.partition_count('consume_test_topic')
598
+ expect(::Rdkafka::Metadata).not_to have_received(:new)
599
+ end
600
+ end
601
+ end
602
+
603
+ describe '#flush' do
604
+ it "should return flush when it can flush all outstanding messages or when no messages" do
605
+ producer.produce(
606
+ topic: "produce_test_topic",
607
+ payload: "payload headers",
608
+ key: "key headers",
609
+ headers: {}
610
+ )
611
+
612
+ expect(producer.flush(5_000)).to eq(true)
613
+ end
614
+
615
+ context 'when it cannot flush due to a timeout' do
616
+ let(:producer) do
617
+ rdkafka_producer_config(
618
+ "bootstrap.servers": "localhost:9093",
619
+ "message.timeout.ms": 2_000
620
+ ).producer
621
+ end
622
+
623
+ after do
624
+ # Allow rdkafka to evict message preventing memory-leak
625
+ sleep(2)
626
+ end
627
+
628
+ it "should return false on flush when cannot deliver and beyond timeout" do
629
+ producer.produce(
630
+ topic: "produce_test_topic",
631
+ payload: "payload headers",
632
+ key: "key headers",
633
+ headers: {}
634
+ )
635
+
636
+ expect(producer.flush(1_000)).to eq(false)
637
+ end
638
+ end
639
+
640
+ context 'when there is a different error' do
641
+ before { allow(Rdkafka::Bindings).to receive(:rd_kafka_flush).and_return(-199) }
642
+
643
+ it 'should raise it' do
644
+ expect { producer.flush }.to raise_error(Rdkafka::RdkafkaError)
645
+ end
646
+ end
647
+ end
648
+
649
+ describe '#purge' do
650
+ context 'when no outgoing messages' do
651
+ it { expect(producer.purge).to eq(true) }
652
+ end
653
+
654
+ context 'when librdkafka purge returns an error' do
655
+ before { expect(Rdkafka::Bindings).to receive(:rd_kafka_purge).and_return(-153) }
656
+
657
+ it 'expect to raise an error' do
658
+ expect { producer.purge }.to raise_error(Rdkafka::RdkafkaError, /retry/)
659
+ end
660
+ end
661
+
662
+ context 'when there are outgoing things in the queue' do
663
+ let(:producer) do
664
+ rdkafka_producer_config(
665
+ "bootstrap.servers": "localhost:9093",
666
+ "message.timeout.ms": 2_000
667
+ ).producer
668
+ end
669
+
670
+ it "should purge and move forward" do
671
+ producer.produce(
672
+ topic: "produce_test_topic",
673
+ payload: "payload headers"
674
+ )
675
+
676
+ expect(producer.purge).to eq(true)
677
+ expect(producer.flush(1_000)).to eq(true)
678
+ end
679
+
680
+ it "should materialize the delivery handles" do
681
+ handle = producer.produce(
682
+ topic: "produce_test_topic",
683
+ payload: "payload headers"
684
+ )
685
+
686
+ expect(producer.purge).to eq(true)
687
+
688
+ expect { handle.wait }.to raise_error(Rdkafka::RdkafkaError, /purge_queue/)
689
+ end
690
+
691
+ context "when using delivery_callback" do
692
+ let(:delivery_reports) { [] }
693
+
694
+ let(:delivery_callback) do
695
+ ->(delivery_report) { delivery_reports << delivery_report }
696
+ end
697
+
698
+ before { producer.delivery_callback = delivery_callback }
699
+
700
+ it "should run the callback" do
701
+ handle = producer.produce(
702
+ topic: "produce_test_topic",
703
+ payload: "payload headers"
704
+ )
705
+
706
+ expect(producer.purge).to eq(true)
707
+ # queue purge
708
+ expect(delivery_reports[0].error).to eq(-152)
709
+ end
710
+ end
711
+ end
712
+ end
557
713
  end