karafka-rdkafka 0.21.0.rc1-arm64-darwin → 0.22.0-arm64-darwin

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +15 -2
  3. data/README.md +36 -135
  4. data/docker-compose-ssl.yml +35 -0
  5. data/ext/librdkafka.dylib +0 -0
  6. data/karafka-rdkafka.gemspec +35 -6
  7. data/lib/rdkafka/bindings.rb +0 -1
  8. data/lib/rdkafka/consumer.rb +1 -1
  9. data/lib/rdkafka/version.rb +3 -3
  10. data/renovate.json +5 -17
  11. metadata +32 -52
  12. data/.github/CODEOWNERS +0 -3
  13. data/.github/FUNDING.yml +0 -1
  14. data/.github/workflows/ci_linux_x86_64_gnu.yml +0 -271
  15. data/.github/workflows/ci_linux_x86_64_musl.yml +0 -194
  16. data/.github/workflows/ci_macos_arm64.yml +0 -284
  17. data/.github/workflows/push_linux_x86_64_gnu.yml +0 -65
  18. data/.github/workflows/push_linux_x86_64_musl.yml +0 -79
  19. data/.github/workflows/push_macos_arm64.yml +0 -54
  20. data/.github/workflows/push_ruby.yml +0 -37
  21. data/.github/workflows/verify-action-pins.yml +0 -16
  22. data/.gitignore +0 -15
  23. data/.rspec +0 -2
  24. data/.ruby-gemset +0 -1
  25. data/.ruby-version +0 -1
  26. data/.yardopts +0 -2
  27. data/ext/README.md +0 -19
  28. data/ext/Rakefile +0 -131
  29. data/ext/build_common.sh +0 -361
  30. data/ext/build_linux_x86_64_gnu.sh +0 -306
  31. data/ext/build_linux_x86_64_musl.sh +0 -763
  32. data/ext/build_macos_arm64.sh +0 -550
  33. data/spec/rdkafka/abstract_handle_spec.rb +0 -117
  34. data/spec/rdkafka/admin/create_acl_handle_spec.rb +0 -56
  35. data/spec/rdkafka/admin/create_acl_report_spec.rb +0 -18
  36. data/spec/rdkafka/admin/create_topic_handle_spec.rb +0 -54
  37. data/spec/rdkafka/admin/create_topic_report_spec.rb +0 -16
  38. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +0 -85
  39. data/spec/rdkafka/admin/delete_acl_report_spec.rb +0 -72
  40. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +0 -54
  41. data/spec/rdkafka/admin/delete_topic_report_spec.rb +0 -16
  42. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +0 -85
  43. data/spec/rdkafka/admin/describe_acl_report_spec.rb +0 -73
  44. data/spec/rdkafka/admin_spec.rb +0 -970
  45. data/spec/rdkafka/bindings_spec.rb +0 -198
  46. data/spec/rdkafka/callbacks_spec.rb +0 -20
  47. data/spec/rdkafka/config_spec.rb +0 -258
  48. data/spec/rdkafka/consumer/headers_spec.rb +0 -73
  49. data/spec/rdkafka/consumer/message_spec.rb +0 -139
  50. data/spec/rdkafka/consumer/partition_spec.rb +0 -57
  51. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +0 -248
  52. data/spec/rdkafka/consumer_spec.rb +0 -1296
  53. data/spec/rdkafka/error_spec.rb +0 -95
  54. data/spec/rdkafka/metadata_spec.rb +0 -79
  55. data/spec/rdkafka/native_kafka_spec.rb +0 -130
  56. data/spec/rdkafka/producer/delivery_handle_spec.rb +0 -60
  57. data/spec/rdkafka/producer/delivery_report_spec.rb +0 -25
  58. data/spec/rdkafka/producer/partitions_count_cache_spec.rb +0 -359
  59. data/spec/rdkafka/producer_spec.rb +0 -1526
  60. data/spec/spec_helper.rb +0 -193
@@ -1,1526 +0,0 @@
- # frozen_string_literal: true
-
- require "zlib"
-
- describe Rdkafka::Producer do
-   let(:producer) { rdkafka_producer_config.producer }
-   let(:consumer) { rdkafka_consumer_config.consumer }
-
-   after do
-     # Registry should always end up being empty
-     registry = Rdkafka::Producer::DeliveryHandle::REGISTRY
-     expect(registry).to be_empty, registry.inspect
-     producer.close
-     consumer.close
-   end
-
-   describe 'producer without auto-start' do
-     let(:producer) { rdkafka_producer_config.producer(native_kafka_auto_start: false) }
-
-     it 'expect to be able to start it later and close' do
-       producer.start
-       producer.close
-     end
-
-     it 'expect to be able to close it without starting' do
-       producer.close
-     end
-   end
-
-   describe '#name' do
-     it { expect(producer.name).to include('rdkafka#producer-') }
-   end
-
-   describe '#produce with topic config alterations' do
-     context 'when config is not valid' do
-       it 'expect to raise error' do
-         expect do
-           producer.produce(topic: 'test', payload: '', topic_config: { 'invalid': 'invalid' })
-         end.to raise_error(Rdkafka::Config::ConfigError)
-       end
-     end
-
-     context 'when config is valid' do
-       it 'expect not to raise error' do
-         expect do
-           producer.produce(topic: 'test', payload: '', topic_config: { 'acks': 1 }).wait
-         end.not_to raise_error
-       end
-
-       context 'when alteration should change behavior' do
-         # This is set incorrectly for a reason
-         # If alteration would not work, this will hang the spec suite
-         let(:producer) do
-           rdkafka_producer_config(
-             'message.timeout.ms': 1_000_000,
-             :"bootstrap.servers" => "127.0.0.1:9094",
-           ).producer
-         end
-
-         it 'expect to give up on delivery fast based on alteration config' do
-           expect do
-             producer.produce(
-               topic: 'produce_config_test',
-               payload: 'test',
-               topic_config: {
-                 'compression.type': 'gzip',
-                 'message.timeout.ms': 1
-               }
-             ).wait
-           end.to raise_error(Rdkafka::RdkafkaError, /msg_timed_out/)
-         end
-       end
-     end
-   end
-
-   context "delivery callback" do
-     context "with a proc/lambda" do
-       it "should set the callback" do
-         expect {
-           producer.delivery_callback = lambda do |delivery_handle|
-             puts delivery_handle
-           end
-         }.not_to raise_error
-         expect(producer.delivery_callback).to respond_to :call
-       end
-
-       it "should call the callback when a message is delivered" do
-         @callback_called = false
-
-         producer.delivery_callback = lambda do |report|
-           expect(report).not_to be_nil
-           expect(report.label).to eq "label"
-           expect(report.partition).to eq 1
-           expect(report.offset).to be >= 0
-           expect(report.topic_name).to eq "produce_test_topic"
-           @callback_called = true
-         end
-
-         # Produce a message
-         handle = producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload",
-           key: "key",
-           label: "label"
-         )
-
-         expect(handle.label).to eq "label"
-
-         # Wait for it to be delivered
-         handle.wait(max_wait_timeout: 15)
-
-         # Join the producer thread.
-         producer.close
-
-         # Callback should have been called
-         expect(@callback_called).to be true
-       end
-
-       it "should provide handle" do
-         @callback_handle = nil
-
-         producer.delivery_callback = lambda { |_, handle| @callback_handle = handle }
-
-         # Produce a message
-         handle = producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload",
-           key: "key"
-         )
-
-         # Wait for it to be delivered
-         handle.wait(max_wait_timeout: 15)
-
-         # Join the producer thread.
-         producer.close
-
-         expect(handle).to be @callback_handle
-       end
-     end
-
-     context "with a callable object" do
-       it "should set the callback" do
-         callback = Class.new do
-           def call(stats); end
-         end
-         expect {
-           producer.delivery_callback = callback.new
-         }.not_to raise_error
-         expect(producer.delivery_callback).to respond_to :call
-       end
-
-       it "should call the callback when a message is delivered" do
-         called_report = []
-         callback = Class.new do
-           def initialize(called_report)
-             @called_report = called_report
-           end
-
-           def call(report)
-             @called_report << report
-           end
-         end
-         producer.delivery_callback = callback.new(called_report)
-
-         # Produce a message
-         handle = producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload",
-           key: "key"
-         )
-
-         # Wait for it to be delivered
-         handle.wait(max_wait_timeout: 15)
-
-         # Join the producer thread.
-         producer.close
-
-         # Callback should have been called
-         expect(called_report.first).not_to be_nil
-         expect(called_report.first.partition).to eq 1
-         expect(called_report.first.offset).to be >= 0
-         expect(called_report.first.topic_name).to eq "produce_test_topic"
-       end
-
-       it "should provide handle" do
-         callback_handles = []
-         callback = Class.new do
-           def initialize(callback_handles)
-             @callback_handles = callback_handles
-           end
-
-           def call(_, handle)
-             @callback_handles << handle
-           end
-         end
-         producer.delivery_callback = callback.new(callback_handles)
-
-         # Produce a message
-         handle = producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload",
-           key: "key"
-         )
-
-         # Wait for it to be delivered
-         handle.wait(max_wait_timeout: 15)
-
-         # Join the producer thread.
-         producer.close
-
-         # Callback should have been called
-         expect(handle).to be callback_handles.first
-       end
-     end
-
-     it "should not accept a callback that's not callable" do
-       expect {
-         producer.delivery_callback = 'a string'
-       }.to raise_error(TypeError)
-     end
-   end
-
-   it "should require a topic" do
-     expect {
-       producer.produce(
-         payload: "payload",
-         key: "key"
-       )
-     }.to raise_error ArgumentError, /missing keyword: [\:]?topic/
-   end
-
-   it "should produce a message" do
-     # Produce a message
-     handle = producer.produce(
-       topic: "produce_test_topic",
-       payload: "payload",
-       key: "key",
-       label: "label"
-     )
-
-     # Should be pending at first
-     expect(handle.pending?).to be true
-     expect(handle.label).to eq "label"
-
-     # Check delivery handle and report
-     report = handle.wait(max_wait_timeout: 5)
-     expect(handle.pending?).to be false
-     expect(report).not_to be_nil
-     expect(report.partition).to eq 1
-     expect(report.offset).to be >= 0
-     expect(report.label).to eq "label"
-
-     # Flush and close producer
-     producer.flush
-     producer.close
-
-     # Consume message and verify its content
-     message = wait_for_message(
-       topic: "produce_test_topic",
-       delivery_report: report,
-       consumer: consumer
-     )
-     expect(message.partition).to eq 1
-     expect(message.payload).to eq "payload"
-     expect(message.key).to eq "key"
-     expect(message.timestamp).to be_within(10).of(Time.now)
-   end
-
-   it "should produce a message with a specified partition" do
-     # Produce a message
-     handle = producer.produce(
-       topic: "produce_test_topic",
-       payload: "payload partition",
-       key: "key partition",
-       partition: 1
-     )
-     report = handle.wait(max_wait_timeout: 5)
-
-     # Consume message and verify its content
-     message = wait_for_message(
-       topic: "produce_test_topic",
-       delivery_report: report,
-       consumer: consumer
-     )
-     expect(message.partition).to eq 1
-     expect(message.key).to eq "key partition"
-   end
-
-   it "should produce a message to the same partition with a similar partition key" do
-     # Avoid partitioner collisions.
-     while true
-       key = ('a'..'z').to_a.shuffle.take(10).join('')
-       partition_key = ('a'..'z').to_a.shuffle.take(10).join('')
-       partition_count = producer.partition_count('partitioner_test_topic')
-       break if (Zlib.crc32(key) % partition_count) != (Zlib.crc32(partition_key) % partition_count)
-     end
-
-     # Produce a message with key, partition_key and key + partition_key
-     messages = [{key: key}, {partition_key: partition_key}, {key: key, partition_key: partition_key}]
-
-     messages = messages.map do |m|
-       handle = producer.produce(
-         topic: "partitioner_test_topic",
-         payload: "payload partition",
-         key: m[:key],
-         partition_key: m[:partition_key]
-       )
-       report = handle.wait(max_wait_timeout: 5)
-
-       wait_for_message(
-         topic: "partitioner_test_topic",
-         delivery_report: report,
-       )
-     end
-
-     expect(messages[0].partition).not_to eq(messages[2].partition)
-     expect(messages[1].partition).to eq(messages[2].partition)
-     expect(messages[0].key).to eq key
-     expect(messages[1].key).to be_nil
-     expect(messages[2].key).to eq key
-   end
-
-   it "should produce a message with empty string without crashing" do
-     messages = [{key: 'a', partition_key: ''}]
-
-     messages = messages.map do |m|
-       handle = producer.produce(
-         topic: "partitioner_test_topic",
-         payload: "payload partition",
-         key: m[:key],
-         partition_key: m[:partition_key]
-       )
-       report = handle.wait(max_wait_timeout: 5)
-
-       wait_for_message(
-         topic: "partitioner_test_topic",
-         delivery_report: report,
-       )
-     end
-
-     expect(messages[0].partition).to be >= 0
-     expect(messages[0].key).to eq 'a'
-   end
-
-   it "should produce a message with utf-8 encoding" do
-     handle = producer.produce(
-       topic: "produce_test_topic",
-       payload: "Τη γλώσσα μου έδωσαν ελληνική",
-       key: "key utf8"
-     )
-     report = handle.wait(max_wait_timeout: 5)
-
-     # Consume message and verify its content
-     message = wait_for_message(
-       topic: "produce_test_topic",
-       delivery_report: report,
-       consumer: consumer
-     )
-
-     expect(message.partition).to eq 1
-     expect(message.payload.force_encoding("utf-8")).to eq "Τη γλώσσα μου έδωσαν ελληνική"
-     expect(message.key).to eq "key utf8"
-   end
-
-   it "should produce a message to a non-existing topic with key and partition key" do
-     new_topic = "it-#{SecureRandom.uuid}"
-
-     handle = producer.produce(
-       # Needs to be a new topic each time
-       topic: new_topic,
-       payload: "payload",
-       key: "key",
-       partition_key: "partition_key",
-       label: "label"
-     )
-
-     # Should be pending at first
-     expect(handle.pending?).to be true
-     expect(handle.label).to eq "label"
-
-     # Check delivery handle and report
-     report = handle.wait(max_wait_timeout: 5)
-     expect(handle.pending?).to be false
-     expect(report).not_to be_nil
-     expect(report.partition).to eq 0
-     expect(report.offset).to be >= 0
-     expect(report.label).to eq "label"
-
-     # Flush and close producer
-     producer.flush
-     producer.close
-
-     # Consume message and verify its content
-     message = wait_for_message(
-       topic: new_topic,
-       delivery_report: report,
-       consumer: consumer
-     )
-     expect(message.partition).to eq 0
-     expect(message.payload).to eq "payload"
-     expect(message.key).to eq "key"
-     # Since api.version.request is on by default we will get
-     # the message creation timestamp if it's not set.
-     expect(message.timestamp).to be_within(10).of(Time.now)
-   end
-
-   context "timestamp" do
-     it "should raise a type error if not nil, integer or time" do
-       expect {
-         producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload timestamp",
-           key: "key timestamp",
-           timestamp: "10101010"
-         )
-       }.to raise_error TypeError
-     end
-
-     it "should produce a message with an integer timestamp" do
-       handle = producer.produce(
-         topic: "produce_test_topic",
-         payload: "payload timestamp",
-         key: "key timestamp",
-         timestamp: 1505069646252
-       )
-       report = handle.wait(max_wait_timeout: 5)
-
-       # Consume message and verify its content
-       message = wait_for_message(
-         topic: "produce_test_topic",
-         delivery_report: report,
-         consumer: consumer
-       )
-
-       expect(message.partition).to eq 2
-       expect(message.key).to eq "key timestamp"
-       expect(message.timestamp).to eq Time.at(1505069646, 252_000)
-     end
-
-     it "should produce a message with a time timestamp" do
-       handle = producer.produce(
-         topic: "produce_test_topic",
-         payload: "payload timestamp",
-         key: "key timestamp",
-         timestamp: Time.at(1505069646, 353_000)
-       )
-       report = handle.wait(max_wait_timeout: 5)
-
-       # Consume message and verify its content
-       message = wait_for_message(
-         topic: "produce_test_topic",
-         delivery_report: report,
-         consumer: consumer
-       )
-
-       expect(message.partition).to eq 2
-       expect(message.key).to eq "key timestamp"
-       expect(message.timestamp).to eq Time.at(1505069646, 353_000)
-     end
-   end
-
-   it "should produce a message with nil key" do
-     handle = producer.produce(
-       topic: "produce_test_topic",
-       payload: "payload no key"
-     )
-     report = handle.wait(max_wait_timeout: 5)
-
-     # Consume message and verify its content
-     message = wait_for_message(
-       topic: "produce_test_topic",
-       delivery_report: report,
-       consumer: consumer
-     )
-
-     expect(message.key).to be_nil
-     expect(message.payload).to eq "payload no key"
-   end
-
-   it "should produce a message with nil payload" do
-     handle = producer.produce(
-       topic: "produce_test_topic",
-       key: "key no payload"
-     )
-     report = handle.wait(max_wait_timeout: 5)
-
-     # Consume message and verify its content
-     message = wait_for_message(
-       topic: "produce_test_topic",
-       delivery_report: report,
-       consumer: consumer
-     )
-
-     expect(message.key).to eq "key no payload"
-     expect(message.payload).to be_nil
-   end
-
-   it "should produce a message with headers" do
-     handle = producer.produce(
-       topic: "produce_test_topic",
-       payload: "payload headers",
-       key: "key headers",
-       headers: { foo: :bar, baz: :foobar }
-     )
-     report = handle.wait(max_wait_timeout: 5)
-
-     # Consume message and verify its content
-     message = wait_for_message(
-       topic: "produce_test_topic",
-       delivery_report: report,
-       consumer: consumer
-     )
-
-     expect(message.payload).to eq "payload headers"
-     expect(message.key).to eq "key headers"
-     expect(message.headers["foo"]).to eq "bar"
-     expect(message.headers["baz"]).to eq "foobar"
-     expect(message.headers["foobar"]).to be_nil
-   end
-
-   it "should produce a message with empty headers" do
-     handle = producer.produce(
-       topic: "produce_test_topic",
-       payload: "payload headers",
-       key: "key headers",
-       headers: {}
-     )
-     report = handle.wait(max_wait_timeout: 5)
-
-     # Consume message and verify its content
-     message = wait_for_message(
-       topic: "produce_test_topic",
-       delivery_report: report,
-       consumer: consumer
-     )
-
-     expect(message.payload).to eq "payload headers"
-     expect(message.key).to eq "key headers"
-     expect(message.headers).to be_empty
-   end
-
-   it "should produce messages that aren't waited for and not crash" do
-     5.times do
-       200.times do
-         producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload not waiting",
-           key: "key not waiting"
-         )
-       end
-
-       # Allow some time for a GC run
-       sleep 1
-     end
-
-     # Wait for the delivery notifications
-     10.times do
-       break if Rdkafka::Producer::DeliveryHandle::REGISTRY.empty?
-       sleep 1
-     end
-   end
-
-   it "should produce a message in a forked process", skip: defined?(JRUBY_VERSION) && "Kernel#fork is not available" do
-     # Fork, produce a message, send the report over a pipe and
-     # wait for and check the message in the main process.
-     reader, writer = IO.pipe
-
-     pid = fork do
-       reader.close
-
-       # Avoid sharing the client between processes.
-       producer = rdkafka_producer_config.producer
-
-       handle = producer.produce(
-         topic: "produce_test_topic",
-         payload: "payload-forked",
-         key: "key-forked"
-       )
-
-       report = handle.wait(max_wait_timeout: 5)
-
-       report_json = JSON.generate(
-         "partition" => report.partition,
-         "offset" => report.offset,
-         "topic_name" => report.topic_name
-       )
-
-       writer.write(report_json)
-       writer.close
-       producer.flush
-       producer.close
-     end
-     Process.wait(pid)
-
-     writer.close
-     report_hash = JSON.parse(reader.read)
-     report = Rdkafka::Producer::DeliveryReport.new(
-       report_hash["partition"],
-       report_hash["offset"],
-       report_hash["topic_name"]
-     )
-
-     reader.close
-
-     # Consume message and verify its content
-     message = wait_for_message(
-       topic: "produce_test_topic",
-       delivery_report: report,
-       consumer: consumer
-     )
-     expect(message.partition).to eq 0
-     expect(message.payload).to eq "payload-forked"
-     expect(message.key).to eq "key-forked"
-   end
-
-   it "should raise an error when producing fails" do
-     expect(Rdkafka::Bindings).to receive(:rd_kafka_producev).and_return(20)
-
-     expect {
-       producer.produce(
-         topic: "produce_test_topic",
-         key: "key error"
-       )
-     }.to raise_error Rdkafka::RdkafkaError
-   end
-
-   it "should raise a timeout error when waiting too long" do
-     handle = producer.produce(
-       topic: "produce_test_topic",
-       payload: "payload timeout",
-       key: "key timeout"
-     )
-     expect {
-       handle.wait(max_wait_timeout: 0)
-     }.to raise_error Rdkafka::Producer::DeliveryHandle::WaitTimeoutError
-
-     # Waiting a second time should work
-     handle.wait(max_wait_timeout: 5)
-   end
-
-   context "methods that should not be called after a producer has been closed" do
-     before do
-       producer.close
-     end
-
-     # Affected methods and a minimally valid set of parameters for each method
-     {
-       :produce => { topic: nil },
-       :partition_count => nil,
-     }.each do |method, args|
-       it "raises an exception if #{method} is called" do
-         expect {
-           if args.is_a?(Hash)
-             producer.public_send(method, **args)
-           else
-             producer.public_send(method, args)
-           end
-         }.to raise_exception(Rdkafka::ClosedProducerError, /#{method.to_s}/)
-       end
-     end
-   end
-
-   context "when not being able to deliver the message" do
-     let(:producer) do
-       rdkafka_producer_config(
-         "bootstrap.servers": "127.0.0.1:9093",
-         "message.timeout.ms": 100
-       ).producer
-     end
-
-     it "should contain the error in the response when not deliverable" do
-       handler = producer.produce(topic: 'produce_test_topic', payload: nil, label: 'na')
-       # Wait for the async callbacks and delivery registry to update
-       sleep(2)
-       expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
-       expect(handler.create_result.label).to eq('na')
-     end
-   end
-
-   context "when topic does not exist and allow.auto.create.topics is false" do
-     let(:producer) do
-       rdkafka_producer_config(
-         "bootstrap.servers": "127.0.0.1:9092",
-         "message.timeout.ms": 100,
-         "allow.auto.create.topics": false
-       ).producer
-     end
-
-     it "should contain the error in the response when not deliverable" do
-       handler = producer.produce(topic: "it-#{SecureRandom.uuid}", payload: nil, label: 'na')
-       # Wait for the async callbacks and delivery registry to update
-       sleep(2)
-       expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
-       expect(handler.create_result.error.code).to eq(:msg_timed_out)
-       expect(handler.create_result.label).to eq('na')
-     end
-   end
-
-   describe '#partition_count' do
-     it { expect(producer.partition_count('example_topic')).to eq(1) }
-
-     context 'when the partition count value is already cached' do
-       before do
-         producer.partition_count('example_topic')
-         allow(::Rdkafka::Metadata).to receive(:new).and_call_original
-       end
-
-       it 'expect not to query it again' do
-         producer.partition_count('example_topic')
-         expect(::Rdkafka::Metadata).not_to have_received(:new)
-       end
-     end
-
-     context 'when the partition count value was cached but time expired' do
-       before do
-         ::Rdkafka::Producer.partitions_count_cache = Rdkafka::Producer::PartitionsCountCache.new
-         allow(::Rdkafka::Metadata).to receive(:new).and_call_original
-       end
-
-       it 'expect to query it again' do
-         producer.partition_count('example_topic')
-         expect(::Rdkafka::Metadata).to have_received(:new)
-       end
-     end
-
-     context 'when the partition count value was cached and time did not expire' do
-       before do
-         allow(::Process).to receive(:clock_gettime).and_return(0, 29.001)
-         producer.partition_count('example_topic')
-         allow(::Rdkafka::Metadata).to receive(:new).and_call_original
-       end
-
-       it 'expect not to query it again' do
-         producer.partition_count('example_topic')
-         expect(::Rdkafka::Metadata).not_to have_received(:new)
-       end
-     end
-   end
-
-   describe 'metadata fetch request recovery' do
-     subject(:partition_count) { producer.partition_count('example_topic') }
-
-     describe 'metadata initialization recovery' do
-       context 'when all good' do
-         it { expect(partition_count).to eq(1) }
-       end
-
-       context 'when we fail for the first time with handled error' do
-         before do
-           raised = false
-
-           allow(Rdkafka::Bindings).to receive(:rd_kafka_metadata).and_wrap_original do |m, *args|
-             if raised
-               m.call(*args)
-             else
-               raised = true
-               -185
-             end
-           end
-         end
-
-         it { expect(partition_count).to eq(1) }
-       end
-     end
-   end
-
-   describe '#flush' do
-     it "should return true when it can flush all outstanding messages or when there are no messages" do
-       producer.produce(
-         topic: "produce_test_topic",
-         payload: "payload headers",
-         key: "key headers",
-         headers: {}
-       )
-
-       expect(producer.flush(5_000)).to eq(true)
-     end
-
-     context 'when it cannot flush due to a timeout' do
-       let(:producer) do
-         rdkafka_producer_config(
-           "bootstrap.servers": "127.0.0.1:9093",
-           "message.timeout.ms": 2_000
-         ).producer
-       end
-
-       after do
-         # Allow rdkafka to evict message preventing memory-leak
-         sleep(2)
-       end
-
-       it "should return false on flush when cannot deliver and beyond timeout" do
-         producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload headers",
-           key: "key headers",
-           headers: {}
-         )
-
-         expect(producer.flush(1_000)).to eq(false)
-       end
-     end
-
-     context 'when there is a different error' do
-       before { allow(Rdkafka::Bindings).to receive(:rd_kafka_flush).and_return(-199) }
-
-       it 'should raise it' do
-         expect { producer.flush }.to raise_error(Rdkafka::RdkafkaError)
-       end
-     end
-   end
-
-   describe '#purge' do
-     context 'when no outgoing messages' do
-       it { expect(producer.purge).to eq(true) }
-     end
-
-     context 'when librdkafka purge returns an error' do
-       before { expect(Rdkafka::Bindings).to receive(:rd_kafka_purge).and_return(-153) }
-
-       it 'expect to raise an error' do
-         expect { producer.purge }.to raise_error(Rdkafka::RdkafkaError, /retry/)
-       end
-     end
-
-     context 'when there are outgoing things in the queue' do
-       let(:producer) do
-         rdkafka_producer_config(
-           "bootstrap.servers": "127.0.0.1:9093",
-           "message.timeout.ms": 2_000
-         ).producer
-       end
-
-       it "should purge and move forward" do
-         producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload headers"
-         )
-
-         expect(producer.purge).to eq(true)
-         expect(producer.flush(1_000)).to eq(true)
-       end
-
-       it "should materialize the delivery handles" do
-         handle = producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload headers"
-         )
-
-         expect(producer.purge).to eq(true)
-
-         expect { handle.wait }.to raise_error(Rdkafka::RdkafkaError, /purge_queue/)
-       end
-
-       context "when using delivery_callback" do
-         let(:delivery_reports) { [] }
-
-         let(:delivery_callback) do
-           ->(delivery_report) { delivery_reports << delivery_report }
-         end
-
-         before { producer.delivery_callback = delivery_callback }
-
-         it "should run the callback" do
-           handle = producer.produce(
-             topic: "produce_test_topic",
-             payload: "payload headers"
-           )
-
-           expect(producer.purge).to eq(true)
-           # queue purge
-           expect(delivery_reports[0].error).to eq(-152)
-         end
-       end
-     end
-   end
-
-   context 'when working with transactions' do
-     let(:producer) do
-       rdkafka_producer_config(
-         'transactional.id': SecureRandom.uuid,
-         'transaction.timeout.ms': 5_000
-       ).producer
-     end
-
-     it 'expect not to allow to produce without transaction init' do
-       expect do
-         producer.produce(topic: 'produce_test_topic', payload: 'data')
-       end.to raise_error(Rdkafka::RdkafkaError, /Erroneous state \(state\)/)
-     end
-
-     it 'expect to raise error when transactions are initialized but producing not in one' do
-       producer.init_transactions
-
-       expect do
-         producer.produce(topic: 'produce_test_topic', payload: 'data')
-       end.to raise_error(Rdkafka::RdkafkaError, /Erroneous state \(state\)/)
-     end
-
-     it 'expect to allow to produce within a transaction, finalize and ship data' do
-       producer.init_transactions
-       producer.begin_transaction
-       handle1 = producer.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
-       handle2 = producer.produce(topic: 'example_topic', payload: 'data2', partition: 0)
-       producer.commit_transaction
-
-       report1 = handle1.wait(max_wait_timeout: 15)
-       report2 = handle2.wait(max_wait_timeout: 15)
-
-       message1 = wait_for_message(
-         topic: "produce_test_topic",
-         delivery_report: report1,
-         consumer: consumer
-       )
-
-       expect(message1.partition).to eq 1
-       expect(message1.payload).to eq "data1"
-       expect(message1.timestamp).to be_within(10).of(Time.now)
-
-       message2 = wait_for_message(
-         topic: "example_topic",
-         delivery_report: report2,
-         consumer: consumer
-       )
-
-       expect(message2.partition).to eq 0
-       expect(message2.payload).to eq "data2"
-       expect(message2.timestamp).to be_within(10).of(Time.now)
-     end
-
-     it 'expect not to send data and propagate purge queue error on abort' do
-       producer.init_transactions
-       producer.begin_transaction
-       handle1 = producer.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
-       handle2 = producer.produce(topic: 'example_topic', payload: 'data2', partition: 0)
-       producer.abort_transaction
-
-       expect { handle1.wait(max_wait_timeout: 15) }
-         .to raise_error(Rdkafka::RdkafkaError, /Purged in queue \(purge_queue\)/)
-       expect { handle2.wait(max_wait_timeout: 15) }
-         .to raise_error(Rdkafka::RdkafkaError, /Purged in queue \(purge_queue\)/)
-     end
-
-     it 'expect to have a non-retryable, non-abortable and non-fatal error on abort' do
-       producer.init_transactions
-       producer.begin_transaction
-       handle = producer.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
-       producer.abort_transaction
-
-       response = handle.wait(raise_response_error: false)
-
-       expect(response.error).to be_a(Rdkafka::RdkafkaError)
-       expect(response.error.retryable?).to eq(false)
-       expect(response.error.fatal?).to eq(false)
-       expect(response.error.abortable?).to eq(false)
-     end
-
-     context 'fencing against previous active producer with same transactional id' do
-       let(:transactional_id) { SecureRandom.uuid }
-
-       let(:producer1) do
-         rdkafka_producer_config(
-           'transactional.id': transactional_id,
-           'transaction.timeout.ms': 10_000
-         ).producer
-       end
-
-       let(:producer2) do
-         rdkafka_producer_config(
-           'transactional.id': transactional_id,
-           'transaction.timeout.ms': 10_000
-         ).producer
-       end
-
-       after do
-         producer1.close
-         producer2.close
-       end
-
-       it 'expect older producer not to be able to commit when fenced out' do
-         producer1.init_transactions
-         producer1.begin_transaction
-         producer1.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
-
-         producer2.init_transactions
-         producer2.begin_transaction
-         producer2.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
-
-         expect { producer1.commit_transaction }
-           .to raise_error(Rdkafka::RdkafkaError, /This instance has been fenced/)
-
-         error = false
-
-         begin
-           producer1.commit_transaction
-         rescue Rdkafka::RdkafkaError => e
-           error = e
-         end
-
-         expect(error.fatal?).to eq(true)
-         expect(error.abortable?).to eq(false)
-         expect(error.retryable?).to eq(false)
-
-         expect { producer2.commit_transaction }.not_to raise_error
-       end
-     end
-
-     context 'when having a consumer with tpls for exactly once semantics' do
-       let(:tpl) do
-         producer.produce(topic: 'consume_test_topic', payload: 'data1', partition: 0).wait
-         result = producer.produce(topic: 'consume_test_topic', payload: 'data1', partition: 0).wait
-
-         Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
-           list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => result.offset + 1)
-         end
-       end
-
-       before do
-         consumer.subscribe("consume_test_topic")
-         wait_for_assignment(consumer)
-         producer.init_transactions
-         producer.begin_transaction
-       end
-
-       after { consumer.unsubscribe }
-
-       it 'expect to store offsets and not crash' do
-         producer.send_offsets_to_transaction(consumer, tpl)
-         producer.commit_transaction
-       end
-     end
-   end
-
-   describe '#oauthbearer_set_token' do
-     context 'when sasl not configured' do
-       it 'should return RD_KAFKA_RESP_ERR__STATE' do
-         response = producer.oauthbearer_set_token(
-           token: "foo",
-           lifetime_ms: Time.now.to_i*1000 + 900 * 1000,
-           principal_name: "kafka-cluster"
-         )
-         expect(response).to eq(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE)
-       end
-     end
-
-     context 'when sasl configured' do
-       it 'should succeed' do
-         producer_sasl = rdkafka_producer_config(
-           {
-             "security.protocol": "sasl_ssl",
-             "sasl.mechanisms": 'OAUTHBEARER'
-           }
-         ).producer
-         response = producer_sasl.oauthbearer_set_token(
-           token: "foo",
-           lifetime_ms: Time.now.to_i*1000 + 900 * 1000,
-           principal_name: "kafka-cluster"
-         )
-         expect(response).to eq(0)
-       end
-     end
-   end
-
-   describe "#produce with headers" do
-     it "should produce a message with array headers" do
-       headers = {
-         "version" => ["2.1.3", "2.1.4"],
-         "type" => "String"
-       }
-
-       report = producer.produce(
-         topic: "consume_test_topic",
-         key: "key headers",
-         headers: headers
-       ).wait
-
-       message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
-       expect(message).to be
-       expect(message.key).to eq('key headers')
-       expect(message.headers['type']).to eq('String')
-       expect(message.headers['version']).to eq(["2.1.3", "2.1.4"])
-     end
-
-     it "should produce a message with single value headers" do
-       headers = {
-         "version" => "2.1.3",
-         "type" => "String"
-       }
-
-       report = producer.produce(
-         topic: "consume_test_topic",
-         key: "key headers",
-         headers: headers
-       ).wait
-
-       message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
-       expect(message).to be
-       expect(message.key).to eq('key headers')
-       expect(message.headers['type']).to eq('String')
-       expect(message.headers['version']).to eq('2.1.3')
-     end
-   end
-
-   describe 'with active statistics callback' do
-     let(:producer) do
-       rdkafka_producer_config('statistics.interval.ms': 1_000).producer
-     end
-
-     let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
-     let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
-     let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
-
-     context "when using partition key" do
-       before do
-         Rdkafka::Config.statistics_callback = ->(*) {}
-
-         # This call will make a blocking request to the metadata cache
-         producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload headers",
-           partition_key: "test"
-         ).wait
-
-         pre_statistics_ttl
-
-         # We wait to make sure that statistics are triggered and that there is a refresh
-         sleep(1.5)
-
-         post_statistics_ttl
-       end
-
-       it 'expect to update ttl on the partitions count cache via statistics' do
-         expect(pre_statistics_ttl).to be < post_statistics_ttl
-       end
-     end
-
-     context "when not using partition key" do
-       before do
-         Rdkafka::Config.statistics_callback = ->(*) {}
-
-         # This call will make a blocking request to the metadata cache
-         producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload headers"
-         ).wait
-
-         pre_statistics_ttl
-
-         # We wait to make sure that statistics are triggered and that there is a refresh
-         sleep(1.5)
-
-         # This will be populated from statistics anyway
-         post_statistics_ttl
-       end
-
-       it 'expect not to update ttl on the partitions count cache via blocking but via stats' do
-         expect(pre_statistics_ttl).to be_nil
-         expect(post_statistics_ttl).not_to be_nil
-       end
-     end
-   end
-
-   describe 'without active statistics callback' do
-     let(:producer) do
-       rdkafka_producer_config('statistics.interval.ms': 1_000).producer
-     end
-
-     let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
-     let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
-     let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
-
-     context "when using partition key" do
-       before do
-         # This call will make a blocking request to the metadata cache
-         producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload headers",
-           partition_key: "test"
-         ).wait
-
-         pre_statistics_ttl
-
-         # We wait to make sure that statistics are triggered and that there is a refresh
-         sleep(1.5)
-
-         post_statistics_ttl
-       end
-
-       it 'expect not to update ttl on the partitions count cache via statistics' do
-         expect(pre_statistics_ttl).to eq post_statistics_ttl
-       end
-     end
-
-     context "when not using partition key" do
-       before do
-         # This call will make a blocking request to the metadata cache
-         producer.produce(
-           topic: "produce_test_topic",
-           payload: "payload headers"
-         ).wait
-
-         pre_statistics_ttl
-
-         # We wait to make sure that statistics are triggered and that there is a refresh
-         sleep(1.5)
-
-         # This should not be populated because stats are not in use
-         post_statistics_ttl
-       end
-
-       it 'expect not to update ttl on the partitions count cache via anything' do
-         expect(pre_statistics_ttl).to be_nil
-         expect(post_statistics_ttl).to be_nil
-       end
-     end
-   end
-
-   describe 'with other fiber closing' do
-     context 'when we create many fibers and close producer in some of them' do
-       it 'expect not to crash ruby' do
-         10.times do |i|
-           producer = rdkafka_producer_config.producer
-
-           Fiber.new do
-             GC.start
-             producer.close
-           end.resume
-         end
-       end
-     end
-   end
-
-   let(:producer) { rdkafka_producer_config.producer }
-   let(:all_partitioners) { %w(random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random) }
-
-   describe "partitioner behavior through producer API" do
-     context "testing all partitioners with same key" do
-       it "should not return partition 0 for all partitioners" do
-         test_key = "test-key-123"
-         results = {}
-
-         all_partitioners.each do |partitioner|
-           handle = producer.produce(
-             topic: "partitioner_test_topic",
-             payload: "test payload",
-             partition_key: test_key,
-             partitioner: partitioner
-           )
-
-           report = handle.wait(max_wait_timeout: 5)
-           results[partitioner] = report.partition
-         end
-
-         # Should not all be the same partition (especially not all 0)
-         unique_partitions = results.values.uniq
-         expect(unique_partitions.size).to be > 1
-       end
-     end
-
-     context "empty string partition key" do
-       it "should produce message with empty partition key without crashing and go to partition 0 for all partitioners" do
-         all_partitioners.each do |partitioner|
-           handle = producer.produce(
-             topic: "partitioner_test_topic",
-             payload: "test payload",
-             key: "test-key",
-             partition_key: "",
-             partitioner: partitioner
-           )
-
-           report = handle.wait(max_wait_timeout: 5)
-           expect(report.partition).to be >= 0
-         end
-       end
-     end
-
-     context "nil partition key" do
-       it "should handle nil partition key gracefully" do
-         handle = producer.produce(
-           topic: "partitioner_test_topic",
-           payload: "test payload",
-           key: "test-key",
-           partition_key: nil
-         )
-
-         report = handle.wait(max_wait_timeout: 5)
-         expect(report.partition).to be >= 0
-         expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
-       end
-     end
-
-     context "various key types and lengths with different partitioners" do
-       it "should handle very short keys with all partitioners" do
-         all_partitioners.each do |partitioner|
-           handle = producer.produce(
-             topic: "partitioner_test_topic",
-             payload: "test payload",
-             partition_key: "a",
-             partitioner: partitioner
-           )
-
-           report = handle.wait(max_wait_timeout: 5)
-           expect(report.partition).to be >= 0
-           expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
-         end
-       end
-
-       it "should handle very long keys with all partitioners" do
-         long_key = "a" * 1000
-
-         all_partitioners.each do |partitioner|
-           handle = producer.produce(
-             topic: "partitioner_test_topic",
-             payload: "test payload",
-             partition_key: long_key,
-             partitioner: partitioner
-           )
-
-           report = handle.wait(max_wait_timeout: 5)
-           expect(report.partition).to be >= 0
-           expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
-         end
-       end
-
-       it "should handle unicode keys with all partitioners" do
-         unicode_key = "测试键值🚀"
-
-         all_partitioners.each do |partitioner|
-           handle = producer.produce(
-             topic: "partitioner_test_topic",
-             payload: "test payload",
-             partition_key: unicode_key,
-             partitioner: partitioner
-           )
-
-           report = handle.wait(max_wait_timeout: 5)
-           expect(report.partition).to be >= 0
-           expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
-         end
-       end
-     end
-
-     context "consistency testing for deterministic partitioners" do
-       %w(consistent murmur2 fnv1a).each do |partitioner|
-         it "should consistently route same partition key to same partition with #{partitioner}" do
-           partition_key = "consistent-test-key"
-
-           # Produce multiple messages with same partition key
-           reports = 5.times.map do
-             handle = producer.produce(
-               topic: "partitioner_test_topic",
-               payload: "test payload #{Time.now.to_f}",
-               partition_key: partition_key,
-               partitioner: partitioner
-             )
-             handle.wait(max_wait_timeout: 5)
-           end
-
-           # All should go to same partition
-           partitions = reports.map(&:partition).uniq
-           expect(partitions.size).to eq(1)
-         end
-       end
-     end
-
-     context "randomness testing for random partitioners" do
-       %w(random consistent_random murmur2_random fnv1a_random).each do |partitioner|
-         it "should potentially distribute across partitions with #{partitioner}" do
-           # Note: random partitioners might still return same value by chance
-           partition_key = "random-test-key"
-
-           reports = 10.times.map do
-             handle = producer.produce(
-               topic: "partitioner_test_topic",
-               payload: "test payload #{Time.now.to_f}",
-               partition_key: partition_key,
-               partitioner: partitioner
-             )
-             handle.wait(max_wait_timeout: 5)
-           end
-
-           partitions = reports.map(&:partition)
-
-           # Just ensure they're valid partitions
-           partitions.each do |partition|
-             expect(partition).to be >= 0
-             expect(partition).to be < producer.partition_count("partitioner_test_topic")
-           end
-         end
-       end
-     end
-
-     context "comparing different partitioners with same key" do
-       it "should route different partition keys to potentially different partitions" do
-         keys = ["key1", "key2", "key3", "key4", "key5"]
-
-         all_partitioners.each do |partitioner|
-           reports = keys.map do |key|
-             handle = producer.produce(
-               topic: "partitioner_test_topic",
-               payload: "test payload",
-               partition_key: key,
-               partitioner: partitioner
-             )
-             handle.wait(max_wait_timeout: 5)
-           end
-
-           partitions = reports.map(&:partition).uniq
-
-           # Should distribute across multiple partitions for most partitioners
-           # (though some might hash all keys to same partition by chance)
-           expect(partitions.all? { |p| p >= 0 && p < producer.partition_count("partitioner_test_topic") }).to be true
-         end
-       end
-     end
-
-     context "partition key vs regular key behavior" do
-       it "should use partition key for partitioning when both key and partition_key are provided" do
-         # Use keys that would hash to different partitions
-         regular_key = "regular-key-123"
-         partition_key = "partition-key-456"
-
-         # Message with both keys
-         handle1 = producer.produce(
-           topic: "partitioner_test_topic",
-           payload: "test payload 1",
-           key: regular_key,
-           partition_key: partition_key
-         )
-
-         # Message with only partition key (should go to same partition)
-         handle2 = producer.produce(
-           topic: "partitioner_test_topic",
-           payload: "test payload 2",
-           partition_key: partition_key
-         )
-
-         # Message with only regular key (should go to different partition)
-         handle3 = producer.produce(
-           topic: "partitioner_test_topic",
-           payload: "test payload 3",
-           key: regular_key
-         )
-
-         report1 = handle1.wait(max_wait_timeout: 5)
-         report2 = handle2.wait(max_wait_timeout: 5)
-         report3 = handle3.wait(max_wait_timeout: 5)
-
-         # Messages 1 and 2 should go to same partition (both use partition_key)
-         expect(report1.partition).to eq(report2.partition)
-
-         # Message 3 should potentially go to different partition (uses regular key)
-         expect(report3.partition).not_to eq(report1.partition)
-       end
-     end
-
-     context "edge case combinations with different partitioners" do
-       it "should handle nil partition key with all partitioners" do
-         all_partitioners.each do |partitioner|
-           handle = producer.produce(
-             topic: "partitioner_test_topic",
-             payload: "test payload",
-             key: "test-key",
-             partition_key: nil,
-             partitioner: partitioner
-           )
-
-           report = handle.wait(max_wait_timeout: 5)
-           expect(report.partition).to be >= 0
-           expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
-         end
-       end
-
-       it "should handle whitespace-only partition key with all partitioners" do
-         all_partitioners.each do |partitioner|
-           handle = producer.produce(
-             topic: "partitioner_test_topic",
-             payload: "test payload",
-             partition_key: " ",
-             partitioner: partitioner
-           )
-
-           report = handle.wait(max_wait_timeout: 5)
-           expect(report.partition).to be >= 0
-           expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
-         end
-       end
-
-       it "should handle newline characters in partition key with all partitioners" do
-         all_partitioners.each do |partitioner|
-           handle = producer.produce(
-             topic: "partitioner_test_topic",
-             payload: "test payload",
-             partition_key: "key\nwith\nnewlines",
-             partitioner: partitioner
-           )
-
-           report = handle.wait(max_wait_timeout: 5)
-           expect(report.partition).to be >= 0
-           expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
-         end
-       end
-     end
-
-     context "debugging partitioner issues" do
-       it "should show if all partitioners return 0 (indicating a problem)" do
-         test_key = "debug-test-key"
-         zero_count = 0
-
-         all_partitioners.each do |partitioner|
-           handle = producer.produce(
-             topic: "partitioner_test_topic",
-             payload: "debug payload",
-             partition_key: test_key,
-             partitioner: partitioner
-           )
-
-           report = handle.wait(max_wait_timeout: 5)
-           zero_count += 1 if report.partition == 0
-         end
-
-         expect(zero_count).to be < all_partitioners.size
-       end
-     end
-   end
- end
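
Note: the removal above only drops the spec files from the packaged gem; the producer API those specs exercised still ships in the gem. As a quick post-upgrade smoke test, a minimal produce-and-wait roundtrip covers the same happy path the suite verified. This is a sketch, not part of the diff: the broker address (127.0.0.1:9092) and topic name are placeholder assumptions for a local setup.

    # Minimal smoke test for the produce/wait path exercised by the removed spec.
    # Assumes a reachable local broker and a topic that exists or can be auto-created.
    require "rdkafka"

    config = Rdkafka::Config.new("bootstrap.servers" => "127.0.0.1:9092")
    producer = config.producer

    # produce returns a delivery handle; wait blocks until the broker acknowledges.
    handle = producer.produce(
      topic: "smoke_test_topic",
      payload: "payload",
      key: "key"
    )
    report = handle.wait(max_wait_timeout: 15)
    puts "delivered to partition #{report.partition} at offset #{report.offset}"

    producer.close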