rdkafka 0.22.0-arm64-darwin

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102) hide show
  1. checksums.yaml +7 -0
  2. data/.github/CODEOWNERS +3 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci_linux_x86_64_gnu.yml +249 -0
  5. data/.github/workflows/ci_linux_x86_64_musl.yml +205 -0
  6. data/.github/workflows/ci_macos_arm64.yml +306 -0
  7. data/.github/workflows/push_linux_x86_64_gnu.yml +64 -0
  8. data/.github/workflows/push_linux_x86_64_musl.yml +77 -0
  9. data/.github/workflows/push_macos_arm64.yml +54 -0
  10. data/.github/workflows/push_ruby.yml +37 -0
  11. data/.github/workflows/verify-action-pins.yml +16 -0
  12. data/.gitignore +14 -0
  13. data/.rspec +2 -0
  14. data/.ruby-gemset +1 -0
  15. data/.ruby-version +1 -0
  16. data/.yardopts +2 -0
  17. data/CHANGELOG.md +249 -0
  18. data/Gemfile +5 -0
  19. data/MIT-LICENSE +22 -0
  20. data/README.md +178 -0
  21. data/Rakefile +96 -0
  22. data/docker-compose.yml +25 -0
  23. data/ext/README.md +19 -0
  24. data/ext/Rakefile +131 -0
  25. data/ext/build_common.sh +361 -0
  26. data/ext/build_linux_x86_64_gnu.sh +306 -0
  27. data/ext/build_linux_x86_64_musl.sh +763 -0
  28. data/ext/build_macos_arm64.sh +550 -0
  29. data/ext/librdkafka.dylib +0 -0
  30. data/lib/rdkafka/abstract_handle.rb +116 -0
  31. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  32. data/lib/rdkafka/admin/config_binding_result.rb +30 -0
  33. data/lib/rdkafka/admin/config_resource_binding_result.rb +18 -0
  34. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  35. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  36. data/lib/rdkafka/admin/create_partitions_handle.rb +27 -0
  37. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  38. data/lib/rdkafka/admin/create_topic_handle.rb +29 -0
  39. data/lib/rdkafka/admin/create_topic_report.rb +24 -0
  40. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  41. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  42. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  43. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  44. data/lib/rdkafka/admin/delete_topic_handle.rb +29 -0
  45. data/lib/rdkafka/admin/delete_topic_report.rb +24 -0
  46. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  47. data/lib/rdkafka/admin/describe_acl_report.rb +24 -0
  48. data/lib/rdkafka/admin/describe_configs_handle.rb +33 -0
  49. data/lib/rdkafka/admin/describe_configs_report.rb +54 -0
  50. data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +33 -0
  51. data/lib/rdkafka/admin/incremental_alter_configs_report.rb +54 -0
  52. data/lib/rdkafka/admin.rb +833 -0
  53. data/lib/rdkafka/bindings.rb +565 -0
  54. data/lib/rdkafka/callbacks.rb +415 -0
  55. data/lib/rdkafka/config.rb +398 -0
  56. data/lib/rdkafka/consumer/headers.rb +79 -0
  57. data/lib/rdkafka/consumer/message.rb +86 -0
  58. data/lib/rdkafka/consumer/partition.rb +51 -0
  59. data/lib/rdkafka/consumer/topic_partition_list.rb +169 -0
  60. data/lib/rdkafka/consumer.rb +653 -0
  61. data/lib/rdkafka/error.rb +101 -0
  62. data/lib/rdkafka/helpers/oauth.rb +58 -0
  63. data/lib/rdkafka/helpers/time.rb +14 -0
  64. data/lib/rdkafka/metadata.rb +115 -0
  65. data/lib/rdkafka/native_kafka.rb +139 -0
  66. data/lib/rdkafka/producer/delivery_handle.rb +40 -0
  67. data/lib/rdkafka/producer/delivery_report.rb +46 -0
  68. data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
  69. data/lib/rdkafka/producer.rb +435 -0
  70. data/lib/rdkafka/version.rb +7 -0
  71. data/lib/rdkafka.rb +54 -0
  72. data/rdkafka.gemspec +65 -0
  73. data/renovate.json +92 -0
  74. data/spec/rdkafka/abstract_handle_spec.rb +117 -0
  75. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  76. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  77. data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
  78. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  79. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  80. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  81. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
  82. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  83. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  84. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  85. data/spec/rdkafka/admin_spec.rb +971 -0
  86. data/spec/rdkafka/bindings_spec.rb +199 -0
  87. data/spec/rdkafka/callbacks_spec.rb +20 -0
  88. data/spec/rdkafka/config_spec.rb +258 -0
  89. data/spec/rdkafka/consumer/headers_spec.rb +73 -0
  90. data/spec/rdkafka/consumer/message_spec.rb +139 -0
  91. data/spec/rdkafka/consumer/partition_spec.rb +57 -0
  92. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +248 -0
  93. data/spec/rdkafka/consumer_spec.rb +1274 -0
  94. data/spec/rdkafka/error_spec.rb +89 -0
  95. data/spec/rdkafka/metadata_spec.rb +79 -0
  96. data/spec/rdkafka/native_kafka_spec.rb +130 -0
  97. data/spec/rdkafka/producer/delivery_handle_spec.rb +45 -0
  98. data/spec/rdkafka/producer/delivery_report_spec.rb +25 -0
  99. data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
  100. data/spec/rdkafka/producer_spec.rb +1345 -0
  101. data/spec/spec_helper.rb +195 -0
  102. metadata +276 -0
@@ -0,0 +1,1345 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "zlib"
4
+
5
+ describe Rdkafka::Producer do
6
+ let(:producer) { rdkafka_producer_config.producer }
7
+ let(:consumer) { rdkafka_consumer_config.consumer }
8
+
9
+ after do
10
+ # Registry should always end up being empty
11
+ registry = Rdkafka::Producer::DeliveryHandle::REGISTRY
12
+ expect(registry).to be_empty, registry.inspect
13
+ producer.close
14
+ consumer.close
15
+ end
16
+
17
+ describe 'producer without auto-start' do
18
+ let(:producer) { rdkafka_producer_config.producer(native_kafka_auto_start: false) }
19
+
20
+ it 'expect to be able to start it later and close' do
21
+ producer.start
22
+ producer.close
23
+ end
24
+
25
+ it 'expect to be able to close it without starting' do
26
+ producer.close
27
+ end
28
+ end
29
+
30
+ describe '#name' do
31
+ it { expect(producer.name).to include('rdkafka#producer-') }
32
+ end
33
+
34
+ describe '#produce with topic config alterations' do
35
+ context 'when config is not valid' do
36
+ it 'expect to raise error' do
37
+ expect do
38
+ producer.produce(topic: 'test', payload: '', topic_config: { 'invalid': 'invalid' })
39
+ end.to raise_error(Rdkafka::Config::ConfigError)
40
+ end
41
+ end
42
+
43
+ context 'when config is valid' do
44
+ it 'expect to raise error' do
45
+ expect do
46
+ producer.produce(topic: 'test', payload: '', topic_config: { 'acks': 1 }).wait
47
+ end.not_to raise_error
48
+ end
49
+
50
+ context 'when alteration should change behavior' do
51
+ # This is set incorrectly for a reason
52
+ # If alteration would not work, this will hang the spec suite
53
+ let(:producer) do
54
+ rdkafka_producer_config(
55
+ 'message.timeout.ms': 1_000_000,
56
+ :"bootstrap.servers" => "127.0.0.1:9094",
57
+ ).producer
58
+ end
59
+
60
+ it 'expect to give up on delivery fast based on alteration config' do
61
+ expect do
62
+ producer.produce(
63
+ topic: 'produce_config_test',
64
+ payload: 'test',
65
+ topic_config: {
66
+ 'compression.type': 'gzip',
67
+ 'message.timeout.ms': 1
68
+ }
69
+ ).wait
70
+ end.to raise_error(Rdkafka::RdkafkaError, /msg_timed_out/)
71
+ end
72
+ end
73
+ end
74
+ end
75
+
76
+ context "delivery callback" do
77
+ context "with a proc/lambda" do
78
+ it "should set the callback" do
79
+ expect {
80
+ producer.delivery_callback = lambda do |delivery_handle|
81
+ puts delivery_handle
82
+ end
83
+ }.not_to raise_error
84
+ expect(producer.delivery_callback).to respond_to :call
85
+ end
86
+
87
+ it "should call the callback when a message is delivered" do
88
+ @callback_called = false
89
+
90
+ producer.delivery_callback = lambda do |report|
91
+ expect(report).not_to be_nil
92
+ expect(report.label).to eq "label"
93
+ expect(report.partition).to eq 1
94
+ expect(report.offset).to be >= 0
95
+ expect(report.topic_name).to eq "produce_test_topic"
96
+ @callback_called = true
97
+ end
98
+
99
+ # Produce a message
100
+ handle = producer.produce(
101
+ topic: "produce_test_topic",
102
+ payload: "payload",
103
+ key: "key",
104
+ label: "label"
105
+ )
106
+
107
+ expect(handle.label).to eq "label"
108
+
109
+ # Wait for it to be delivered
110
+ handle.wait(max_wait_timeout: 15)
111
+
112
+ # Join the producer thread.
113
+ producer.close
114
+
115
+ # Callback should have been called
116
+ expect(@callback_called).to be true
117
+ end
118
+
119
+ it "should provide handle" do
120
+ @callback_handle = nil
121
+
122
+ producer.delivery_callback = lambda { |_, handle| @callback_handle = handle }
123
+
124
+ # Produce a message
125
+ handle = producer.produce(
126
+ topic: "produce_test_topic",
127
+ payload: "payload",
128
+ key: "key"
129
+ )
130
+
131
+ # Wait for it to be delivered
132
+ handle.wait(max_wait_timeout: 15)
133
+
134
+ # Join the producer thread.
135
+ producer.close
136
+
137
+ expect(handle).to be @callback_handle
138
+ end
139
+ end
140
+
141
+ context "with a callable object" do
142
+ it "should set the callback" do
143
+ callback = Class.new do
144
+ def call(stats); end
145
+ end
146
+ expect {
147
+ producer.delivery_callback = callback.new
148
+ }.not_to raise_error
149
+ expect(producer.delivery_callback).to respond_to :call
150
+ end
151
+
152
+ it "should call the callback when a message is delivered" do
153
+ called_report = []
154
+ callback = Class.new do
155
+ def initialize(called_report)
156
+ @called_report = called_report
157
+ end
158
+
159
+ def call(report)
160
+ @called_report << report
161
+ end
162
+ end
163
+ producer.delivery_callback = callback.new(called_report)
164
+
165
+ # Produce a message
166
+ handle = producer.produce(
167
+ topic: "produce_test_topic",
168
+ payload: "payload",
169
+ key: "key"
170
+ )
171
+
172
+ # Wait for it to be delivered
173
+ handle.wait(max_wait_timeout: 15)
174
+
175
+ # Join the producer thread.
176
+ producer.close
177
+
178
+ # Callback should have been called
179
+ expect(called_report.first).not_to be_nil
180
+ expect(called_report.first.partition).to eq 1
181
+ expect(called_report.first.offset).to be >= 0
182
+ expect(called_report.first.topic_name).to eq "produce_test_topic"
183
+ end
184
+
185
+ it "should provide handle" do
186
+ callback_handles = []
187
+ callback = Class.new do
188
+ def initialize(callback_handles)
189
+ @callback_handles = callback_handles
190
+ end
191
+
192
+ def call(_, handle)
193
+ @callback_handles << handle
194
+ end
195
+ end
196
+ producer.delivery_callback = callback.new(callback_handles)
197
+
198
+ # Produce a message
199
+ handle = producer.produce(
200
+ topic: "produce_test_topic",
201
+ payload: "payload",
202
+ key: "key"
203
+ )
204
+
205
+ # Wait for it to be delivered
206
+ handle.wait(max_wait_timeout: 15)
207
+
208
+ # Join the producer thread.
209
+ producer.close
210
+
211
+ # Callback should have been called
212
+ expect(handle).to be callback_handles.first
213
+ end
214
+ end
215
+
216
+ it "should not accept a callback that's not callable" do
217
+ expect {
218
+ producer.delivery_callback = 'a string'
219
+ }.to raise_error(TypeError)
220
+ end
221
+ end
222
+
223
+ it "should require a topic" do
224
+ expect {
225
+ producer.produce(
226
+ payload: "payload",
227
+ key: "key"
228
+ )
229
+ }.to raise_error ArgumentError, /missing keyword: [\:]?topic/
230
+ end
231
+
232
+ it "should produce a message" do
233
+ # Produce a message
234
+ handle = producer.produce(
235
+ topic: "produce_test_topic",
236
+ payload: "payload",
237
+ key: "key",
238
+ label: "label"
239
+ )
240
+
241
+ # Should be pending at first
242
+ expect(handle.pending?).to be true
243
+ expect(handle.label).to eq "label"
244
+
245
+ # Check delivery handle and report
246
+ report = handle.wait(max_wait_timeout: 5)
247
+ expect(handle.pending?).to be false
248
+ expect(report).not_to be_nil
249
+ expect(report.partition).to eq 1
250
+ expect(report.offset).to be >= 0
251
+ expect(report.label).to eq "label"
252
+
253
+ # Flush and close producer
254
+ producer.flush
255
+ producer.close
256
+
257
+ # Consume message and verify its content
258
+ message = wait_for_message(
259
+ topic: "produce_test_topic",
260
+ delivery_report: report,
261
+ consumer: consumer
262
+ )
263
+ expect(message.partition).to eq 1
264
+ expect(message.payload).to eq "payload"
265
+ expect(message.key).to eq "key"
266
+ # Since api.version.request is on by default we will get
267
+ # the message creation timestamp if it's not set.
268
+ expect(message.timestamp).to be_within(10).of(Time.now)
269
+ end
270
+
271
+ it "should produce a message with a specified partition" do
272
+ # Produce a message
273
+ handle = producer.produce(
274
+ topic: "produce_test_topic",
275
+ payload: "payload partition",
276
+ key: "key partition",
277
+ partition: 1
278
+ )
279
+ report = handle.wait(max_wait_timeout: 5)
280
+
281
+ # Consume message and verify its content
282
+ message = wait_for_message(
283
+ topic: "produce_test_topic",
284
+ delivery_report: report,
285
+ consumer: consumer
286
+ )
287
+ expect(message.partition).to eq 1
288
+ expect(message.key).to eq "key partition"
289
+ end
290
+
291
+ it "should produce a message to the same partition with a similar partition key" do
292
+ # Avoid partitioner collisions.
293
+ while true
294
+ key = ('a'..'z').to_a.shuffle.take(10).join('')
295
+ partition_key = ('a'..'z').to_a.shuffle.take(10).join('')
296
+ partition_count = producer.partition_count('partitioner_test_topic')
297
+ break if (Zlib.crc32(key) % partition_count) != (Zlib.crc32(partition_key) % partition_count)
298
+ end
299
+
300
+ # Produce a message with key, partition_key and key + partition_key
301
+ messages = [{key: key}, {partition_key: partition_key}, {key: key, partition_key: partition_key}]
302
+
303
+ messages = messages.map do |m|
304
+ handle = producer.produce(
305
+ topic: "partitioner_test_topic",
306
+ payload: "payload partition",
307
+ key: m[:key],
308
+ partition_key: m[:partition_key]
309
+ )
310
+ report = handle.wait(max_wait_timeout: 5)
311
+
312
+ wait_for_message(
313
+ topic: "partitioner_test_topic",
314
+ delivery_report: report,
315
+ )
316
+ end
317
+
318
+ expect(messages[0].partition).not_to eq(messages[2].partition)
319
+ expect(messages[1].partition).to eq(messages[2].partition)
320
+ expect(messages[0].key).to eq key
321
+ expect(messages[1].key).to be_nil
322
+ expect(messages[2].key).to eq key
323
+ end
324
+
325
+ it "should produce a message with empty string without crashing" do
326
+ messages = [{key: 'a', partition_key: ''}]
327
+
328
+ messages = messages.map do |m|
329
+ handle = producer.produce(
330
+ topic: "partitioner_test_topic",
331
+ payload: "payload partition",
332
+ key: m[:key],
333
+ partition_key: m[:partition_key]
334
+ )
335
+ report = handle.wait(max_wait_timeout: 5)
336
+
337
+ wait_for_message(
338
+ topic: "partitioner_test_topic",
339
+ delivery_report: report,
340
+ )
341
+ end
342
+
343
+ expect(messages[0].partition).to be >= 0
344
+ expect(messages[0].key).to eq 'a'
345
+ end
346
+
347
+ it "should produce a message with utf-8 encoding" do
348
+ handle = producer.produce(
349
+ topic: "produce_test_topic",
350
+ payload: "Τη γλώσσα μου έδωσαν ελληνική",
351
+ key: "key utf8"
352
+ )
353
+ report = handle.wait(max_wait_timeout: 5)
354
+
355
+ # Consume message and verify its content
356
+ message = wait_for_message(
357
+ topic: "produce_test_topic",
358
+ delivery_report: report,
359
+ consumer: consumer
360
+ )
361
+
362
+ expect(message.partition).to eq 1
363
+ expect(message.payload.force_encoding("utf-8")).to eq "Τη γλώσσα μου έδωσαν ελληνική"
364
+ expect(message.key).to eq "key utf8"
365
+ end
366
+
367
+ it "should produce a message to a non-existing topic with key and partition key" do
368
+ new_topic = "it-#{SecureRandom.uuid}"
369
+
370
+ handle = producer.produce(
371
+ # Needs to be a new topic each time
372
+ topic: new_topic,
373
+ payload: "payload",
374
+ key: "key",
375
+ partition_key: "partition_key",
376
+ label: "label"
377
+ )
378
+
379
+ # Should be pending at first
380
+ expect(handle.pending?).to be true
381
+ expect(handle.label).to eq "label"
382
+
383
+ # Check delivery handle and report
384
+ report = handle.wait(max_wait_timeout: 5)
385
+ expect(handle.pending?).to be false
386
+ expect(report).not_to be_nil
387
+ expect(report.partition).to eq 0
388
+ expect(report.offset).to be >= 0
389
+ expect(report.label).to eq "label"
390
+
391
+ # Flush and close producer
392
+ producer.flush
393
+ producer.close
394
+
395
+ # Consume message and verify its content
396
+ message = wait_for_message(
397
+ topic: new_topic,
398
+ delivery_report: report,
399
+ consumer: consumer
400
+ )
401
+ expect(message.partition).to eq 0
402
+ expect(message.payload).to eq "payload"
403
+ expect(message.key).to eq "key"
404
+ # Since api.version.request is on by default we will get
405
+ # the message creation timestamp if it's not set.
406
+ expect(message.timestamp).to be_within(10).of(Time.now)
407
+ end
408
+
409
+ context "timestamp" do
410
+ it "should raise a type error if not nil, integer or time" do
411
+ expect {
412
+ producer.produce(
413
+ topic: "produce_test_topic",
414
+ payload: "payload timestamp",
415
+ key: "key timestamp",
416
+ timestamp: "10101010"
417
+ )
418
+ }.to raise_error TypeError
419
+ end
420
+
421
+ it "should produce a message with an integer timestamp" do
422
+ handle = producer.produce(
423
+ topic: "produce_test_topic",
424
+ payload: "payload timestamp",
425
+ key: "key timestamp",
426
+ timestamp: 1505069646252
427
+ )
428
+ report = handle.wait(max_wait_timeout: 5)
429
+
430
+ # Consume message and verify its content
431
+ message = wait_for_message(
432
+ topic: "produce_test_topic",
433
+ delivery_report: report,
434
+ consumer: consumer
435
+ )
436
+
437
+ expect(message.partition).to eq 2
438
+ expect(message.key).to eq "key timestamp"
439
+ expect(message.timestamp).to eq Time.at(1505069646, 252_000)
440
+ end
441
+
442
+ it "should produce a message with a time timestamp" do
443
+ handle = producer.produce(
444
+ topic: "produce_test_topic",
445
+ payload: "payload timestamp",
446
+ key: "key timestamp",
447
+ timestamp: Time.at(1505069646, 353_000)
448
+ )
449
+ report = handle.wait(max_wait_timeout: 5)
450
+
451
+ # Consume message and verify its content
452
+ message = wait_for_message(
453
+ topic: "produce_test_topic",
454
+ delivery_report: report,
455
+ consumer: consumer
456
+ )
457
+
458
+ expect(message.partition).to eq 2
459
+ expect(message.key).to eq "key timestamp"
460
+ expect(message.timestamp).to eq Time.at(1505069646, 353_000)
461
+ end
462
+ end
463
+
464
+ it "should produce a message with nil key" do
465
+ handle = producer.produce(
466
+ topic: "produce_test_topic",
467
+ payload: "payload no key"
468
+ )
469
+ report = handle.wait(max_wait_timeout: 5)
470
+
471
+ # Consume message and verify its content
472
+ message = wait_for_message(
473
+ topic: "produce_test_topic",
474
+ delivery_report: report,
475
+ consumer: consumer
476
+ )
477
+
478
+ expect(message.key).to be_nil
479
+ expect(message.payload).to eq "payload no key"
480
+ end
481
+
482
+ it "should produce a message with nil payload" do
483
+ handle = producer.produce(
484
+ topic: "produce_test_topic",
485
+ key: "key no payload"
486
+ )
487
+ report = handle.wait(max_wait_timeout: 5)
488
+
489
+ # Consume message and verify its content
490
+ message = wait_for_message(
491
+ topic: "produce_test_topic",
492
+ delivery_report: report,
493
+ consumer: consumer
494
+ )
495
+
496
+ expect(message.key).to eq "key no payload"
497
+ expect(message.payload).to be_nil
498
+ end
499
+
500
+ it "should produce a message with headers" do
501
+ handle = producer.produce(
502
+ topic: "produce_test_topic",
503
+ payload: "payload headers",
504
+ key: "key headers",
505
+ headers: { foo: :bar, baz: :foobar }
506
+ )
507
+ report = handle.wait(max_wait_timeout: 5)
508
+
509
+ # Consume message and verify its content
510
+ message = wait_for_message(
511
+ topic: "produce_test_topic",
512
+ delivery_report: report,
513
+ consumer: consumer
514
+ )
515
+
516
+ expect(message.payload).to eq "payload headers"
517
+ expect(message.key).to eq "key headers"
518
+ expect(message.headers["foo"]).to eq "bar"
519
+ expect(message.headers["baz"]).to eq "foobar"
520
+ expect(message.headers["foobar"]).to be_nil
521
+ end
522
+
523
+ it "should produce a message with empty headers" do
524
+ handle = producer.produce(
525
+ topic: "produce_test_topic",
526
+ payload: "payload headers",
527
+ key: "key headers",
528
+ headers: {}
529
+ )
530
+ report = handle.wait(max_wait_timeout: 5)
531
+
532
+ # Consume message and verify its content
533
+ message = wait_for_message(
534
+ topic: "produce_test_topic",
535
+ delivery_report: report,
536
+ consumer: consumer
537
+ )
538
+
539
+ expect(message.payload).to eq "payload headers"
540
+ expect(message.key).to eq "key headers"
541
+ expect(message.headers).to be_empty
542
+ end
543
+
544
+ it "should produce message that aren't waited for and not crash" do
545
+ 5.times do
546
+ 200.times do
547
+ producer.produce(
548
+ topic: "produce_test_topic",
549
+ payload: "payload not waiting",
550
+ key: "key not waiting"
551
+ )
552
+ end
553
+
554
+ # Allow some time for a GC run
555
+ sleep 1
556
+ end
557
+
558
+ # Wait for the delivery notifications
559
+ 10.times do
560
+ break if Rdkafka::Producer::DeliveryHandle::REGISTRY.empty?
561
+ sleep 1
562
+ end
563
+ end
564
+
565
+ it "should produce a message in a forked process", skip: defined?(JRUBY_VERSION) && "Kernel#fork is not available" do
566
+ # Fork, produce a message, send the report over a pipe and
567
+ # wait for and check the message in the main process.
568
+ reader, writer = IO.pipe
569
+
570
+ pid = fork do
571
+ reader.close
572
+
573
+ # Avoid sharing the client between processes.
574
+ producer = rdkafka_producer_config.producer
575
+
576
+ handle = producer.produce(
577
+ topic: "produce_test_topic",
578
+ payload: "payload-forked",
579
+ key: "key-forked"
580
+ )
581
+
582
+ report = handle.wait(max_wait_timeout: 5)
583
+
584
+ report_json = JSON.generate(
585
+ "partition" => report.partition,
586
+ "offset" => report.offset,
587
+ "topic_name" => report.topic_name
588
+ )
589
+
590
+ writer.write(report_json)
591
+ writer.close
592
+ producer.flush
593
+ producer.close
594
+ end
595
+ Process.wait(pid)
596
+
597
+ writer.close
598
+ report_hash = JSON.parse(reader.read)
599
+ report = Rdkafka::Producer::DeliveryReport.new(
600
+ report_hash["partition"],
601
+ report_hash["offset"],
602
+ report_hash["topic_name"]
603
+ )
604
+
605
+ reader.close
606
+
607
+ # Consume message and verify its content
608
+ message = wait_for_message(
609
+ topic: "produce_test_topic",
610
+ delivery_report: report,
611
+ consumer: consumer
612
+ )
613
+ expect(message.partition).to eq 0
614
+ expect(message.payload).to eq "payload-forked"
615
+ expect(message.key).to eq "key-forked"
616
+ end
617
+
618
+ it "should raise an error when producing fails" do
619
+ expect(Rdkafka::Bindings).to receive(:rd_kafka_producev).and_return(20)
620
+
621
+ expect {
622
+ producer.produce(
623
+ topic: "produce_test_topic",
624
+ key: "key error"
625
+ )
626
+ }.to raise_error Rdkafka::RdkafkaError
627
+ end
628
+
629
+ it "should raise a timeout error when waiting too long" do
630
+ handle = producer.produce(
631
+ topic: "produce_test_topic",
632
+ payload: "payload timeout",
633
+ key: "key timeout"
634
+ )
635
+ expect {
636
+ handle.wait(max_wait_timeout: 0)
637
+ }.to raise_error Rdkafka::Producer::DeliveryHandle::WaitTimeoutError
638
+
639
+ # Waiting a second time should work
640
+ handle.wait(max_wait_timeout: 5)
641
+ end
642
+
643
+ context "methods that should not be called after a producer has been closed" do
644
+ before do
645
+ producer.close
646
+ end
647
+
648
+ # Affected methods and a non-invalid set of parameters for the method
649
+ {
650
+ :produce => { topic: nil },
651
+ :partition_count => nil,
652
+ }.each do |method, args|
653
+ it "raises an exception if #{method} is called" do
654
+ expect {
655
+ if args.is_a?(Hash)
656
+ producer.public_send(method, **args)
657
+ else
658
+ producer.public_send(method, args)
659
+ end
660
+ }.to raise_exception(Rdkafka::ClosedProducerError, /#{method.to_s}/)
661
+ end
662
+ end
663
+ end
664
+
665
+ context "when not being able to deliver the message" do
666
+ let(:producer) do
667
+ rdkafka_producer_config(
668
+ "bootstrap.servers": "127.0.0.1:9093",
669
+ "message.timeout.ms": 100
670
+ ).producer
671
+ end
672
+
673
+ it "should contain the error in the response when not deliverable" do
674
+ handler = producer.produce(topic: 'produce_test_topic', payload: nil, label: 'na')
675
+ # Wait for the async callbacks and delivery registry to update
676
+ sleep(2)
677
+ expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
678
+ expect(handler.create_result.label).to eq('na')
679
+ end
680
+ end
681
+
682
+ context "when topic does not exist and allow.auto.create.topics is false" do
683
+ let(:producer) do
684
+ rdkafka_producer_config(
685
+ "bootstrap.servers": "127.0.0.1:9092",
686
+ "message.timeout.ms": 100,
687
+ "allow.auto.create.topics": false
688
+ ).producer
689
+ end
690
+
691
+ it "should contain the error in the response when not deliverable" do
692
+ handler = producer.produce(topic: "it-#{SecureRandom.uuid}", payload: nil, label: 'na')
693
+ # Wait for the async callbacks and delivery registry to update
694
+ sleep(2)
695
+ expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
696
+ expect(handler.create_result.error.code).to eq(:msg_timed_out)
697
+ expect(handler.create_result.label).to eq('na')
698
+ end
699
+ end
700
+
701
+ describe '#partition_count' do
702
+ it { expect(producer.partition_count('consume_test_topic')).to eq(3) }
703
+
704
+ context 'when the partition count value is already cached' do
705
+ before do
706
+ producer.partition_count('consume_test_topic')
707
+ allow(::Rdkafka::Metadata).to receive(:new).and_call_original
708
+ end
709
+
710
+ it 'expect not to query it again' do
711
+ producer.partition_count('consume_test_topic')
712
+ expect(::Rdkafka::Metadata).not_to have_received(:new)
713
+ end
714
+ end
715
+
716
+ context 'when the partition count value was cached but time expired' do
717
+ before do
718
+ ::Rdkafka::Producer.partitions_count_cache = Rdkafka::Producer::PartitionsCountCache.new
719
+ allow(::Rdkafka::Metadata).to receive(:new).and_call_original
720
+ end
721
+
722
+ it 'expect to query it again' do
723
+ producer.partition_count('consume_test_topic')
724
+ expect(::Rdkafka::Metadata).to have_received(:new)
725
+ end
726
+ end
727
+
728
+ context 'when the partition count value was cached and time did not expire' do
729
+ before do
730
+ allow(::Process).to receive(:clock_gettime).and_return(0, 29.001)
731
+ producer.partition_count('consume_test_topic')
732
+ allow(::Rdkafka::Metadata).to receive(:new).and_call_original
733
+ end
734
+
735
+ it 'expect not to query it again' do
736
+ producer.partition_count('consume_test_topic')
737
+ expect(::Rdkafka::Metadata).not_to have_received(:new)
738
+ end
739
+ end
740
+ end
741
+
742
+ describe '#flush' do
743
+ it "should return flush when it can flush all outstanding messages or when no messages" do
744
+ producer.produce(
745
+ topic: "produce_test_topic",
746
+ payload: "payload headers",
747
+ key: "key headers",
748
+ headers: {}
749
+ )
750
+
751
+ expect(producer.flush(5_000)).to eq(true)
752
+ end
753
+
754
+ context 'when it cannot flush due to a timeout' do
755
+ let(:producer) do
756
+ rdkafka_producer_config(
757
+ "bootstrap.servers": "127.0.0.1:9093",
758
+ "message.timeout.ms": 2_000
759
+ ).producer
760
+ end
761
+
762
+ after do
763
+ # Allow rdkafka to evict message preventing memory-leak
764
+ sleep(2)
765
+ end
766
+
767
+ it "should return false on flush when cannot deliver and beyond timeout" do
768
+ producer.produce(
769
+ topic: "produce_test_topic",
770
+ payload: "payload headers",
771
+ key: "key headers",
772
+ headers: {}
773
+ )
774
+
775
+ expect(producer.flush(1_000)).to eq(false)
776
+ end
777
+ end
778
+
779
+ context 'when there is a different error' do
780
+ before { allow(Rdkafka::Bindings).to receive(:rd_kafka_flush).and_return(-199) }
781
+
782
+ it 'should raise it' do
783
+ expect { producer.flush }.to raise_error(Rdkafka::RdkafkaError)
784
+ end
785
+ end
786
+ end
787
+
788
+ describe '#purge' do
789
+ context 'when no outgoing messages' do
790
+ it { expect(producer.purge).to eq(true) }
791
+ end
792
+
793
+ context 'when librdkafka purge returns an error' do
794
+ before { expect(Rdkafka::Bindings).to receive(:rd_kafka_purge).and_return(-153) }
795
+
796
+ it 'expect to raise an error' do
797
+ expect { producer.purge }.to raise_error(Rdkafka::RdkafkaError, /retry/)
798
+ end
799
+ end
800
+
801
+ context 'when there are outgoing things in the queue' do
802
+ let(:producer) do
803
+ rdkafka_producer_config(
804
+ "bootstrap.servers": "127.0.0.1:9093",
805
+ "message.timeout.ms": 2_000
806
+ ).producer
807
+ end
808
+
809
+ it "should should purge and move forward" do
810
+ producer.produce(
811
+ topic: "produce_test_topic",
812
+ payload: "payload headers"
813
+ )
814
+
815
+ expect(producer.purge).to eq(true)
816
+ expect(producer.flush(1_000)).to eq(true)
817
+ end
818
+
819
+ it "should materialize the delivery handles" do
820
+ handle = producer.produce(
821
+ topic: "produce_test_topic",
822
+ payload: "payload headers"
823
+ )
824
+
825
+ expect(producer.purge).to eq(true)
826
+
827
+ expect { handle.wait }.to raise_error(Rdkafka::RdkafkaError, /purge_queue/)
828
+ end
829
+
830
+ context "when using delivery_callback" do
831
+ let(:delivery_reports) { [] }
832
+
833
+ let(:delivery_callback) do
834
+ ->(delivery_report) { delivery_reports << delivery_report }
835
+ end
836
+
837
+ before { producer.delivery_callback = delivery_callback }
838
+
839
+ it "should run the callback" do
840
+ handle = producer.produce(
841
+ topic: "produce_test_topic",
842
+ payload: "payload headers"
843
+ )
844
+
845
+ expect(producer.purge).to eq(true)
846
+ # queue purge
847
+ expect(delivery_reports[0].error).to eq(-152)
848
+ end
849
+ end
850
+ end
851
+ end
852
+
853
+ describe '#oauthbearer_set_token' do
854
+ context 'when sasl not configured' do
855
+ it 'should return RD_KAFKA_RESP_ERR__STATE' do
856
+ response = producer.oauthbearer_set_token(
857
+ token: "foo",
858
+ lifetime_ms: Time.now.to_i*1000 + 900 * 1000,
859
+ principal_name: "kafka-cluster"
860
+ )
861
+ expect(response).to eq(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE)
862
+ end
863
+ end
864
+
865
+ context 'when sasl configured' do
866
+ it 'should succeed' do
867
+ producer_sasl = rdkafka_producer_config(
868
+ {
869
+ "security.protocol": "sasl_ssl",
870
+ "sasl.mechanisms": 'OAUTHBEARER'
871
+ }
872
+ ).producer
873
+ response = producer_sasl.oauthbearer_set_token(
874
+ token: "foo",
875
+ lifetime_ms: Time.now.to_i*1000 + 900 * 1000,
876
+ principal_name: "kafka-cluster"
877
+ )
878
+ expect(response).to eq(0)
879
+ end
880
+ end
881
+ end
882
+
883
describe "#produce with headers" do
  it "should produce a message with array headers" do
    delivery_report = producer.produce(
      topic: "consume_test_topic",
      key: "key headers",
      headers: { "version" => ["2.1.3", "2.1.4"], "type" => "String" }
    ).wait

    message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: delivery_report)

    expect(message).to be
    expect(message.key).to eq('key headers')
    expect(message.headers['type']).to eq('String')
    # Array header values must round-trip as arrays.
    expect(message.headers['version']).to eq(["2.1.3", "2.1.4"])
  end

  it "should produce a message with single value headers" do
    delivery_report = producer.produce(
      topic: "consume_test_topic",
      key: "key headers",
      headers: { "version" => "2.1.3", "type" => "String" }
    ).wait

    message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: delivery_report)

    expect(message).to be
    expect(message.key).to eq('key headers')
    expect(message.headers['type']).to eq('String')
    # Scalar header values must round-trip as plain strings.
    expect(message.headers['version']).to eq('2.1.3')
  end
end
922
+
923
# Exercises the partitions count cache TTL refresh path when a statistics
# callback is installed ('statistics.interval.ms' => 1s).
describe 'with active statistics callback' do
  let(:producer) do
    rdkafka_producer_config('statistics.interval.ms': 1_000).producer
  end

  # Snapshot of the class-level partitions count cache.
  # NOTE(review): `let` memoizes, so `count_cache_hash` is captured on first
  # access; for post_statistics_ttl to differ from pre_statistics_ttl this
  # relies on `to_h` exposing a live view of the cache (or the cached entry
  # being mutated in place) — confirm against partitions_count_cache.
  let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
  # TTL (first tuple element) for the topic; memoized before the sleep below.
  let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
  # Same expression, but memoized after the sleep, so it observes the refresh.
  let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }

  context "when using partition key" do
    before do
      # Any callback is enough to enable stats emission on this producer.
      Rdkafka::Config.statistics_callback = ->(*) {}

      # This call will make a blocking request to the metadata cache
      producer.produce(
        topic: "produce_test_topic",
        payload: "payload headers",
        partition_key: "test"
      ).wait

      # Force memoization of the pre-sleep TTL value.
      pre_statistics_ttl

      # We wait to make sure that statistics are triggered and that there is a refresh
      sleep(1.5)

      # Force memoization of the post-sleep TTL value.
      post_statistics_ttl
    end

    it 'expect to update ttl on the partitions count cache via statistics' do
      expect(pre_statistics_ttl).to be < post_statistics_ttl
    end
  end

  context "when not using partition key" do
    before do
      Rdkafka::Config.statistics_callback = ->(*) {}

      # This call will make a blocking request to the metadata cache
      producer.produce(
        topic: "produce_test_topic",
        payload: "payload headers"
      ).wait

      # Without a partition key there is no blocking cache population,
      # so this memoizes nil.
      pre_statistics_ttl

      # We wait to make sure that statistics are triggered and that there is a refresh
      sleep(1.5)

      # This will anyhow be populated from statistic
      post_statistics_ttl
    end

    it 'expect not to update ttl on the partitions count cache via blocking but via use stats' do
      expect(pre_statistics_ttl).to be_nil
      expect(post_statistics_ttl).not_to be_nil
    end
  end
end
981
+
982
# Exercises the partitions count cache when NO statistics callback is
# installed, even though 'statistics.interval.ms' is set: without a callback
# the stats-driven TTL refresh must not happen.
describe 'without active statistics callback' do
  let(:producer) do
    rdkafka_producer_config('statistics.interval.ms': 1_000).producer
  end

  # Snapshot of the class-level partitions count cache.
  # NOTE(review): `let` memoizes; pre/post share the same memoized hash, so
  # observing any difference relies on `to_h` exposing a live view of the
  # cache — confirm against partitions_count_cache implementation.
  let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
  # TTL (first tuple element) for the topic, memoized before the sleep below.
  let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
  # Same expression, memoized after the sleep.
  let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }

  context "when using partition key" do
    before do
      # This call will make a blocking request to the metadata cache
      producer.produce(
        topic: "produce_test_topic",
        payload: "payload headers",
        partition_key: "test"
      ).wait

      # Force memoization of the pre-sleep TTL value.
      pre_statistics_ttl

      # We wait to make sure that statistics are triggered and that there is a refresh
      sleep(1.5)

      # Force memoization of the post-sleep TTL value.
      post_statistics_ttl
    end

    it 'expect not to update ttl on the partitions count cache via statistics' do
      expect(pre_statistics_ttl).to eq post_statistics_ttl
    end
  end

  context "when not using partition key" do
    before do
      # This call will make a blocking request to the metadata cache
      producer.produce(
        topic: "produce_test_topic",
        payload: "payload headers"
      ).wait

      pre_statistics_ttl

      # We wait to make sure that statistics are triggered and that there is a refresh
      sleep(1.5)

      # This should not be populated because stats are not in use
      post_statistics_ttl
    end

    it 'expect not to update ttl on the partitions count cache via anything' do
      expect(pre_statistics_ttl).to be_nil
      expect(post_statistics_ttl).to be_nil
    end
  end
end
1036
+
1037
# Smoke test: closing producers from short-lived fibers (with a GC pass in
# between) must not crash the Ruby interpreter.
describe 'with other fiber closing' do
  context 'when we create many fibers and close producer in some of them' do
    it 'expect not to crash ruby' do
      10.times do
        fiber_producer = rdkafka_producer_config.producer

        Fiber.new do
          GC.start
          fiber_producer.close
        end.resume
      end
    end
  end
end
1051
+
1052
+ let(:producer) { rdkafka_producer_config.producer }
1053
+ let(:all_partitioners) { %w(random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random) }
1054
+
1055
+ describe "partitioner behavior through producer API" do
1056
context "testing all partitioners with same key" do
  it "should not return partition 0 for all partitioners" do
    test_key = "test-key-123"

    # Map each partitioner name to the partition it picked for the key.
    results = all_partitioners.each_with_object({}) do |partitioner, acc|
      report = producer.produce(
        topic: "partitioner_test_topic",
        payload: "test payload",
        partition_key: test_key,
        partitioner: partitioner
      ).wait(max_wait_timeout: 5)

      acc[partitioner] = report.partition
    end

    # Should not all be the same partition (especially not all 0)
    unique_partitions = results.values.uniq
    expect(unique_partitions.size).to be > 1
  end
end
1078
+
1079
context "empty string partition key" do
  it "should produce message with empty partition key without crashing and go to partition 0 for all partitioners" do
    all_partitioners.each do |partitioner|
      report = producer.produce(
        topic: "partitioner_test_topic",
        payload: "test payload",
        key: "test-key",
        partition_key: "",
        partitioner: partitioner
      ).wait(max_wait_timeout: 5)

      # NOTE(review): despite the description, only a valid (>= 0) partition
      # is asserted here, not specifically partition 0.
      expect(report.partition).to be >= 0
    end
  end
end

context "nil partition key" do
  it "should handle nil partition key gracefully" do
    report = producer.produce(
      topic: "partitioner_test_topic",
      payload: "test payload",
      key: "test-key",
      partition_key: nil
    ).wait(max_wait_timeout: 5)

    expect(report.partition).to be >= 0
    expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
  end
end
1110
+
1111
context "various key types and lengths with different partitioners" do
  # Data-driven: same assertion shape for short, long, and unicode keys.
  {
    "very short" => "a",
    "very long" => "a" * 1000,
    "unicode" => "测试键值🚀"
  }.each do |label, sample_key|
    it "should handle #{label} keys with all partitioners" do
      all_partitioners.each do |partitioner|
        report = producer.produce(
          topic: "partitioner_test_topic",
          payload: "test payload",
          partition_key: sample_key,
          partitioner: partitioner
        ).wait(max_wait_timeout: 5)

        expect(report.partition).to be >= 0
        expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
      end
    end
  end
end
1161
+
1162
context "consistency testing for deterministic partitioners" do
  %w(consistent murmur2 fnv1a).each do |partitioner|
    it "should consistently route same partition key to same partition with #{partitioner}" do
      partition_key = "consistent-test-key"

      # Produce several messages with the same partition key; a hashing
      # partitioner must always choose the same partition.
      partitions = Array.new(5) do
        producer.produce(
          topic: "partitioner_test_topic",
          payload: "test payload #{Time.now.to_f}",
          partition_key: partition_key,
          partitioner: partitioner
        ).wait(max_wait_timeout: 5).partition
      end

      expect(partitions.uniq.size).to eq(1)
    end
  end
end

context "randomness testing for random partitioners" do
  %w(random consistent_random murmur2_random fnv1a_random).each do |partitioner|
    it "should potentially distribute across partitions with #{partitioner}" do
      # Note: random partitioners might still return same value by chance
      partition_key = "random-test-key"

      partitions = Array.new(10) do
        producer.produce(
          topic: "partitioner_test_topic",
          payload: "test payload #{Time.now.to_f}",
          partition_key: partition_key,
          partitioner: partitioner
        ).wait(max_wait_timeout: 5).partition
      end

      # Just ensure they're valid partitions
      partitions.each do |partition|
        expect(partition).to be >= 0
        expect(partition).to be < producer.partition_count("partitioner_test_topic")
      end
    end
  end
end
1211
+
1212
context "comparing different partitioners with same key" do
  it "should route different partition keys to potentially different partitions" do
    keys = %w(key1 key2 key3 key4 key5)

    all_partitioners.each do |partitioner|
      chosen_partitions = keys.map do |key|
        producer.produce(
          topic: "partitioner_test_topic",
          payload: "test payload",
          partition_key: key,
          partitioner: partitioner
        ).wait(max_wait_timeout: 5).partition
      end.uniq

      # Should distribute across multiple partitions for most partitioners
      # (though some might hash all keys to same partition by chance)
      expect(chosen_partitions.all? { |p| p >= 0 && p < producer.partition_count("partitioner_test_topic") }).to be true
    end
  end
end

context "partition key vs regular key behavior" do
  it "should use partition key for partitioning when both key and partition_key are provided" do
    # Use keys that would hash to different partitions
    regular_key = "regular-key-123"
    partition_key = "partition-key-456"

    # Message with both keys
    handle_both = producer.produce(
      topic: "partitioner_test_topic",
      payload: "test payload 1",
      key: regular_key,
      partition_key: partition_key
    )

    # Message with only partition key (should go to same partition)
    handle_partition_only = producer.produce(
      topic: "partitioner_test_topic",
      payload: "test payload 2",
      partition_key: partition_key
    )

    # Message with only regular key (should go to different partition)
    handle_key_only = producer.produce(
      topic: "partitioner_test_topic",
      payload: "test payload 3",
      key: regular_key
    )

    report_both = handle_both.wait(max_wait_timeout: 5)
    report_partition_only = handle_partition_only.wait(max_wait_timeout: 5)
    report_key_only = handle_key_only.wait(max_wait_timeout: 5)

    # Messages 1 and 2 should go to same partition (both use partition_key)
    expect(report_both.partition).to eq(report_partition_only.partition)

    # NOTE(review): probabilistic — both keys could hash to the same
    # partition by chance, which would make this assertion flaky.
    expect(report_key_only.partition).not_to eq(report_both.partition)
  end
end
1275
+
1276
context "edge case combinations with different partitioners" do
  it "should handle nil partition key with all partitioners" do
    all_partitioners.each do |partitioner|
      report = producer.produce(
        topic: "partitioner_test_topic",
        payload: "test payload",
        key: "test-key",
        partition_key: nil,
        partitioner: partitioner
      ).wait(max_wait_timeout: 5)

      expect(report.partition).to be >= 0
      expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
    end
  end

  it "should handle whitespace-only partition key with all partitioners" do
    all_partitioners.each do |partitioner|
      report = producer.produce(
        topic: "partitioner_test_topic",
        payload: "test payload",
        partition_key: "   ",
        partitioner: partitioner
      ).wait(max_wait_timeout: 5)

      expect(report.partition).to be >= 0
      expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
    end
  end

  it "should handle newline characters in partition key with all partitioners" do
    all_partitioners.each do |partitioner|
      report = producer.produce(
        topic: "partitioner_test_topic",
        payload: "test payload",
        partition_key: "key\nwith\nnewlines",
        partitioner: partitioner
      ).wait(max_wait_timeout: 5)

      expect(report.partition).to be >= 0
      expect(report.partition).to be < producer.partition_count("partitioner_test_topic")
    end
  end
end
1323
+
1324
context "debugging partitioner issues" do
  it "should show if all partitioners return 0 (indicating a problem)" do
    test_key = "debug-test-key"

    partitions = all_partitioners.map do |partitioner|
      producer.produce(
        topic: "partitioner_test_topic",
        payload: "debug payload",
        partition_key: test_key,
        partitioner: partitioner
      ).wait(max_wait_timeout: 5).partition
    end

    # If every single partitioner landed on partition 0, partitioner
    # selection is being ignored.
    expect(partitions.count(0)).to be < all_partitioners.size
  end
end
1344
+ end
1345
+ end