karafka-rdkafka 0.20.0.rc3-x86_64-linux-gnu

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. checksums.yaml +7 -0
  2. data/.github/CODEOWNERS +3 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci_linux_x86_64_gnu.yml +248 -0
  5. data/.github/workflows/ci_macos_arm64.yml +301 -0
  6. data/.github/workflows/push_linux_x86_64_gnu.yml +60 -0
  7. data/.github/workflows/push_ruby.yml +37 -0
  8. data/.github/workflows/verify-action-pins.yml +16 -0
  9. data/.gitignore +15 -0
  10. data/.rspec +2 -0
  11. data/.ruby-gemset +1 -0
  12. data/.ruby-version +1 -0
  13. data/.yardopts +2 -0
  14. data/CHANGELOG.md +323 -0
  15. data/Gemfile +5 -0
  16. data/MIT-LICENSE +22 -0
  17. data/README.md +177 -0
  18. data/Rakefile +96 -0
  19. data/docker-compose.yml +25 -0
  20. data/ext/README.md +19 -0
  21. data/ext/Rakefile +131 -0
  22. data/ext/build_common.sh +361 -0
  23. data/ext/build_linux_x86_64_gnu.sh +306 -0
  24. data/ext/build_macos_arm64.sh +550 -0
  25. data/ext/librdkafka.so +0 -0
  26. data/karafka-rdkafka.gemspec +61 -0
  27. data/lib/rdkafka/abstract_handle.rb +116 -0
  28. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  29. data/lib/rdkafka/admin/config_binding_result.rb +30 -0
  30. data/lib/rdkafka/admin/config_resource_binding_result.rb +18 -0
  31. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  32. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  33. data/lib/rdkafka/admin/create_partitions_handle.rb +30 -0
  34. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  35. data/lib/rdkafka/admin/create_topic_handle.rb +32 -0
  36. data/lib/rdkafka/admin/create_topic_report.rb +24 -0
  37. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  38. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  39. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  40. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  41. data/lib/rdkafka/admin/delete_topic_handle.rb +32 -0
  42. data/lib/rdkafka/admin/delete_topic_report.rb +24 -0
  43. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  44. data/lib/rdkafka/admin/describe_acl_report.rb +24 -0
  45. data/lib/rdkafka/admin/describe_configs_handle.rb +33 -0
  46. data/lib/rdkafka/admin/describe_configs_report.rb +48 -0
  47. data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +33 -0
  48. data/lib/rdkafka/admin/incremental_alter_configs_report.rb +48 -0
  49. data/lib/rdkafka/admin.rb +832 -0
  50. data/lib/rdkafka/bindings.rb +582 -0
  51. data/lib/rdkafka/callbacks.rb +415 -0
  52. data/lib/rdkafka/config.rb +398 -0
  53. data/lib/rdkafka/consumer/headers.rb +79 -0
  54. data/lib/rdkafka/consumer/message.rb +86 -0
  55. data/lib/rdkafka/consumer/partition.rb +57 -0
  56. data/lib/rdkafka/consumer/topic_partition_list.rb +190 -0
  57. data/lib/rdkafka/consumer.rb +663 -0
  58. data/lib/rdkafka/error.rb +201 -0
  59. data/lib/rdkafka/helpers/oauth.rb +58 -0
  60. data/lib/rdkafka/helpers/time.rb +14 -0
  61. data/lib/rdkafka/metadata.rb +115 -0
  62. data/lib/rdkafka/native_kafka.rb +139 -0
  63. data/lib/rdkafka/producer/delivery_handle.rb +48 -0
  64. data/lib/rdkafka/producer/delivery_report.rb +45 -0
  65. data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
  66. data/lib/rdkafka/producer.rb +492 -0
  67. data/lib/rdkafka/version.rb +7 -0
  68. data/lib/rdkafka.rb +54 -0
  69. data/renovate.json +92 -0
  70. data/spec/rdkafka/abstract_handle_spec.rb +117 -0
  71. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  72. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  73. data/spec/rdkafka/admin/create_topic_handle_spec.rb +54 -0
  74. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  75. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  76. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  77. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +54 -0
  78. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  79. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  80. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  81. data/spec/rdkafka/admin_spec.rb +769 -0
  82. data/spec/rdkafka/bindings_spec.rb +222 -0
  83. data/spec/rdkafka/callbacks_spec.rb +20 -0
  84. data/spec/rdkafka/config_spec.rb +258 -0
  85. data/spec/rdkafka/consumer/headers_spec.rb +73 -0
  86. data/spec/rdkafka/consumer/message_spec.rb +139 -0
  87. data/spec/rdkafka/consumer/partition_spec.rb +57 -0
  88. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +248 -0
  89. data/spec/rdkafka/consumer_spec.rb +1299 -0
  90. data/spec/rdkafka/error_spec.rb +95 -0
  91. data/spec/rdkafka/metadata_spec.rb +79 -0
  92. data/spec/rdkafka/native_kafka_spec.rb +130 -0
  93. data/spec/rdkafka/producer/delivery_handle_spec.rb +60 -0
  94. data/spec/rdkafka/producer/delivery_report_spec.rb +25 -0
  95. data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
  96. data/spec/rdkafka/producer/partitions_count_spec.rb +359 -0
  97. data/spec/rdkafka/producer_spec.rb +1234 -0
  98. data/spec/spec_helper.rb +181 -0
  99. metadata +244 -0
data/spec/rdkafka/producer_spec.rb
@@ -0,0 +1,1234 @@
+ # frozen_string_literal: true
+
+ require "zlib"
+
+ describe Rdkafka::Producer do
+   let(:producer) { rdkafka_producer_config.producer }
+   let(:consumer) { rdkafka_consumer_config.consumer }
+
+   after do
+     # Registry should always end up being empty
+     registry = Rdkafka::Producer::DeliveryHandle::REGISTRY
+     expect(registry).to be_empty, registry.inspect
+     producer.close
+     consumer.close
+   end
+
+   describe 'producer without auto-start' do
+     let(:producer) { rdkafka_producer_config.producer(native_kafka_auto_start: false) }
+
+     it 'expect to be able to start it later and close' do
+       producer.start
+       producer.close
+     end
+
+     it 'expect to be able to close it without starting' do
+       producer.close
+     end
+   end
+
+   describe '#name' do
+     it { expect(producer.name).to include('rdkafka#producer-') }
+   end
+
+   describe '#produce with topic config alterations' do
+     context 'when config is not valid' do
+       it 'expect to raise error' do
+         expect do
+           producer.produce(topic: 'test', payload: '', topic_config: { 'invalid': 'invalid' })
+         end.to raise_error(Rdkafka::Config::ConfigError)
+       end
+     end
+
+     context 'when config is valid' do
+       it 'expect not to raise error' do
+         expect do
+           producer.produce(topic: 'test', payload: '', topic_config: { 'acks': 1 }).wait
+         end.not_to raise_error
+       end
+
+       context 'when alteration should change behavior' do
+         # This is set incorrectly for a reason
+         # If alteration would not work, this will hang the spec suite
+         let(:producer) do
+           rdkafka_producer_config(
+             'message.timeout.ms': 1_000_000,
+             :"bootstrap.servers" => "127.0.0.1:9094",
+           ).producer
+         end
+
+         it 'expect to give up on delivery fast based on alteration config' do
+           expect do
+             producer.produce(
+               topic: 'produce_config_test',
+               payload: 'test',
+               topic_config: {
+                 'compression.type': 'gzip',
+                 'message.timeout.ms': 1
+               }
+             ).wait
+           end.to raise_error(Rdkafka::RdkafkaError, /msg_timed_out/)
+         end
+       end
+     end
+   end
+
+   context "delivery callback" do
+     context "with a proc/lambda" do
+       it "should set the callback" do
+         expect {
+           producer.delivery_callback = lambda do |delivery_handle|
+             puts delivery_handle
+           end
+         }.not_to raise_error
+         expect(producer.delivery_callback).to respond_to :call
+       end
+
+       it "should call the callback when a message is delivered" do
+         @callback_called = false
+
+         producer.delivery_callback = lambda do |report|
+           expect(report).not_to be_nil
+           expect(report.label).to eq "label"
+           expect(report.partition).to eq 1
+           expect(report.offset).to be >= 0
+           expect(report.topic_name).to eq "produce_test_topic"
+           @callback_called = true
+         end
+
+         # Produce a message
+         handle = producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload",
+           key: "key",
+           label: "label"
+         )
+
+         expect(handle.label).to eq "label"
+
+         # Wait for it to be delivered
+         handle.wait(max_wait_timeout: 15)
+
+         # Join the producer thread.
+         producer.close
+
+         # Callback should have been called
+         expect(@callback_called).to be true
+       end
+
+       it "should provide handle" do
+         @callback_handle = nil
+
+         producer.delivery_callback = lambda { |_, handle| @callback_handle = handle }
+
+         # Produce a message
+         handle = producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload",
+           key: "key"
+         )
+
+         # Wait for it to be delivered
+         handle.wait(max_wait_timeout: 15)
+
+         # Join the producer thread.
+         producer.close
+
+         expect(handle).to be @callback_handle
+       end
+     end
+
+     context "with a callable object" do
+       it "should set the callback" do
+         callback = Class.new do
+           def call(stats); end
+         end
+         expect {
+           producer.delivery_callback = callback.new
+         }.not_to raise_error
+         expect(producer.delivery_callback).to respond_to :call
+       end
+
+       it "should call the callback when a message is delivered" do
+         called_report = []
+         callback = Class.new do
+           def initialize(called_report)
+             @called_report = called_report
+           end
+
+           def call(report)
+             @called_report << report
+           end
+         end
+         producer.delivery_callback = callback.new(called_report)
+
+         # Produce a message
+         handle = producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload",
+           key: "key"
+         )
+
+         # Wait for it to be delivered
+         handle.wait(max_wait_timeout: 15)
+
+         # Join the producer thread.
+         producer.close
+
+         # Callback should have been called
+         expect(called_report.first).not_to be_nil
+         expect(called_report.first.partition).to eq 1
+         expect(called_report.first.offset).to be >= 0
+         expect(called_report.first.topic_name).to eq "produce_test_topic"
+       end
+
+       it "should provide handle" do
+         callback_handles = []
+         callback = Class.new do
+           def initialize(callback_handles)
+             @callback_handles = callback_handles
+           end
+
+           def call(_, handle)
+             @callback_handles << handle
+           end
+         end
+         producer.delivery_callback = callback.new(callback_handles)
+
+         # Produce a message
+         handle = producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload",
+           key: "key"
+         )
+
+         # Wait for it to be delivered
+         handle.wait(max_wait_timeout: 15)
+
+         # Join the producer thread.
+         producer.close
+
+         # Callback should have been called
+         expect(handle).to be callback_handles.first
+       end
+     end
+
+     it "should not accept a callback that's not callable" do
+       expect {
+         producer.delivery_callback = 'a string'
+       }.to raise_error(TypeError)
+     end
+   end
+
+   it "should require a topic" do
+     expect {
+       producer.produce(
+         payload: "payload",
+         key: "key"
+       )
+     }.to raise_error ArgumentError, /missing keyword: [\:]?topic/
+   end
+
+   it "should produce a message" do
+     # Produce a message
+     handle = producer.produce(
+       topic: "produce_test_topic",
+       payload: "payload",
+       key: "key",
+       label: "label"
+     )
+
+     # Should be pending at first
+     expect(handle.pending?).to be true
+     expect(handle.label).to eq "label"
+
+     # Check delivery handle and report
+     report = handle.wait(max_wait_timeout: 5)
+     expect(handle.pending?).to be false
+     expect(report).not_to be_nil
+     expect(report.partition).to eq 1
+     expect(report.offset).to be >= 0
+     expect(report.label).to eq "label"
+
+     # Flush and close producer
+     producer.flush
+     producer.close
+
+     # Consume message and verify its content
+     message = wait_for_message(
+       topic: "produce_test_topic",
+       delivery_report: report,
+       consumer: consumer
+     )
+     expect(message.partition).to eq 1
+     expect(message.payload).to eq "payload"
+     expect(message.key).to eq "key"
+     # Since api.version.request is on by default we will get
+     # the message creation timestamp if it's not set.
+     expect(message.timestamp).to be_within(10).of(Time.now)
+   end
+
+   it "should produce a message with a specified partition" do
+     # Produce a message
+     handle = producer.produce(
+       topic: "produce_test_topic",
+       payload: "payload partition",
+       key: "key partition",
+       partition: 1
+     )
+     report = handle.wait(max_wait_timeout: 5)
+
+     # Consume message and verify its content
+     message = wait_for_message(
+       topic: "produce_test_topic",
+       delivery_report: report,
+       consumer: consumer
+     )
+     expect(message.partition).to eq 1
+     expect(message.key).to eq "key partition"
+   end
+
+   it "should produce a message to the same partition with a similar partition key" do
+     # Avoid partitioner collisions.
+     while true
+       key = ('a'..'z').to_a.shuffle.take(10).join('')
+       partition_key = ('a'..'z').to_a.shuffle.take(10).join('')
+       partition_count = producer.partition_count('partitioner_test_topic')
+       break if (Zlib.crc32(key) % partition_count) != (Zlib.crc32(partition_key) % partition_count)
+     end
+
+     # Produce a message with key, partition_key and key + partition_key
+     messages = [{key: key}, {partition_key: partition_key}, {key: key, partition_key: partition_key}]
+
+     messages = messages.map do |m|
+       handle = producer.produce(
+         topic: "partitioner_test_topic",
+         payload: "payload partition",
+         key: m[:key],
+         partition_key: m[:partition_key]
+       )
+       report = handle.wait(max_wait_timeout: 5)
+
+       wait_for_message(
+         topic: "partitioner_test_topic",
+         delivery_report: report,
+       )
+     end
+
+     expect(messages[0].partition).not_to eq(messages[2].partition)
+     expect(messages[1].partition).to eq(messages[2].partition)
+     expect(messages[0].key).to eq key
+     expect(messages[1].key).to be_nil
+     expect(messages[2].key).to eq key
+   end
+
+   it "should produce a message with empty string without crashing" do
+     messages = [{key: 'a', partition_key: ''}]
+
+     messages = messages.map do |m|
+       handle = producer.produce(
+         topic: "partitioner_test_topic",
+         payload: "payload partition",
+         key: m[:key],
+         partition_key: m[:partition_key]
+       )
+       report = handle.wait(max_wait_timeout: 5)
+
+       wait_for_message(
+         topic: "partitioner_test_topic",
+         delivery_report: report,
+       )
+     end
+
+     expect(messages[0].partition).to eq 0
+     expect(messages[0].key).to eq 'a'
+   end
+
+   it "should produce a message with utf-8 encoding" do
+     handle = producer.produce(
+       topic: "produce_test_topic",
+       payload: "Τη γλώσσα μου έδωσαν ελληνική",
+       key: "key utf8"
+     )
+     report = handle.wait(max_wait_timeout: 5)
+
+     # Consume message and verify its content
+     message = wait_for_message(
+       topic: "produce_test_topic",
+       delivery_report: report,
+       consumer: consumer
+     )
+
+     expect(message.partition).to eq 1
+     expect(message.payload.force_encoding("utf-8")).to eq "Τη γλώσσα μου έδωσαν ελληνική"
+     expect(message.key).to eq "key utf8"
+   end
+
+   it "should produce a message to a non-existing topic with key and partition key" do
+     new_topic = "it-#{SecureRandom.uuid}"
+
+     handle = producer.produce(
+       # Needs to be a new topic each time
+       topic: new_topic,
+       payload: "payload",
+       key: "key",
+       partition_key: "partition_key",
+       label: "label"
+     )
+
+     # Should be pending at first
+     expect(handle.pending?).to be true
+     expect(handle.label).to eq "label"
+
+     # Check delivery handle and report
+     report = handle.wait(max_wait_timeout: 5)
+     expect(handle.pending?).to be false
+     expect(report).not_to be_nil
+     expect(report.partition).to eq 0
+     expect(report.offset).to be >= 0
+     expect(report.label).to eq "label"
+
+     # Flush and close producer
+     producer.flush
+     producer.close
+
+     # Consume message and verify its content
+     message = wait_for_message(
+       topic: new_topic,
+       delivery_report: report,
+       consumer: consumer
+     )
+     expect(message.partition).to eq 0
+     expect(message.payload).to eq "payload"
+     expect(message.key).to eq "key"
+     # Since api.version.request is on by default we will get
+     # the message creation timestamp if it's not set.
+     expect(message.timestamp).to be_within(10).of(Time.now)
+   end
+
+   context "timestamp" do
+     it "should raise a type error if not nil, integer or time" do
+       expect {
+         producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload timestamp",
+           key: "key timestamp",
+           timestamp: "10101010"
+         )
+       }.to raise_error TypeError
+     end
+
+     it "should produce a message with an integer timestamp" do
+       handle = producer.produce(
+         topic: "produce_test_topic",
+         payload: "payload timestamp",
+         key: "key timestamp",
+         timestamp: 1505069646252
+       )
+       report = handle.wait(max_wait_timeout: 5)
+
+       # Consume message and verify its content
+       message = wait_for_message(
+         topic: "produce_test_topic",
+         delivery_report: report,
+         consumer: consumer
+       )
+
+       expect(message.partition).to eq 2
+       expect(message.key).to eq "key timestamp"
+       expect(message.timestamp).to eq Time.at(1505069646, 252_000)
+     end
+
+     it "should produce a message with a time timestamp" do
+       handle = producer.produce(
+         topic: "produce_test_topic",
+         payload: "payload timestamp",
+         key: "key timestamp",
+         timestamp: Time.at(1505069646, 353_000)
+       )
+       report = handle.wait(max_wait_timeout: 5)
+
+       # Consume message and verify its content
+       message = wait_for_message(
+         topic: "produce_test_topic",
+         delivery_report: report,
+         consumer: consumer
+       )
+
+       expect(message.partition).to eq 2
+       expect(message.key).to eq "key timestamp"
+       expect(message.timestamp).to eq Time.at(1505069646, 353_000)
+     end
+   end
+
+   it "should produce a message with nil key" do
+     handle = producer.produce(
+       topic: "produce_test_topic",
+       payload: "payload no key"
+     )
+     report = handle.wait(max_wait_timeout: 5)
+
+     # Consume message and verify its content
+     message = wait_for_message(
+       topic: "produce_test_topic",
+       delivery_report: report,
+       consumer: consumer
+     )
+
+     expect(message.key).to be_nil
+     expect(message.payload).to eq "payload no key"
+   end
+
+   it "should produce a message with nil payload" do
+     handle = producer.produce(
+       topic: "produce_test_topic",
+       key: "key no payload"
+     )
+     report = handle.wait(max_wait_timeout: 5)
+
+     # Consume message and verify its content
+     message = wait_for_message(
+       topic: "produce_test_topic",
+       delivery_report: report,
+       consumer: consumer
+     )
+
+     expect(message.key).to eq "key no payload"
+     expect(message.payload).to be_nil
+   end
+
+   it "should produce a message with headers" do
+     handle = producer.produce(
+       topic: "produce_test_topic",
+       payload: "payload headers",
+       key: "key headers",
+       headers: { foo: :bar, baz: :foobar }
+     )
+     report = handle.wait(max_wait_timeout: 5)
+
+     # Consume message and verify its content
+     message = wait_for_message(
+       topic: "produce_test_topic",
+       delivery_report: report,
+       consumer: consumer
+     )
+
+     expect(message.payload).to eq "payload headers"
+     expect(message.key).to eq "key headers"
+     expect(message.headers["foo"]).to eq "bar"
+     expect(message.headers["baz"]).to eq "foobar"
+     expect(message.headers["foobar"]).to be_nil
+   end
+
+   it "should produce a message with empty headers" do
+     handle = producer.produce(
+       topic: "produce_test_topic",
+       payload: "payload headers",
+       key: "key headers",
+       headers: {}
+     )
+     report = handle.wait(max_wait_timeout: 5)
+
+     # Consume message and verify its content
+     message = wait_for_message(
+       topic: "produce_test_topic",
+       delivery_report: report,
+       consumer: consumer
+     )
+
+     expect(message.payload).to eq "payload headers"
+     expect(message.key).to eq "key headers"
+     expect(message.headers).to be_empty
+   end
+
+   it "should produce messages that aren't waited for and not crash" do
+     5.times do
+       200.times do
+         producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload not waiting",
+           key: "key not waiting"
+         )
+       end
+
+       # Allow some time for a GC run
+       sleep 1
+     end
+
+     # Wait for the delivery notifications
+     10.times do
+       break if Rdkafka::Producer::DeliveryHandle::REGISTRY.empty?
+       sleep 1
+     end
+   end
+
+   it "should produce a message in a forked process", skip: defined?(JRUBY_VERSION) && "Kernel#fork is not available" do
+     # Fork, produce a message, send the report over a pipe and
+     # wait for and check the message in the main process.
+     reader, writer = IO.pipe
+
+     pid = fork do
+       reader.close
+
+       # Avoid sharing the client between processes.
+       producer = rdkafka_producer_config.producer
+
+       handle = producer.produce(
+         topic: "produce_test_topic",
+         payload: "payload-forked",
+         key: "key-forked"
+       )
+
+       report = handle.wait(max_wait_timeout: 5)
+
+       report_json = JSON.generate(
+         "partition" => report.partition,
+         "offset" => report.offset,
+         "topic_name" => report.topic_name
+       )
+
+       writer.write(report_json)
+       writer.close
+       producer.flush
+       producer.close
+     end
+     Process.wait(pid)
+
+     writer.close
+     report_hash = JSON.parse(reader.read)
+     report = Rdkafka::Producer::DeliveryReport.new(
+       report_hash["partition"],
+       report_hash["offset"],
+       report_hash["topic_name"]
+     )
+
+     reader.close
+
+     # Consume message and verify its content
+     message = wait_for_message(
+       topic: "produce_test_topic",
+       delivery_report: report,
+       consumer: consumer
+     )
+     expect(message.partition).to eq 0
+     expect(message.payload).to eq "payload-forked"
+     expect(message.key).to eq "key-forked"
+   end
+
+   it "should raise an error when producing fails" do
+     expect(Rdkafka::Bindings).to receive(:rd_kafka_producev).and_return(20)
+
+     expect {
+       producer.produce(
+         topic: "produce_test_topic",
+         key: "key error"
+       )
+     }.to raise_error Rdkafka::RdkafkaError
+   end
+
+   it "should raise a timeout error when waiting too long" do
+     handle = producer.produce(
+       topic: "produce_test_topic",
+       payload: "payload timeout",
+       key: "key timeout"
+     )
+     expect {
+       handle.wait(max_wait_timeout: 0)
+     }.to raise_error Rdkafka::Producer::DeliveryHandle::WaitTimeoutError
+
+     # Waiting a second time should work
+     handle.wait(max_wait_timeout: 5)
+   end
+
+   context "methods that should not be called after a producer has been closed" do
+     before do
+       producer.close
+     end
+
+     # Affected methods and a non-invalid set of parameters for the method
+     {
+       :produce => { topic: nil },
+       :partition_count => nil,
+     }.each do |method, args|
+       it "raises an exception if #{method} is called" do
+         expect {
+           if args.is_a?(Hash)
+             producer.public_send(method, **args)
+           else
+             producer.public_send(method, args)
+           end
+         }.to raise_exception(Rdkafka::ClosedProducerError, /#{method.to_s}/)
+       end
+     end
+   end
+
+   context "when not being able to deliver the message" do
+     let(:producer) do
+       rdkafka_producer_config(
+         "bootstrap.servers": "127.0.0.1:9093",
+         "message.timeout.ms": 100
+       ).producer
+     end
+
+     it "should contain the error in the response when not deliverable" do
+       handler = producer.produce(topic: 'produce_test_topic', payload: nil, label: 'na')
+       # Wait for the async callbacks and delivery registry to update
+       sleep(2)
+       expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
+       expect(handler.create_result.label).to eq('na')
+     end
+   end
+
+   context "when topic does not exist and allow.auto.create.topics is false" do
+     let(:producer) do
+       rdkafka_producer_config(
+         "bootstrap.servers": "127.0.0.1:9092",
+         "message.timeout.ms": 100,
+         "allow.auto.create.topics": false
+       ).producer
+     end
+
+     it "should contain the error in the response when not deliverable" do
+       handler = producer.produce(topic: "it-#{SecureRandom.uuid}", payload: nil, label: 'na')
+       # Wait for the async callbacks and delivery registry to update
+       sleep(2)
+       expect(handler.create_result.error).to be_a(Rdkafka::RdkafkaError)
+       expect(handler.create_result.error.code).to eq(:msg_timed_out)
+       expect(handler.create_result.label).to eq('na')
+     end
+   end
+
+   describe '#partition_count' do
+     it { expect(producer.partition_count('example_topic')).to eq(1) }
+
+     context 'when the partition count value is already cached' do
+       before do
+         producer.partition_count('example_topic')
+         allow(::Rdkafka::Metadata).to receive(:new).and_call_original
+       end
+
+       it 'expect not to query it again' do
+         producer.partition_count('example_topic')
+         expect(::Rdkafka::Metadata).not_to have_received(:new)
+       end
+     end
+
+     context 'when the partition count value was cached but time expired' do
+       before do
+         ::Rdkafka::Producer.partitions_count_cache = Rdkafka::Producer::PartitionsCountCache.new
+         allow(::Rdkafka::Metadata).to receive(:new).and_call_original
+       end
+
+       it 'expect to query it again' do
+         producer.partition_count('example_topic')
+         expect(::Rdkafka::Metadata).to have_received(:new)
+       end
+     end
+
+     context 'when the partition count value was cached and time did not expire' do
+       before do
+         allow(::Process).to receive(:clock_gettime).and_return(0, 29.001)
+         producer.partition_count('example_topic')
+         allow(::Rdkafka::Metadata).to receive(:new).and_call_original
+       end
+
+       it 'expect not to query it again' do
+         producer.partition_count('example_topic')
+         expect(::Rdkafka::Metadata).not_to have_received(:new)
+       end
+     end
+   end
+
+   describe 'metadata fetch request recovery' do
+     subject(:partition_count) { producer.partition_count('example_topic') }
+
+     describe 'metadata initialization recovery' do
+       context 'when all good' do
+         it { expect(partition_count).to eq(1) }
+       end
+
+       context 'when we fail for the first time with handled error' do
+         before do
+           raised = false
+
+           allow(Rdkafka::Bindings).to receive(:rd_kafka_metadata).and_wrap_original do |m, *args|
+             if raised
+               m.call(*args)
+             else
+               raised = true
+               -185
+             end
+           end
+         end
+
+         it { expect(partition_count).to eq(1) }
+       end
+     end
+   end
+
+   describe '#flush' do
+     it "should return true when it can flush all outstanding messages or when no messages" do
+       producer.produce(
+         topic: "produce_test_topic",
+         payload: "payload headers",
+         key: "key headers",
+         headers: {}
+       )
+
+       expect(producer.flush(5_000)).to eq(true)
+     end
+
+     context 'when it cannot flush due to a timeout' do
+       let(:producer) do
+         rdkafka_producer_config(
+           "bootstrap.servers": "127.0.0.1:9093",
+           "message.timeout.ms": 2_000
+         ).producer
+       end
+
+       after do
+         # Allow rdkafka to evict message preventing memory-leak
+         sleep(2)
+       end
+
+       it "should return false on flush when cannot deliver and beyond timeout" do
+         producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload headers",
+           key: "key headers",
+           headers: {}
+         )
+
+         expect(producer.flush(1_000)).to eq(false)
+       end
+     end
+
+     context 'when there is a different error' do
+       before { allow(Rdkafka::Bindings).to receive(:rd_kafka_flush).and_return(-199) }
+
+       it 'should raise it' do
+         expect { producer.flush }.to raise_error(Rdkafka::RdkafkaError)
+       end
+     end
+   end
+
+   describe '#purge' do
+     context 'when no outgoing messages' do
+       it { expect(producer.purge).to eq(true) }
+     end
+
+     context 'when librdkafka purge returns an error' do
+       before { expect(Rdkafka::Bindings).to receive(:rd_kafka_purge).and_return(-153) }
+
+       it 'expect to raise an error' do
+         expect { producer.purge }.to raise_error(Rdkafka::RdkafkaError, /retry/)
+       end
+     end
+
+     context 'when there are outgoing things in the queue' do
+       let(:producer) do
+         rdkafka_producer_config(
+           "bootstrap.servers": "127.0.0.1:9093",
+           "message.timeout.ms": 2_000
+         ).producer
+       end
+
+       it "should purge and move forward" do
+         producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload headers"
+         )
+
+         expect(producer.purge).to eq(true)
+         expect(producer.flush(1_000)).to eq(true)
+       end
+
+       it "should materialize the delivery handles" do
+         handle = producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload headers"
+         )
+
+         expect(producer.purge).to eq(true)
+
+         expect { handle.wait }.to raise_error(Rdkafka::RdkafkaError, /purge_queue/)
+       end
+
+       context "when using delivery_callback" do
+         let(:delivery_reports) { [] }
+
+         let(:delivery_callback) do
+           ->(delivery_report) { delivery_reports << delivery_report }
+         end
+
+         before { producer.delivery_callback = delivery_callback }
+
+         it "should run the callback" do
+           handle = producer.produce(
+             topic: "produce_test_topic",
+             payload: "payload headers"
+           )
+
+           expect(producer.purge).to eq(true)
+           # queue purge
+           expect(delivery_reports[0].error).to eq(-152)
+         end
+       end
+     end
+   end
+
+   context 'when working with transactions' do
+     let(:producer) do
+       rdkafka_producer_config(
+         'transactional.id': SecureRandom.uuid,
+         'transaction.timeout.ms': 5_000
+       ).producer
+     end
+
+     it 'expect not to allow to produce without transaction init' do
+       expect do
+         producer.produce(topic: 'produce_test_topic', payload: 'data')
+       end.to raise_error(Rdkafka::RdkafkaError, /Erroneous state \(state\)/)
+     end
+
+     it 'expect to raise error when transactions are initialized but producing not in one' do
+       producer.init_transactions
+
+       expect do
+         producer.produce(topic: 'produce_test_topic', payload: 'data')
+       end.to raise_error(Rdkafka::RdkafkaError, /Erroneous state \(state\)/)
+     end
+
+     it 'expect to allow to produce within a transaction, finalize and ship data' do
+       producer.init_transactions
+       producer.begin_transaction
+       handle1 = producer.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
+       handle2 = producer.produce(topic: 'example_topic', payload: 'data2', partition: 0)
+       producer.commit_transaction
+
+       report1 = handle1.wait(max_wait_timeout: 15)
+       report2 = handle2.wait(max_wait_timeout: 15)
+
+       message1 = wait_for_message(
+         topic: "produce_test_topic",
+         delivery_report: report1,
+         consumer: consumer
+       )
+
+       expect(message1.partition).to eq 1
+       expect(message1.payload).to eq "data1"
+       expect(message1.timestamp).to be_within(10).of(Time.now)
+
+       message2 = wait_for_message(
+         topic: "example_topic",
+         delivery_report: report2,
+         consumer: consumer
+       )
+
+       expect(message2.partition).to eq 0
+       expect(message2.payload).to eq "data2"
+       expect(message2.timestamp).to be_within(10).of(Time.now)
+     end
+
+     it 'expect not to send data and propagate purge queue error on abort' do
+       producer.init_transactions
+       producer.begin_transaction
+       handle1 = producer.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
+       handle2 = producer.produce(topic: 'example_topic', payload: 'data2', partition: 0)
+       producer.abort_transaction
+
+       expect { handle1.wait(max_wait_timeout: 15) }
+         .to raise_error(Rdkafka::RdkafkaError, /Purged in queue \(purge_queue\)/)
+       expect { handle2.wait(max_wait_timeout: 15) }
+         .to raise_error(Rdkafka::RdkafkaError, /Purged in queue \(purge_queue\)/)
+     end
+
+     it 'expect to have a non-retryable, non-abortable and non-fatal error on abort' do
+       producer.init_transactions
+       producer.begin_transaction
+       handle = producer.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
+       producer.abort_transaction
+
+       response = handle.wait(raise_response_error: false)
+
+       expect(response.error).to be_a(Rdkafka::RdkafkaError)
+       expect(response.error.retryable?).to eq(false)
+       expect(response.error.fatal?).to eq(false)
+       expect(response.error.abortable?).to eq(false)
+     end
+
+     context 'fencing against previous active producer with same transactional id' do
+       let(:transactional_id) { SecureRandom.uuid }
+
+       let(:producer1) do
+         rdkafka_producer_config(
+           'transactional.id': transactional_id,
+           'transaction.timeout.ms': 10_000
+         ).producer
+       end
+
+       let(:producer2) do
+         rdkafka_producer_config(
+           'transactional.id': transactional_id,
+           'transaction.timeout.ms': 10_000
+         ).producer
+       end
+
+       after do
+         producer1.close
+         producer2.close
+       end
+
+       it 'expect older producer not to be able to commit when fenced out' do
+         producer1.init_transactions
+         producer1.begin_transaction
+         producer1.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
+
+         producer2.init_transactions
+         producer2.begin_transaction
+         producer2.produce(topic: 'produce_test_topic', payload: 'data1', partition: 1)
+
+         expect { producer1.commit_transaction }
+           .to raise_error(Rdkafka::RdkafkaError, /This instance has been fenced/)
+
+         error = false
+
+         begin
+           producer1.commit_transaction
+         rescue Rdkafka::RdkafkaError => e
+           error = e
+         end
+
+         expect(error.fatal?).to eq(true)
+         expect(error.abortable?).to eq(false)
+         expect(error.retryable?).to eq(false)
+
+         expect { producer2.commit_transaction }.not_to raise_error
+       end
+     end
+
+     context 'when having a consumer with tpls for exactly once semantics' do
+       let(:tpl) do
+         producer.produce(topic: 'consume_test_topic', payload: 'data1', partition: 0).wait
+         result = producer.produce(topic: 'consume_test_topic', payload: 'data1', partition: 0).wait
+
+         Rdkafka::Consumer::TopicPartitionList.new.tap do |list|
+           list.add_topic_and_partitions_with_offsets("consume_test_topic", 0 => result.offset + 1)
+         end
+       end
+
+       before do
+         consumer.subscribe("consume_test_topic")
+         wait_for_assignment(consumer)
+         producer.init_transactions
+         producer.begin_transaction
+       end
+
+       after { consumer.unsubscribe }
+
+       it 'expect to store offsets and not crash' do
+         producer.send_offsets_to_transaction(consumer, tpl)
+         producer.commit_transaction
+       end
+     end
+   end
+
+   describe '#oauthbearer_set_token' do
+     context 'when sasl not configured' do
+       it 'should return RD_KAFKA_RESP_ERR__STATE' do
+         response = producer.oauthbearer_set_token(
+           token: "foo",
+           lifetime_ms: Time.now.to_i*1000 + 900 * 1000,
+           principal_name: "kafka-cluster"
+         )
+         expect(response).to eq(Rdkafka::Bindings::RD_KAFKA_RESP_ERR__STATE)
+       end
+     end
+
+     context 'when sasl configured' do
+       it 'should succeed' do
+         producer_sasl = rdkafka_producer_config(
+           {
+             "security.protocol": "sasl_ssl",
+             "sasl.mechanisms": 'OAUTHBEARER'
+           }
+         ).producer
+         response = producer_sasl.oauthbearer_set_token(
+           token: "foo",
+           lifetime_ms: Time.now.to_i*1000 + 900 * 1000,
+           principal_name: "kafka-cluster"
+         )
+         expect(response).to eq(0)
+       end
+     end
+   end
+
+   describe "#produce with headers" do
+     it "should produce a message with array headers" do
+       headers = {
+         "version" => ["2.1.3", "2.1.4"],
+         "type" => "String"
+       }
+
+       report = producer.produce(
+         topic: "consume_test_topic",
+         key: "key headers",
+         headers: headers
+       ).wait
+
+       message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+       expect(message).to be
+       expect(message.key).to eq('key headers')
+       expect(message.headers['type']).to eq('String')
+       expect(message.headers['version']).to eq(["2.1.3", "2.1.4"])
+     end
+
+     it "should produce a message with single value headers" do
+       headers = {
+         "version" => "2.1.3",
+         "type" => "String"
+       }
+
+       report = producer.produce(
+         topic: "consume_test_topic",
+         key: "key headers",
+         headers: headers
+       ).wait
+
+       message = wait_for_message(topic: "consume_test_topic", consumer: consumer, delivery_report: report)
+       expect(message).to be
+       expect(message.key).to eq('key headers')
+       expect(message.headers['type']).to eq('String')
+       expect(message.headers['version']).to eq('2.1.3')
+     end
+   end
+
+   describe 'with active statistics callback' do
+     let(:producer) do
+       rdkafka_producer_config('statistics.interval.ms': 1_000).producer
+     end
+
+     let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
+     let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+     let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+
+     context "when using partition key" do
+       before do
+         Rdkafka::Config.statistics_callback = ->(*) {}
+
+         # This call will make a blocking request to the metadata cache
+         producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload headers",
+           partition_key: "test"
+         ).wait
+
+         pre_statistics_ttl
+
+         # We wait to make sure that statistics are triggered and that there is a refresh
+         sleep(1.5)
+
+         post_statistics_ttl
+       end
+
+       it 'expect to update ttl on the partitions count cache via statistics' do
+         expect(pre_statistics_ttl).to be < post_statistics_ttl
+       end
+     end
+
+     context "when not using partition key" do
+       before do
+         Rdkafka::Config.statistics_callback = ->(*) {}
+
+         # This call will make a blocking request to the metadata cache
+         producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload headers"
+         ).wait
+
+         pre_statistics_ttl
+
+         # We wait to make sure that statistics are triggered and that there is a refresh
+         sleep(1.5)
+
+         # This will anyhow be populated from statistics
+         post_statistics_ttl
+       end
+
+       it 'expect not to update ttl on the partitions count cache via blocking but via stats' do
+         expect(pre_statistics_ttl).to be_nil
+         expect(post_statistics_ttl).not_to be_nil
+       end
+     end
+   end
+
+   describe 'without active statistics callback' do
+     let(:producer) do
+       rdkafka_producer_config('statistics.interval.ms': 1_000).producer
+     end
+
+     let(:count_cache_hash) { described_class.partitions_count_cache.to_h }
+     let(:pre_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+     let(:post_statistics_ttl) { count_cache_hash.fetch('produce_test_topic', [])[0] }
+
+     context "when using partition key" do
+       before do
+         # This call will make a blocking request to the metadata cache
+         producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload headers",
+           partition_key: "test"
+         ).wait
+
+         pre_statistics_ttl
+
+         # We wait to make sure that statistics are triggered and that there is a refresh
+         sleep(1.5)
+
+         post_statistics_ttl
+       end
+
+       it 'expect not to update ttl on the partitions count cache via statistics' do
+         expect(pre_statistics_ttl).to eq post_statistics_ttl
+       end
+     end
+
+     context "when not using partition key" do
+       before do
+         # This call will make a blocking request to the metadata cache
+         producer.produce(
+           topic: "produce_test_topic",
+           payload: "payload headers"
+         ).wait
+
+         pre_statistics_ttl
+
+         # We wait to make sure that statistics are triggered and that there is a refresh
+         sleep(1.5)
+
+         # This should not be populated because stats are not in use
+         post_statistics_ttl
+       end
+
+       it 'expect not to update ttl on the partitions count cache via anything' do
+         expect(pre_statistics_ttl).to be_nil
+         expect(post_statistics_ttl).to be_nil
+       end
+     end
+   end
+
+   describe 'with other fiber closing' do
+     context 'when we create many fibers and close producer in some of them' do
+       it 'expect not to crash ruby' do
+         10.times do |i|
+           producer = rdkafka_producer_config.producer
+
+           Fiber.new do
+             GC.start
+             producer.close
+           end.resume
+         end
+       end
+     end
+   end
+ end