karafka-rdkafka 0.20.0-arm64-darwin

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. checksums.yaml +7 -0
  2. data/.github/CODEOWNERS +3 -0
  3. data/.github/FUNDING.yml +1 -0
  4. data/.github/workflows/ci_linux_x86_64_gnu.yml +249 -0
  5. data/.github/workflows/ci_linux_x86_64_musl.yml +205 -0
  6. data/.github/workflows/ci_macos_arm64.yml +306 -0
  7. data/.github/workflows/push_linux_x86_64_gnu.yml +64 -0
  8. data/.github/workflows/push_linux_x86_64_musl.yml +77 -0
  9. data/.github/workflows/push_macos_arm64.yml +54 -0
  10. data/.github/workflows/push_ruby.yml +37 -0
  11. data/.github/workflows/verify-action-pins.yml +16 -0
  12. data/.gitignore +15 -0
  13. data/.rspec +2 -0
  14. data/.ruby-gemset +1 -0
  15. data/.ruby-version +1 -0
  16. data/.yardopts +2 -0
  17. data/CHANGELOG.md +330 -0
  18. data/Gemfile +5 -0
  19. data/MIT-LICENSE +22 -0
  20. data/README.md +177 -0
  21. data/Rakefile +96 -0
  22. data/docker-compose.yml +25 -0
  23. data/ext/README.md +19 -0
  24. data/ext/Rakefile +131 -0
  25. data/ext/build_common.sh +361 -0
  26. data/ext/build_linux_x86_64_gnu.sh +306 -0
  27. data/ext/build_linux_x86_64_musl.sh +763 -0
  28. data/ext/build_macos_arm64.sh +550 -0
  29. data/ext/librdkafka.dylib +0 -0
  30. data/karafka-rdkafka.gemspec +88 -0
  31. data/lib/rdkafka/abstract_handle.rb +116 -0
  32. data/lib/rdkafka/admin/acl_binding_result.rb +51 -0
  33. data/lib/rdkafka/admin/config_binding_result.rb +30 -0
  34. data/lib/rdkafka/admin/config_resource_binding_result.rb +18 -0
  35. data/lib/rdkafka/admin/create_acl_handle.rb +28 -0
  36. data/lib/rdkafka/admin/create_acl_report.rb +24 -0
  37. data/lib/rdkafka/admin/create_partitions_handle.rb +30 -0
  38. data/lib/rdkafka/admin/create_partitions_report.rb +6 -0
  39. data/lib/rdkafka/admin/create_topic_handle.rb +32 -0
  40. data/lib/rdkafka/admin/create_topic_report.rb +24 -0
  41. data/lib/rdkafka/admin/delete_acl_handle.rb +30 -0
  42. data/lib/rdkafka/admin/delete_acl_report.rb +23 -0
  43. data/lib/rdkafka/admin/delete_groups_handle.rb +28 -0
  44. data/lib/rdkafka/admin/delete_groups_report.rb +24 -0
  45. data/lib/rdkafka/admin/delete_topic_handle.rb +32 -0
  46. data/lib/rdkafka/admin/delete_topic_report.rb +24 -0
  47. data/lib/rdkafka/admin/describe_acl_handle.rb +30 -0
  48. data/lib/rdkafka/admin/describe_acl_report.rb +24 -0
  49. data/lib/rdkafka/admin/describe_configs_handle.rb +33 -0
  50. data/lib/rdkafka/admin/describe_configs_report.rb +48 -0
  51. data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +33 -0
  52. data/lib/rdkafka/admin/incremental_alter_configs_report.rb +48 -0
  53. data/lib/rdkafka/admin.rb +832 -0
  54. data/lib/rdkafka/bindings.rb +584 -0
  55. data/lib/rdkafka/callbacks.rb +415 -0
  56. data/lib/rdkafka/config.rb +398 -0
  57. data/lib/rdkafka/consumer/headers.rb +79 -0
  58. data/lib/rdkafka/consumer/message.rb +86 -0
  59. data/lib/rdkafka/consumer/partition.rb +57 -0
  60. data/lib/rdkafka/consumer/topic_partition_list.rb +190 -0
  61. data/lib/rdkafka/consumer.rb +663 -0
  62. data/lib/rdkafka/error.rb +201 -0
  63. data/lib/rdkafka/helpers/oauth.rb +58 -0
  64. data/lib/rdkafka/helpers/time.rb +14 -0
  65. data/lib/rdkafka/metadata.rb +115 -0
  66. data/lib/rdkafka/native_kafka.rb +139 -0
  67. data/lib/rdkafka/producer/delivery_handle.rb +48 -0
  68. data/lib/rdkafka/producer/delivery_report.rb +45 -0
  69. data/lib/rdkafka/producer/partitions_count_cache.rb +216 -0
  70. data/lib/rdkafka/producer.rb +497 -0
  71. data/lib/rdkafka/version.rb +7 -0
  72. data/lib/rdkafka.rb +54 -0
  73. data/renovate.json +92 -0
  74. data/spec/rdkafka/abstract_handle_spec.rb +117 -0
  75. data/spec/rdkafka/admin/create_acl_handle_spec.rb +56 -0
  76. data/spec/rdkafka/admin/create_acl_report_spec.rb +18 -0
  77. data/spec/rdkafka/admin/create_topic_handle_spec.rb +54 -0
  78. data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
  79. data/spec/rdkafka/admin/delete_acl_handle_spec.rb +85 -0
  80. data/spec/rdkafka/admin/delete_acl_report_spec.rb +72 -0
  81. data/spec/rdkafka/admin/delete_topic_handle_spec.rb +54 -0
  82. data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
  83. data/spec/rdkafka/admin/describe_acl_handle_spec.rb +85 -0
  84. data/spec/rdkafka/admin/describe_acl_report_spec.rb +73 -0
  85. data/spec/rdkafka/admin_spec.rb +970 -0
  86. data/spec/rdkafka/bindings_spec.rb +198 -0
  87. data/spec/rdkafka/callbacks_spec.rb +20 -0
  88. data/spec/rdkafka/config_spec.rb +258 -0
  89. data/spec/rdkafka/consumer/headers_spec.rb +73 -0
  90. data/spec/rdkafka/consumer/message_spec.rb +139 -0
  91. data/spec/rdkafka/consumer/partition_spec.rb +57 -0
  92. data/spec/rdkafka/consumer/topic_partition_list_spec.rb +248 -0
  93. data/spec/rdkafka/consumer_spec.rb +1296 -0
  94. data/spec/rdkafka/error_spec.rb +95 -0
  95. data/spec/rdkafka/metadata_spec.rb +79 -0
  96. data/spec/rdkafka/native_kafka_spec.rb +130 -0
  97. data/spec/rdkafka/producer/delivery_handle_spec.rb +60 -0
  98. data/spec/rdkafka/producer/delivery_report_spec.rb +25 -0
  99. data/spec/rdkafka/producer/partitions_count_cache_spec.rb +359 -0
  100. data/spec/rdkafka/producer_spec.rb +1528 -0
  101. data/spec/spec_helper.rb +195 -0
  102. metadata +275 -0
data/lib/rdkafka/producer.rb ADDED
@@ -0,0 +1,497 @@
+ # frozen_string_literal: true
+
+ module Rdkafka
+   # A producer for Kafka messages. To create a producer, set up a {Config} and call
+   # {Config#producer producer} on that.
+   class Producer
+     include Helpers::Time
+     include Helpers::OAuth
+
+     # @private
+     @@partitions_count_cache = PartitionsCountCache.new
+
+     # Global (process-wide) partitions cache. We use it to store the number of partitions per
+     # topic, obtained either from the librdkafka statistics (if enabled) or via direct inline
+     # calls every now and then. Since the partitions count can only grow and should be the same
+     # for all consumers and producers, we can use a global cache as long as we ensure that
+     # updates only move up.
+     #
+     # @note It is critical to remember that not all users may have statistics callbacks
+     #   enabled, hence we should not assume that this cache is always updated from the stats.
+     #
+     # @return [Rdkafka::Producer::PartitionsCountCache]
+     def self.partitions_count_cache
+       @@partitions_count_cache
+     end
+
+     # @param partitions_count_cache [Rdkafka::Producer::PartitionsCountCache]
+     def self.partitions_count_cache=(partitions_count_cache)
+       @@partitions_count_cache = partitions_count_cache
+     end
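Since the cache is process-wide and exposed via class-level accessors, it can be inspected or swapped without touching any producer instance. A minimal sketch (the topic name and partition count are assumed example values):

```ruby
# Miss: the block computes the value, which is then stored (the same block
# form that #partition_count uses internally)
Rdkafka::Producer.partitions_count_cache.get("events") { 12 } # => 12

# Swap in a fresh cache, e.g. to isolate state between integration tests
Rdkafka::Producer.partitions_count_cache = Rdkafka::Producer::PartitionsCountCache.new
```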
+
+     # Empty hash used as a default
+     EMPTY_HASH = {}.freeze
+
+     private_constant :EMPTY_HASH
+
+     # Raised when there was a critical issue when invoking rd_kafka_topic_new.
+     # This is a temporary solution until https://github.com/karafka/rdkafka-ruby/issues/451 is
+     # resolved and this is normalized in all the places.
+     class TopicHandleCreationError < RuntimeError; end
+
+     # @private
+     # Returns the current delivery callback; by default this is nil.
+     #
+     # @return [Proc, nil]
+     attr_reader :delivery_callback
+
+     # @private
+     # Returns the number of arguments accepted by the callback; by default this is nil.
+     #
+     # @return [Integer, nil]
+     attr_reader :delivery_callback_arity
+
+     # @private
+     # @param native_kafka [NativeKafka]
+     # @param partitioner [String, nil] name of the partitioner we want to use or nil to use
+     #   the "consistent_random" default
+     def initialize(native_kafka, partitioner)
+       @topics_refs_map = {}
+       @topics_configs = {}
+       @native_kafka = native_kafka
+       @partitioner = partitioner || "consistent_random"
+
+       # Makes sure that native kafka gets closed before it gets GCed by Ruby
+       ObjectSpace.define_finalizer(self, native_kafka.finalizer)
+     end
+
+     # Sets an alternative set of configuration details that can be applied per topic.
+     # @note It is not allowed to re-set the same topic config twice because of the underlying
+     #   librdkafka caching
+     # @param topic [String] The topic name
+     # @param config [Hash] config we want to use on a per-topic basis
+     # @param config_hash [Integer] hash of the config. We expect it here instead of computing
+     #   it, because it is already computed during the retrieval attempt in the `#produce` flow.
+     def set_topic_config(topic, config, config_hash)
+       # Ensure lock on topic reference just in case
+       @native_kafka.with_inner do |inner|
+         @topics_refs_map[topic] ||= {}
+         @topics_configs[topic] ||= {}
+
+         return if @topics_configs[topic].key?(config_hash)
+
+         # If config is empty, we create an empty reference that will be used with defaults
+         rd_topic_config = if config.empty?
+           nil
+         else
+           Rdkafka::Bindings.rd_kafka_topic_conf_new.tap do |topic_config|
+             config.each do |key, value|
+               error_buffer = FFI::MemoryPointer.new(:char, 256)
+               result = Rdkafka::Bindings.rd_kafka_topic_conf_set(
+                 topic_config,
+                 key.to_s,
+                 value.to_s,
+                 error_buffer,
+                 256
+               )
+
+               unless result == :config_ok
+                 raise Config::ConfigError.new(error_buffer.read_string)
+               end
+             end
+           end
+         end
+
+         topic_handle = Bindings.rd_kafka_topic_new(inner, topic, rd_topic_config)
+
+         raise TopicHandleCreationError.new("Error creating topic handle for topic #{topic}") if topic_handle.null?
+
+         @topics_configs[topic][config_hash] = config
+         @topics_refs_map[topic][config_hash] = topic_handle
+       end
+     end
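`set_topic_config` is normally invoked from `#produce` rather than directly: passing `topic_config:` there computes the config hash and registers the topic handle on first use. A hedged sketch (the property names are standard librdkafka topic-level settings; the values are assumed):

```ruby
producer.produce(
  topic: "events",
  payload: "body",
  # Cached under topic_config.hash, so pass an equal Hash on subsequent
  # dispatches to hit the cached topic handle
  topic_config: { "acks" => "all", "compression.codec" => "zstd" }
)
```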
+
+     # Starts the native Kafka polling thread and kicks off the init polling
+     # @note Not needed to run unless explicit start was disabled
+     def start
+       @native_kafka.start
+     end
+
+     # @return [String] producer name
+     def name
+       @name ||= @native_kafka.with_inner do |inner|
+         ::Rdkafka::Bindings.rd_kafka_name(inner)
+       end
+     end
+
+     # Sets a callback that will be called every time a message is successfully produced.
+     # The callback is called with a {DeliveryReport} and a {DeliveryHandle}.
+     #
+     # @param callback [Proc, #call] The callback
+     #
+     # @return [nil]
+     def delivery_callback=(callback)
+       raise TypeError.new("Callback has to be callable") unless callback.respond_to?(:call)
+       @delivery_callback = callback
+       @delivery_callback_arity = arity(callback)
+     end
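Because the arity is captured alongside the callback, both one- and two-argument callables work, as `#call_delivery_callback` further below shows. A short sketch (assuming `producer` is a configured instance):

```ruby
# One argument: only the delivery report is passed
producer.delivery_callback = ->(report) { puts "offset: #{report.offset}" }

# Two arguments: the delivery handle (including its label) is passed as well
producer.delivery_callback = lambda do |report, handle|
  puts "#{handle.label}: partition #{report.partition}"
end
```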
+
+     # Initializes transactions.
+     # Run once per producer.
+     def init_transactions
+       closed_producer_check(__method__)
+
+       @native_kafka.with_inner do |inner|
+         response_ptr = Rdkafka::Bindings.rd_kafka_init_transactions(inner, -1)
+
+         Rdkafka::RdkafkaError.validate!(response_ptr) || true
+       end
+     end
+
+     def begin_transaction
+       closed_producer_check(__method__)
+
+       @native_kafka.with_inner do |inner|
+         response_ptr = Rdkafka::Bindings.rd_kafka_begin_transaction(inner)
+
+         Rdkafka::RdkafkaError.validate!(response_ptr) || true
+       end
+     end
+
+     def commit_transaction(timeout_ms = -1)
+       closed_producer_check(__method__)
+
+       @native_kafka.with_inner do |inner|
+         response_ptr = Rdkafka::Bindings.rd_kafka_commit_transaction(inner, timeout_ms)
+
+         Rdkafka::RdkafkaError.validate!(response_ptr) || true
+       end
+     end
+
+     def abort_transaction(timeout_ms = -1)
+       closed_producer_check(__method__)
+
+       @native_kafka.with_inner do |inner|
+         response_ptr = Rdkafka::Bindings.rd_kafka_abort_transaction(inner, timeout_ms)
+         Rdkafka::RdkafkaError.validate!(response_ptr) || true
+       end
+     end
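Taken together, these four methods follow the standard librdkafka transactional lifecycle. A sketch, assuming the producer was configured with a `transactional.id`:

```ruby
producer.init_transactions # once per producer instance

producer.begin_transaction
begin
  producer.produce(topic: "events", payload: "in-tx").wait
  producer.commit_transaction
rescue Rdkafka::RdkafkaError
  producer.abort_transaction
  raise
end
```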
+
+     # Sends provided offsets of a consumer to the transaction for collective commit
+     #
+     # @param consumer [Consumer] consumer that owns the given tpls
+     # @param tpl [Consumer::TopicPartitionList]
+     # @param timeout_ms [Integer] offsets send timeout
+     # @note Use **only** in the context of an active transaction
+     def send_offsets_to_transaction(consumer, tpl, timeout_ms = 5_000)
+       closed_producer_check(__method__)
+
+       return if tpl.empty?
+
+       cgmetadata = consumer.consumer_group_metadata_pointer
+       native_tpl = tpl.to_native_tpl
+
+       @native_kafka.with_inner do |inner|
+         response_ptr = Bindings.rd_kafka_send_offsets_to_transaction(inner, native_tpl, cgmetadata, timeout_ms)
+
+         Rdkafka::RdkafkaError.validate!(response_ptr)
+       end
+     ensure
+       if cgmetadata && !cgmetadata.null?
+         Bindings.rd_kafka_consumer_group_metadata_destroy(cgmetadata)
+       end
+
+       Rdkafka::Bindings.rd_kafka_topic_partition_list_destroy(native_tpl) unless native_tpl.nil?
+     end
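This is the building block for exactly-once consume-transform-produce pipelines. A sketch, assuming an active transaction and a `message` consumed from an `input` topic:

```ruby
tpl = Rdkafka::Consumer::TopicPartitionList.new
# Commit the position of the next message to read, hence offset + 1
tpl.add_topic_and_partitions_with_offsets(
  "input",
  message.partition => message.offset + 1
)
producer.send_offsets_to_transaction(consumer, tpl)
producer.commit_transaction
```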
+
+     # Closes this producer and waits for the internal poll queue to empty.
+     def close
+       return if closed?
+       ObjectSpace.undefine_finalizer(self)
+
+       @native_kafka.close do
+         # We need to remove the topic reference objects before we destroy the producer,
+         # otherwise they would leak out
+         @topics_refs_map.each_value do |refs|
+           refs.each_value do |ref|
+             Rdkafka::Bindings.rd_kafka_topic_destroy(ref)
+           end
+         end
+       end
+
+       @topics_refs_map.clear
+     end
+
+     # Whether this producer has been closed
+     def closed?
+       @native_kafka.closed?
+     end
+
+     # Waits until all outstanding producer requests are completed, with the given timeout
+     # in milliseconds. Call this before closing a producer to ensure delivery of all messages.
+     #
+     # @param timeout_ms [Integer] how long we should wait for the flush of all messages
+     # @return [Boolean] true if there was no more data and all of it was flushed, false in
+     #   case there are still outgoing messages after the timeout
+     #
+     # @note We raise an exception for other errors because, based on the librdkafka docs,
+     #   there should be no other errors.
+     #
+     # @note For `timed_out` we do not raise an error to keep it backwards compatible
+     def flush(timeout_ms = 5_000)
+       closed_producer_check(__method__)
+
+       error = @native_kafka.with_inner do |inner|
+         response = Rdkafka::Bindings.rd_kafka_flush(inner, timeout_ms)
+         Rdkafka::RdkafkaError.build(response)
+       end
+
+       # Early return not to build the error message
+       return true unless error
+       return false if error.code == :timed_out
+
+       raise(error)
+     end
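A typical shutdown sequence flushes first and only then closes, since `#flush` reports (rather than raises) a timeout. A sketch:

```ruby
# Returns false instead of raising when messages are still pending
warn("some messages were not delivered within 10s") unless producer.flush(10_000)
producer.close
```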
+
+     # Purges the outgoing queue and releases all resources.
+     #
+     # Useful when closing the producer with outgoing messages to unstable clusters or when,
+     # for any other reason, waiting cannot go on any longer. This purges both the queue and
+     # all the inflight requests and updates the delivery handle statuses so they can be
+     # materialized into `purge_queue` errors.
+     def purge
+       closed_producer_check(__method__)
+
+       @native_kafka.with_inner do |inner|
+         response = Bindings.rd_kafka_purge(
+           inner,
+           Bindings::RD_KAFKA_PURGE_F_QUEUE | Bindings::RD_KAFKA_PURGE_F_INFLIGHT
+         )
+
+         Rdkafka::RdkafkaError.validate!(response)
+       end
+
+       # Wait for the purge to affect everything
+       sleep(0.001) until flush(100)
+
+       true
+     end
+
+     # Partition count for a given topic.
+     #
+     # @param topic [String] The topic name.
+     # @return [Integer] partition count for a given topic or `-1` if it could not be obtained.
+     #
+     # @note If 'allow.auto.create.topics' is set to true in the broker, the topic will be
+     #   auto-created after we return `-1`.
+     #
+     # @note We cache the partition count for a given topic for a given time. If statistics are
+     #   enabled for any producer or consumer, they take precedence over per-instance fetching.
+     #
+     #   This prevents us from querying for the count with each message when someone uses
+     #   `partition_key`. Instead we query at most once every 30 seconds when we have a valid
+     #   partition count, or every 5 seconds in case we were not able to obtain the number of
+     #   partitions.
+     def partition_count(topic)
+       closed_producer_check(__method__)
+
+       self.class.partitions_count_cache.get(topic) do
+         topic_metadata = nil
+
+         @native_kafka.with_inner do |inner|
+           topic_metadata = ::Rdkafka::Metadata.new(inner, topic).topics&.first
+         end
+
+         topic_metadata ? topic_metadata[:partition_count] : -1
+       end
+     rescue Rdkafka::RdkafkaError => e
+       # If the topic does not exist, it will be created, or, if not allowed, another error
+       # will be raised. We return -1 here so this can happen without an early error on
+       # metadata discovery.
+       return -1 if e.code == :unknown_topic_or_part
+
+       raise(e)
+     end
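For illustration (return values are assumed):

```ruby
producer.partition_count("events")      # => 12
producer.partition_count("nonexistent") # => -1 (may trigger broker-side auto-creation)
```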
+
+     # Produces a message to a Kafka topic. The message is added to rdkafka's queue; call
+     # {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
+     #
+     # When no partition is specified, the underlying Kafka library picks a partition based on
+     # the key. If no key is specified, a random partition will be used.
+     # When a timestamp is provided, it is used instead of the auto-generated timestamp.
+     #
+     # @param topic [String] The topic to produce to
+     # @param payload [String, nil] The message's payload
+     # @param key [String, nil] The message's key
+     # @param partition [Integer, nil] Optional partition to produce to
+     # @param partition_key [String, nil] Optional partition key based on which partition
+     #   assignment can happen
+     # @param timestamp [Time, Integer, nil] Optional timestamp of this message. An Integer
+     #   timestamp is in milliseconds since Jan 1 1970.
+     # @param headers [Hash<String,String|Array<String>>] Optional message headers. Values can
+     #   be either a single string or an array of strings to support duplicate headers per
+     #   KIP-82
+     # @param label [Object, nil] a label that can be assigned when producing a message that
+     #   will be part of the delivery handle and the delivery report
+     # @param topic_config [Hash] topic config for a given message dispatch. Allows sending
+     #   messages to topics with different configurations
+     #
+     # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of
+     #   producing this message
+     #
+     # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
+     def produce(
+       topic:,
+       payload: nil,
+       key: nil,
+       partition: nil,
+       partition_key: nil,
+       timestamp: nil,
+       headers: nil,
+       label: nil,
+       topic_config: EMPTY_HASH,
+       partitioner: @partitioner
+     )
+       closed_producer_check(__method__)
+
+       # Start by checking and converting the input
+
+       # Get payload length
+       payload_size = if payload.nil?
+         0
+       else
+         payload.bytesize
+       end
+
+       # Get key length
+       key_size = if key.nil?
+         0
+       else
+         key.bytesize
+       end
+
+       topic_config_hash = topic_config.hash
+
+       # Checks if we have the rdkafka topic reference object ready. It saves us on object
+       # allocation and allows using a custom config on demand.
+       set_topic_config(topic, topic_config, topic_config_hash) unless @topics_refs_map.dig(topic, topic_config_hash)
+       topic_ref = @topics_refs_map.dig(topic, topic_config_hash)
+
+       if partition_key
+         partition_count = partition_count(topic)
+
+         # Check if there are no overrides for the partitioner and use the default one only
+         # when no per-topic one is present.
+         selected_partitioner = @topics_configs.dig(topic, topic_config_hash, :partitioner) || partitioner
+
+         # If the topic is not present, set to -1
+         partition = Rdkafka::Bindings.partitioner(
+           topic_ref,
+           partition_key,
+           partition_count,
+           selected_partitioner
+         ) if partition_count.positive?
+       end
+
+       # If partition is nil, use -1 to let librdkafka set the partition randomly or
+       # based on the key when present.
+       partition ||= -1
+
+       # If timestamp is nil, use 0 and let Kafka set one. If it is an Integer or a Time,
+       # use it.
+       raw_timestamp = if timestamp.nil?
+         0
+       elsif timestamp.is_a?(Integer)
+         timestamp
+       elsif timestamp.is_a?(Time)
+         (timestamp.to_i * 1000) + (timestamp.usec / 1000)
+       else
+         raise TypeError.new("Timestamp has to be nil, an Integer or a Time")
+       end
+
+       delivery_handle = DeliveryHandle.new
+       delivery_handle.label = label
+       delivery_handle.topic = topic
+       delivery_handle[:pending] = true
+       delivery_handle[:response] = -1
+       delivery_handle[:partition] = -1
+       delivery_handle[:offset] = -1
+       DeliveryHandle.register(delivery_handle)
+
+       args = [
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_RKT, :pointer, topic_ref,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_MSGFLAGS, :int, Rdkafka::Bindings::RD_KAFKA_MSG_F_COPY,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_VALUE, :buffer_in, payload, :size_t, payload_size,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_KEY, :buffer_in, key, :size_t, key_size,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_PARTITION, :int32, partition,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_TIMESTAMP, :int64, raw_timestamp,
+         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_OPAQUE, :pointer, delivery_handle,
+       ]
+
+       if headers
+         headers.each do |key0, value0|
+           key = key0.to_s
+           if value0.is_a?(Array)
+             # Handle an array of values per KIP-82
+             value0.each do |value|
+               value = value.to_s
+               args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+               args << :string << key
+               args << :pointer << value
+               args << :size_t << value.bytesize
+             end
+           else
+             # Handle a single value
+             value = value0.to_s
+             args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_HEADER
+             args << :string << key
+             args << :pointer << value
+             args << :size_t << value.bytesize
+           end
+         end
+       end
+
+       args << :int << Rdkafka::Bindings::RD_KAFKA_VTYPE_END
+
+       # Produce the message
+       response = @native_kafka.with_inner do |inner|
+         Rdkafka::Bindings.rd_kafka_producev(
+           inner,
+           *args
+         )
+       end
+
+       # Raise an error if the produce call was not successful
+       if response != 0
+         DeliveryHandle.remove(delivery_handle.to_ptr.address)
+         Rdkafka::RdkafkaError.validate!(response)
+       end
+
+       delivery_handle
+     end
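End to end: `#produce` returns immediately and the handle materializes the broker acknowledgment. A sketch with assumed topic and values:

```ruby
handle = producer.produce(
  topic: "events",
  payload: "body",
  key: "user-1",
  headers: { "trace" => ["a", "b"] } # duplicate headers per KIP-82
)

report = handle.wait(max_wait_timeout: 5)
puts "partition #{report.partition}, offset #{report.offset}"
```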
+
+     # Calls the delivery callback (if one is registered)
+     #
+     # @param delivery_report [Producer::DeliveryReport]
+     # @param delivery_handle [Producer::DeliveryHandle]
+     def call_delivery_callback(delivery_report, delivery_handle)
+       return unless @delivery_callback
+
+       case @delivery_callback_arity
+       when 0
+         @delivery_callback.call
+       when 1
+         @delivery_callback.call(delivery_report)
+       else
+         @delivery_callback.call(delivery_report, delivery_handle)
+       end
+     end
+
+     # Figures out the arity of a given block/method
+     #
+     # @param callback [#call, Proc]
+     # @return [Integer] arity of the provided block/method
+     def arity(callback)
+       return callback.arity if callback.respond_to?(:arity)
+
+       callback.method(:call).arity
+     end
+
+     private
+
+     # Ensures no operations can happen on a closed producer
+     #
+     # @param method [Symbol] name of the method that invoked the producer
+     # @raise [Rdkafka::ClosedProducerError]
+     def closed_producer_check(method)
+       raise Rdkafka::ClosedProducerError.new(method) if closed?
+     end
+   end
+ end
data/lib/rdkafka/version.rb ADDED
@@ -0,0 +1,7 @@
+ # frozen_string_literal: true
+
+ module Rdkafka
+   VERSION = "0.20.0"
+   LIBRDKAFKA_VERSION = "2.8.0"
+   LIBRDKAFKA_SOURCE_SHA256 = "5bd1c46f63265f31c6bfcedcde78703f77d28238eadf23821c2b43fc30be3e25"
+ end
data/lib/rdkafka.rb ADDED
@@ -0,0 +1,54 @@
+ # frozen_string_literal: true
+
+ require "logger"
+ require "objspace"
+ require "ffi"
+ require "json"
+
+ require "rdkafka/version"
+ require "rdkafka/helpers/time"
+ require "rdkafka/helpers/oauth"
+ require "rdkafka/abstract_handle"
+ require "rdkafka/admin"
+ require "rdkafka/admin/create_topic_handle"
+ require "rdkafka/admin/create_topic_report"
+ require "rdkafka/admin/delete_groups_handle"
+ require "rdkafka/admin/delete_groups_report"
+ require "rdkafka/admin/delete_topic_handle"
+ require "rdkafka/admin/delete_topic_report"
+ require "rdkafka/admin/create_partitions_handle"
+ require "rdkafka/admin/create_partitions_report"
+ require "rdkafka/admin/create_acl_handle"
+ require "rdkafka/admin/create_acl_report"
+ require "rdkafka/admin/delete_acl_handle"
+ require "rdkafka/admin/delete_acl_report"
+ require "rdkafka/admin/describe_acl_handle"
+ require "rdkafka/admin/describe_acl_report"
+ require "rdkafka/admin/describe_configs_handle"
+ require "rdkafka/admin/describe_configs_report"
+ require "rdkafka/admin/incremental_alter_configs_handle"
+ require "rdkafka/admin/incremental_alter_configs_report"
+ require "rdkafka/admin/acl_binding_result"
+ require "rdkafka/admin/config_binding_result"
+ require "rdkafka/admin/config_resource_binding_result"
+ require "rdkafka/bindings"
+ require "rdkafka/callbacks"
+ require "rdkafka/config"
+ require "rdkafka/consumer"
+ require "rdkafka/consumer/headers"
+ require "rdkafka/consumer/message"
+ require "rdkafka/consumer/partition"
+ require "rdkafka/consumer/topic_partition_list"
+ require "rdkafka/error"
+ require "rdkafka/metadata"
+ require "rdkafka/native_kafka"
+ require "rdkafka/producer/partitions_count_cache"
+ require "rdkafka/producer"
+ require "rdkafka/producer/delivery_handle"
+ require "rdkafka/producer/delivery_report"
+
+ # Main Rdkafka namespace of this gem
+ module Rdkafka
+ end
+
+ Rdkafka::Bindings.rd_kafka_global_init
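With the entry point required and librdkafka globally initialized, basic usage reduces to configuring and producing. A minimal sketch (the broker address is an assumed local default):

```ruby
require "rdkafka"

config = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092")
producer = config.producer
producer.produce(topic: "events", payload: "hello").wait
producer.close
```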
data/renovate.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+   "extends": [
+     "config:recommended"
+   ],
+   "github-actions": {
+     "enabled": true,
+     "pinDigests": true
+   },
+   "packageRules": [
+     {
+       "matchManagers": [
+         "github-actions"
+       ],
+       "minimumReleaseAge": "7 days"
+     }
+   ],
+   "customManagers": [
+     {
+       "customType": "regex",
+       "managerFilePatterns": [
+         "/^ext/build_common\\.sh$/"
+       ],
+       "matchStrings": [
+         "readonly OPENSSL_VERSION=\"(?<currentValue>.*)\""
+       ],
+       "depNameTemplate": "openssl/openssl",
+       "datasourceTemplate": "github-releases",
+       "extractVersionTemplate": "^OpenSSL_(?<version>.*)$"
+     },
+     {
+       "customType": "regex",
+       "managerFilePatterns": [
+         "/^ext/build_common\\.sh$/"
+       ],
+       "matchStrings": [
+         "readonly CYRUS_SASL_VERSION=\"(?<currentValue>.*)\""
+       ],
+       "depNameTemplate": "cyrusimap/cyrus-sasl",
+       "datasourceTemplate": "github-releases",
+       "extractVersionTemplate": "^cyrus-sasl-(?<version>.*)$"
+     },
+     {
+       "customType": "regex",
+       "managerFilePatterns": [
+         "/^ext/build_common\\.sh$/"
+       ],
+       "matchStrings": [
+         "readonly ZLIB_VERSION=\"(?<currentValue>.*)\""
+       ],
+       "depNameTemplate": "madler/zlib",
+       "datasourceTemplate": "github-releases",
+       "extractVersionTemplate": "^v(?<version>.*)$"
+     },
+     {
+       "customType": "regex",
+       "managerFilePatterns": [
+         "/^ext/build_common\\.sh$/"
+       ],
+       "matchStrings": [
+         "readonly ZSTD_VERSION=\"(?<currentValue>.*)\""
+       ],
+       "depNameTemplate": "facebook/zstd",
+       "datasourceTemplate": "github-releases",
+       "extractVersionTemplate": "^v(?<version>.*)$"
+     },
+     {
+       "customType": "regex",
+       "managerFilePatterns": [
+         "/^ext/build_common\\.sh$/"
+       ],
+       "matchStrings": [
+         "readonly KRB5_VERSION=\"(?<currentValue>.*)\""
+       ],
+       "depNameTemplate": "krb5/krb5",
+       "datasourceTemplate": "github-releases",
+       "extractVersionTemplate": "^krb5-(?<version>.*)$"
+     },
+     {
+       "customType": "regex",
+       "managerFilePatterns": [
+         "/^ext/build_common\\.sh$/"
+       ],
+       "matchStrings": [
+         "readonly LIBRDKAFKA_VERSION=\"(?<currentValue>.*)\""
+       ],
+       "depNameTemplate": "confluentinc/librdkafka",
+       "datasourceTemplate": "github-releases",
+       "extractVersionTemplate": "^v(?<version>.*)$"
+     }
+   ]
+ }
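Each custom manager above anchors on a `readonly *_VERSION="…"` assignment in ext/build_common.sh. A Ruby sketch of what the OpenSSL matchString extracts (the version value is an assumed example):

```ruby
line = 'readonly OPENSSL_VERSION="3.0.16"'
line[/readonly OPENSSL_VERSION="(?<currentValue>.*)"/, :currentValue] # => "3.0.16"
```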