rdkafka 0.25.0-aarch64-linux-gnu → 0.25.1-aarch64-linux-gnu

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/Gemfile +5 -6
  4. data/Gemfile.lint +14 -0
  5. data/Gemfile.lint.lock +123 -0
  6. data/README.md +1 -1
  7. data/Rakefile +21 -21
  8. data/ext/librdkafka.so +0 -0
  9. data/lib/rdkafka/admin/acl_binding_result.rb +4 -4
  10. data/lib/rdkafka/admin/create_acl_handle.rb +4 -4
  11. data/lib/rdkafka/admin/create_acl_report.rb +0 -2
  12. data/lib/rdkafka/admin/create_partitions_handle.rb +5 -5
  13. data/lib/rdkafka/admin/create_topic_handle.rb +5 -5
  14. data/lib/rdkafka/admin/delete_acl_handle.rb +6 -6
  15. data/lib/rdkafka/admin/delete_acl_report.rb +2 -3
  16. data/lib/rdkafka/admin/delete_groups_handle.rb +5 -5
  17. data/lib/rdkafka/admin/delete_topic_handle.rb +5 -5
  18. data/lib/rdkafka/admin/describe_acl_handle.rb +6 -6
  19. data/lib/rdkafka/admin/describe_acl_report.rb +2 -3
  20. data/lib/rdkafka/admin/describe_configs_handle.rb +4 -4
  21. data/lib/rdkafka/admin/describe_configs_report.rb +1 -1
  22. data/lib/rdkafka/admin/incremental_alter_configs_handle.rb +4 -4
  23. data/lib/rdkafka/admin/incremental_alter_configs_report.rb +1 -1
  24. data/lib/rdkafka/admin.rb +86 -20
  25. data/lib/rdkafka/bindings.rb +97 -82
  26. data/lib/rdkafka/callbacks.rb +10 -10
  27. data/lib/rdkafka/config.rb +18 -18
  28. data/lib/rdkafka/consumer/message.rb +5 -8
  29. data/lib/rdkafka/consumer/partition.rb +2 -2
  30. data/lib/rdkafka/consumer/topic_partition_list.rb +10 -10
  31. data/lib/rdkafka/consumer.rb +207 -14
  32. data/lib/rdkafka/error.rb +13 -13
  33. data/lib/rdkafka/helpers/oauth.rb +0 -1
  34. data/lib/rdkafka/helpers/time.rb +5 -0
  35. data/lib/rdkafka/metadata.rb +16 -16
  36. data/lib/rdkafka/native_kafka.rb +63 -2
  37. data/lib/rdkafka/producer/delivery_handle.rb +5 -5
  38. data/lib/rdkafka/producer/delivery_report.rb +1 -1
  39. data/lib/rdkafka/producer/partitions_count_cache.rb +6 -6
  40. data/lib/rdkafka/producer.rb +117 -57
  41. data/lib/rdkafka/version.rb +1 -1
  42. data/package-lock.json +331 -0
  43. data/package.json +9 -0
  44. data/rdkafka.gemspec +39 -40
  45. data/renovate.json +21 -0
  46. metadata +5 -1
data/lib/rdkafka/admin.rb CHANGED
@@ -36,8 +36,8 @@ module Rdkafka
36
36
  # Read values from the struct
37
37
  code = error_desc[:code]
38
38
 
39
- name = ''
40
- desc = ''
39
+ name = ""
40
+ desc = ""
41
41
 
42
42
  name = error_desc[:name].read_string unless error_desc[:name].null?
43
43
  desc = error_desc[:desc].read_string unless error_desc[:desc].null?
@@ -71,6 +71,65 @@ module Rdkafka
71
71
  end
72
72
  end
73
73
 
74
+ # Enable IO event notifications for fiber scheduler integration
75
+ # When admin operations complete, librdkafka will write to your FD
76
+ #
77
+ # @param fd [Integer] file descriptor to signal (from IO.pipe or eventfd)
78
+ # @param payload [String] data to write to fd (default: "\x01")
79
+ # @return [nil]
80
+ # @raise [ClosedInnerError] when the admin client is closed
81
+ def enable_queue_io_events(fd, payload = "\x01")
82
+ @native_kafka.enable_main_queue_io_events(fd, payload)
83
+ end
84
+
85
+ # Enable IO event notifications for background events
86
+ # @param fd [Integer] file descriptor to signal (from IO.pipe or eventfd)
87
+ # @param payload [String] data to write to fd (default: "\x01")
88
+ # @return [nil]
89
+ # @raise [ClosedInnerError] when the admin client is closed
90
+ def enable_background_queue_io_events(fd, payload = "\x01")
91
+ @native_kafka.enable_background_queue_io_events(fd, payload)
92
+ end
93
+
94
+ # Polls for events in a non-blocking loop, yielding the count after each iteration.
95
+ #
96
+ # This method processes events (stats, errors, etc.) in a single GVL/mutex session,
97
+ # which is more efficient than repeated individual polls. It uses non-blocking polls
98
+ # internally (no GVL release between polls).
99
+ #
100
+ # Yields the count of events processed after each poll iteration, allowing the caller
101
+ # to implement timeout or other termination logic by returning `:stop`.
102
+ #
103
+ # @yield [count] Called after each poll iteration
104
+ # @yieldparam count [Integer] Number of events processed in this iteration
105
+ # @yieldreturn [Symbol, Object] Return `:stop` to break the loop, any other value continues
106
+ # @return [nil]
107
+ # @raise [Rdkafka::ClosedAdminError] if called on a closed admin client
108
+ #
109
+ # @note This method holds the inner lock until the queue is empty or `:stop` is returned.
110
+ # Other admin operations will wait until this method returns.
111
+ # @note This method is thread-safe as it uses @native_kafka.with_inner synchronization
112
+ #
113
+ # @example Drain all pending events
114
+ # admin.events_poll_nb_each { |_count| }
115
+ #
116
+ # @example With timeout control
117
+ # deadline = monotonic_now + timeout_ms
118
+ # admin.events_poll_nb_each do |_count|
119
+ # :stop if monotonic_now >= deadline
120
+ # end
121
+ def events_poll_nb_each
122
+ closed_admin_check(__method__)
123
+
124
+ @native_kafka.with_inner do |inner|
125
+ loop do
126
+ count = Rdkafka::Bindings.rd_kafka_poll_nb(inner, 0)
127
+ break if count.zero?
128
+ break if yield(count) == :stop
129
+ end
130
+ end
131
+ end
132
+
74
133
  # @return [Proc] finalizer proc for closing the admin
75
134
  # @private
76
135
  def finalizer
@@ -113,7 +172,7 @@ module Rdkafka
113
172
  # @raise [ConfigError] When the partition count or replication factor are out of valid range
114
173
  # @raise [RdkafkaError] When the topic name is invalid or the topic already exists
115
174
  # @raise [RdkafkaError] When the topic configuration is invalid
116
- def create_topic(topic_name, partition_count, replication_factor, topic_config={})
175
+ def create_topic(topic_name, partition_count, replication_factor, topic_config = {})
117
176
  closed_admin_check(__method__)
118
177
 
119
178
  # Create a rd_kafka_NewTopic_t representing the new topic
@@ -129,14 +188,12 @@ module Rdkafka
129
188
  raise Rdkafka::Config::ConfigError.new(error_buffer.read_string)
130
189
  end
131
190
 
132
- unless topic_config.nil?
133
- topic_config.each do |key, value|
134
- Rdkafka::Bindings.rd_kafka_NewTopic_set_config(
135
- new_topic_ptr,
136
- key.to_s,
137
- value.to_s
138
- )
139
- end
191
+ topic_config&.each do |key, value|
192
+ Rdkafka::Bindings.rd_kafka_NewTopic_set_config(
193
+ new_topic_ptr,
194
+ key.to_s,
195
+ value.to_s
196
+ )
140
197
  end
141
198
 
142
199
  # Note that rd_kafka_CreateTopics can create more than one topic at a time
@@ -372,6 +429,8 @@ module Rdkafka
372
429
  # - RD_KAFKA_RESOURCE_BROKER = 4
373
430
  # @param resource_name [String] name of the resource
374
431
  # @param resource_pattern_type [Integer] rd_kafka_ResourcePatternType_t value:
432
+ # - RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0
433
+ # - RD_KAFKA_RESOURCE_PATTERN_ANY = 1
375
434
  # - RD_KAFKA_RESOURCE_PATTERN_MATCH = 2
376
435
  # - RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3
377
436
  # - RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4
@@ -471,6 +530,8 @@ module Rdkafka
471
530
  # - RD_KAFKA_RESOURCE_BROKER = 4
472
531
  # @param resource_name [String, nil] name of the resource or nil for any
473
532
  # @param resource_pattern_type [Integer] rd_kafka_ResourcePatternType_t value:
533
+ # - RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0
534
+ # - RD_KAFKA_RESOURCE_PATTERN_ANY = 1
474
535
  # - RD_KAFKA_RESOURCE_PATTERN_MATCH = 2
475
536
  # - RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3
476
537
  # - RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4
@@ -572,6 +633,8 @@ module Rdkafka
572
633
  # - RD_KAFKA_RESOURCE_BROKER = 4
573
634
  # @param resource_name [String, nil] name of the resource or nil for any
574
635
  # @param resource_pattern_type [Integer] rd_kafka_ResourcePatternType_t value:
636
+ # - RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0
637
+ # - RD_KAFKA_RESOURCE_PATTERN_ANY = 1
575
638
  # - RD_KAFKA_RESOURCE_PATTERN_MATCH = 2
576
639
  # - RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3
577
640
  # - RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4
@@ -720,10 +783,12 @@ module Rdkafka
720
783
 
721
784
  raise
722
785
  ensure
723
- Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
724
- configs_array_ptr,
725
- pointer_array.size
726
- ) if configs_array_ptr
786
+ if configs_array_ptr
787
+ Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
788
+ configs_array_ptr,
789
+ pointer_array.size
790
+ )
791
+ end
727
792
  end
728
793
 
729
794
  handle
@@ -791,7 +856,6 @@ module Rdkafka
791
856
  configs_array_ptr = FFI::MemoryPointer.new(:pointer, pointer_array.size)
792
857
  configs_array_ptr.write_array_of_pointer(pointer_array)
793
858
 
794
-
795
859
  begin
796
860
  @native_kafka.with_inner do |inner|
797
861
  Rdkafka::Bindings.rd_kafka_IncrementalAlterConfigs(
@@ -807,10 +871,12 @@ module Rdkafka
807
871
 
808
872
  raise
809
873
  ensure
810
- Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
811
- configs_array_ptr,
812
- pointer_array.size
813
- ) if configs_array_ptr
874
+ if configs_array_ptr
875
+ Rdkafka::Bindings.rd_kafka_ConfigResource_destroy_array(
876
+ configs_array_ptr,
877
+ pointer_array.size
878
+ )
879
+ end
814
880
  end
815
881
 
816
882
  handle
@@ -17,10 +17,10 @@ module Rdkafka
17
17
  # Returns the library extension based on the host OS
18
18
  # @return [String] 'dylib' on macOS, 'so' on other systems
19
19
  def self.lib_extension
20
- if RbConfig::CONFIG['host_os'] =~ /darwin/
21
- 'dylib'
20
+ if /darwin/.match?(RbConfig::CONFIG["host_os"])
21
+ "dylib"
22
22
  else
23
- 'so'
23
+ "so"
24
24
  end
25
25
  end
26
26
 
@@ -31,8 +31,8 @@ module Rdkafka
31
31
  error_message = e.message
32
32
 
33
33
  # Check if this is a glibc version mismatch error
34
- if error_message =~ /GLIBC_[\d.]+['"` ]?\s*not found/i
35
- glibc_version = error_message[/GLIBC_([\d.]+)/, 1] || 'unknown'
34
+ if /GLIBC_[\d.]+['"` ]?\s*not found/i.match?(error_message)
35
+ glibc_version = error_message[/GLIBC_([\d.]+)/, 1] || "unknown"
36
36
 
37
37
  raise Rdkafka::LibraryLoadError, <<~ERROR_MSG.strip
38
38
  Failed to load librdkafka due to glibc compatibility issue.
@@ -64,12 +64,12 @@ module Rdkafka
64
64
  RD_KAFKA_RESP_ERR__NOENT = -156
65
65
  RD_KAFKA_RESP_ERR_NO_ERROR = 0
66
66
 
67
- RD_KAFKA_OFFSET_END = -1
67
+ RD_KAFKA_OFFSET_END = -1
68
68
  RD_KAFKA_OFFSET_BEGINNING = -2
69
- RD_KAFKA_OFFSET_STORED = -1000
70
- RD_KAFKA_OFFSET_INVALID = -1001
69
+ RD_KAFKA_OFFSET_STORED = -1000
70
+ RD_KAFKA_OFFSET_INVALID = -1001
71
71
 
72
- RD_KAFKA_PARTITION_UA = -1
72
+ RD_KAFKA_PARTITION_UA = -1
73
73
  RD_KAFKA_PARTITION_UA_STR = RD_KAFKA_PARTITION_UA.to_s.freeze
74
74
 
75
75
  EMPTY_HASH = {}.freeze
@@ -90,6 +90,12 @@ module Rdkafka
90
90
  attach_function :rd_kafka_poll, [:pointer, :int], :int, blocking: true
91
91
  attach_function :rd_kafka_outq_len, [:pointer], :int, blocking: true
92
92
 
93
+ # Non-blocking poll variants (do not release GVL)
94
+ # These are more efficient for poll(0) calls in fiber schedulers where GVL
95
+ # release/reacquire overhead is wasteful since we don't expect to wait.
96
+ # Uses the same underlying C function but with blocking: false to skip GVL release.
97
+ attach_function :rd_kafka_poll_nb, :rd_kafka_poll, [:pointer, :int], :int, blocking: false
98
+
93
99
  # Metadata
94
100
 
95
101
  attach_function :rd_kafka_name, [:pointer], :string
@@ -101,14 +107,14 @@ module Rdkafka
101
107
  # FFI struct representing a Kafka message (rd_kafka_message_t)
102
108
  class Message < FFI::Struct
103
109
  layout :err, :int,
104
- :rkt, :pointer,
105
- :partition, :int32,
106
- :payload, :pointer,
107
- :len, :size_t,
108
- :key, :pointer,
109
- :key_len, :size_t,
110
- :offset, :int64,
111
- :_private, :pointer
110
+ :rkt, :pointer,
111
+ :partition, :int32,
112
+ :payload, :pointer,
113
+ :len, :size_t,
114
+ :key, :pointer,
115
+ :key_len, :size_t,
116
+ :offset, :int64,
117
+ :_private, :pointer
112
118
  end
113
119
 
114
120
  attach_function :rd_kafka_message_destroy, [:pointer], :void
@@ -120,20 +126,20 @@ module Rdkafka
120
126
  # FFI struct representing a topic partition (rd_kafka_topic_partition_t)
121
127
  class TopicPartition < FFI::Struct
122
128
  layout :topic, :string,
123
- :partition, :int32,
124
- :offset, :int64,
125
- :metadata, :pointer,
126
- :metadata_size, :size_t,
127
- :opaque, :pointer,
128
- :err, :int,
129
- :_private, :pointer
129
+ :partition, :int32,
130
+ :offset, :int64,
131
+ :metadata, :pointer,
132
+ :metadata_size, :size_t,
133
+ :opaque, :pointer,
134
+ :err, :int,
135
+ :_private, :pointer
130
136
  end
131
137
 
132
138
  # FFI struct representing a topic partition list (rd_kafka_topic_partition_list_t)
133
139
  class TopicPartitionList < FFI::Struct
134
140
  layout :cnt, :int,
135
- :size, :int,
136
- :elems, :pointer
141
+ :size, :int,
142
+ :elems, :pointer
137
143
  end
138
144
 
139
145
  attach_function :rd_kafka_topic_partition_list_new, [:int32], :pointer
@@ -148,7 +154,7 @@ module Rdkafka
148
154
  # separate errors results if obtaining configuration was not possible for any reason
149
155
  class ConfigResource < FFI::Struct
150
156
  layout :type, :int,
151
- :name, :string
157
+ :name, :string
152
158
  end
153
159
 
154
160
  attach_function :rd_kafka_DescribeConfigs, [:pointer, :pointer, :size_t, :pointer, :pointer], :void, blocking: true
@@ -177,16 +183,16 @@ module Rdkafka
177
183
  RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS = 16
178
184
  RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT = 131072
179
185
 
180
- RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET = 0
181
- RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE = 1
182
- RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND = 2
186
+ RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET = 0
187
+ RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE = 1
188
+ RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND = 2
183
189
  RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT = 3
184
190
 
185
191
  # FFI struct for error description (rd_kafka_err_desc)
186
192
  class NativeErrorDesc < FFI::Struct
187
193
  layout :code, :int,
188
- :name, :pointer,
189
- :desc, :pointer
194
+ :name, :pointer,
195
+ :desc, :pointer
190
196
  end
191
197
 
192
198
  attach_function :rd_kafka_err2name, [:int], :string
@@ -222,6 +228,15 @@ module Rdkafka
222
228
  # Log queue
223
229
  attach_function :rd_kafka_set_log_queue, [:pointer, :pointer], :void
224
230
  attach_function :rd_kafka_queue_get_main, [:pointer], :pointer
231
+ attach_function :rd_kafka_queue_get_background, [:pointer], :pointer
232
+
233
+ # Queue IO Event Support - for fiber scheduler integration
234
+ # Enables notifications to a custom FD when queue transitions from empty to non-empty
235
+ # @param queue rd_kafka_queue_t* - the queue to monitor
236
+ # @param fd int - file descriptor to write to (provide your own pipe/eventfd)
237
+ # @param payload const void* - data to write to fd
238
+ # @param size size_t - size of payload
239
+ attach_function :rd_kafka_queue_io_event_enable, [:pointer, :int, :pointer, :size_t], :void
225
240
  # Per topic configs
226
241
  attach_function :rd_kafka_topic_conf_new, [], :pointer
227
242
  attach_function :rd_kafka_topic_conf_set, [:pointer, :string, :string, :pointer, :int], :kafka_config_response
@@ -230,19 +245,19 @@ module Rdkafka
230
245
  :void, [:pointer, :int, :string, :string]
231
246
  ) do |_client_ptr, level, _level_string, line|
232
247
  severity = case level
233
- when 0, 1, 2
234
- Logger::FATAL
235
- when 3
236
- Logger::ERROR
237
- when 4
238
- Logger::WARN
239
- when 5, 6
240
- Logger::INFO
241
- when 7
242
- Logger::DEBUG
243
- else
244
- Logger::UNKNOWN
245
- end
248
+ when 0, 1, 2
249
+ Logger::FATAL
250
+ when 3
251
+ Logger::ERROR
252
+ when 4
253
+ Logger::WARN
254
+ when 5, 6
255
+ Logger::INFO
256
+ when 7
257
+ Logger::DEBUG
258
+ else
259
+ Logger::UNKNOWN
260
+ end
246
261
 
247
262
  Rdkafka::Config.ensure_log_thread
248
263
  Rdkafka::Config.log_queue << [severity, "rdkafka: #{line}"]
@@ -268,8 +283,8 @@ module Rdkafka
268
283
  #
269
284
  # Since this cache is shared, having few consumers and/or producers in one process will
270
285
  # automatically improve the querying times even with low refresh times.
271
- (stats['topics'] || EMPTY_HASH).each do |topic_name, details|
272
- partitions_count = details['partitions'].keys.reject { |k| k == RD_KAFKA_PARTITION_UA_STR }.size
286
+ (stats["topics"] || EMPTY_HASH).each do |topic_name, details|
287
+ partitions_count = details["partitions"].keys.count { |k| !(k == RD_KAFKA_PARTITION_UA_STR) }
273
288
 
274
289
  next unless partitions_count.positive?
275
290
 
@@ -314,9 +329,7 @@ module Rdkafka
314
329
  OAuthbearerTokenRefreshCallback = FFI::Function.new(
315
330
  :void, [:pointer, :string, :pointer]
316
331
  ) do |client_ptr, config, _opaque|
317
- if Rdkafka::Config.oauthbearer_token_refresh_callback
318
- Rdkafka::Config.oauthbearer_token_refresh_callback.call(config, Rdkafka::Bindings.rd_kafka_name(client_ptr))
319
- end
332
+ Rdkafka::Config.oauthbearer_token_refresh_callback&.call(config, Rdkafka::Bindings.rd_kafka_name(client_ptr))
320
333
  end
321
334
 
322
335
  # Handle
@@ -344,6 +357,9 @@ module Rdkafka
344
357
  attach_function :rd_kafka_commit, [:pointer, :pointer, :bool], :int, blocking: true
345
358
  attach_function :rd_kafka_poll_set_consumer, [:pointer], :void, blocking: true
346
359
  attach_function :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: true
360
+ # Non-blocking consumer poll variant (does not release GVL)
361
+ # More efficient for poll(0) calls in fiber schedulers.
362
+ attach_function :rd_kafka_consumer_poll_nb, :rd_kafka_consumer_poll, [:pointer, :int], :pointer, blocking: false
347
363
  attach_function :rd_kafka_consumer_close, [:pointer], :void, blocking: true
348
364
  attach_function :rd_kafka_offsets_store, [:pointer, :pointer], :int, blocking: true
349
365
  attach_function :rd_kafka_pause_partitions, [:pointer, :pointer], :int, blocking: true
@@ -426,8 +442,8 @@ module Rdkafka
426
442
 
427
443
  # Hash mapping partitioner names to their FFI function symbols
428
444
  # @return [Hash{String => Symbol}]
429
- PARTITIONERS = %w(random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random).each_with_object({}) do |name, hsh|
430
- method_name = "rd_kafka_msg_partitioner_#{name}".to_sym
445
+ PARTITIONERS = %w[random consistent consistent_random murmur2 murmur2_random fnv1a fnv1a_random].each_with_object({}) do |name, hsh|
446
+ method_name = :"rd_kafka_msg_partitioner_#{name}"
431
447
  attach_function method_name, [:pointer, :pointer, :size_t, :int32, :pointer, :pointer], :int32
432
448
  hsh[name] = method_name
433
449
  end
@@ -453,7 +469,7 @@ module Rdkafka
453
469
  end
454
470
 
455
471
  # Create Topics
456
- RD_KAFKA_ADMIN_OP_CREATETOPICS = 1 # rd_kafka_admin_op_t
472
+ RD_KAFKA_ADMIN_OP_CREATETOPICS = 1 # rd_kafka_admin_op_t
457
473
  RD_KAFKA_EVENT_CREATETOPICS_RESULT = 100 # rd_kafka_event_type_t
458
474
 
459
475
  attach_function :rd_kafka_CreateTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :void, blocking: true
@@ -464,7 +480,7 @@ module Rdkafka
464
480
  attach_function :rd_kafka_CreateTopics_result_topics, [:pointer, :pointer], :pointer, blocking: true
465
481
 
466
482
  # Delete Topics
467
- RD_KAFKA_ADMIN_OP_DELETETOPICS = 2 # rd_kafka_admin_op_t
483
+ RD_KAFKA_ADMIN_OP_DELETETOPICS = 2 # rd_kafka_admin_op_t
468
484
  RD_KAFKA_EVENT_DELETETOPICS_RESULT = 101 # rd_kafka_event_type_t
469
485
 
470
486
  attach_function :rd_kafka_DeleteTopics, [:pointer, :pointer, :size_t, :pointer, :pointer], :int32, blocking: true
@@ -495,7 +511,6 @@ module Rdkafka
495
511
 
496
512
  # Background Queue and Callback
497
513
 
498
- attach_function :rd_kafka_queue_get_background, [:pointer], :pointer
499
514
  attach_function :rd_kafka_conf_set_background_event_cb, [:pointer, :pointer], :void
500
515
  attach_function :rd_kafka_queue_destroy, [:pointer], :void
501
516
 
@@ -517,7 +532,7 @@ module Rdkafka
517
532
  attach_function :rd_kafka_topic_result_name, [:pointer], :pointer
518
533
 
519
534
  # Create Acls
520
- RD_KAFKA_ADMIN_OP_CREATEACLS = 9
535
+ RD_KAFKA_ADMIN_OP_CREATEACLS = 9
521
536
  RD_KAFKA_EVENT_CREATEACLS_RESULT = 1024
522
537
 
523
538
  attach_function :rd_kafka_CreateAcls, [:pointer, :pointer, :size_t, :pointer, :pointer], :void
@@ -525,7 +540,7 @@ module Rdkafka
525
540
  attach_function :rd_kafka_CreateAcls_result_acls, [:pointer, :pointer], :pointer
526
541
 
527
542
  # Delete Acls
528
- RD_KAFKA_ADMIN_OP_DELETEACLS = 11
543
+ RD_KAFKA_ADMIN_OP_DELETEACLS = 11
529
544
  RD_KAFKA_EVENT_DELETEACLS_RESULT = 4096
530
545
 
531
546
  attach_function :rd_kafka_DeleteAcls, [:pointer, :pointer, :size_t, :pointer, :pointer], :void
@@ -535,7 +550,7 @@ module Rdkafka
535
550
  attach_function :rd_kafka_DeleteAcls_result_response_matching_acls, [:pointer, :pointer], :pointer
536
551
 
537
552
  # Describe Acls
538
- RD_KAFKA_ADMIN_OP_DESCRIBEACLS = 10
553
+ RD_KAFKA_ADMIN_OP_DESCRIBEACLS = 10
539
554
  RD_KAFKA_EVENT_DESCRIBEACLS_RESULT = 2048
540
555
 
541
556
  attach_function :rd_kafka_DescribeAcls, [:pointer, :pointer, :pointer, :pointer], :void
@@ -551,41 +566,42 @@ module Rdkafka
551
566
  attach_function :rd_kafka_AclBinding_host, [:pointer], :pointer
552
567
  attach_function :rd_kafka_AclBinding_operation, [:pointer], :int32
553
568
  attach_function :rd_kafka_AclBinding_permission_type, [:pointer], :int32
554
- attach_function :rd_kafka_AclBinding_new, [:int32, :pointer, :int32, :pointer, :pointer, :int32, :int32, :pointer, :size_t ], :pointer
555
- attach_function :rd_kafka_AclBindingFilter_new, [:int32, :pointer, :int32, :pointer, :pointer, :int32, :int32, :pointer, :size_t ], :pointer
569
+ attach_function :rd_kafka_AclBinding_new, [:int32, :pointer, :int32, :pointer, :pointer, :int32, :int32, :pointer, :size_t], :pointer
570
+ attach_function :rd_kafka_AclBindingFilter_new, [:int32, :pointer, :int32, :pointer, :pointer, :int32, :int32, :pointer, :size_t], :pointer
556
571
  attach_function :rd_kafka_AclBinding_destroy, [:pointer], :void
557
572
 
558
573
  # rd_kafka_ResourceType_t - https://github.com/confluentinc/librdkafka/blob/292d2a66b9921b783f08147807992e603c7af059/src/rdkafka.h#L7307
559
- RD_KAFKA_RESOURCE_ANY = 1
574
+ RD_KAFKA_RESOURCE_ANY = 1
560
575
  RD_KAFKA_RESOURCE_TOPIC = 2
561
576
  RD_KAFKA_RESOURCE_GROUP = 3
562
577
  RD_KAFKA_RESOURCE_BROKER = 4
563
578
  RD_KAFKA_RESOURCE_TRANSACTIONAL_ID = 5
564
579
 
565
580
  # rd_kafka_ResourcePatternType_t - https://github.com/confluentinc/librdkafka/blob/292d2a66b9921b783f08147807992e603c7af059/src/rdkafka.h#L7320
566
- RD_KAFKA_RESOURCE_PATTERN_ANY = 1
567
- RD_KAFKA_RESOURCE_PATTERN_MATCH = 2
568
- RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3
581
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0
582
+ RD_KAFKA_RESOURCE_PATTERN_ANY = 1
583
+ RD_KAFKA_RESOURCE_PATTERN_MATCH = 2
584
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3
569
585
  RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4
570
586
 
571
587
  # rd_kafka_AclOperation_t - https://github.com/confluentinc/librdkafka/blob/292d2a66b9921b783f08147807992e603c7af059/src/rdkafka.h#L8403
572
- RD_KAFKA_ACL_OPERATION_ANY = 1
573
- RD_KAFKA_ACL_OPERATION_ALL = 2
574
- RD_KAFKA_ACL_OPERATION_READ = 3
575
- RD_KAFKA_ACL_OPERATION_WRITE = 4
576
- RD_KAFKA_ACL_OPERATION_CREATE = 5
577
- RD_KAFKA_ACL_OPERATION_DELETE = 6
578
- RD_KAFKA_ACL_OPERATION_ALTER = 7
579
- RD_KAFKA_ACL_OPERATION_DESCRIBE = 8
580
- RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9
588
+ RD_KAFKA_ACL_OPERATION_ANY = 1
589
+ RD_KAFKA_ACL_OPERATION_ALL = 2
590
+ RD_KAFKA_ACL_OPERATION_READ = 3
591
+ RD_KAFKA_ACL_OPERATION_WRITE = 4
592
+ RD_KAFKA_ACL_OPERATION_CREATE = 5
593
+ RD_KAFKA_ACL_OPERATION_DELETE = 6
594
+ RD_KAFKA_ACL_OPERATION_ALTER = 7
595
+ RD_KAFKA_ACL_OPERATION_DESCRIBE = 8
596
+ RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9
581
597
  RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10
582
- RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11
598
+ RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11
583
599
  RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12
584
600
 
585
601
  # rd_kafka_AclPermissionType_t - https://github.com/confluentinc/librdkafka/blob/292d2a66b9921b783f08147807992e603c7af059/src/rdkafka.h#L8435
586
- RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1
587
- RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2
588
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3
602
+ RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1
603
+ RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2
604
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3
589
605
 
590
606
  # Extracting error details from Acl results
591
607
  attach_function :rd_kafka_acl_result_error, [:pointer], :pointer
@@ -595,14 +611,13 @@ module Rdkafka
595
611
  attach_function :rd_kafka_event_error_string, [:pointer], :pointer
596
612
  attach_function :rd_kafka_AclBinding_error, [:pointer], :pointer
597
613
 
598
-
599
614
  # FFI struct for native error (rd_kafka_error_t)
600
615
  class NativeError < FFI::Struct
601
616
  layout :code, :int32,
602
- :errstr, :pointer,
603
- :fatal, :u_int8_t,
604
- :retriable, :u_int8_t,
605
- :txn_requires_abort, :u_int8_t
617
+ :errstr, :pointer,
618
+ :fatal, :u_int8_t,
619
+ :retriable, :u_int8_t,
620
+ :txn_requires_abort, :u_int8_t
606
621
  end
607
622
 
608
623
  attach_function :rd_kafka_group_result_error, [:pointer], NativeError.by_ref # rd_kafka_group_result_t* => rd_kafka_error_t*
@@ -92,15 +92,15 @@ module Rdkafka
92
92
 
93
93
  # @param acl_result_pointer [FFI::Pointer] pointer to the delete ACL result response struct
94
94
  def initialize(acl_result_pointer)
95
- @matching_acls=[]
95
+ @matching_acls = []
96
96
  rd_kafka_error_pointer = Rdkafka::Bindings.rd_kafka_DeleteAcls_result_response_error(acl_result_pointer)
97
97
  @result_error = Rdkafka::Bindings.rd_kafka_error_code(rd_kafka_error_pointer)
98
98
  @error_string = Rdkafka::Bindings.rd_kafka_error_string(rd_kafka_error_pointer)
99
99
  if @result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
100
- # Get the number of matching acls
101
- pointer_to_size_t = FFI::MemoryPointer.new(:int32)
102
- @matching_acls = Rdkafka::Bindings.rd_kafka_DeleteAcls_result_response_matching_acls(acl_result_pointer, pointer_to_size_t)
103
- @matching_acls_count = pointer_to_size_t.read_int
100
+ # Get the number of matching acls
101
+ pointer_to_size_t = FFI::MemoryPointer.new(:int32)
102
+ @matching_acls = Rdkafka::Bindings.rd_kafka_DeleteAcls_result_response_matching_acls(acl_result_pointer, pointer_to_size_t)
103
+ @matching_acls_count = pointer_to_size_t.read_int
104
104
  end
105
105
  end
106
106
 
@@ -123,7 +123,7 @@ module Rdkafka
123
123
 
124
124
  # @param event_ptr [FFI::Pointer] pointer to the event
125
125
  def initialize(event_ptr)
126
- @matching_acls=[]
126
+ @matching_acls = []
127
127
  @result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
128
128
  @error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
129
129
  if @result_error == Rdkafka::Bindings::RD_KAFKA_RESP_ERR_NO_ERROR
@@ -144,7 +144,7 @@ module Rdkafka
144
144
 
145
145
  # @param event_ptr [FFI::Pointer] pointer to the event
146
146
  def initialize(event_ptr)
147
- @results=[]
147
+ @results = []
148
148
  @result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
149
149
  @error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
150
150
 
@@ -166,7 +166,7 @@ module Rdkafka
166
166
 
167
167
  # @param event_ptr [FFI::Pointer] pointer to the event
168
168
  def initialize(event_ptr)
169
- @results=[]
169
+ @results = []
170
170
  @result_error = Rdkafka::Bindings.rd_kafka_event_error(event_ptr)
171
171
  @error_string = Rdkafka::Bindings.rd_kafka_event_error_string(event_ptr)
172
172
 
@@ -452,14 +452,14 @@ module Rdkafka
452
452
 
453
453
  # FFI Function used for Create Topic and Delete Topic callbacks
454
454
  background_event_callback_function = FFI::Function.new(
455
- :void, [:pointer, :pointer, :pointer]
455
+ :void, [:pointer, :pointer, :pointer]
456
456
  ) do |client_ptr, event_ptr, opaque_ptr|
457
457
  BackgroundEventCallback.call(client_ptr, event_ptr, opaque_ptr)
458
458
  end
459
459
 
460
460
  # FFI Function used for Message Delivery callbacks
461
461
  delivery_callback_function = FFI::Function.new(
462
- :void, [:pointer, :pointer, :pointer]
462
+ :void, [:pointer, :pointer, :pointer]
463
463
  ) do |client_ptr, message_ptr, opaque_ptr|
464
464
  DeliveryCallback.call(client_ptr, message_ptr, opaque_ptr)
465
465
  end