kafka 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. checksums.yaml +7 -0
  2. data/.gitignore +14 -0
  3. data/.rubocop.yml +210 -0
  4. data/.travis.yml +45 -0
  5. data/CHANGELOG.md +3 -0
  6. data/CODE_OF_CONDUCT.md +74 -0
  7. data/Gemfile +5 -0
  8. data/LICENSE.txt +21 -0
  9. data/README.md +182 -0
  10. data/Rakefile +69 -0
  11. data/examples/consumer.rb +55 -0
  12. data/examples/producer.rb +46 -0
  13. data/ext/Rakefile +69 -0
  14. data/kafka.gemspec +39 -0
  15. data/lib/kafka/admin.rb +141 -0
  16. data/lib/kafka/config.rb +145 -0
  17. data/lib/kafka/consumer.rb +87 -0
  18. data/lib/kafka/error.rb +44 -0
  19. data/lib/kafka/ffi/admin/admin_options.rb +121 -0
  20. data/lib/kafka/ffi/admin/config_entry.rb +97 -0
  21. data/lib/kafka/ffi/admin/config_resource.rb +101 -0
  22. data/lib/kafka/ffi/admin/delete_topic.rb +19 -0
  23. data/lib/kafka/ffi/admin/new_partitions.rb +77 -0
  24. data/lib/kafka/ffi/admin/new_topic.rb +91 -0
  25. data/lib/kafka/ffi/admin/result.rb +66 -0
  26. data/lib/kafka/ffi/admin/topic_result.rb +32 -0
  27. data/lib/kafka/ffi/admin.rb +16 -0
  28. data/lib/kafka/ffi/broker_metadata.rb +32 -0
  29. data/lib/kafka/ffi/client.rb +640 -0
  30. data/lib/kafka/ffi/config.rb +382 -0
  31. data/lib/kafka/ffi/consumer.rb +342 -0
  32. data/lib/kafka/ffi/error.rb +25 -0
  33. data/lib/kafka/ffi/event.rb +215 -0
  34. data/lib/kafka/ffi/group_info.rb +75 -0
  35. data/lib/kafka/ffi/group_list.rb +27 -0
  36. data/lib/kafka/ffi/group_member_info.rb +52 -0
  37. data/lib/kafka/ffi/message/header.rb +205 -0
  38. data/lib/kafka/ffi/message.rb +205 -0
  39. data/lib/kafka/ffi/metadata.rb +58 -0
  40. data/lib/kafka/ffi/opaque.rb +81 -0
  41. data/lib/kafka/ffi/opaque_pointer.rb +73 -0
  42. data/lib/kafka/ffi/partition_metadata.rb +61 -0
  43. data/lib/kafka/ffi/producer.rb +144 -0
  44. data/lib/kafka/ffi/queue.rb +65 -0
  45. data/lib/kafka/ffi/topic.rb +32 -0
  46. data/lib/kafka/ffi/topic_config.rb +126 -0
  47. data/lib/kafka/ffi/topic_metadata.rb +42 -0
  48. data/lib/kafka/ffi/topic_partition.rb +43 -0
  49. data/lib/kafka/ffi/topic_partition_list.rb +167 -0
  50. data/lib/kafka/ffi.rb +624 -0
  51. data/lib/kafka/poller.rb +28 -0
  52. data/lib/kafka/producer/delivery_report.rb +120 -0
  53. data/lib/kafka/producer.rb +127 -0
  54. data/lib/kafka/version.rb +8 -0
  55. data/lib/kafka.rb +11 -0
  56. metadata +159 -0
data/lib/kafka/ffi/consumer.rb
@@ -0,0 +1,342 @@
+ # frozen_string_literal: true
+
+ require "ffi"
+ require "kafka/ffi/client"
+
+ module Kafka::FFI
+   class Consumer < Kafka::FFI::Client
+     native_type :pointer
+
+     def self.new(config = nil)
+       super(:consumer, config)
+     end
+
+     # Retrieve the Consumer's broker assigned group Member ID.
+     #
+     # @return [String] broker assigned group Member ID
+     # @return [nil] Member ID has not been assigned
+     def member_id
+       id = ::Kafka::FFI.rd_kafka_memberid(self)
+
+       if id.null?
+         return nil
+       end
+
+       id.read_string
+     ensure
+       ::Kafka::FFI.rd_kafka_mem_free(self, id)
+     end
+
+     # Get the last known (cached) low and high offsets for the partition.
+     # This differs from query_watermark_offsets in that it does not query
+     # the brokers.
+     #
+     # @see query_watermark_offsets
+     #
+     # @param topic [String] Name of the topic
+     # @param partition [Integer] Topic partition
+     #
+     # @raise [Kafka::ResponseError] Error that occurred retrieving offsets
+     #
+     # @return [Array<(Integer, Integer)>] low and high offsets. If either is
+     #   unknown, RD_KAFKA_OFFSET_INVALID is returned for that value.
+     def get_watermark_offsets(topic, partition)
+       low = ::FFI::MemoryPointer.new(:int64)
+       high = ::FFI::MemoryPointer.new(:int64)
+
+       err = ::Kafka::FFI.rd_kafka_get_watermark_offsets(self, topic, partition, low, high)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       [low.read_int64, high.read_int64]
+     end
+
+     # rubocop:disable Naming/AccessorMethodName
+
+     # Returns a reference to the consume queue. This is the queue served by
+     # consumer_poll.
+     #
+     # @note Caller must call #destroy when done with the Queue.
+     #
+     # @return [Queue] Consumer queue
+     def get_consumer_queue
+       ::Kafka::FFI.rd_kafka_queue_get_consumer(self)
+     end
+     # rubocop:enable Naming/AccessorMethodName
+
+     # Returns a reference to the partition's queue.
+     #
+     # @note Caller must call #destroy when done with the Queue.
+     #
+     # @return [Queue] Partition Queue
+     def get_partition_queue(topic, partition)
+       ::Kafka::FFI.rd_kafka_queue_get_partition(self, topic, partition)
+     end
+
+     # Redirect the main event queue to the Consumer's queue so the consumer
+     # doesn't need to poll from it separately for event callbacks to fire.
+     #
+     # @note It is not permitted to call #poll after redirecting the main
+     #   queue with poll_set_consumer.
+     #
+     # @raise [Kafka::ResponseError] Error occurred redirecting the main
+     #   queue.
+     def poll_set_consumer
+       err = ::Kafka::FFI.rd_kafka_poll_set_consumer(self)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       nil
+     end
+
+     # Subscribe the consumer to receive Messages for a set of topics. The
+     # current set of subscriptions will be replaced.
+     #
+     # @example Subscribe to multiple topics
+     #   client.subscribe("signals", "events", "changes")
+     #
+     # @param topic [String, Array<String>] Topic name or list of topics to
+     #   subscribe to.
+     #
+     # @raise [Kafka::ResponseError] Error occurred subscribing to the topics.
+     def subscribe(topic, *rest)
+       topics = [topic, rest].flatten
+
+       tpl = TopicPartitionList.new(topics.length)
+       topics.each do |t|
+         tpl.add(t)
+       end
+
+       err = ::Kafka::FFI.rd_kafka_subscribe(self, tpl)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       nil
+     ensure
+       tpl.destroy
+     end
+
+     # Unsubscribe from the current subscription set (e.g. all current
+     # subscriptions).
+     #
+     # @raise [Kafka::ResponseError] Error unsubscribing from topics
+     def unsubscribe
+       err = ::Kafka::FFI.rd_kafka_unsubscribe(self)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       nil
+     end
+
+     # List the current topic subscriptions for the consumer.
+     #
+     # @raise [Kafka::ResponseError] Error that occurred retrieving the
+     #   subscriptions
+     #
+     # @return [Array<String>] List of currently subscribed topics
+     def subscription
+       ptr = ::FFI::MemoryPointer.new(:pointer)
+
+       err = ::Kafka::FFI.rd_kafka_subscription(self, ptr)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       begin
+         tpl = ::Kafka::FFI::TopicPartitionList.new(ptr.read_pointer)
+
+         # Map the topic partition list to topic names.
+         tpl.elements.map(&:topic)
+       ensure
+         tpl.destroy
+       end
+     ensure
+       ptr.free
+     end
+
+     # Alias subscription as subscriptions since it returns a list of topics
+     alias subscriptions subscription
+
+     # Atomically assign the set of partitions to consume. This will replace
+     # the existing assignment.
+     #
+     # @see rdkafka.h rd_kafka_assign for semantics on use from callbacks and
+     #   how empty vs NULL lists affect internal state.
+     #
+     # @param list [TopicPartitionList] List of topic+partition assignments
+     #
+     # @raise [Kafka::ResponseError] Error processing assignments
+     def assign(list)
+       err = ::Kafka::FFI.rd_kafka_assign(self, list)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       nil
+     end
+
+     # List the current partition assignment(s) for the consumer.
+     #
+     # @raise [Kafka::ResponseError] Error that occurred retrieving the
+     #   assignments.
+     #
+     # @return [Hash{String => Array<Integer>}] Current assignments for the
+     #   consumer. Hash keys are topic names and values are the list of
+     #   assigned partitions.
+     def assignment
+       ptr = ::FFI::MemoryPointer.new(:pointer)
+
+       err = ::Kafka::FFI.rd_kafka_assignment(self, ptr)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       begin
+         tpl = ::Kafka::FFI::TopicPartitionList.new(ptr.read_pointer)
+
+         # { "topic" => [1, 2, 3] }
+         tpl.elements.inject({}) do |memo, tp|
+           (memo[tp.topic] ||= []) << tp.partition
+           memo
+         end
+       ensure
+         tpl.destroy
+       end
+     ensure
+       ptr.free
+     end
+
+     # Alias assignment as assignments since it returns a set
+     alias assignments assignment
+
+     # Retrieve committed offsets for topics + partitions. The offset field
+     # of each TopicPartition in list will be set to the stored offset or to
+     # RD_KAFKA_OFFSET_INVALID in case there was no stored offset for that
+     # partition. The error field is set if there was an error with the
+     # TopicPartition.
+     #
+     # @param list [TopicPartitionList] List of topic+partitions to fetch
+     #   current offsets for. The list will be updated to set the committed
+     #   offset or error as appropriate.
+     # @param timeout [Integer] Maximum time to wait in milliseconds
+     #
+     # @raise [Kafka::ResponseError] Error with the request (likely a
+     #   timeout). Errors with individual topic+partition combinations are
+     #   set in the returned TopicPartitionList
+     #
+     # @return [TopicPartitionList] the updated list
+     def committed(list, timeout: 1000)
+       if list.nil?
+         raise ArgumentError, "list cannot be nil"
+       end
+
+       err = ::Kafka::FFI.rd_kafka_committed(self, list, timeout)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       # Return the list that was passed in as it should now be augmented
+       # with the committed offsets and any errors fetching said offsets.
+       list
+     end
+
+     # Poll the consumer's queue for a waiting Message and yield that
+     # message. The yielded message must not be cached in the application as
+     # it becomes unusable once the block completes.
+     #
+     # @see max.poll.interval.ms configuration option.
+     #
+     # @param timeout [Integer] How long to wait for a message in
+     #   milliseconds.
+     #
+     # @raise [ArgumentError] consumer_poll was called without a block.
+     # @raise [Kafka::ResponseError] Error occurred while polling.
+     #
+     # @yield [message]
+     # @yieldparam message [Message] Message received from Kafka. Application
+     #   must not call #destroy as it is owned by the Consumer.
+     #
+     # @return Either nil or the result of the block
+     def consumer_poll(timeout)
+       if !block_given?
+         raise ArgumentError, "consumer_poll must be passed a block"
+       end
+
+       msg = ::Kafka::FFI.rd_kafka_consumer_poll(self, timeout.to_i)
+
+       # No message was available
+       if msg.null?
+         return nil
+       end
+
+       begin
+         if msg.error
+           raise msg.error
+         end
+
+         yield(msg)
+       ensure
+         msg.destroy
+       end
+     end
+
+     # Commit the set of offsets from the given TopicPartitionList.
+     #
+     # @param offsets [TopicPartitionList] Set of topic+partition with offset
+     #   (and maybe metadata) to be committed. If offsets is nil the current
+     #   partition assignment set will be used instead.
+     # @param async [Boolean] If async is false this operation will block
+     #   until the broker offset commit is done.
+     #
+     # @raise [Kafka::ResponseError] Error committing offsets. Only raised if
+     #   async is false.
+     def commit(offsets, async)
+       err = ::Kafka::FFI.rd_kafka_commit(self, offsets, async)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       nil
+     end
+
+     # Commit the message's offset on the broker for the message's partition.
+     #
+     # @param message [Message] The message to commit as processed
+     # @param async [Boolean] True to allow commit to happen in the
+     #   background.
+     #
+     # @raise [Kafka::ResponseError] Error that occurred committing the
+     #   message
+     def commit_message(message, async)
+       if message.nil? || message.null?
+         raise ArgumentError, "message cannot be nil/null"
+       end
+
+       err = ::Kafka::FFI.rd_kafka_commit_message(self, message, async)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       nil
+     end
+
+     # Close down the consumer. This will block until the consumer has
+     # revoked its assignment(s), committed offsets, and left the consumer
+     # group.
+     #
+     # Ensure that `destroy` is called after the consumer is closed to free
+     # up resources.
+     #
+     # @note Maximum blocking time is roughly limited to `session.timeout.ms`
+     #
+     # @raise [Kafka::ResponseError] Error occurred closing the consumer
+     def close
+       err = ::Kafka::FFI.rd_kafka_consumer_close(self)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       nil
+     end
+   end
+ end
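
Taken together, Consumer covers the full subscribe / poll / commit cycle. Below is a minimal sketch of that flow; the broker address, group id, topic name, the Config#set API (Config is in data/lib/kafka/ffi/config.rb, not excerpted above), and the Message accessors #topic, #partition, and #offset (data/lib/kafka/ffi/message.rb) are assumptions for illustration, not something this diff guarantees:

  require "kafka/ffi"

  # Assumed configuration API; "bootstrap.servers" and "group.id" are
  # standard librdkafka settings.
  config = Kafka::FFI::Config.new
  config.set("bootstrap.servers", "localhost:9092")
  config.set("group.id", "example-group")

  consumer = Kafka::FFI::Consumer.new(config)
  consumer.subscribe("events")

  # Poll for up to 500ms per iteration. The yielded Message is owned by the
  # Consumer and is destroyed when the block returns, so nothing from it may
  # be cached.
  5.times do
    consumer.consumer_poll(500) do |message|
      puts "#{message.topic} [#{message.partition}] @ #{message.offset}"
      consumer.commit_message(message, true) # commit in the background
    end
  end

  consumer.close
  consumer.destroy

Note the close / destroy pairing: close commits final offsets and leaves the consumer group, while destroy releases the underlying librdkafka handle.
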
data/lib/kafka/ffi/error.rb
@@ -0,0 +1,25 @@
+ # frozen_string_literal: true
+
+ module Kafka::FFI
+   class Error < Kafka::Error; end
+
+   # TopicAlreadyConfiguredError is raised by Client#topic when passing a
+   # config for a topic that has already been initialized for the Client.
+   class TopicAlreadyConfiguredError < Error; end
+
+   # ConfigError is raised when making changes to the global config.
+   class ConfigError < Error
+     attr_reader :key
+     attr_reader :value
+
+     def initialize(key, value, message)
+       super(message)
+
+       @key = key
+       @value = value
+     end
+   end
+
+   class UnknownConfigKey < ConfigError; end
+   class InvalidConfigValue < ConfigError; end
+ end
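
Because ConfigError keeps the rejected key and value alongside the message, callers can report exactly which setting failed. A brief sketch, assuming Config#set (defined in data/lib/kafka/ffi/config.rb, not excerpted here) raises the subclasses above on bad input:

  config = Kafka::FFI::Config.new

  begin
    config.set("compression.codec", "nope") # deliberately invalid value
  rescue Kafka::FFI::UnknownConfigKey => e
    warn "unknown setting: #{e.key}"
  rescue Kafka::FFI::InvalidConfigValue => e
    warn "bad value #{e.value.inspect} for #{e.key}: #{e.message}"
  end
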
data/lib/kafka/ffi/event.rb
@@ -0,0 +1,215 @@
+ # frozen_string_literal: true
+
+ require "kafka/ffi/opaque_pointer"
+
+ module Kafka::FFI
+   class Event < OpaquePointer
+     # LogMessage is attached to RD_KAFKA_EVENT_LOG events.
+     LogMessage = Struct.new(:facility, :message, :level) do
+       # @attr facility [String] Log facility
+       # @attr message [String] Log message
+       # @attr level [Integer] Verbosity level of the message
+
+       def to_s
+         message
+       end
+     end
+
+     # Returns the event's type
+     #
+     # @see RD_KAFKA_EVENT_*
+     #
+     # @return [Symbol] Type of the event
+     def type
+       ::Kafka::FFI.rd_kafka_event_type(self)
+     end
+
+     # Returns the name of the event's type.
+     #
+     # @return [String] Name of the type of event
+     def name
+       ::Kafka::FFI.rd_kafka_event_name(self)
+     end
+
+     # Retrieve the set of messages. Can take a block to iterate over the
+     # set of Messages rather than return them.
+     #
+     # Events:
+     #   - RD_KAFKA_EVENT_FETCH
+     #   - RD_KAFKA_EVENT_DR
+     #
+     # @note Do not call #destroy on the Messages
+     #
+     # @yield [message] Iterate over available messages
+     # @yieldparam [Message]
+     #
+     # @return [Array<Message>] Messages attached to the Event. Empty when
+     #   the Event has no Messages.
+     # @return [nil] A block was given.
+     def messages
+       # This departs from the librdkafka API due to having a collection of
+       # methods that have funky semantics for Ruby.
+
+       # @todo Messages are only on RD_KAFKA_EVENT_FETCH and
+       #   RD_KAFKA_EVENT_DR. Need to test what happens with other event
+       #   types.
+
+       # No block so fetch all of the messages and return them as an array.
+       if !block_given?
+         count = ::Kafka::FFI.rd_kafka_event_message_count(self)
+         if count == 0
+           return []
+         end
+
+         begin
+           # Allocate enough memory for the full set but only convert as
+           # many as were returned.
+           # @todo Retrieve all until sum(ret) == count?
+           ptr = ::FFI::MemoryPointer.new(:pointer, count)
+           ret = ::Kafka::FFI.rd_kafka_event_message_array(self, ptr, count)
+
+           # Map the returned pointers to Messages
+           return ptr.read_array_of_pointer(ret).map! { |p| Message.from_native(p) }
+         ensure
+           ptr.free
+         end
+       end
+
+       # Block was passed so use rd_kafka_event_message_next
+       loop do
+         msg = ::Kafka::FFI.rd_kafka_event_message_next(self)
+         if msg.null?
+           break
+         end
+
+         yield(msg)
+       end
+     end
+
+     # Returns the configuration string for the event or nil if the
+     # configuration property is not set or not applicable for the event
+     # type.
+     #
+     # Events:
+     #   - RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH
+     #
+     # @return [String] Configuration string for the event
+     # @return [nil] Property not set or not applicable.
+     def config_string
+       ::Kafka::FFI.rd_kafka_event_config_string(self)
+     end
+
+     # Returns the error code for the event or nil if there was no error.
+     #
+     # @see error_is_fatal to detect if it is a fatal error.
+     #
+     # @return [nil] No error for the Event
+     # @return [Kafka::ResponseError] Error code for the event.
+     def error
+       err = ::Kafka::FFI.rd_kafka_event_error(self)
+       if err != :ok
+         ::Kafka::ResponseError.new(err, error_string)
+       end
+     end
+
+     # Returns a description of the error or nil when there is no error.
+     #
+     # @return [nil] No error for the Event
+     # @return [String] Description of the error
+     def error_string
+       ::Kafka::FFI.rd_kafka_event_error_string(self)
+     end
+
+     # Returns true when the Event represents a fatal error.
+     #
+     # @return [Boolean] There is an error for the Event and it is fatal.
+     def error_is_fatal
+       error && ::Kafka::FFI.rd_kafka_event_error_is_fatal(self)
+     end
+     alias error_is_fatal? error_is_fatal
+
+     # Returns the log message attached to the event.
+     #
+     # Events:
+     #   - RD_KAFKA_EVENT_LOG
+     #
+     # @return [Event::LogMessage] Attached log entry
+     # @return [nil] Event type does not support log messages.
+     def log
+       facility = ::FFI::MemoryPointer.new(:pointer)
+       message = ::FFI::MemoryPointer.new(:pointer)
+       level = ::FFI::MemoryPointer.new(:int)
+
+       exists = ::Kafka::FFI.rd_kafka_event_log(self, facility, message, level)
+       if exists != 0
+         # Event type does not support log messages.
+         return nil
+       end
+
+       LogMessage.new(
+         facility.read_pointer.read_string,
+         message.read_pointer.read_string,
+         level.read_int,
+       )
+     ensure
+       facility.free
+       message.free
+       level.free
+     end
+
+     # Extracts stats from the event
+     #
+     # Events:
+     #   - RD_KAFKA_EVENT_STATS
+     #
+     # @return [nil] Event type does not support stats
+     # @return [String] JSON encoded stats information.
+     def stats
+       # Calling stats on an unsupported event type causes a segfault with
+       # librdkafka 1.3.0.
+       if type != :stats
+         return nil
+       end
+
+       ::Kafka::FFI.rd_kafka_event_stats(self)
+     end
+
+     # Returns the topic partition list from the Event.
+     #
+     # @note Application MUST NOT call #destroy on the list
+     #
+     # Events:
+     #   - RD_KAFKA_EVENT_REBALANCE
+     #   - RD_KAFKA_EVENT_OFFSET_COMMIT
+     #
+     # @return [TopicPartitionList]
+     def topic_partition_list
+       ::Kafka::FFI.rd_kafka_event_topic_partition_list(self)
+     end
+
+     # Returns the topic partition from the Event.
+     #
+     # @note The application MUST call #destroy on the TopicPartition when
+     #   done.
+     #
+     # Events:
+     #   - RD_KAFKA_EVENT_ERROR
+     #
+     # @return [TopicPartition]
+     def topic_partition
+       ::Kafka::FFI.rd_kafka_event_topic_partition(self)
+     end
+
+     # Destroy the event, releasing its resources back to the system.
+     #
+     # @todo Is the application responsible for calling #destroy?
+     def destroy
+       # It is safe to call destroy even if the Event's pointer is NULL but
+       # it doesn't do anything so might as well guard against it just in
+       # case.
+       if !pointer.null?
+         ::Kafka::FFI.rd_kafka_event_destroy(self)
+       end
+     end
+   end
+ end
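
An application-side event loop typically dispatches on Event#type and destroys each Event when finished with it (the Messages yielded by #messages are owned by the Event and must not be destroyed individually). A sketch follows; the queue and its #poll method (the Queue API is in data/lib/kafka/ffi/queue.rb, not excerpted), the :log / :fetch / :dr symbol names for the event-type enum, and the handle_message helper are assumptions for illustration:

  # 'queue' would come from e.g. Consumer#get_consumer_queue and must be
  # destroyed separately when no longer needed.
  while (event = queue.poll(250))
    begin
      case event.type
      when :log
        entry = event.log
        puts "[#{entry.level}] #{entry.facility}: #{entry}" if entry
      when :stats
        puts event.stats # JSON string; safe because type == :stats
      when :fetch, :dr
        event.messages { |message| handle_message(message) }
      else
        error = event.error
        raise error if error && event.error_is_fatal?
      end
    ensure
      # The application owns the Event itself.
      event.destroy
    end
  end
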
data/lib/kafka/ffi/group_info.rb
@@ -0,0 +1,75 @@
+ # frozen_string_literal: true
+
+ require "ffi"
+ require "kafka/ffi/broker_metadata"
+ require "kafka/ffi/group_member_info"
+
+ module Kafka::FFI
+   class GroupInfo < ::FFI::Struct
+     layout(
+       :broker, BrokerMetadata.by_value,
+       :group, :string,
+       :err, :error_code,
+       :state, :string,
+       :protocol_type, :string,
+       :protocol, :string,
+       :members, :pointer,
+       :member_cnt, :int
+     )
+
+     # Returns information about the broker that originated the group info.
+     #
+     # @return [Kafka::FFI::BrokerMetadata] Broker info
+     def broker
+       self[:broker]
+     end
+
+     # Returns the name of the group
+     #
+     # @return [String] Group name
+     def group
+       self[:group]
+     end
+     alias name group
+
+     # Returns any broker originated error for the consumer group.
+     #
+     # @return [nil] No error
+     # @return [Kafka::ResponseError] Broker originated error
+     def error
+       if self[:err] != :ok
+         ::Kafka::ResponseError.new(self[:err])
+       end
+     end
+
+     # Returns the current state of the group
+     #
+     # @return [String] Group state
+     def state
+       self[:state]
+     end
+
+     # Returns the group protocol type
+     #
+     # @return [String] Group protocol type
+     def protocol_type
+       self[:protocol_type]
+     end
+
+     # Returns the group protocol
+     #
+     # @return [String] Group protocol
+     def protocol
+       self[:protocol]
+     end
+
+     # Returns information about the members of the consumer group
+     #
+     # @return [Array<GroupMemberInfo>] Member information
+     def members
+       self[:member_cnt].times.map do |i|
+         GroupMemberInfo.new(self[:members] + (i * GroupMemberInfo.size))
+       end
+     end
+   end
+ end
data/lib/kafka/ffi/group_list.rb
@@ -0,0 +1,27 @@
+ # frozen_string_literal: true
+
+ require "ffi"
+ require "kafka/ffi/group_info"
+
+ module Kafka::FFI
+   class GroupList < ::FFI::Struct
+     layout(
+       :groups, :pointer,
+       :group_cnt, :int
+     )
+
+     # Returns information about the consumer groups in the cluster.
+     #
+     # @return [Array<GroupInfo>] Group metadata
+     def groups
+       self[:group_cnt].times.map do |i|
+         GroupInfo.new(self[:groups] + (i * GroupInfo.size))
+       end
+     end
+
+     # Release the resources used by the group list back to the system
+     def destroy
+       ::Kafka::FFI.rd_kafka_group_list_destroy(self)
+     end
+   end
+ end
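
GroupList#groups and GroupInfo#members walk their C arrays the same way: element i lives at the base pointer plus i times the struct size. A sketch of consuming a group listing follows; the fetch_group_list call (some wrapper around librdkafka's rd_kafka_list_groups, not part of this excerpt) and BrokerMetadata#host are assumptions for illustration:

  list = fetch_group_list # hypothetical; returns a Kafka::FFI::GroupList

  begin
    list.groups.each do |group|
      next if group.error

      puts "#{group.name}: #{group.state}, #{group.members.size} member(s) " \
           "via #{group.broker.host}" # #host assumed on BrokerMetadata
    end
  ensure
    # GroupList owns the memory backing every GroupInfo and member struct.
    list.destroy
  end
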