kafka 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +14 -0
- data/.rubocop.yml +210 -0
- data/.travis.yml +45 -0
- data/CHANGELOG.md +3 -0
- data/CODE_OF_CONDUCT.md +74 -0
- data/Gemfile +5 -0
- data/LICENSE.txt +21 -0
- data/README.md +182 -0
- data/Rakefile +69 -0
- data/examples/consumer.rb +55 -0
- data/examples/producer.rb +46 -0
- data/ext/Rakefile +69 -0
- data/kafka.gemspec +39 -0
- data/lib/kafka/admin.rb +141 -0
- data/lib/kafka/config.rb +145 -0
- data/lib/kafka/consumer.rb +87 -0
- data/lib/kafka/error.rb +44 -0
- data/lib/kafka/ffi/admin/admin_options.rb +121 -0
- data/lib/kafka/ffi/admin/config_entry.rb +97 -0
- data/lib/kafka/ffi/admin/config_resource.rb +101 -0
- data/lib/kafka/ffi/admin/delete_topic.rb +19 -0
- data/lib/kafka/ffi/admin/new_partitions.rb +77 -0
- data/lib/kafka/ffi/admin/new_topic.rb +91 -0
- data/lib/kafka/ffi/admin/result.rb +66 -0
- data/lib/kafka/ffi/admin/topic_result.rb +32 -0
- data/lib/kafka/ffi/admin.rb +16 -0
- data/lib/kafka/ffi/broker_metadata.rb +32 -0
- data/lib/kafka/ffi/client.rb +640 -0
- data/lib/kafka/ffi/config.rb +382 -0
- data/lib/kafka/ffi/consumer.rb +342 -0
- data/lib/kafka/ffi/error.rb +25 -0
- data/lib/kafka/ffi/event.rb +215 -0
- data/lib/kafka/ffi/group_info.rb +75 -0
- data/lib/kafka/ffi/group_list.rb +27 -0
- data/lib/kafka/ffi/group_member_info.rb +52 -0
- data/lib/kafka/ffi/message/header.rb +205 -0
- data/lib/kafka/ffi/message.rb +205 -0
- data/lib/kafka/ffi/metadata.rb +58 -0
- data/lib/kafka/ffi/opaque.rb +81 -0
- data/lib/kafka/ffi/opaque_pointer.rb +73 -0
- data/lib/kafka/ffi/partition_metadata.rb +61 -0
- data/lib/kafka/ffi/producer.rb +144 -0
- data/lib/kafka/ffi/queue.rb +65 -0
- data/lib/kafka/ffi/topic.rb +32 -0
- data/lib/kafka/ffi/topic_config.rb +126 -0
- data/lib/kafka/ffi/topic_metadata.rb +42 -0
- data/lib/kafka/ffi/topic_partition.rb +43 -0
- data/lib/kafka/ffi/topic_partition_list.rb +167 -0
- data/lib/kafka/ffi.rb +624 -0
- data/lib/kafka/poller.rb +28 -0
- data/lib/kafka/producer/delivery_report.rb +120 -0
- data/lib/kafka/producer.rb +127 -0
- data/lib/kafka/version.rb +8 -0
- data/lib/kafka.rb +11 -0
- metadata +159 -0
data/lib/kafka/ffi/client.rb
@@ -0,0 +1,640 @@

# frozen_string_literal: true

require "ffi"
require "kafka/config"
require "kafka/ffi/opaque_pointer"
require "kafka/ffi/admin/result"

module Kafka::FFI
  # Client is a handle to a configured librdkafka instance that begins
  # operation once created. Client is an abstract class and will provide either
  # a Consumer or Producer based on the type being created. Each Client
  # instance can either produce or consume messages to / from topics and cannot
  # do both.
  #
  # @see Consumer
  # @see Producer
  #
  # @note Naming this is hard and librdkafka primarily just refers to it as "a
  #   handle" to an instance. It's more akin to an internal service and this
  #   Client talks the API to that service.
  class Client < OpaquePointer
    require "kafka/ffi/consumer"
    require "kafka/ffi/producer"

    # Create a new Client of type with the given configuration.
    #
    # @param type [:consumer, :producer] Type of Kafka instance to create.
    # @param config [nil] Use librdkafka default config
    # @param config [Config, Kafka::Config] Configuration for the instance.
    # @param config [Hash{[String, Symbol] => [String, Integer, nil, Boolean]}]
    #   Configuration options for the instance.
    #
    # @return [Consumer, Producer]
    def self.new(type, config = nil)
      error = ::FFI::MemoryPointer.new(:char, 512)

      # Convenience for passing in a Kafka::Config instead of building a
      # Kafka::FFI::Config since Kafka::Config provides a way to create a
      # config from a Hash.
      config =
        case config
        when Config, nil then config
        when ::Kafka::Config then config.to_ffi
        when Hash then ::Kafka::Config.new(config).to_ffi
        else
          raise ArgumentError, "config must be one of nil, Config, ::Kafka::Config, or Hash"
        end

      client = Kafka::FFI.rd_kafka_new(type, config, error, error.size)
      if client.nil?
        raise Error, error.read_string
      end

      if config
        # Store a reference to the config on the Client instance. We do this to
        # tie the Config's lifecycle to the Client instance in Ruby since they
        # are already tied in librdkafka. This ensures that any Ruby objects
        # referenced in the config (like callbacks) are not garbage collected.
        #
        # Using instance_variable_set to avoid exposing an API method that
        # could cause confusion from end users since the config cannot be
        # changed after initialization.
        client.instance_variable_set(:@config, config)
      end

      client
    end
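
    # --- Illustrative usage sketch (not part of the gem source) ---
    # Standing up a producer handle from a plain Hash config. The broker
    # address is a placeholder; later sketches below reuse this `client`.
    #
    #   client = Kafka::FFI::Client.new(:producer, {
    #     "bootstrap.servers" => "localhost:9092",
    #   })
    #
    #   client.name # => e.g. "rdkafka#producer-1"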

    def self.from_native(ptr, _ctx)
      if !ptr.is_a?(::FFI::Pointer)
        raise TypeError, "from_native can only convert from a ::FFI::Pointer to #{self}"
      end

      # Converting from a null pointer should return nil. Likely this was
      # caused by rd_kafka_new returning an error and a NULL pointer for the
      # Client.
      if ptr.null?
        return nil
      end

      # Build a temporary Client to pass to rd_kafka_type. There is a bit of a
      # chicken and egg problem here. We can't create the final class until
      # after we know the type. But for type safety we want to pass a Client.
      cfg = allocate
      cfg.send(:initialize, ptr)
      type = ::Kafka::FFI.rd_kafka_type(cfg)

      klass =
        case type
        when :producer then Producer
        when :consumer then Consumer
        else
          raise ArgumentError, "unknown Kafka client type: #{type}"
        end

      client = klass.allocate
      client.send(:initialize, ptr)
      client
    end

    def initialize(ptr)
      super(ptr)

      @topics = {}
    end

    # Retrieve the current configuration used by Client.
    #
    # @note The returned config is read-only and tied to the lifetime of the
    #   Client. Don't try to modify or destroy the config.
    def config
      ::Kafka::FFI.rd_kafka_conf(self)
    end

    # Retrieve the Kafka handle name.
    #
    # @return [String] handle / client name
    def name
      ::Kafka::FFI.rd_kafka_name(self)
    end

    # Retrieves the Client's Cluster ID
    #
    # @note requires config `api.version.request` set to true
    def cluster_id
      id = ::Kafka::FFI.rd_kafka_clusterid(self)

      if id.null?
        return nil
      end

      id.read_string
    ensure
      if !id.null?
        ::Kafka::FFI.rd_kafka_mem_free(self, id)
      end
    end

    # Retrieves the current Controller ID as reported by broker metadata.
    #
    # @note requires config `api.version.request` set to true
    #
    # @param timeout [Integer] Maximum time to wait in milliseconds. Specify 0
    #   for a non-blocking call.
    #
    # @return [Integer] controller broker id or -1 if no ID could be retrieved
    #   before the timeout.
    def controller_id(timeout: 1000)
      ::Kafka::FFI.rd_kafka_controllerid(self, timeout)
    end

    # Create or fetch the Topic with the given name. The first time topic is
    # called for a given name, a configuration can be passed for the topic.
    #
    # @note The returned Topic is owned by the Client and will be destroyed
    #   when the Client is destroyed.
    #
    # @param name [String] Name of the topic
    # @param config [TopicConfig, nil] Config options for the topic. This can
    #   only be passed for the first call of `topic` per topic name since a
    #   Topic can only be configured at creation.
    #
    # @raise [Kafka::ResponseError] Error occurred creating the topic
    # @raise [TopicAlreadyConfiguredError] Passed a config for a topic that has
    #   already been configured.
    #
    # @return [Topic] Topic instance
    def topic(name, config = nil)
      topic = @topics[name]
      if topic
        if config
          # Make this an exception because it's probably a programmer error
          # that _should_ only primarily happen during development due to
          # misunderstanding the semantics.
          raise TopicAlreadyConfigured, "#{name} was already configured"
        end

        return topic
      end

      # @todo - Keep list of topics and manage their lifecycle?
      topic = ::Kafka::FFI.rd_kafka_topic_new(self, name, config)
      if topic.nil?
        raise ::Kafka::ResponseError, ::Kafka::FFI.rd_kafka_last_error
      end

      @topics[name] = topic
      topic
    end
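
    # --- Illustrative sketch (not part of the gem source) ---
    # The Client caches Topic handles, so #topic is cheap to call repeatedly;
    # only the first call per name may carry a config.
    #
    #   events = client.topic("events")
    #   client.topic("events").equal?(events) # => true (cached handle)
    #
    #   # Passing a config on a later call raises TopicAlreadyConfigured.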

    # Polls for events on the Client, causing callbacks to be fired. This
    # is used by both the Producer and Consumer to ensure callbacks are
    # processed in a timely manner.
    #
    # @note Do not call in a Consumer after poll_set_consumer has been called.
    #
    # @param timeout [Integer] Time in milliseconds to wait for an event.
    #   0 - Non-blocking call, returning immediately if there are no events.
    #   -1 - Wait indefinitely for an event.
    #
    # @return [Integer] Number of events served
    def poll(timeout: 250)
      ::Kafka::FFI.rd_kafka_poll(self, timeout)
    end
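
    # --- Illustrative sketch (not part of the gem source) ---
    # Callbacks only fire while something calls #poll, so a handle usually
    # gets a dedicated polling thread (the gem ships lib/kafka/poller.rb for
    # this; a hand-rolled loop is shown here).
    #
    #   running = true
    #   poller = Thread.new do
    #     client.poll(timeout: 250) while running
    #   end
    #
    #   # ... produce or consume elsewhere ...
    #
    #   running = false
    #   poller.join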

    # Pause producing or consuming of the provided list of partitions. The list
    # is updated to include any errors.
    #
    # @param list [TopicPartitionList] Set of partitions to pause
    #
    # @raise [Kafka::ResponseError] Invalid request
    #
    # @return [TopicPartitionList] List of partitions with errors set for any
    #   of the TopicPartitions that failed.
    def pause_partitions(list)
      err = ::Kafka::FFI.rd_kafka_pause_partitions(self, list)
      if err != :ok
        raise ::Kafka::ResponseError, err
      end

      list
    end

    # Resume producing or consuming of the provided list of partitions.
    #
    # @param list [TopicPartitionList] Set of partitions to unpause
    #
    # @raise [Kafka::ResponseError] Invalid request
    #
    # @return [TopicPartitionList] List of partitions with errors set for any
    #   of the TopicPartitions that failed.
    def resume_partitions(list)
      err = ::Kafka::FFI.rd_kafka_resume_partitions(self, list)
      if err != :ok
        raise ::Kafka::ResponseError, err
      end

      list
    end
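
    # --- Illustrative sketch (not part of the gem source) ---
    # Pausing and resuming takes a TopicPartitionList; the #add signature is
    # assumed from lib/kafka/ffi/topic_partition_list.rb.
    #
    #   list = Kafka::FFI::TopicPartitionList.new
    #   list.add("events", 0)
    #   list.add("events", 1)
    #
    #   client.pause_partitions(list)
    #   # ... later, checking the returned list for per-partition errors ...
    #   client.resume_partitions(list)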

    # rubocop:disable Naming/AccessorMethodName

    # Get a reference to the main librdkafka event queue. This is the queue
    # that is served by rd_kafka_poll.
    #
    # @note Application must call #destroy on this queue when finished.
    #
    # @return [Queue] Main client Event queue
    def get_main_queue
      ::Kafka::FFI.rd_kafka_queue_get_main(self)
    end

    # Get a reference to the background thread queue. The background queue is
    # automatically polled by librdkafka and is fully managed internally.
    #
    # @note The returned Queue must not be polled, forwarded, or otherwise
    #   managed by the application. It may only be used as the destination queue
    #   passed to queue-enabled APIs.
    #
    # @note The caller must call #destroy on the Queue when finished with it
    #
    # @return [Queue] Background queue
    # @return [nil] Background queue is disabled
    def get_background_queue
      ::Kafka::FFI.rd_kafka_queue_get_background(self)
    end

    # Forward librdkafka and debug logs to the specified queue. This allows the
    # application to serve log callbacks in its thread of choice.
    #
    # @param dest [Queue] Destination Queue for logs
    # @param dest [nil] Forward logs to the Client's main queue
    #
    # @raise [Kafka::ResponseError] Error setting the log Queue
    def set_log_queue(dest)
      err = ::Kafka::FFI.rd_kafka_set_log_queue(self, dest)
      if err != :ok
        raise ::Kafka::ResponseError, err
      end

      nil
    end

    # rubocop:enable Naming/AccessorMethodName
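
    # --- Illustrative sketch (not part of the gem source) ---
    # Routing logs to their own queue keeps the main poll loop focused on
    # delivery events. Queue.new(client), Queue#poll, and Queue#destroy are
    # used the same way the Admin APIs below use them.
    #
    #   log_queue = Kafka::FFI::Queue.new(client)
    #   client.set_log_queue(log_queue)
    #
    #   logger = Thread.new do
    #     loop { log_queue.poll(timeout: 500) }
    #   end
    #
    #   # On shutdown: stop the thread, then release the queue.
    #   logger.kill
    #   log_queue.destroy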

    # Query the broker for the oldest and newest offsets for the partition.
    #
    # @param topic [String] Name of the topic to get offsets for
    # @param partition [Integer] Partition of the topic to get offsets for
    #
    # @raise [Kafka::ResponseError] Error that occurred retrieving offsets
    #
    # @return [Range] Range of known offsets
    def query_watermark_offsets(topic, partition, timeout: 1000)
      low = ::FFI::MemoryPointer.new(:int64)
      high = ::FFI::MemoryPointer.new(:int64)

      err = ::Kafka::FFI.rd_kafka_query_watermark_offsets(self, topic, partition, low, high, timeout)
      if err != :ok
        raise ::Kafka::ResponseError, err
      end

      Range.new(low.read_int64, high.read_int64, false)
    end
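
    # --- Illustrative sketch (not part of the gem source) ---
    # The returned Range makes backlog math straightforward:
    #
    #   offsets = client.query_watermark_offsets("events", 0)
    #   backlog = offsets.last - offsets.first
    #   puts "partition 0 holds #{backlog} messages (#{offsets})"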

    # Look up the offsets for the given partition by timestamp. The offset for
    # each partition will be the earliest offset whose timestamp is greater
    # than or equal to the timestamp set in the TopicPartitionList.
    #
    # @param list [TopicPartitionList] List of TopicPartitions to fetch offsets
    #   for. The TopicPartitions in the list will be modified based on the
    #   results of the query.
    #
    # @raise [Kafka::ResponseError] Invalid request
    #
    # @return [TopicPartitionList] List of topics with offset set.
    def offsets_for_times(list, timeout: 1000)
      if list.nil?
        raise ArgumentError, "list cannot be nil"
      end

      err = ::Kafka::FFI.rd_kafka_offsets_for_times(self, list, timeout)
      if err != :ok
        raise ::Kafka::ResponseError, err
      end

      list
    end
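
    # --- Illustrative sketch (not part of the gem source) ---
    # Following librdkafka's convention, the timestamp to search for is
    # written into each entry's offset field (milliseconds since epoch)
    # before the call. The #add / #set_offset signatures are assumed from
    # lib/kafka/ffi/topic_partition_list.rb.
    #
    #   list = Kafka::FFI::TopicPartitionList.new
    #   list.add("events", 0)
    #
    #   ts_ms = ((Time.now - 86_400).to_f * 1000).to_i
    #   list.set_offset("events", 0, ts_ms)
    #
    #   client.offsets_for_times(list, timeout: 2000)
    #   # Each entry's offset now holds the matched offset, or an error is set.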

    # Retrieve metadata from the Kafka cluster
    #
    # @param local_only [Boolean] Only request info about locally known topics,
    #   don't query all topics in the cluster.
    # @param topic [String, Topic] Only request info about this topic.
    # @param timeout [Integer] Request timeout in milliseconds
    #
    # @raise [Kafka::ResponseError] Error retrieving metadata
    #
    # @return [Metadata] Details about the state of the cluster.
    def metadata(local_only: false, topic: nil, timeout: 1000)
      ptr = ::FFI::MemoryPointer.new(:pointer)

      # Need to use a Topic reference if asking for only information about a
      # single topic.
      if topic.is_a?(String)
        topic = self.topic(topic)
      end

      err = ::Kafka::FFI.rd_kafka_metadata(self, local_only, topic, ptr, timeout)
      if err != :ok
        raise ::Kafka::ResponseError, err
      end

      Kafka::FFI::Metadata.new(ptr.read_pointer)
    ensure
      ptr.free
    end
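
    # --- Illustrative sketch (not part of the gem source) ---
    # A quick look at cluster state; the accessor names are assumptions based
    # on lib/kafka/ffi/metadata.rb and the related *_metadata.rb files.
    #
    #   md = client.metadata(timeout: 2000)
    #   md.topics.each do |t|
    #     puts "#{t.name}: #{t.partitions.size} partitions"
    #   end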

    # List and describe client groups in the cluster.
    #
    # @note Application must call #destroy to release the list when done
    #
    # @raise [Kafka::ResponseError] Error occurred receiving group details
    #
    # @return [Kafka::FFI::GroupList] List of consumer groups in the cluster.
    def group_list(group: nil, timeout: 1000)
      list = ::FFI::MemoryPointer.new(:pointer)

      err = ::Kafka::FFI.rd_kafka_list_groups(self, group, list, timeout)
      if err != :ok
        raise ::Kafka::ResponseError, err
      end

      GroupList.new(list.read_pointer)
    ensure
      list.free
    end
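
    # --- Illustrative sketch (not part of the gem source) ---
    # The returned GroupList owns native memory, so pair the call with
    # #destroy. Accessor names are assumptions based on
    # lib/kafka/ffi/group_list.rb and group_info.rb.
    #
    #   groups = client.group_list(timeout: 2000)
    #   begin
    #     groups.groups.each { |info| puts info.group }
    #   ensure
    #     groups.destroy
    #   end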

    # Create a copy of the Client's default topic configuration object. The
    # caller is now responsible for ownership of the new config.
    #
    # @return [TopicConfig] Duplicate config
    def default_topic_conf_dup
      ::Kafka::FFI.rd_kafka_default_topic_conf_dup(self)
    end

    # Returns the current length of the outbound queue. This is the sum of
    # several factors including outbound messages, pending callbacks, waiting
    # acknowledgements, etc...
    #
    # An application should wait for the return value to reach 0 before
    # terminating to make sure outstanding messages, requests, callbacks, and
    # events are fully processed.
    #
    # @return [Integer] Number of outbound items still pending
    def outq_len
      ::Kafka::FFI.rd_kafka_outq_len(self)
    end
    alias out_queue_len outq_len

    # Adds one or more brokers to the Client's list of initial bootstrap
    # brokers. Additional brokers will be discovered automatically once the
    # Client connects to a broker by querying the broker metadata.
    #
    # @note It is preferred to set brokers through the `metadata.broker.list`
    #   or `bootstrap.servers` config options.
    #
    # @example Add multiple brokers
    #   client.brokers_add(["kafka_1:9092", "kafka_2:9092"])
    #
    # @example Add a single broker with protocol
    #   client.brokers_add("PLAINTEXT://localhost:9096")
    #
    # @see rdkafka.h rd_kafka_brokers_add
    #
    # @param brokers [String] Comma separated list of broker addresses to add.
    # @param brokers [Array<String>] Array of broker addresses to add.
    #
    # @return [Integer] number of brokers successfully added
    def brokers_add(brokers)
      if brokers.is_a?(Array)
        brokers = brokers.join(",")
      end

      ::Kafka::FFI.rd_kafka_brokers_add(self, brokers)
    end

    ## Admin APIs

    # Create topics in the cluster with the given configuration.
    #
    # Application is responsible for calling #destroy on the returned results
    # when done with the results.
    #
    # @param topics [NewTopic, Array<NewTopic>] List of topics to create on the
    #   cluster.
    # @param options [Admin::AdminOptions] Admin API request options
    # @param timeout [Integer] Time in milliseconds to wait for a reply.
    #
    # @raise [Kafka::ResponseError] An error occurred creating the topic(s)
    #
    # @return [nil] Create timed out
    # @return [Admin::Result<TopicResult>] Response from the cluster with
    #   details about the creation of the list of topics or any errors.
    def create_topics(topics, options: nil, timeout: 5000)
      topics = Array(topics)

      # CreateTopics wants an array of topics
      list = ::FFI::MemoryPointer.new(:pointer, topics.length)
      list.write_array_of_pointer(topics.map(&:pointer))

      queue = ::Kafka::FFI::Queue.new(self)

      ::Kafka::FFI.rd_kafka_CreateTopics(self, list, topics.length, options, queue)

      event = queue.poll(timeout: timeout)
      if event
        ::Kafka::FFI::Admin::Result.new(event)
      end
    ensure
      list.free
      queue.destroy
    end
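
    # --- Illustrative sketch (not part of the gem source) ---
    # Creating a topic end to end. The NewTopic constructor arguments (name,
    # partition count, replication factor) are assumed from
    # lib/kafka/ffi/admin/new_topic.rb.
    #
    #   request = Kafka::FFI::Admin::NewTopic.new("events", 3, 1)
    #
    #   result = client.create_topics(request, timeout: 10_000)
    #   if result
    #     # Inspect the per-topic TopicResults, then release the memory.
    #     result.destroy
    #   end
    #   request.destroy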

    # Delete a list of Topics from the cluster.
    #
    # Application is responsible for calling #destroy on the returned results
    # when done with the results.
    #
    # @param topics [DeleteTopic] List of topics to delete
    # @param options [Admin::AdminOptions] Admin API request options
    # @param timeout [Integer] Time to wait in milliseconds for the deletion to
    #   complete.
    #
    # @return [nil] Delete timed out
    # @return [Array<TopicResult>] Response from the cluster with details about
    #   the deletion of the list of topics or any errors.
    def delete_topics(topics, options: nil, timeout: 5000)
      topics = Array(topics)

      # DeleteTopics wants an array of topics
      list = ::FFI::MemoryPointer.new(:pointer, topics.length)
      list.write_array_of_pointer(topics.map(&:pointer))

      queue = ::Kafka::FFI::Queue.new(self)

      ::Kafka::FFI.rd_kafka_DeleteTopics(self, list, topics.length, options, queue)

      event = queue.poll(timeout: timeout)
      if event
        ::Kafka::FFI::Admin::Result.new(event)
      end
    ensure
      list.free
      queue.destroy
    end
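
    # --- Illustrative sketch (not part of the gem source) ---
    # Deletion mirrors creation, assuming DeleteTopic.new takes the topic
    # name (lib/kafka/ffi/admin/delete_topic.rb).
    #
    #   request = Kafka::FFI::Admin::DeleteTopic.new("events")
    #
    #   result = client.delete_topics(request, timeout: 10_000)
    #   result&.destroy
    #   request.destroy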

    # Create additional partition(s) for a topic on the cluster.
    #
    # Application is responsible for calling #destroy on the returned results
    # when done with the results.
    #
    # @param requests [Admin::NewPartitions] Details about partitions to create
    #   and possibly broker assignments for those partitions.
    # @param requests [Array<Admin::NewPartitions>] List of partition details.
    # @param options [Admin::AdminOptions] Admin API request options
    # @param timeout [Integer] Time to wait in milliseconds for request to
    #   complete.
    #
    # @return [nil] Request timed out
    # @return [Admin::Result<Admin::TopicResult>] Results from the cluster
    #   detailing success or failure of creating new partitions.
    def create_partitions(requests, options: nil, timeout: 5000)
      requests = Array(requests)

      # CreatePartitions wants an array of Admin::NewPartitions
      list = ::FFI::MemoryPointer.new(:pointer, requests.length)
      list.write_array_of_pointer(requests.map(&:pointer))

      # Queue to receive the result
      queue = ::Kafka::FFI::Queue.new(self)

      ::Kafka::FFI.rd_kafka_CreatePartitions(self, list, requests.length, options, queue)

      event = queue.poll(timeout: timeout)
      if event
        ::Kafka::FFI::Admin::Result.new(event)
      end
    ensure
      list.free
      queue.destroy
    end

    # Update the configuration for the specified resources. Updates may succeed
    # for a subset of the provided resources while others fail. The
    # configuration for a particular resource is updated atomically, replacing
    # values using the provided ConfigResource (set via set_config) and
    # reverting any unspecified config options to their default values.
    #
    # Application is responsible for calling #destroy on the returned results
    # when done with the results.
    #
    # @see rdkafka.h rd_kafka_AlterConfigs
    #
    # @note AlterConfigs will replace all existing configuration for the given
    #   resources, reverting all unspecified config options to their default
    #   values.
    #
    # @note At most one :broker type ConfigResource can be specified per call
    #   to alter_configs since the changes must be sent to the broker specified
    #   in the resource.
    #
    # @param resources [Admin::ConfigResource] Resource to alter configs for.
    # @param resources [Array<Admin::ConfigResource>] List of resources with
    #   their configs to update. At most one of type :broker is allowed per
    #   call.
    # @param options [Admin::AdminOptions] Admin API request options
    # @param timeout [Integer] Time to wait in milliseconds for request to
    #   complete.
    #
    # @return [nil] Request timed out
    # @return [Array<Admin::ConfigResource>]
    def alter_configs(resources, options: nil, timeout: 5000)
      resources = Array(resources)

      # AlterConfigs wants an array of Admin::ConfigResource
      list = ::FFI::MemoryPointer.new(:pointer, resources.length)
      list.write_array_of_pointer(resources.map(&:pointer))

      # Queue to receive the result
      queue = ::Kafka::FFI::Queue.new(self)

      ::Kafka::FFI.rd_kafka_AlterConfigs(self, list, resources.length, options, queue)

      event = queue.poll(timeout: timeout)
      if event
        ::Kafka::FFI::Admin::Result.new(event)
      end
    ensure
      list.free
      queue.destroy
    end

    # Get configuration for the specified resources.
    #
    # Application is responsible for calling #destroy on the returned results
    # when done with the results.
    #
    # @see rdkafka.h rd_kafka_DescribeConfigs
    #
    # @param resources [Admin::ConfigResource] Resource to request
    #   configuration details for.
    # @param resources [Array<Admin::ConfigResource>] List of resources to get
    #   config details for.
    # @param options [Admin::AdminOptions] Admin API request options
    # @param timeout [Integer] Time to wait in milliseconds for request to
    #   complete.
    #
    # @return [nil] Request timed out
    # @return [Admin::Result<Admin::ConfigResource>] Configurations for the
    #   requested resources.
    def describe_configs(resources, options: nil, timeout: 5000)
      resources = Array(resources)

      # DescribeConfigs wants an array of Admin::ConfigResource pointers
      list = ::FFI::MemoryPointer.new(:pointer, resources.length)
      list.write_array_of_pointer(resources.map(&:pointer))

      # Queue to receive the result
      queue = ::Kafka::FFI::Queue.new(self)

      ::Kafka::FFI.rd_kafka_DescribeConfigs(self, list, resources.length, options, queue)

      event = queue.poll(timeout: timeout)
      if event
        ::Kafka::FFI::Admin::Result.new(event)
      end
    ensure
      list.free
      queue.destroy
    end
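
    # --- Illustrative sketch (not part of the gem source) ---
    # Reading a topic's effective configuration. The ConfigResource
    # constructor (resource type plus name) is assumed from
    # lib/kafka/ffi/admin/config_resource.rb.
    #
    #   resource = Kafka::FFI::Admin::ConfigResource.new(:topic, "events")
    #
    #   result = client.describe_configs(resource, timeout: 10_000)
    #   result&.destroy
    #   resource.destroy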

    # Release all of the resources used by this Client. This may block until
    # the instance has finished its shutdown procedure. Always make sure to
    # destroy any associated resources and cleanly shut down the instance
    # before calling destroy.
    def destroy
      if !pointer.null?
        # Clean up any cached topics before destroying the Client.
        @topics.each do |_, topic|
          ::Kafka::FFI.rd_kafka_topic_destroy(topic)
        end
        @topics.clear

        ::Kafka::FFI.rd_kafka_destroy(self)
      end
    end
  end
end
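
# --- Illustrative sketch (not part of the gem source) ---
# Tying #outq_len and #destroy together: a graceful producer shutdown waits
# for the outbound queue to drain before releasing the handle. The deadline
# handling is illustrative.
#
#   deadline = Time.now + 30
#   while client.outq_len > 0 && Time.now < deadline
#     client.poll(timeout: 100)
#   end
#
#   client.destroy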