kafka 0.5.0

Files changed (56)
  1. checksums.yaml +7 -0
  2. data/.gitignore +14 -0
  3. data/.rubocop.yml +210 -0
  4. data/.travis.yml +45 -0
  5. data/CHANGELOG.md +3 -0
  6. data/CODE_OF_CONDUCT.md +74 -0
  7. data/Gemfile +5 -0
  8. data/LICENSE.txt +21 -0
  9. data/README.md +182 -0
  10. data/Rakefile +69 -0
  11. data/examples/consumer.rb +55 -0
  12. data/examples/producer.rb +46 -0
  13. data/ext/Rakefile +69 -0
  14. data/kafka.gemspec +39 -0
  15. data/lib/kafka/admin.rb +141 -0
  16. data/lib/kafka/config.rb +145 -0
  17. data/lib/kafka/consumer.rb +87 -0
  18. data/lib/kafka/error.rb +44 -0
  19. data/lib/kafka/ffi/admin/admin_options.rb +121 -0
  20. data/lib/kafka/ffi/admin/config_entry.rb +97 -0
  21. data/lib/kafka/ffi/admin/config_resource.rb +101 -0
  22. data/lib/kafka/ffi/admin/delete_topic.rb +19 -0
  23. data/lib/kafka/ffi/admin/new_partitions.rb +77 -0
  24. data/lib/kafka/ffi/admin/new_topic.rb +91 -0
  25. data/lib/kafka/ffi/admin/result.rb +66 -0
  26. data/lib/kafka/ffi/admin/topic_result.rb +32 -0
  27. data/lib/kafka/ffi/admin.rb +16 -0
  28. data/lib/kafka/ffi/broker_metadata.rb +32 -0
  29. data/lib/kafka/ffi/client.rb +640 -0
  30. data/lib/kafka/ffi/config.rb +382 -0
  31. data/lib/kafka/ffi/consumer.rb +342 -0
  32. data/lib/kafka/ffi/error.rb +25 -0
  33. data/lib/kafka/ffi/event.rb +215 -0
  34. data/lib/kafka/ffi/group_info.rb +75 -0
  35. data/lib/kafka/ffi/group_list.rb +27 -0
  36. data/lib/kafka/ffi/group_member_info.rb +52 -0
  37. data/lib/kafka/ffi/message/header.rb +205 -0
  38. data/lib/kafka/ffi/message.rb +205 -0
  39. data/lib/kafka/ffi/metadata.rb +58 -0
  40. data/lib/kafka/ffi/opaque.rb +81 -0
  41. data/lib/kafka/ffi/opaque_pointer.rb +73 -0
  42. data/lib/kafka/ffi/partition_metadata.rb +61 -0
  43. data/lib/kafka/ffi/producer.rb +144 -0
  44. data/lib/kafka/ffi/queue.rb +65 -0
  45. data/lib/kafka/ffi/topic.rb +32 -0
  46. data/lib/kafka/ffi/topic_config.rb +126 -0
  47. data/lib/kafka/ffi/topic_metadata.rb +42 -0
  48. data/lib/kafka/ffi/topic_partition.rb +43 -0
  49. data/lib/kafka/ffi/topic_partition_list.rb +167 -0
  50. data/lib/kafka/ffi.rb +624 -0
  51. data/lib/kafka/poller.rb +28 -0
  52. data/lib/kafka/producer/delivery_report.rb +120 -0
  53. data/lib/kafka/producer.rb +127 -0
  54. data/lib/kafka/version.rb +8 -0
  55. data/lib/kafka.rb +11 -0
  56. metadata +159 -0
@@ -0,0 +1,61 @@
+ # frozen_string_literal: true
+
+ module Kafka::FFI
+   class PartitionMetadata < ::FFI::Struct
+     layout(
+       :id,          :int32,
+       :err,         :error_code,
+       :leader,      :int32,
+       :replica_cnt, :int,
+       :replicas,    :pointer, # *int32_t
+       :isr_cnt,     :int,
+       :isrs,        :pointer  # *int32_t
+     )
+
+     # Returns the Partition's ID
+     #
+     # @return [Integer] Partition ID
+     def id
+       self[:id]
+     end
+
+     # Returns the error for the Partition as reported by the Broker.
+     #
+     # @return [nil] No error reported by Broker
+     # @return [Kafka::ResponseError] Error reported by Broker
+     def error
+       if self[:err] != :ok
+         return ::Kafka::ResponseError.new(self[:err])
+       end
+     end
+
+     # ID of the Leader Broker for this Partition
+     #
+     # @return [Integer] Leader Broker ID
+     def leader
+       self[:leader]
+     end
+
+     # Returns the Broker IDs of the Brokers with replicas of this Partition.
+     #
+     # @return [Array<Integer>] IDs for Brokers with replicas
+     def replicas
+       if self[:replica_cnt] == 0 || self[:replicas].null?
+         return []
+       end
+
+       self[:replicas].read_array_of_int32(self[:replica_cnt])
+     end
+
+     # Returns the Broker IDs of the in-sync replicas for this Partition.
+     #
+     # @return [Array<Integer>] IDs of Brokers that have in-sync replicas.
+     def in_sync_replicas
+       if self[:isr_cnt] == 0 || self[:isrs].null?
+         return []
+       end
+
+       self[:isrs].read_array_of_int32(self[:isr_cnt])
+     end
+   end
+ end
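
The `#replicas` and `#in_sync_replicas` readers above use a common Ruby-FFI pattern: librdkafka hands back an element count plus a pointer to a C array of `int32_t`, and `FFI::Pointer#read_array_of_int32` converts it to a Ruby Array. A standalone sketch of that pattern (the struct itself is only ever populated by librdkafka, so the pointer here is simulated):

```ruby
require "ffi"

# Simulate what librdkafka hands back: an int32_t* plus an element count.
ids = ::FFI::MemoryPointer.new(:int32, 3)
ids.write_array_of_int32([101, 102, 103])
count = 3

# Same null/zero guard as #replicas above, followed by a bulk read.
broker_ids = (count == 0 || ids.null?) ? [] : ids.read_array_of_int32(count)
p broker_ids # => [101, 102, 103]
```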
@@ -0,0 +1,144 @@
+ # frozen_string_literal: true
+
+ require "ffi"
+ require "kafka/ffi/client"
+
+ module Kafka::FFI
+   class Producer < Kafka::FFI::Client
+     def self.new(config = nil)
+       super(:producer, config)
+     end
+
+     # Produce and send a single message to the Kafka cluster.
+     #
+     # @param topic [Topic, String] Topic (or name of topic) to receive the
+     #   message.
+     #
+     # @param payload [String, nil] Content of the message.
+     #
+     # @param key [String] Message partitioning key
+     #
+     # @param partition [nil, -1] Use the configured partitioner to determine
+     #   which partition to publish the message to.
+     # @param partition [Integer] Partition of the topic that should receive
+     #   the message.
+     #
+     # @param headers [Kafka::FFI::Message::Header] Headers to set on the message.
+     #
+     # @param timestamp [Time] Timestamp as Time
+     # @param timestamp [Integer] Timestamp as milliseconds since unix epoch
+     # @param timestamp [nil] Timestamp is assigned by librdkafka
+     #
+     # @param opaque [Opaque] Reference to an object owned by the application
+     #   which will be available as Message#opaque in callbacks. The
+     #   application MUST call #free on the Opaque once the final callback has
+     #   been triggered to avoid leaking memory.
+     def produce(topic, payload, key: nil, partition: nil, headers: nil, timestamp: nil, opaque: nil)
+       args = [
+         # Ensure librdkafka copies the payload into its own memory since the
+         # string backing it could be garbage collected.
+         :vtype, :msgflags, :int, Kafka::FFI::RD_KAFKA_MSG_F_COPY,
+       ]
+
+       if payload
+         args.append(:vtype, :value, :buffer_in, payload, :size_t, payload.bytesize)
+       end
+
+       # The partitioning key is optional
+       if key
+         args.append(:vtype, :key, :buffer_in, key, :size_t, key.bytesize)
+       end
+
+       # Partition will default to being auto assigned by the configured
+       # partitioning strategy.
+       if partition
+         args.append(:vtype, :partition, :int32, partition)
+       end
+
+       # Headers are optional and can be passed as either a reference to a
+       # Header object or individual key/value pairs. This only supports the
+       # Header object because supporting key + value pairs adds complexity.
+       if headers
+         args.append(:vtype, :headers, :pointer, headers.pointer)
+       end
+
+       case topic
+       when Topic
+         args.append(:vtype, :rkt, :pointer, topic.pointer)
+       when String
+         args.append(:vtype, :topic, :string, topic)
+       else
+         raise ArgumentError, "topic must be either a Topic or String"
+       end
+
+       if opaque
+         args.append(:vtype, :opaque, :pointer, opaque.pointer)
+       end
+
+       if timestamp
+         ts =
+           case timestamp
+           # Convert fractional seconds to milliseconds (nsec / 1_000_000).
+           when Time then ((timestamp.to_i * 1000) + (timestamp.nsec / 1_000_000))
+           when Integer then timestamp
+           else
+             raise ArgumentError, "timestamp must be nil, a Time, or an Integer"
+           end
+
+         args.append(:vtype, :timestamp, :int64, ts)
+       end
+
+       # Add the sentinel value to denote the end of the argument list.
+       args.append(:vtype, :end)
+
+       err = ::Kafka::FFI.rd_kafka_producev(self, *args)
+       if err != :ok
+         # The only documented error is RD_KAFKA_RESP_ERR__CONFLICT should
+         # both HEADER and HEADERS keys be passed in. There is no way for
+         # HEADER to be passed to producev based on the above implementation.
+         raise ::Kafka::ResponseError, err
+       end
+
+       nil
+     end
+
+     # Wait until all outstanding produce requests are completed. This should
+     # typically be done prior to destroying a producer to ensure all queued
+     # and in-flight requests are completed before terminating.
+     #
+     # @raise [Kafka::ResponseError] Timeout was reached before all
+     #   outstanding requests were completed.
+     def flush(timeout: 1000)
+       err = ::Kafka::FFI.rd_kafka_flush(self, timeout)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       nil
+     end
+
+     # Purge messages currently handled by the Producer. By default this will
+     # purge all queued and in-flight messages asynchronously.
+     #
+     # @param queued [Boolean] Purge any queued messages
+     # @param inflight [Boolean] Purge messages that are in flight
+     # @param blocking [Boolean] When true, don't wait for the background
+     #   thread queue purging to finish.
+     #
+     # @raise [Kafka::ResponseError] Error occurred purging state. This is
+     #   unlikely as the documented errors are not possible with this
+     #   implementation.
+     def purge(queued: true, inflight: true, blocking: false)
+       mask = 0
+       mask |= RD_KAFKA_PURGE_F_QUEUE if queued
+       mask |= RD_KAFKA_PURGE_F_INFLIGHT if inflight
+       mask |= RD_KAFKA_PURGE_F_NON_BLOCKING if blocking
+
+       err = ::Kafka::FFI.rd_kafka_purge(self, mask)
+       if err != :ok
+         raise ::Kafka::ResponseError, err
+       end
+
+       nil
+     end
+   end
+ end
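
For orientation, a minimal produce-and-flush sketch against the API above. Here `config` stands in for a config object built via this gem's Config classes (listed in the file set above but not shown in this hunk), and the topic name is illustrative:

```ruby
require "kafka"

producer = Kafka::FFI::Producer.new(config)

producer.produce(
  "events",            # topic by name; a Topic object uses the :rkt branch
  '{"hello":"world"}', # payload is copied (RD_KAFKA_MSG_F_COPY)
  key: "user-42",      # optional partitioning key
  timestamp: Time.now  # converted to milliseconds since the epoch
)

# Wait for delivery of queued and in-flight messages; raises
# Kafka::ResponseError if the 5 second timeout is exceeded.
producer.flush(timeout: 5000)
```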
@@ -0,0 +1,65 @@
+ # frozen_string_literal: true
+
+ require "ffi"
+ require "kafka/ffi/opaque_pointer"
+
+ module Kafka::FFI
+   class Queue < OpaquePointer
+     def self.new(client)
+       ::Kafka::FFI.rd_kafka_queue_new(client)
+     end
+
+     # Poll a queue for an event, waiting up to timeout milliseconds. Takes an
+     # optional block which will handle destroying the event at the completion
+     # of the block.
+     #
+     # @param timeout [Integer] Max time to wait in milliseconds for an Event.
+     #
+     # @yield [event]
+     # @yieldparam event [Event] Polled event
+     #
+     # @return [nil] No event was available within the timeout
+     # @return [Event] Event polled from the queue. The application is
+     #   responsible for calling #destroy on the Event when finished with it.
+     # @return When passed a block, the result returned by the block
+     def poll(timeout: 1000)
+       event = ::Kafka::FFI.rd_kafka_queue_poll(self, timeout)
+       if event.nil?
+         return nil
+       end
+
+       if block_given?
+         begin
+           yield(event)
+         ensure
+           event.destroy
+         end
+       else
+         event
+       end
+     end
+
+     # Forward events meant for this Queue to the destination Queue instead.
+     #
+     # @param dest [Queue] Destination queue to forward events to.
+     # @param dest [nil] Remove forwarding for this queue.
+     def forward(dest)
+       ::Kafka::FFI.rd_kafka_queue_forward(self, dest)
+     end
+
+     # Retrieve the current number of elements in the queue.
+     #
+     # @return [Integer] Number of elements in the queue
+     def length
+       ::Kafka::FFI.rd_kafka_queue_length(self)
+     end
+
+     # Release the application's reference on the queue, possibly destroying
+     # it and releasing its resources.
+     def destroy
+       if !pointer.null?
+         ::Kafka::FFI.rd_kafka_queue_destroy(self)
+       end
+     end
+   end
+ end
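
A sketch of the block form of `#poll`, which destroys each event for you; `client` is assumed to be an existing `Kafka::FFI::Client`:

```ruby
queue = Kafka::FFI::Queue.new(client)

# Drain whatever is currently queued. Each yielded event is destroyed by the
# ensure inside #poll, so nothing leaks even if the block raises.
while queue.length > 0
  queue.poll(timeout: 100) do |event|
    puts "got event: #{event.inspect}"
  end
end

queue.destroy
```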
@@ -0,0 +1,32 @@
+ # frozen_string_literal: true
+
+ require "kafka/ffi/opaque_pointer"
+
+ module Kafka::FFI
+   class Topic < OpaquePointer
+     # Retrieve the name of the topic
+     #
+     # @return [String] Name of the topic
+     def name
+       ::Kafka::FFI.rd_kafka_topic_name(self)
+     end
+
+     # Seek the consumer to the given offset for a partition of this topic.
+     #
+     # @param partition [Integer] Partition to set offset for
+     # @param offset [Integer] Absolute or logical offset
+     # @param timeout [Integer] Maximum time to wait in milliseconds
+     #
+     # @return [Boolean] True when the consumer's offset was changed
+     def seek(partition, offset, timeout: 1000)
+       ::Kafka::FFI.rd_kafka_seek(self, partition, offset, timeout)
+     end
+
+     # Release the application's hold on the backing topic in librdkafka.
+     def destroy
+       if !pointer.null?
+         ::Kafka::FFI.rd_kafka_topic_destroy(self)
+       end
+     end
+   end
+ end
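
A hypothetical seek call. `RD_KAFKA_OFFSET_BEGINNING` is librdkafka's logical start-of-partition offset (the raw value -2); whether this gem exposes it as a Ruby constant is an assumption here:

```ruby
# Rewind partition 0 to the start of the log, waiting up to 2 seconds.
# If the constant is not exposed by the gem, librdkafka's raw value -2 works.
topic.seek(0, Kafka::FFI::RD_KAFKA_OFFSET_BEGINNING, timeout: 2000)
```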
@@ -0,0 +1,126 @@
+ # frozen_string_literal: true
+
+ require "ffi"
+ require "kafka/ffi/opaque_pointer"
+
+ module Kafka::FFI
+   # TopicConfig can be passed to Topic.new to configure how the client
+   # interacts with the Topic.
+   class TopicConfig < OpaquePointer
+     def self.new
+       Kafka::FFI.rd_kafka_topic_conf_new
+     end
+
+     # Set the config option at `key` to `value`. The configuration options
+     # match those used by librdkafka (and the Java client).
+     #
+     # @see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+     #
+     # @param key [String] Configuration key
+     # @param value [String] Value to set
+     #
+     # @raise [Kafka::FFI::UnknownConfigKey]
+     # @raise [Kafka::FFI::InvalidConfigValue]
+     def set(key, value)
+       key = key.to_s
+       value = value.to_s
+
+       error = ::FFI::MemoryPointer.new(:char, 512)
+       result = ::Kafka::FFI.rd_kafka_topic_conf_set(self, key, value, error, error.size)
+
+       # See config_result enum in ffi.rb
+       case result
+       when :ok
+         nil
+       when :unknown
+         raise Kafka::FFI::UnknownConfigKey.new(key, value, error.read_string)
+       when :invalid
+         raise Kafka::FFI::InvalidConfigValue.new(key, value, error.read_string)
+       end
+     ensure
+       error.free if error
+     end
+
+     # Get the current config value for the given key.
+     #
+     # @param key [String] Config key to fetch the setting for.
+     #
+     # @return [String, :unknown] Value for the key or :unknown if not already
+     #   set.
+     def get(key)
+       key = key.to_s
+
+       # Will contain the size of the value at key
+       size = ::FFI::MemoryPointer.new(:size_t)
+
+       # Make an initial request for the size of buffer we need to allocate.
+       # When trying to make a guess at the potential size the code would often
+       # segfault due to rd_kafka_conf_get reallocating the buffer.
+       err = ::Kafka::FFI.rd_kafka_topic_conf_get(self, key, ::FFI::Pointer::NULL, size)
+       if err != :ok
+         return err
+       end
+
+       # Allocate a string long enough to contain the whole value.
+       value = ::FFI::MemoryPointer.new(:char, size.read(:size_t))
+       err = ::Kafka::FFI.rd_kafka_topic_conf_get(self, key, value, size)
+       if err != :ok
+         return err
+       end
+
+       value.read_string
+     ensure
+       size.free if size
+       value.free if value
+     end
+
+     # Duplicate the current config
+     #
+     # @return [TopicConfig] Duplicated config
+     def dup
+       ::Kafka::FFI.rd_kafka_topic_conf_dup(self)
+     end
+
+     # Sets a custom partitioner callback that is called for each message to
+     # determine which partition to publish the message to.
+     #
+     # @example Random partitioner
+     #   set_partitioner_cb do |_topic, _key, parts|
+     #     rand(parts)
+     #   end
+     #
+     # @see "partitioner" config option for predefined strategies
+     #
+     # @yield [topic, key, partition_count]
+     # @yieldparam topic [Topic] Topic the message is being published to
+     # @yieldparam key [String] Partitioning key provided when publishing
+     # @yieldparam partition_count [Integer] Number of partitions the topic has
+     # @yieldreturn [Integer] The partition to publish the message to
+     def set_partitioner_cb
+       if !block_given?
+         raise ArgumentError, "set_partitioner_cb must be called with a block"
+       end
+
+       # @todo How do we guarantee the block does not get garbage collected?
+       # @todo Support opaque pointers?
+       cb = ::FFI::Function.new(:int, [:pointer, :string, :size_t, :int32, :pointer, :pointer]) do |topic, key, _, partitions, _, _|
+         topic = Topic.new(topic)
+
+         yield(topic, key, partitions)
+       end
+
+       ::Kafka::FFI.rd_kafka_topic_conf_set_partitioner_cb(self, cb)
+     end
+
+     # Free all resources used by the topic config.
+     #
+     # @note Never call #destroy on a Config that has been passed to
+     #   Kafka::FFI.rd_kafka_topic_new since the handle will take ownership of
+     #   the config.
+     def destroy
+       if !pointer.null?
+         ::Kafka::FFI.rd_kafka_topic_conf_destroy(self)
+       end
+     end
+   end
+ end
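
A usage sketch for the config above; `request.required.acks` is a topic-level key from librdkafka's CONFIGURATION.md, chosen here for illustration:

```ruby
conf = Kafka::FFI::TopicConfig.new

conf.set("request.required.acks", "-1")
conf.get("request.required.acks") # => "-1"
conf.get("no.such.key")           # => :unknown

# Custom partitioner: hash the key onto the available partitions, defaulting
# to partition 0 when no key was provided. Must return a valid partition.
conf.set_partitioner_cb do |_topic, key, partition_count|
  key ? key.hash % partition_count : 0
end
```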
@@ -0,0 +1,42 @@
+ # frozen_string_literal: true
+
+ module Kafka::FFI
+   class TopicMetadata < ::FFI::Struct
+     layout(
+       :topic,         :string,
+       :partition_cnt, :int,
+       :partitions,    :pointer, # *rd_kafka_metadata_partition
+       :err,           :error_code
+     )
+
+     # Returns the name of the topic
+     #
+     # @return [String] Name of the topic
+     def topic
+       self[:topic]
+     end
+     alias name topic
+
+     # Returns any Broker reported errors.
+     #
+     # @return [nil] Broker reported no errors for the topic
+     # @return [Kafka::ResponseError] Error reported by Broker
+     def error
+       if self[:err] != :ok
+         return ::Kafka::ResponseError.new(self[:err])
+       end
+     end
+
+     # Returns the set of PartitionMetadata for the Topic
+     #
+     # @return [Array<PartitionMetadata>] Details about individual Topic
+     #   Partitions.
+     def partitions
+       ptr = self[:partitions]
+
+       self[:partition_cnt].times.map do |i|
+         PartitionMetadata.new(ptr + (i * PartitionMetadata.size))
+       end
+     end
+   end
+ end
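
A sketch tying TopicMetadata to the PartitionMetadata struct from earlier in this diff; `tm` is assumed to come from the metadata APIs in lib/kafka/ffi/metadata.rb (listed above, not shown in this hunk):

```ruby
if (err = tm.error)
  warn "#{tm.name}: broker reported #{err}"
else
  tm.partitions.each do |pm|
    # A partition is under-replicated when its ISR set is smaller than its
    # replica set.
    flag = pm.in_sync_replicas.size < pm.replicas.size ? " UNDER-REPLICATED" : ""
    puts "#{tm.name}[#{pm.id}] leader=#{pm.leader}#{flag}"
  end
end
```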
@@ -0,0 +1,43 @@
+ # frozen_string_literal: true
+
+ require "ffi"
+
+ module Kafka::FFI
+   class TopicPartition < ::FFI::Struct
+     layout(
+       :topic,         :string,
+       :partition,     :int32,
+       :offset,        :int64,
+       :metadata,      :pointer,
+       :metadata_size, :size_t,
+       :opaque,        :pointer,
+       :err,           :error_code,
+       :_private,      :pointer # DO NOT TOUCH. Internal to librdkafka.
+     )
+
+     # @return [nil] The TopicPartition does not have an error set
+     # @return [Kafka::ResponseError] Error related to the action the
+     #   TopicPartition (or TopicPartitionList) was passed to.
+     def error
+       if self[:err] != :ok
+         ::Kafka::ResponseError.new(self[:err])
+       end
+     end
+
+     # @return [String] Name of the topic
+     def topic
+       self[:topic]
+     end
+
+     # @return [Integer] Partition number
+     def partition
+       self[:partition]
+     end
+
+     # @return [Integer] Known offset for the consumer group for topic +
+     #   partition.
+     def offset
+       self[:offset]
+     end
+   end
+ end
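
These readers are most often used on structs coming out of a TopicPartitionList (the next file in this diff); a sketch, assuming `list` is such a list, e.g. the result of a committed-offsets lookup:

```ruby
# Print per-partition state for every element in the list.
list.elements.each do |tp|
  status = tp.error ? "error=#{tp.error}" : "offset=#{tp.offset}"
  puts "#{tp.topic}[#{tp.partition}] #{status}"
end
```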
@@ -0,0 +1,167 @@
+ # frozen_string_literal: true
+
+ require "ffi"
+ require "kafka/ffi/topic_partition"
+
+ module Kafka::FFI
+   class TopicPartitionList < ::FFI::Struct
+     layout(
+       :cnt,   :int,
+       :size,  :int,
+       :elems, :pointer
+     )
+
+     # Initializes a new TopicPartitionList with an initial capacity to hold
+     # `count` items.
+     #
+     # @param count [Integer] Initial capacity
+     #
+     # @return [TopicPartitionList] An empty TopicPartitionList
+     def self.new(count = 0)
+       # Handle initialization through FFI. This will be called by
+       # rd_kafka_topic_partition_list_new.
+       if count.is_a?(::FFI::Pointer)
+         return super(count)
+       end
+
+       ::Kafka::FFI.rd_kafka_topic_partition_list_new(count)
+     end
+
+     # Returns the number of elements in the TopicPartitionList.
+     #
+     # @return [Integer] Number of elements
+     def size
+       self[:cnt]
+     end
+
+     # Returns true when the TopicPartitionList is empty
+     #
+     # @return [Boolean] True when the list is empty
+     def empty?
+       size == 0
+     end
+
+     # Add a topic + partition combination to the list
+     #
+     # @param topic [String] Name of the topic to add
+     # @param partition [Integer] Partition of the topic to add to the list.
+     # @param partition [-1] Add all partitions of the topic to the list.
+     #
+     # @return [TopicPartition] TopicPartition for the combination
+     def add(topic, partition = -1)
+       ::Kafka::FFI.rd_kafka_topic_partition_list_add(self, topic.to_s, partition)
+     end
+
+     # Add a range of TopicPartitions to the list.
+     #
+     # @param topic [String] Name of the topic to add
+     # @param range_or_lower [Range, Integer] Either a Range specifying the
+     #   range of partitions or the lower bound for the range. When providing a
+     #   Range any value for upper is ignored.
+     # @param upper [Integer, nil] The upper bound of the set of partitions
+     #   (inclusive). Required unless range_or_lower is a Range.
+     def add_range(topic, range_or_lower, upper = nil)
+       lower = range_or_lower
+
+       # Allows passing a Range for convenience.
+       if range_or_lower.is_a?(Range)
+         lower = range_or_lower.min
+         upper = range_or_lower.max
+       elsif upper.nil?
+         raise ArgumentError, "upper was nil but must be provided when lower is not a Range"
+       end
+
+       ::Kafka::FFI.rd_kafka_topic_partition_list_add_range(self, topic.to_s, lower.to_i, upper.to_i)
+     end
+
+     # Remove a TopicPartition by partition
+     #
+     # @param topic [String] Name of the topic to remove
+     # @param partition [Integer] Partition to remove
+     #
+     # @return [Boolean] True when the partition was found and removed
+     def del(topic, partition)
+       ::Kafka::FFI.rd_kafka_topic_partition_list_del(self, topic.to_s, partition) == 1
+     end
+
+     # Remove a TopicPartition by index
+     #
+     # @param idx [Integer] Index in elements to remove
+     #
+     # @return [Boolean] True when the TopicPartition was found and removed
+     def del_by_idx(idx)
+       ::Kafka::FFI.rd_kafka_topic_partition_list_del_by_idx(self, idx) == 1
+     end
+
+     alias delete del
+     alias delete_by_index del_by_idx
+
+     # Duplicate the TopicPartitionList as a new TopicPartitionList that is
+     # identical to the current one.
+     #
+     # @return [TopicPartitionList] New clone of this TopicPartitionList
+     def copy
+       ::Kafka::FFI.rd_kafka_topic_partition_list_copy(self)
+     end
+
+     # Set the consumed offset for topic and partition
+     #
+     # @param topic [String] Name of the topic to set the offset for
+     # @param partition [Integer] Partition to set the offset for
+     # @param offset [Integer] Offset of the topic+partition to set
+     #
+     # @return [Integer] 0 for success otherwise rd_kafka_resp_err_t code
+     def set_offset(topic, partition, offset)
+       ::Kafka::FFI.rd_kafka_topic_partition_list_set_offset(self, topic, partition, offset)
+     end
+
+     # Sort the TopicPartitionList. Sort can take a block that should implement
+     # a standard comparison function that returns -1, 0, or 1 depending on if
+     # left is less than, equal to, or greater than the right argument.
+     #
+     # @example Custom sorting function
+     #   sort do |left, right|
+     #     left.partition <=> right.partition
+     #   end
+     def sort(&block)
+       ::Kafka::FFI.rd_kafka_topic_partition_list_sort(self, block, nil)
+     end
+
+     # Find the TopicPartition in the set for the given topic + partition. Will
+     # return nil if the list does not include the combination.
+     #
+     # @param topic [String] Name of the topic
+     # @param partition [Integer] Topic partition
+     #
+     # @return [TopicPartition, nil] The TopicPartition for the topic +
+     #   partition combination.
+     def find(topic, partition)
+       result = ::Kafka::FFI.rd_kafka_topic_partition_list_find(self, topic, partition)
+
+       if result.null?
+         return nil
+       end
+
+       result
+     end
+
+     # Retrieves the set of TopicPartitions for the list.
+     #
+     # @return [Array<TopicPartition>]
+     def elements
+       self[:cnt].times.map do |i|
+         TopicPartition.new(self[:elems] + (i * TopicPartition.size))
+       end
+     end
+
+     # Free all resources used by the list and the list itself. Usage is
+     # dependent on the semantics of librdkafka, so make sure to only call it
+     # on TopicPartitionLists that are not owned by other objects. Generally,
+     # if you constructed the object it should be safe to destroy.
+     def destroy
+       if !null?
+         ::Kafka::FFI.rd_kafka_topic_partition_list_destroy(self)
+       end
+     end
+   end
+ end
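
Finally, a self-contained sketch of building and tearing down a list with the methods above; since we construct the list ourselves, calling `#destroy` is safe:

```ruby
list = Kafka::FFI::TopicPartitionList.new(4)

list.add("events", 0)             # single topic + partition
list.add_range("metrics", 0..2)   # Range form; adds partitions 0, 1, 2
list.set_offset("events", 0, 42)

list.size                         # => 4
tp = list.find("events", 0)
tp.offset if tp                   # => 42
list.delete("metrics", 1)         # => true (alias of #del)

list.destroy
```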