ruby-kafka-aws-iam 1.4.1

Files changed (145)
  1. checksums.yaml +7 -0
  2. data/.circleci/config.yml +393 -0
  3. data/.github/workflows/stale.yml +19 -0
  4. data/.gitignore +13 -0
  5. data/.readygo +1 -0
  6. data/.rspec +3 -0
  7. data/.rubocop.yml +44 -0
  8. data/.ruby-version +1 -0
  9. data/.yardopts +3 -0
  10. data/CHANGELOG.md +314 -0
  11. data/Gemfile +5 -0
  12. data/ISSUE_TEMPLATE.md +23 -0
  13. data/LICENSE.txt +176 -0
  14. data/Procfile +2 -0
  15. data/README.md +1356 -0
  16. data/Rakefile +8 -0
  17. data/benchmarks/message_encoding.rb +23 -0
  18. data/bin/console +8 -0
  19. data/bin/setup +5 -0
  20. data/docker-compose.yml +39 -0
  21. data/examples/consumer-group.rb +35 -0
  22. data/examples/firehose-consumer.rb +64 -0
  23. data/examples/firehose-producer.rb +54 -0
  24. data/examples/simple-consumer.rb +34 -0
  25. data/examples/simple-producer.rb +42 -0
  26. data/examples/ssl-producer.rb +44 -0
  27. data/lib/kafka/async_producer.rb +297 -0
  28. data/lib/kafka/broker.rb +217 -0
  29. data/lib/kafka/broker_info.rb +16 -0
  30. data/lib/kafka/broker_pool.rb +41 -0
  31. data/lib/kafka/broker_uri.rb +43 -0
  32. data/lib/kafka/client.rb +838 -0
  33. data/lib/kafka/cluster.rb +513 -0
  34. data/lib/kafka/compression.rb +45 -0
  35. data/lib/kafka/compressor.rb +86 -0
  36. data/lib/kafka/connection.rb +228 -0
  37. data/lib/kafka/connection_builder.rb +33 -0
  38. data/lib/kafka/consumer.rb +642 -0
  39. data/lib/kafka/consumer_group/assignor.rb +63 -0
  40. data/lib/kafka/consumer_group.rb +231 -0
  41. data/lib/kafka/crc32_hash.rb +15 -0
  42. data/lib/kafka/datadog.rb +420 -0
  43. data/lib/kafka/digest.rb +22 -0
  44. data/lib/kafka/fetch_operation.rb +115 -0
  45. data/lib/kafka/fetched_batch.rb +58 -0
  46. data/lib/kafka/fetched_batch_generator.rb +120 -0
  47. data/lib/kafka/fetched_message.rb +48 -0
  48. data/lib/kafka/fetched_offset_resolver.rb +48 -0
  49. data/lib/kafka/fetcher.rb +224 -0
  50. data/lib/kafka/gzip_codec.rb +34 -0
  51. data/lib/kafka/heartbeat.rb +25 -0
  52. data/lib/kafka/instrumenter.rb +38 -0
  53. data/lib/kafka/interceptors.rb +33 -0
  54. data/lib/kafka/lz4_codec.rb +27 -0
  55. data/lib/kafka/message_buffer.rb +87 -0
  56. data/lib/kafka/murmur2_hash.rb +17 -0
  57. data/lib/kafka/offset_manager.rb +259 -0
  58. data/lib/kafka/partitioner.rb +40 -0
  59. data/lib/kafka/pause.rb +92 -0
  60. data/lib/kafka/pending_message.rb +29 -0
  61. data/lib/kafka/pending_message_queue.rb +41 -0
  62. data/lib/kafka/produce_operation.rb +205 -0
  63. data/lib/kafka/producer.rb +528 -0
  64. data/lib/kafka/prometheus.rb +316 -0
  65. data/lib/kafka/protocol/add_offsets_to_txn_request.rb +29 -0
  66. data/lib/kafka/protocol/add_offsets_to_txn_response.rb +21 -0
  67. data/lib/kafka/protocol/add_partitions_to_txn_request.rb +34 -0
  68. data/lib/kafka/protocol/add_partitions_to_txn_response.rb +47 -0
  69. data/lib/kafka/protocol/alter_configs_request.rb +44 -0
  70. data/lib/kafka/protocol/alter_configs_response.rb +49 -0
  71. data/lib/kafka/protocol/api_versions_request.rb +21 -0
  72. data/lib/kafka/protocol/api_versions_response.rb +53 -0
  73. data/lib/kafka/protocol/consumer_group_protocol.rb +19 -0
  74. data/lib/kafka/protocol/create_partitions_request.rb +42 -0
  75. data/lib/kafka/protocol/create_partitions_response.rb +28 -0
  76. data/lib/kafka/protocol/create_topics_request.rb +45 -0
  77. data/lib/kafka/protocol/create_topics_response.rb +26 -0
  78. data/lib/kafka/protocol/decoder.rb +175 -0
  79. data/lib/kafka/protocol/delete_topics_request.rb +33 -0
  80. data/lib/kafka/protocol/delete_topics_response.rb +26 -0
  81. data/lib/kafka/protocol/describe_configs_request.rb +35 -0
  82. data/lib/kafka/protocol/describe_configs_response.rb +73 -0
  83. data/lib/kafka/protocol/describe_groups_request.rb +27 -0
  84. data/lib/kafka/protocol/describe_groups_response.rb +73 -0
  85. data/lib/kafka/protocol/encoder.rb +184 -0
  86. data/lib/kafka/protocol/end_txn_request.rb +29 -0
  87. data/lib/kafka/protocol/end_txn_response.rb +19 -0
  88. data/lib/kafka/protocol/fetch_request.rb +70 -0
  89. data/lib/kafka/protocol/fetch_response.rb +136 -0
  90. data/lib/kafka/protocol/find_coordinator_request.rb +29 -0
  91. data/lib/kafka/protocol/find_coordinator_response.rb +29 -0
  92. data/lib/kafka/protocol/heartbeat_request.rb +27 -0
  93. data/lib/kafka/protocol/heartbeat_response.rb +17 -0
  94. data/lib/kafka/protocol/init_producer_id_request.rb +26 -0
  95. data/lib/kafka/protocol/init_producer_id_response.rb +27 -0
  96. data/lib/kafka/protocol/join_group_request.rb +47 -0
  97. data/lib/kafka/protocol/join_group_response.rb +41 -0
  98. data/lib/kafka/protocol/leave_group_request.rb +25 -0
  99. data/lib/kafka/protocol/leave_group_response.rb +17 -0
  100. data/lib/kafka/protocol/list_groups_request.rb +23 -0
  101. data/lib/kafka/protocol/list_groups_response.rb +35 -0
  102. data/lib/kafka/protocol/list_offset_request.rb +53 -0
  103. data/lib/kafka/protocol/list_offset_response.rb +89 -0
  104. data/lib/kafka/protocol/member_assignment.rb +42 -0
  105. data/lib/kafka/protocol/message.rb +172 -0
  106. data/lib/kafka/protocol/message_set.rb +55 -0
  107. data/lib/kafka/protocol/metadata_request.rb +31 -0
  108. data/lib/kafka/protocol/metadata_response.rb +185 -0
  109. data/lib/kafka/protocol/offset_commit_request.rb +47 -0
  110. data/lib/kafka/protocol/offset_commit_response.rb +29 -0
  111. data/lib/kafka/protocol/offset_fetch_request.rb +38 -0
  112. data/lib/kafka/protocol/offset_fetch_response.rb +56 -0
  113. data/lib/kafka/protocol/produce_request.rb +94 -0
  114. data/lib/kafka/protocol/produce_response.rb +63 -0
  115. data/lib/kafka/protocol/record.rb +88 -0
  116. data/lib/kafka/protocol/record_batch.rb +223 -0
  117. data/lib/kafka/protocol/request_message.rb +26 -0
  118. data/lib/kafka/protocol/sasl_handshake_request.rb +33 -0
  119. data/lib/kafka/protocol/sasl_handshake_response.rb +28 -0
  120. data/lib/kafka/protocol/sync_group_request.rb +33 -0
  121. data/lib/kafka/protocol/sync_group_response.rb +26 -0
  122. data/lib/kafka/protocol/txn_offset_commit_request.rb +46 -0
  123. data/lib/kafka/protocol/txn_offset_commit_response.rb +47 -0
  124. data/lib/kafka/protocol.rb +225 -0
  125. data/lib/kafka/round_robin_assignment_strategy.rb +52 -0
  126. data/lib/kafka/sasl/awsmskiam.rb +128 -0
  127. data/lib/kafka/sasl/gssapi.rb +76 -0
  128. data/lib/kafka/sasl/oauth.rb +64 -0
  129. data/lib/kafka/sasl/plain.rb +39 -0
  130. data/lib/kafka/sasl/scram.rb +180 -0
  131. data/lib/kafka/sasl_authenticator.rb +73 -0
  132. data/lib/kafka/snappy_codec.rb +29 -0
  133. data/lib/kafka/socket_with_timeout.rb +96 -0
  134. data/lib/kafka/ssl_context.rb +66 -0
  135. data/lib/kafka/ssl_socket_with_timeout.rb +192 -0
  136. data/lib/kafka/statsd.rb +296 -0
  137. data/lib/kafka/tagged_logger.rb +77 -0
  138. data/lib/kafka/transaction_manager.rb +306 -0
  139. data/lib/kafka/transaction_state_machine.rb +72 -0
  140. data/lib/kafka/version.rb +5 -0
  141. data/lib/kafka/zstd_codec.rb +27 -0
  142. data/lib/kafka.rb +373 -0
  143. data/lib/ruby-kafka.rb +5 -0
  144. data/ruby-kafka.gemspec +54 -0
  145. metadata +520 -0
data/lib/kafka/protocol/leave_group_request.rb
@@ -0,0 +1,25 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class LeaveGroupRequest
+       def initialize(group_id:, member_id:)
+         @group_id = group_id
+         @member_id = member_id
+       end
+
+       def api_key
+         LEAVE_GROUP_API
+       end
+
+       def response_class
+         LeaveGroupResponse
+       end
+
+       def encode(encoder)
+         encoder.write_string(@group_id)
+         encoder.write_string(@member_id)
+       end
+     end
+   end
+ end
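The encode body above writes two Kafka protocol strings back to back. A minimal sketch of the resulting wire bytes, assuming the standard Kafka string encoding (big-endian int16 length followed by the raw bytes); the hand-rolled lambda stands in for Encoder#write_string, and the group and member names are made up for illustration:

    require "stringio"

    buffer = StringIO.new

    # Stand-in for Encoder#write_string: int16 length prefix, then the bytes.
    write_string = ->(s) do
      buffer.write([s.bytesize].pack("s>"))
      buffer.write(s)
    end

    write_string.call("my-group")  # @group_id
    write_string.call("member-1")  # @member_id

    buffer.string.bytesize  # => 20 (2 + 8 + 2 + 8)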
data/lib/kafka/protocol/leave_group_response.rb
@@ -0,0 +1,17 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class LeaveGroupResponse
+       attr_reader :error_code
+
+       def initialize(error_code:)
+         @error_code = error_code
+       end
+
+       def self.decode(decoder)
+         new(error_code: decoder.int16)
+       end
+     end
+   end
+ end
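Decoding is symmetric: the v0 response body is a single int16 error code. A usage sketch, relying on Decoder.from_string from data/lib/kafka/protocol/decoder.rb (also part of this change set); the byte string is hand-built for illustration:

    raw = [0].pack("s>")  # error_code 0, i.e. no error
    decoder = Kafka::Protocol::Decoder.from_string(raw)
    response = Kafka::Protocol::LeaveGroupResponse.decode(decoder)
    response.error_code  # => 0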
data/lib/kafka/protocol/list_groups_request.rb
@@ -0,0 +1,23 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class ListGroupsRequest
+       def api_key
+         LIST_GROUPS_API
+       end
+
+       def api_version
+         0
+       end
+
+       def response_class
+         Protocol::ListGroupsResponse
+       end
+
+       def encode(encoder)
+         # noop
+       end
+     end
+   end
+ end
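Since the v0 ListGroups request has an empty body, encode is deliberately a no-op; everything the broker needs travels in the request header. A quick sketch (Encoder is the library's own class from data/lib/kafka/protocol/encoder.rb):

    require "stringio"

    buffer = StringIO.new
    encoder = Kafka::Protocol::Encoder.new(buffer)
    Kafka::Protocol::ListGroupsRequest.new.encode(encoder)
    buffer.string  # => ""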
data/lib/kafka/protocol/list_groups_response.rb
@@ -0,0 +1,35 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class ListGroupsResponse
+       class GroupEntry
+         attr_reader :group_id, :protocol_type
+
+         def initialize(group_id:, protocol_type:)
+           @group_id = group_id
+           @protocol_type = protocol_type
+         end
+       end
+
+       attr_reader :error_code, :groups
+
+       def initialize(error_code:, groups:)
+         @error_code = error_code
+         @groups = groups
+       end
+
+       def self.decode(decoder)
+         error_code = decoder.int16
+         groups = decoder.array do
+           GroupEntry.new(
+             group_id: decoder.string,
+             protocol_type: decoder.string
+           )
+         end
+
+         new(error_code: error_code, groups: groups)
+       end
+     end
+   end
+ end
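A sketch of the wire layout decode expects: an int16 error code followed by an int32-prefixed array of (group_id, protocol_type) string pairs. The bytes are hand-assembled for illustration, assuming big-endian encoding throughout:

    raw  = [0].pack("s>")                # error_code = 0
    raw += [1].pack("l>")                # one array element
    raw += [8].pack("s>") + "my-group"   # group_id
    raw += [8].pack("s>") + "consumer"   # protocol_type

    decoder = Kafka::Protocol::Decoder.from_string(raw)
    response = Kafka::Protocol::ListGroupsResponse.decode(decoder)
    response.groups.first.group_id  # => "my-group"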
data/lib/kafka/protocol/list_offset_request.rb
@@ -0,0 +1,53 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     # A request to list the available offsets for a set of topics/partitions.
+     #
+     # ## API Specification
+     #
+     #     OffsetRequest => ReplicaId IsolationLevel [TopicName [Partition Time]]
+     #       ReplicaId => int32
+     #       IsolationLevel => int8
+     #       TopicName => string
+     #       Partition => int32
+     #       Time => int64
+     #
+     class ListOffsetRequest
+       ISOLATION_READ_UNCOMMITTED = 0
+       ISOLATION_READ_COMMITTED = 1
+
+       # @param topics [Hash{String => Array<Hash>}] topic name => [{ partition:, time: }]
+       def initialize(topics:)
+         @replica_id = REPLICA_ID
+         @topics = topics
+       end
+
+       def api_version
+         2
+       end
+
+       def api_key
+         LIST_OFFSET_API
+       end
+
+       def response_class
+         Protocol::ListOffsetResponse
+       end
+
+       def encode(encoder)
+         encoder.write_int32(@replica_id)
+         encoder.write_int8(ISOLATION_READ_COMMITTED)
+
+         encoder.write_array(@topics) do |topic, partitions|
+           encoder.write_string(topic)
+
+           encoder.write_array(partitions) do |partition|
+             encoder.write_int32(partition.fetch(:partition))
+             encoder.write_int64(partition.fetch(:time))
+           end
+         end
+       end
+     end
+   end
+ end
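The shape of the topics hash follows from how encode iterates it: topic names map to arrays of partition hashes. Kafka's list-offsets API treats a time of -1 as "latest offset" and -2 as "earliest offset"; the topic name below is made up for illustration:

    request = Kafka::Protocol::ListOffsetRequest.new(
      topics: {
        "greetings" => [
          { partition: 0, time: -1 },  # resolve the latest offset
          { partition: 1, time: -2 },  # resolve the earliest offset
        ]
      }
    )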
data/lib/kafka/protocol/list_offset_response.rb
@@ -0,0 +1,89 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+
+     # A response to a list offset request.
+     #
+     # ## API Specification
+     #
+     #     OffsetResponse => ThrottleTimeMS [TopicName [PartitionOffsets]]
+     #       ThrottleTimeMS => int32
+     #       PartitionOffsets => Partition ErrorCode Timestamp Offset
+     #         Partition => int32
+     #         ErrorCode => int16
+     #         Timestamp => int64
+     #         Offset => int64
+     #
+     class ListOffsetResponse
+       class TopicOffsetInfo
+         attr_reader :name, :partition_offsets
+
+         def initialize(name:, partition_offsets:)
+           @name = name
+           @partition_offsets = partition_offsets
+         end
+       end
+
+       class PartitionOffsetInfo
+         attr_reader :partition, :error_code, :timestamp, :offset
+
+         def initialize(partition:, error_code:, timestamp:, offset:)
+           @partition = partition
+           @error_code = error_code
+           @timestamp = timestamp
+           @offset = offset
+         end
+       end
+
+       attr_reader :topics
+
+       def initialize(topics:)
+         @topics = topics
+       end
+
+       def offset_for(topic, partition)
+         topic_info = @topics.find {|t| t.name == topic }
+
+         if topic_info.nil?
+           raise UnknownTopicOrPartition, "Unknown topic #{topic}"
+         end
+
+         partition_info = topic_info
+           .partition_offsets
+           .find {|p| p.partition == partition }
+
+         if partition_info.nil?
+           raise UnknownTopicOrPartition, "Unknown partition #{topic}/#{partition}"
+         end
+
+         Protocol.handle_error(partition_info.error_code)
+
+         partition_info.offset
+       end
+
+       def self.decode(decoder)
+         _throttle_time_ms = decoder.int32
+         topics = decoder.array do
+           name = decoder.string
+
+           partition_offsets = decoder.array do
+             PartitionOffsetInfo.new(
+               partition: decoder.int32,
+               error_code: decoder.int16,
+               timestamp: decoder.int64,
+               offset: decoder.int64
+             )
+           end
+
+           TopicOffsetInfo.new(
+             name: name,
+             partition_offsets: partition_offsets
+           )
+         end
+
+         new(topics: topics)
+       end
+     end
+   end
+ end
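A usage sketch of offset_for, building the response by hand rather than decoding it; it returns the offset for a known topic/partition, raises UnknownTopicOrPartition for unknown ones, and routes non-zero error codes through Protocol.handle_error:

    info = Kafka::Protocol::ListOffsetResponse::PartitionOffsetInfo.new(
      partition: 0, error_code: 0, timestamp: -1, offset: 42
    )
    topic = Kafka::Protocol::ListOffsetResponse::TopicOffsetInfo.new(
      name: "greetings", partition_offsets: [info]
    )
    response = Kafka::Protocol::ListOffsetResponse.new(topics: [topic])

    response.offset_for("greetings", 0)  # => 42
    response.offset_for("greetings", 9)  # raises UnknownTopicOrPartition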
data/lib/kafka/protocol/member_assignment.rb
@@ -0,0 +1,42 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class MemberAssignment
+       attr_reader :topics
+
+       def initialize(version: 0, topics: {}, user_data: nil)
+         @version = version
+         @topics = topics
+         @user_data = user_data
+       end
+
+       def assign(topic, partitions)
+         @topics[topic] ||= []
+         @topics[topic].concat(partitions)
+       end
+
+       def encode(encoder)
+         encoder.write_int16(@version)
+
+         encoder.write_array(@topics) do |topic, partitions|
+           encoder.write_string(topic)
+
+           encoder.write_array(partitions) do |partition|
+             encoder.write_int32(partition)
+           end
+         end
+
+         encoder.write_bytes(@user_data)
+       end
+
+       def self.decode(decoder)
+         new(
+           version: decoder.int16,
+           topics: Hash[decoder.array { [decoder.string, decoder.array { decoder.int32 }] }],
+           user_data: decoder.bytes,
+         )
+       end
+     end
+   end
+ end
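assign accumulates partitions per topic, so repeated calls for the same topic extend the list rather than replace it:

    assignment = Kafka::Protocol::MemberAssignment.new
    assignment.assign("greetings", [0, 1])
    assignment.assign("greetings", [2])
    assignment.topics  # => { "greetings" => [0, 1, 2] }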
data/lib/kafka/protocol/message.rb
@@ -0,0 +1,172 @@
+ # frozen_string_literal: true
+
+ require "stringio"
+ require "zlib"
+
+ module Kafka
+   module Protocol
+
+     # ## API Specification
+     #
+     #     Message => Crc MagicByte Attributes Timestamp Key Value
+     #       Crc => int32
+     #       MagicByte => int8
+     #       Attributes => int8
+     #       Timestamp => int64, in ms
+     #       Key => bytes
+     #       Value => bytes
+     #
+     class Message
+       MAGIC_BYTE = 1
+
+       attr_reader :key, :value, :codec_id, :offset
+
+       attr_reader :bytesize, :create_time
+
+       def initialize(value:, key: nil, create_time: Time.now, codec_id: 0, offset: -1)
+         @key = key
+         @value = value
+         @codec_id = codec_id
+         @offset = offset
+         @create_time = create_time
+
+         @bytesize = @key.to_s.bytesize + @value.to_s.bytesize
+       end
+
+       def encode(encoder)
+         data = encode_with_crc
+
+         encoder.write_int64(offset)
+         encoder.write_bytes(data)
+       end
+
+       def ==(other)
+         @key == other.key &&
+           @value == other.value &&
+           @codec_id == other.codec_id &&
+           @offset == other.offset
+       end
+
+       def compressed?
+         @codec_id != 0
+       end
+
+       # @return [Array<Kafka::Protocol::Message>]
+       def decompress
+         codec = Compression.find_codec_by_id(@codec_id)
+
+         # The value of a compressed message is itself an encoded message set.
+         data = codec.decompress(value)
+         message_set_decoder = Decoder.from_string(data)
+         message_set = MessageSet.decode(message_set_decoder)
+
+         correct_offsets(message_set.messages)
+       end
+
+       def self.decode(decoder)
+         offset = decoder.int64
+         message_decoder = Decoder.from_string(decoder.bytes)
+
+         _crc = message_decoder.int32
+         magic_byte = message_decoder.int8
+         attributes = message_decoder.int8
+
+         # The magic byte indicates the message format version. There are situations
+         # where an old message format can be returned from a newer version of Kafka,
+         # because old messages are not necessarily rewritten on upgrades.
+         case magic_byte
+         when 0
+           # No timestamp in the pre-0.10 message format.
+           timestamp = nil
+         when 1
+           timestamp = message_decoder.int64
+
+           # If the timestamp is set to zero, it's because the message has been upgraded
+           # from the Kafka 0.9 disk format to the Kafka 0.10 format. The former didn't
+           # have a timestamp attribute, so we'll just set the timestamp to nil.
+           timestamp = nil if timestamp.zero?
+         else
+           raise Kafka::Error, "Invalid magic byte: #{magic_byte}"
+         end
+
+         key = message_decoder.bytes
+         value = message_decoder.bytes
+
+         # The codec id is encoded in the three least significant bits of the
+         # attributes.
+         codec_id = attributes & 0b111
+
+         # The timestamp will be nil if the message was written in the Kafka 0.9 log format.
+         create_time = timestamp && Time.at(timestamp / 1000.0)
+
+         new(key: key, value: value, codec_id: codec_id, offset: offset, create_time: create_time)
+       end
+
+       # For API compatibility with the Kafka 0.11.x+ record format, where records can be control records.
+       def is_control_record
+         false
+       end
+
+       def headers
+         {}
+       end
+
+       private
+
+       # Offsets may be relative to the wrapper message's offset, but there are special cases.
+       #
+       # Cases when the client will receive corrected offsets:
+       #   - When the fetch request is version 0, Kafka corrects the relative offsets on the broker side before sending the fetch response.
+       #   - When messages are stored on disk in the 0.9 format (the broker is configured to do so).
+       #
+       # In all other cases the compressed inner messages carry relative offsets, where:
+       #   - The inner messages are numbered relative to the start of the batch.
+       #   - The wrapper message's offset is the 'real' offset of the last message in the compressed batch.
+       def correct_offsets(messages)
+         max_relative_offset = messages.last.offset
+
+         # The offsets are already correct, do nothing.
+         return messages if max_relative_offset == offset
+
+         # The contained messages have relative offsets and need to be corrected.
+         base_offset = offset - max_relative_offset
+
+         messages.map do |message|
+           Message.new(
+             offset: message.offset + base_offset,
+             value: message.value,
+             key: message.key,
+             create_time: message.create_time,
+             codec_id: message.codec_id
+           )
+         end
+       end
+
+       def encode_with_crc
+         buffer = StringIO.new
+         encoder = Encoder.new(buffer)
+
+         data = encode_without_crc
+         crc = Zlib.crc32(data)
+
+         encoder.write_int32(crc)
+         encoder.write(data)
+
+         buffer.string
+       end
+
+       def encode_without_crc
+         buffer = StringIO.new
+         encoder = Encoder.new(buffer)
+
+         encoder.write_int8(MAGIC_BYTE)
+         encoder.write_int8(@codec_id)
+         encoder.write_int64((@create_time.to_f * 1000).to_i)
+         encoder.write_bytes(@key)
+         encoder.write_bytes(@value)
+
+         buffer.string
+       end
+     end
+   end
+ end
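A worked example of the correct_offsets arithmetic: given a wrapper message at offset 100 containing three inner messages with relative offsets 0, 1, 2, the last inner offset is subtracted from the wrapper offset to get the base, and each inner offset is shifted by it:

    inner_offsets = [0, 1, 2]
    wrapper_offset = 100

    base_offset = wrapper_offset - inner_offsets.last  # => 98
    inner_offsets.map { |o| o + base_offset }          # => [98, 99, 100]

The wrapper's offset thus ends up as the real offset of the last message in the batch, matching the comment above.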
data/lib/kafka/protocol/message_set.rb
@@ -0,0 +1,55 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class MessageSet
+       attr_reader :messages
+
+       def initialize(messages: [])
+         @messages = messages
+       end
+
+       def size
+         @messages.size
+       end
+
+       def ==(other)
+         messages == other.messages
+       end
+
+       def encode(encoder)
+         # Messages in a message set are *not* encoded as an array. Rather,
+         # they are written in sequence.
+         @messages.each do |message|
+           message.encode(encoder)
+         end
+       end
+
+       def self.decode(decoder)
+         fetched_messages = []
+
+         until decoder.eof?
+           begin
+             message = Message.decode(decoder)
+
+             if message.compressed?
+               fetched_messages.concat(message.decompress)
+             else
+               fetched_messages << message
+             end
+           rescue EOFError
+             if fetched_messages.empty?
+               # If the first message in the set is truncated, it's likely because the
+               # message is larger than the maximum size that we have asked for.
+               raise MessageTooLargeToRead
+             else
+               # We tried to decode a partial message at the end of the set; just skip it.
+             end
+           end
+         end
+
+         new(messages: fetched_messages)
+       end
+     end
+   end
+ end
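Because a message set is just messages written back to back with no element count, decode reads until the decoder hits EOF. A small construction sketch using Message from the hunk above:

    set = Kafka::Protocol::MessageSet.new(messages: [
      Kafka::Protocol::Message.new(value: "hello"),
      Kafka::Protocol::Message.new(value: "world"),
    ])
    set.size  # => 2

    require "stringio"
    buffer = StringIO.new
    set.encode(Kafka::Protocol::Encoder.new(buffer))  # messages written in sequence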
data/lib/kafka/protocol/metadata_request.rb
@@ -0,0 +1,31 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class MetadataRequest
+
+       # A request for cluster metadata.
+       #
+       # @param topics [Array<String>]
+       def initialize(topics: [])
+         @topics = topics
+       end
+
+       def api_key
+         TOPIC_METADATA_API
+       end
+
+       def api_version
+         1
+       end
+
+       def response_class
+         Protocol::MetadataResponse
+       end
+
+       def encode(encoder)
+         encoder.write_array(@topics) {|topic| encoder.write_string(topic) }
+       end
+     end
+   end
+ end
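A construction sketch; the request body is just an array of topic name strings, and the topic name here is made up for illustration:

    request = Kafka::Protocol::MetadataRequest.new(topics: ["greetings"])
    request.api_key      # => TOPIC_METADATA_API (the Metadata API key, 3)
    request.api_version  # => 1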