ruby-kafka-aws-iam 1.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (145)
  1. checksums.yaml +7 -0
  2. data/.circleci/config.yml +393 -0
  3. data/.github/workflows/stale.yml +19 -0
  4. data/.gitignore +13 -0
  5. data/.readygo +1 -0
  6. data/.rspec +3 -0
  7. data/.rubocop.yml +44 -0
  8. data/.ruby-version +1 -0
  9. data/.yardopts +3 -0
  10. data/CHANGELOG.md +314 -0
  11. data/Gemfile +5 -0
  12. data/ISSUE_TEMPLATE.md +23 -0
  13. data/LICENSE.txt +176 -0
  14. data/Procfile +2 -0
  15. data/README.md +1356 -0
  16. data/Rakefile +8 -0
  17. data/benchmarks/message_encoding.rb +23 -0
  18. data/bin/console +8 -0
  19. data/bin/setup +5 -0
  20. data/docker-compose.yml +39 -0
  21. data/examples/consumer-group.rb +35 -0
  22. data/examples/firehose-consumer.rb +64 -0
  23. data/examples/firehose-producer.rb +54 -0
  24. data/examples/simple-consumer.rb +34 -0
  25. data/examples/simple-producer.rb +42 -0
  26. data/examples/ssl-producer.rb +44 -0
  27. data/lib/kafka/async_producer.rb +297 -0
  28. data/lib/kafka/broker.rb +217 -0
  29. data/lib/kafka/broker_info.rb +16 -0
  30. data/lib/kafka/broker_pool.rb +41 -0
  31. data/lib/kafka/broker_uri.rb +43 -0
  32. data/lib/kafka/client.rb +838 -0
  33. data/lib/kafka/cluster.rb +513 -0
  34. data/lib/kafka/compression.rb +45 -0
  35. data/lib/kafka/compressor.rb +86 -0
  36. data/lib/kafka/connection.rb +228 -0
  37. data/lib/kafka/connection_builder.rb +33 -0
  38. data/lib/kafka/consumer.rb +642 -0
  39. data/lib/kafka/consumer_group/assignor.rb +63 -0
  40. data/lib/kafka/consumer_group.rb +231 -0
  41. data/lib/kafka/crc32_hash.rb +15 -0
  42. data/lib/kafka/datadog.rb +420 -0
  43. data/lib/kafka/digest.rb +22 -0
  44. data/lib/kafka/fetch_operation.rb +115 -0
  45. data/lib/kafka/fetched_batch.rb +58 -0
  46. data/lib/kafka/fetched_batch_generator.rb +120 -0
  47. data/lib/kafka/fetched_message.rb +48 -0
  48. data/lib/kafka/fetched_offset_resolver.rb +48 -0
  49. data/lib/kafka/fetcher.rb +224 -0
  50. data/lib/kafka/gzip_codec.rb +34 -0
  51. data/lib/kafka/heartbeat.rb +25 -0
  52. data/lib/kafka/instrumenter.rb +38 -0
  53. data/lib/kafka/interceptors.rb +33 -0
  54. data/lib/kafka/lz4_codec.rb +27 -0
  55. data/lib/kafka/message_buffer.rb +87 -0
  56. data/lib/kafka/murmur2_hash.rb +17 -0
  57. data/lib/kafka/offset_manager.rb +259 -0
  58. data/lib/kafka/partitioner.rb +40 -0
  59. data/lib/kafka/pause.rb +92 -0
  60. data/lib/kafka/pending_message.rb +29 -0
  61. data/lib/kafka/pending_message_queue.rb +41 -0
  62. data/lib/kafka/produce_operation.rb +205 -0
  63. data/lib/kafka/producer.rb +528 -0
  64. data/lib/kafka/prometheus.rb +316 -0
  65. data/lib/kafka/protocol/add_offsets_to_txn_request.rb +29 -0
  66. data/lib/kafka/protocol/add_offsets_to_txn_response.rb +21 -0
  67. data/lib/kafka/protocol/add_partitions_to_txn_request.rb +34 -0
  68. data/lib/kafka/protocol/add_partitions_to_txn_response.rb +47 -0
  69. data/lib/kafka/protocol/alter_configs_request.rb +44 -0
  70. data/lib/kafka/protocol/alter_configs_response.rb +49 -0
  71. data/lib/kafka/protocol/api_versions_request.rb +21 -0
  72. data/lib/kafka/protocol/api_versions_response.rb +53 -0
  73. data/lib/kafka/protocol/consumer_group_protocol.rb +19 -0
  74. data/lib/kafka/protocol/create_partitions_request.rb +42 -0
  75. data/lib/kafka/protocol/create_partitions_response.rb +28 -0
  76. data/lib/kafka/protocol/create_topics_request.rb +45 -0
  77. data/lib/kafka/protocol/create_topics_response.rb +26 -0
  78. data/lib/kafka/protocol/decoder.rb +175 -0
  79. data/lib/kafka/protocol/delete_topics_request.rb +33 -0
  80. data/lib/kafka/protocol/delete_topics_response.rb +26 -0
  81. data/lib/kafka/protocol/describe_configs_request.rb +35 -0
  82. data/lib/kafka/protocol/describe_configs_response.rb +73 -0
  83. data/lib/kafka/protocol/describe_groups_request.rb +27 -0
  84. data/lib/kafka/protocol/describe_groups_response.rb +73 -0
  85. data/lib/kafka/protocol/encoder.rb +184 -0
  86. data/lib/kafka/protocol/end_txn_request.rb +29 -0
  87. data/lib/kafka/protocol/end_txn_response.rb +19 -0
  88. data/lib/kafka/protocol/fetch_request.rb +70 -0
  89. data/lib/kafka/protocol/fetch_response.rb +136 -0
  90. data/lib/kafka/protocol/find_coordinator_request.rb +29 -0
  91. data/lib/kafka/protocol/find_coordinator_response.rb +29 -0
  92. data/lib/kafka/protocol/heartbeat_request.rb +27 -0
  93. data/lib/kafka/protocol/heartbeat_response.rb +17 -0
  94. data/lib/kafka/protocol/init_producer_id_request.rb +26 -0
  95. data/lib/kafka/protocol/init_producer_id_response.rb +27 -0
  96. data/lib/kafka/protocol/join_group_request.rb +47 -0
  97. data/lib/kafka/protocol/join_group_response.rb +41 -0
  98. data/lib/kafka/protocol/leave_group_request.rb +25 -0
  99. data/lib/kafka/protocol/leave_group_response.rb +17 -0
  100. data/lib/kafka/protocol/list_groups_request.rb +23 -0
  101. data/lib/kafka/protocol/list_groups_response.rb +35 -0
  102. data/lib/kafka/protocol/list_offset_request.rb +53 -0
  103. data/lib/kafka/protocol/list_offset_response.rb +89 -0
  104. data/lib/kafka/protocol/member_assignment.rb +42 -0
  105. data/lib/kafka/protocol/message.rb +172 -0
  106. data/lib/kafka/protocol/message_set.rb +55 -0
  107. data/lib/kafka/protocol/metadata_request.rb +31 -0
  108. data/lib/kafka/protocol/metadata_response.rb +185 -0
  109. data/lib/kafka/protocol/offset_commit_request.rb +47 -0
  110. data/lib/kafka/protocol/offset_commit_response.rb +29 -0
  111. data/lib/kafka/protocol/offset_fetch_request.rb +38 -0
  112. data/lib/kafka/protocol/offset_fetch_response.rb +56 -0
  113. data/lib/kafka/protocol/produce_request.rb +94 -0
  114. data/lib/kafka/protocol/produce_response.rb +63 -0
  115. data/lib/kafka/protocol/record.rb +88 -0
  116. data/lib/kafka/protocol/record_batch.rb +223 -0
  117. data/lib/kafka/protocol/request_message.rb +26 -0
  118. data/lib/kafka/protocol/sasl_handshake_request.rb +33 -0
  119. data/lib/kafka/protocol/sasl_handshake_response.rb +28 -0
  120. data/lib/kafka/protocol/sync_group_request.rb +33 -0
  121. data/lib/kafka/protocol/sync_group_response.rb +26 -0
  122. data/lib/kafka/protocol/txn_offset_commit_request.rb +46 -0
  123. data/lib/kafka/protocol/txn_offset_commit_response.rb +47 -0
  124. data/lib/kafka/protocol.rb +225 -0
  125. data/lib/kafka/round_robin_assignment_strategy.rb +52 -0
  126. data/lib/kafka/sasl/awsmskiam.rb +128 -0
  127. data/lib/kafka/sasl/gssapi.rb +76 -0
  128. data/lib/kafka/sasl/oauth.rb +64 -0
  129. data/lib/kafka/sasl/plain.rb +39 -0
  130. data/lib/kafka/sasl/scram.rb +180 -0
  131. data/lib/kafka/sasl_authenticator.rb +73 -0
  132. data/lib/kafka/snappy_codec.rb +29 -0
  133. data/lib/kafka/socket_with_timeout.rb +96 -0
  134. data/lib/kafka/ssl_context.rb +66 -0
  135. data/lib/kafka/ssl_socket_with_timeout.rb +192 -0
  136. data/lib/kafka/statsd.rb +296 -0
  137. data/lib/kafka/tagged_logger.rb +77 -0
  138. data/lib/kafka/transaction_manager.rb +306 -0
  139. data/lib/kafka/transaction_state_machine.rb +72 -0
  140. data/lib/kafka/version.rb +5 -0
  141. data/lib/kafka/zstd_codec.rb +27 -0
  142. data/lib/kafka.rb +373 -0
  143. data/lib/ruby-kafka.rb +5 -0
  144. data/ruby-kafka.gemspec +54 -0
  145. metadata +520 -0
data/lib/kafka/protocol/metadata_response.rb
@@ -0,0 +1,185 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+
+     # A response to a {MetadataRequest}.
+     #
+     # The response contains information on the brokers, topics, and partitions in
+     # the cluster.
+     #
+     # * For each broker a node id, host, and port is provided.
+     # * For each topic partition the node id of the broker acting as partition leader,
+     #   as well as a list of node ids for the set of replicas, are given. The `isr` list is
+     #   the subset of replicas that are "in sync", i.e. have fully caught up with the
+     #   leader.
+     #
+     # ## API Specification
+     #
+     #     MetadataResponse => [Broker][TopicMetadata]
+     #       Broker => NodeId Host Port  (any number of brokers may be returned)
+     #         NodeId => int32
+     #         Host => string
+     #         Port => int32
+     #
+     #       TopicMetadata => TopicErrorCode TopicName [PartitionMetadata]
+     #         TopicErrorCode => int16
+     #
+     #       PartitionMetadata => PartitionErrorCode PartitionId Leader Replicas Isr
+     #         PartitionErrorCode => int16
+     #         PartitionId => int32
+     #         Leader => int32
+     #         Replicas => [int32]
+     #         Isr => [int32]
+     #
+     class MetadataResponse
+       class PartitionMetadata
+         attr_reader :partition_id, :leader, :replicas
+
+         attr_reader :partition_error_code
+
+         def initialize(partition_error_code:, partition_id:, leader:, replicas: [], isr: [])
+           @partition_error_code = partition_error_code
+           @partition_id = partition_id
+           @leader = leader
+           @replicas = replicas
+           @isr = isr
+         end
+       end
+
+       class TopicMetadata
+         # @return [String] the name of the topic
+         attr_reader :topic_name
+
+         # @return [Array<PartitionMetadata>] the partitions in the topic.
+         attr_reader :partitions
+
+         attr_reader :topic_error_code
+
+         def initialize(topic_error_code: 0, topic_name:, partitions:)
+           @topic_error_code = topic_error_code
+           @topic_name = topic_name
+           @partitions = partitions
+         end
+       end
+
+       # @return [Array<Kafka::BrokerInfo>] the list of brokers in the cluster.
+       attr_reader :brokers
+
+       # @return [Array<TopicMetadata>] the list of topics in the cluster.
+       attr_reader :topics
+
+       # @return [Integer] The broker id of the controller broker.
+       attr_reader :controller_id
+
+       def initialize(brokers:, controller_id:, topics:)
+         @brokers = brokers
+         @controller_id = controller_id
+         @topics = topics
+       end
+
+       # Finds the node id of the broker that is acting as leader for the given topic
+       # and partition per this metadata.
+       #
+       # @param topic [String] the name of the topic.
+       # @param partition [Integer] the partition number.
+       # @return [Integer] the node id of the leader.
+       def find_leader_id(topic, partition)
+         topic_info = @topics.find {|t| t.topic_name == topic }
+
+         if topic_info.nil?
+           raise UnknownTopicOrPartition, "no topic #{topic}"
+         end
+
+         Protocol.handle_error(topic_info.topic_error_code)
+
+         partition_info = topic_info.partitions.find {|p| p.partition_id == partition }
+
+         if partition_info.nil?
+           raise UnknownTopicOrPartition, "no partition #{partition} in topic #{topic}"
+         end
+
+         begin
+           Protocol.handle_error(partition_info.partition_error_code)
+         rescue ReplicaNotAvailable
+           # This error can be safely ignored per the protocol specification.
+         end
+
+         partition_info.leader
+       end
+
+       # Finds the broker info for the given node id.
+       #
+       # @param node_id [Integer] the node id of the broker.
+       # @return [Kafka::BrokerInfo] information about the broker.
+       def find_broker(node_id)
+         broker = @brokers.find {|b| b.node_id == node_id }
+
+         raise Kafka::NoSuchBroker, "No broker with id #{node_id}" if broker.nil?
+
+         broker
+       end
+
+       def controller_broker
+         find_broker(controller_id)
+       end
+
+       def partitions_for(topic_name)
+         topic = @topics.find {|t| t.topic_name == topic_name }
+
+         if topic.nil?
+           raise UnknownTopicOrPartition, "unknown topic #{topic_name}"
+         end
+
+         Protocol.handle_error(topic.topic_error_code)
+
+         topic.partitions
+       end
+
+       # Decodes a MetadataResponse from a {Decoder} containing response data.
+       #
+       # @param decoder [Decoder]
+       # @return [MetadataResponse] the metadata response.
+       def self.decode(decoder)
+         brokers = decoder.array do
+           node_id = decoder.int32
+           host = decoder.string
+           port = decoder.int32
+           _rack = decoder.string
+
+           BrokerInfo.new(
+             node_id: node_id,
+             host: host,
+             port: port
+           )
+         end
+
+         controller_id = decoder.int32
+
+         topics = decoder.array do
+           topic_error_code = decoder.int16
+           topic_name = decoder.string
+           _is_internal = decoder.boolean
+
+           partitions = decoder.array do
+             PartitionMetadata.new(
+               partition_error_code: decoder.int16,
+               partition_id: decoder.int32,
+               leader: decoder.int32,
+               replicas: decoder.array { decoder.int32 },
+               isr: decoder.array { decoder.int32 },
+             )
+           end
+
+           TopicMetadata.new(
+             topic_error_code: topic_error_code,
+             topic_name: topic_name,
+             partitions: partitions,
+           )
+         end
+
+         new(brokers: brokers, controller_id: controller_id, topics: topics)
+       end
+     end
+   end
+ end
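Note: the lookup methods above drive broker discovery elsewhere in the gem. A minimal usage sketch, with hypothetical broker and topic values (none of these names come from the diff):

    require "kafka"

    # Hypothetical metadata: broker 1 leads partition 0 of "events".
    brokers = [Kafka::BrokerInfo.new(node_id: 1, host: "broker1.example.com", port: 9092)]
    partition = Kafka::Protocol::MetadataResponse::PartitionMetadata.new(
      partition_error_code: 0, partition_id: 0, leader: 1
    )
    topic = Kafka::Protocol::MetadataResponse::TopicMetadata.new(
      topic_name: "events", partitions: [partition]
    )
    metadata = Kafka::Protocol::MetadataResponse.new(
      brokers: brokers, controller_id: 1, topics: [topic]
    )

    metadata.find_leader_id("events", 0)   # => 1
    metadata.find_broker(1).host           # => "broker1.example.com"
    metadata.find_leader_id("missing", 0)  # raises Kafka::UnknownTopicOrPartition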
data/lib/kafka/protocol/offset_commit_request.rb
@@ -0,0 +1,47 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class OffsetCommitRequest
+       # This value signals to the broker that its default configuration should be used.
+       DEFAULT_RETENTION_TIME = -1
+
+       def api_key
+         OFFSET_COMMIT_API
+       end
+
+       def api_version
+         2
+       end
+
+       def response_class
+         OffsetCommitResponse
+       end
+
+       def initialize(group_id:, generation_id:, member_id:, retention_time: DEFAULT_RETENTION_TIME, offsets:)
+         @group_id = group_id
+         @generation_id = generation_id
+         @member_id = member_id
+         @retention_time = retention_time
+         @offsets = offsets
+       end
+
+       def encode(encoder)
+         encoder.write_string(@group_id)
+         encoder.write_int32(@generation_id)
+         encoder.write_string(@member_id)
+         encoder.write_int64(@retention_time)
+
+         encoder.write_array(@offsets) do |topic, partitions|
+           encoder.write_string(topic)
+
+           encoder.write_array(partitions) do |partition, offset|
+             encoder.write_int32(partition)
+             encoder.write_int64(offset)
+             encoder.write_string(nil) # metadata
+           end
+         end
+       end
+     end
+   end
+ end
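Note: encode iterates @offsets as nested [key, value] pairs, so the expected shape is a hash of topic names to {partition => offset} hashes. A sketch with hypothetical values:

    request = Kafka::Protocol::OffsetCommitRequest.new(
      group_id: "my-group",        # hypothetical consumer group
      generation_id: 7,
      member_id: "consumer-1",
      offsets: {
        "events" => { 0 => 42, 1 => 17 }  # topic => { partition => offset to commit }
      }
    )

    request.api_version  # => 2; retention_time defaults to -1 (use the broker default)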
data/lib/kafka/protocol/offset_commit_response.rb
@@ -0,0 +1,29 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class OffsetCommitResponse
+       attr_reader :topics
+
+       def initialize(topics:)
+         @topics = topics
+       end
+
+       def self.decode(decoder)
+         topics = decoder.array {
+           topic = decoder.string
+           partitions = decoder.array {
+             partition = decoder.int32
+             error_code = decoder.int16
+
+             [partition, error_code]
+           }
+
+           [topic, Hash[partitions]]
+         }
+
+         new(topics: Hash[topics])
+       end
+     end
+   end
+ end
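Note: decode builds a nested hash keyed by topic and partition, holding each partition's error code. A sketch of the resulting shape (values hypothetical):

    response = Kafka::Protocol::OffsetCommitResponse.new(
      topics: { "events" => { 0 => 0, 1 => 0 } }  # topic => { partition => error code }
    )
    response.topics.fetch("events").fetch(0)  # => 0, i.e. the commit succeeded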
data/lib/kafka/protocol/offset_fetch_request.rb
@@ -0,0 +1,38 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class OffsetFetchRequest
+       def initialize(group_id:, topics:)
+         @group_id = group_id
+         @topics = topics
+       end
+
+       def api_key
+         OFFSET_FETCH_API
+       end
+
+       # setting topics to nil fetches all offsets for a consumer group
+       # and that feature is only available in API version 2+
+       def api_version
+         @topics.nil? ? 2 : 1
+       end
+
+       def response_class
+         OffsetFetchResponse
+       end
+
+       def encode(encoder)
+         encoder.write_string(@group_id)
+
+         encoder.write_array(@topics) do |topic, partitions|
+           encoder.write_string(topic)
+
+           encoder.write_array(partitions) do |partition|
+             encoder.write_int32(partition)
+           end
+         end
+       end
+     end
+   end
+ end
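Note: as the comment in api_version says, passing topics: nil requests all offsets for the group and bumps the API version. A sketch with hypothetical values:

    all = Kafka::Protocol::OffsetFetchRequest.new(group_id: "my-group", topics: nil)
    all.api_version  # => 2 (fetch offsets for every topic in the group)

    some = Kafka::Protocol::OffsetFetchRequest.new(
      group_id: "my-group",
      topics: { "events" => [0, 1] }  # topic => list of partitions
    )
    some.api_version  # => 1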
data/lib/kafka/protocol/offset_fetch_response.rb
@@ -0,0 +1,56 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class OffsetFetchResponse
+       class PartitionOffsetInfo
+         attr_reader :offset, :metadata, :error_code
+
+         def initialize(offset:, metadata:, error_code:)
+           @offset = offset
+           @metadata = metadata
+           @error_code = error_code
+         end
+       end
+
+       attr_reader :topics
+
+       def initialize(topics:)
+         @topics = topics
+       end
+
+       def offset_for(topic, partition)
+         offset_info = topics.fetch(topic).fetch(partition, nil)
+
+         if offset_info
+           Protocol.handle_error(offset_info.error_code)
+           offset_info.offset
+         else
+           -1
+         end
+       end
+
+       def self.decode(decoder)
+         topics = decoder.array {
+           topic = decoder.string
+
+           partitions = decoder.array {
+             partition = decoder.int32
+
+             info = PartitionOffsetInfo.new(
+               offset: decoder.int64,
+               metadata: decoder.string,
+               error_code: decoder.int16,
+             )
+
+             [partition, info]
+           }
+
+           [topic, Hash[partitions]]
+         }
+
+         new(topics: Hash[topics])
+       end
+     end
+   end
+ end
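Note: offset_for raises on a non-zero per-partition error code and returns -1 when no offset has been committed for the partition. A sketch with hypothetical values:

    info = Kafka::Protocol::OffsetFetchResponse::PartitionOffsetInfo.new(
      offset: 42, metadata: nil, error_code: 0
    )
    response = Kafka::Protocol::OffsetFetchResponse.new(
      topics: { "events" => { 0 => info } }
    )

    response.offset_for("events", 0)  # => 42
    response.offset_for("events", 1)  # => -1 (no committed offset for this partition)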
data/lib/kafka/protocol/produce_request.rb
@@ -0,0 +1,94 @@
+ # frozen_string_literal: true
+
+ require "stringio"
+
+ module Kafka
+   module Protocol
+
+     # A produce request sends a message set to the server.
+     #
+     # ## API Specification
+     #
+     #     ProduceRequest => RequiredAcks Timeout [TopicName [Partition MessageSetSize MessageSet]]
+     #       RequiredAcks => int16
+     #       Timeout => int32
+     #       Partition => int32
+     #       MessageSetSize => int32
+     #
+     #     MessageSet => [Offset MessageSize Message]
+     #       Offset => int64
+     #       MessageSize => int32
+     #
+     #     Message => Crc MagicByte Attributes Key Value
+     #       Crc => int32
+     #       MagicByte => int8
+     #       Attributes => int8
+     #       Key => bytes
+     #       Value => bytes
+     #
+     class ProduceRequest
+       API_MIN_VERSION = 3
+
+       attr_reader :transactional_id, :required_acks, :timeout, :messages_for_topics, :compressor
+
+       # @param required_acks [Integer]
+       # @param timeout [Integer]
+       # @param messages_for_topics [Hash]
+       def initialize(transactional_id: nil, required_acks:, timeout:, messages_for_topics:, compressor: nil)
+         @transactional_id = transactional_id
+         @required_acks = required_acks
+         @timeout = timeout
+         @messages_for_topics = messages_for_topics
+         @compressor = compressor
+       end
+
+       def api_key
+         PRODUCE_API
+       end
+
+       def api_version
+         compressor.codec.nil? ? API_MIN_VERSION : [compressor.codec.produce_api_min_version, API_MIN_VERSION].max
+       end
+
+       def response_class
+         requires_acks? ? Protocol::ProduceResponse : nil
+       end
+
+       # Whether this request requires any acknowledgements at all. If no acknowledgements
+       # are required, the server will not send back a response at all.
+       #
+       # @return [Boolean] true if acknowledgements are required, false otherwise.
+       def requires_acks?
+         @required_acks != 0
+       end
+
+       def encode(encoder)
+         encoder.write_string(@transactional_id)
+         encoder.write_int16(@required_acks)
+         encoder.write_int32(@timeout)
+
+         encoder.write_array(@messages_for_topics) do |topic, messages_for_partition|
+           encoder.write_string(topic)
+
+           encoder.write_array(messages_for_partition) do |partition, record_batch|
+             encoder.write_int32(partition)
+
+             record_batch.fulfill_relative_data
+             encoded_record_batch = compress(record_batch)
+             encoder.write_bytes(encoded_record_batch)
+           end
+         end
+       end
+
+       private
+
+       def compress(record_batch)
+         if @compressor.nil?
+           Protocol::Encoder.encode_with(record_batch)
+         else
+           @compressor.compress(record_batch)
+         end
+       end
+     end
+   end
+ end
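Note: with required_acks: 0 the broker sends no response at all, which is why response_class returns nil. A sketch (empty topic data and all values hypothetical, just to show the acks logic):

    fire_and_forget = Kafka::Protocol::ProduceRequest.new(
      required_acks: 0, timeout: 10_000, messages_for_topics: {}
    )
    fire_and_forget.requires_acks?  # => false
    fire_and_forget.response_class  # => nil, no response will be read

    acked = Kafka::Protocol::ProduceRequest.new(
      required_acks: -1, timeout: 10_000, messages_for_topics: {}
    )
    acked.response_class  # => Kafka::Protocol::ProduceResponse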
data/lib/kafka/protocol/produce_response.rb
@@ -0,0 +1,63 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class ProduceResponse
+       class TopicInfo
+         attr_reader :topic, :partitions
+
+         def initialize(topic:, partitions:)
+           @topic = topic
+           @partitions = partitions
+         end
+       end
+
+       class PartitionInfo
+         attr_reader :partition, :error_code, :offset, :timestamp
+
+         def initialize(partition:, error_code:, offset:, timestamp:)
+           @partition = partition
+           @error_code = error_code
+           @offset = offset
+           @timestamp = timestamp
+         end
+       end
+
+       attr_reader :topics, :throttle_time_ms
+
+       def initialize(topics: [], throttle_time_ms: 0)
+         @topics = topics
+         @throttle_time_ms = throttle_time_ms
+       end
+
+       def each_partition
+         @topics.each do |topic_info|
+           topic_info.partitions.each do |partition_info|
+             yield topic_info, partition_info
+           end
+         end
+       end
+
+       def self.decode(decoder)
+         topics = decoder.array do
+           topic = decoder.string
+
+           partitions = decoder.array do
+             PartitionInfo.new(
+               partition: decoder.int32,
+               error_code: decoder.int16,
+               offset: decoder.int64,
+               timestamp: Time.at(decoder.int64 / 1000.0),
+             )
+           end
+
+           TopicInfo.new(topic: topic, partitions: partitions)
+         end
+
+         throttle_time_ms = decoder.int32
+
+         new(topics: topics, throttle_time_ms: throttle_time_ms)
+       end
+     end
+   end
+ end
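Note: each_partition flattens the topic/partition nesting for callers. A sketch of iterating a response and surfacing per-partition errors (the response is built by hand here with hypothetical data; in practice it comes from decode):

    partition_info = Kafka::Protocol::ProduceResponse::PartitionInfo.new(
      partition: 0, error_code: 0, offset: 100, timestamp: Time.now
    )
    topic_info = Kafka::Protocol::ProduceResponse::TopicInfo.new(
      topic: "events", partitions: [partition_info]
    )
    response = Kafka::Protocol::ProduceResponse.new(topics: [topic_info])

    response.each_partition do |topic, partition|
      Kafka::Protocol.handle_error(partition.error_code)  # raises on non-zero codes
      puts "#{topic.topic}/#{partition.partition} appended at offset #{partition.offset}"
    end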
data/lib/kafka/protocol/record.rb
@@ -0,0 +1,88 @@
+ module Kafka
+   module Protocol
+     class Record
+       attr_reader :key, :value, :headers, :attributes, :bytesize
+       attr_accessor :offset_delta, :timestamp_delta, :offset, :create_time, :is_control_record
+
+       def initialize(
+         key: nil,
+         value:,
+         headers: {},
+         attributes: 0,
+         offset_delta: 0,
+         offset: 0,
+         timestamp_delta: 0,
+         create_time: Time.now,
+         is_control_record: false
+       )
+         @key = key
+         @value = value
+         @headers = headers
+         @attributes = attributes
+
+         @offset_delta = offset_delta
+         @offset = offset
+         @timestamp_delta = timestamp_delta
+         @create_time = create_time
+         @is_control_record = is_control_record
+
+         @bytesize = @key.to_s.bytesize + @value.to_s.bytesize
+       end
+
+       def encode(encoder)
+         record_buffer = StringIO.new
+
+         record_encoder = Encoder.new(record_buffer)
+
+         record_encoder.write_int8(@attributes)
+         record_encoder.write_varint(@timestamp_delta)
+         record_encoder.write_varint(@offset_delta)
+
+         record_encoder.write_varint_string(@key)
+         record_encoder.write_varint_bytes(@value)
+
+         record_encoder.write_varint_array(@headers.to_a) do |header_key, header_value|
+           record_encoder.write_varint_string(header_key.to_s)
+           record_encoder.write_varint_bytes(header_value.to_s)
+         end
+
+         encoder.write_varint_bytes(record_buffer.string)
+       end
+
+       def ==(other)
+         offset_delta == other.offset_delta &&
+           timestamp_delta == other.timestamp_delta &&
+           offset == other.offset &&
+           is_control_record == other.is_control_record
+       end
+
+       def self.decode(decoder)
+         record_decoder = Decoder.from_string(decoder.varint_bytes)
+
+         attributes = record_decoder.int8
+         timestamp_delta = record_decoder.varint
+         offset_delta = record_decoder.varint
+
+         key = record_decoder.varint_string
+         value = record_decoder.varint_bytes
+
+         headers = {}
+         record_decoder.varint_array do
+           header_key = record_decoder.varint_string
+           header_value = record_decoder.varint_bytes
+
+           headers[header_key] = header_value
+         end
+
+         new(
+           key: key,
+           value: value,
+           headers: headers,
+           attributes: attributes,
+           offset_delta: offset_delta,
+           timestamp_delta: timestamp_delta
+         )
+       end
+     end
+   end
+ end
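Note: each record is written as a varint-length-prefixed blob, so a record round-trips through an Encoder/Decoder pair. A minimal sketch (hypothetical key, value, and header):

    require "stringio"

    record = Kafka::Protocol::Record.new(key: "k", value: "v", headers: { "trace" => "1" })

    buffer = StringIO.new
    record.encode(Kafka::Protocol::Encoder.new(buffer))

    decoded = Kafka::Protocol::Record.decode(
      Kafka::Protocol::Decoder.from_string(buffer.string)
    )
    decoded.value    # => "v"
    decoded.headers  # => { "trace" => "1" }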