ruby-kafka-custom 0.7.7.26

Files changed (105)
  1. checksums.yaml +7 -0
  2. data/lib/kafka/async_producer.rb +279 -0
  3. data/lib/kafka/broker.rb +205 -0
  4. data/lib/kafka/broker_info.rb +16 -0
  5. data/lib/kafka/broker_pool.rb +41 -0
  6. data/lib/kafka/broker_uri.rb +43 -0
  7. data/lib/kafka/client.rb +754 -0
  8. data/lib/kafka/cluster.rb +455 -0
  9. data/lib/kafka/compression.rb +43 -0
  10. data/lib/kafka/compressor.rb +85 -0
  11. data/lib/kafka/connection.rb +220 -0
  12. data/lib/kafka/connection_builder.rb +33 -0
  13. data/lib/kafka/consumer.rb +592 -0
  14. data/lib/kafka/consumer_group.rb +208 -0
  15. data/lib/kafka/datadog.rb +413 -0
  16. data/lib/kafka/fetch_operation.rb +115 -0
  17. data/lib/kafka/fetched_batch.rb +54 -0
  18. data/lib/kafka/fetched_batch_generator.rb +117 -0
  19. data/lib/kafka/fetched_message.rb +47 -0
  20. data/lib/kafka/fetched_offset_resolver.rb +48 -0
  21. data/lib/kafka/fetcher.rb +221 -0
  22. data/lib/kafka/gzip_codec.rb +30 -0
  23. data/lib/kafka/heartbeat.rb +25 -0
  24. data/lib/kafka/instrumenter.rb +38 -0
  25. data/lib/kafka/lz4_codec.rb +23 -0
  26. data/lib/kafka/message_buffer.rb +87 -0
  27. data/lib/kafka/offset_manager.rb +248 -0
  28. data/lib/kafka/partitioner.rb +35 -0
  29. data/lib/kafka/pause.rb +92 -0
  30. data/lib/kafka/pending_message.rb +29 -0
  31. data/lib/kafka/pending_message_queue.rb +41 -0
  32. data/lib/kafka/produce_operation.rb +205 -0
  33. data/lib/kafka/producer.rb +504 -0
  34. data/lib/kafka/protocol.rb +217 -0
  35. data/lib/kafka/protocol/add_partitions_to_txn_request.rb +34 -0
  36. data/lib/kafka/protocol/add_partitions_to_txn_response.rb +47 -0
  37. data/lib/kafka/protocol/alter_configs_request.rb +44 -0
  38. data/lib/kafka/protocol/alter_configs_response.rb +49 -0
  39. data/lib/kafka/protocol/api_versions_request.rb +21 -0
  40. data/lib/kafka/protocol/api_versions_response.rb +53 -0
  41. data/lib/kafka/protocol/consumer_group_protocol.rb +19 -0
  42. data/lib/kafka/protocol/create_partitions_request.rb +42 -0
  43. data/lib/kafka/protocol/create_partitions_response.rb +28 -0
  44. data/lib/kafka/protocol/create_topics_request.rb +45 -0
  45. data/lib/kafka/protocol/create_topics_response.rb +26 -0
  46. data/lib/kafka/protocol/decoder.rb +175 -0
  47. data/lib/kafka/protocol/delete_topics_request.rb +33 -0
  48. data/lib/kafka/protocol/delete_topics_response.rb +26 -0
  49. data/lib/kafka/protocol/describe_configs_request.rb +35 -0
  50. data/lib/kafka/protocol/describe_configs_response.rb +73 -0
  51. data/lib/kafka/protocol/describe_groups_request.rb +27 -0
  52. data/lib/kafka/protocol/describe_groups_response.rb +73 -0
  53. data/lib/kafka/protocol/encoder.rb +184 -0
  54. data/lib/kafka/protocol/end_txn_request.rb +29 -0
  55. data/lib/kafka/protocol/end_txn_response.rb +19 -0
  56. data/lib/kafka/protocol/fetch_request.rb +70 -0
  57. data/lib/kafka/protocol/fetch_response.rb +136 -0
  58. data/lib/kafka/protocol/find_coordinator_request.rb +29 -0
  59. data/lib/kafka/protocol/find_coordinator_response.rb +29 -0
  60. data/lib/kafka/protocol/heartbeat_request.rb +27 -0
  61. data/lib/kafka/protocol/heartbeat_response.rb +17 -0
  62. data/lib/kafka/protocol/init_producer_id_request.rb +26 -0
  63. data/lib/kafka/protocol/init_producer_id_response.rb +27 -0
  64. data/lib/kafka/protocol/join_group_request.rb +41 -0
  65. data/lib/kafka/protocol/join_group_response.rb +33 -0
  66. data/lib/kafka/protocol/leave_group_request.rb +25 -0
  67. data/lib/kafka/protocol/leave_group_response.rb +17 -0
  68. data/lib/kafka/protocol/list_groups_request.rb +23 -0
  69. data/lib/kafka/protocol/list_groups_response.rb +35 -0
  70. data/lib/kafka/protocol/list_offset_request.rb +53 -0
  71. data/lib/kafka/protocol/list_offset_response.rb +89 -0
  72. data/lib/kafka/protocol/member_assignment.rb +42 -0
  73. data/lib/kafka/protocol/message.rb +172 -0
  74. data/lib/kafka/protocol/message_set.rb +55 -0
  75. data/lib/kafka/protocol/metadata_request.rb +31 -0
  76. data/lib/kafka/protocol/metadata_response.rb +185 -0
  77. data/lib/kafka/protocol/offset_commit_request.rb +47 -0
  78. data/lib/kafka/protocol/offset_commit_response.rb +29 -0
  79. data/lib/kafka/protocol/offset_fetch_request.rb +36 -0
  80. data/lib/kafka/protocol/offset_fetch_response.rb +56 -0
  81. data/lib/kafka/protocol/produce_request.rb +92 -0
  82. data/lib/kafka/protocol/produce_response.rb +63 -0
  83. data/lib/kafka/protocol/record.rb +88 -0
  84. data/lib/kafka/protocol/record_batch.rb +222 -0
  85. data/lib/kafka/protocol/request_message.rb +26 -0
  86. data/lib/kafka/protocol/sasl_handshake_request.rb +33 -0
  87. data/lib/kafka/protocol/sasl_handshake_response.rb +28 -0
  88. data/lib/kafka/protocol/sync_group_request.rb +33 -0
  89. data/lib/kafka/protocol/sync_group_response.rb +23 -0
  90. data/lib/kafka/round_robin_assignment_strategy.rb +54 -0
  91. data/lib/kafka/sasl/gssapi.rb +76 -0
  92. data/lib/kafka/sasl/oauth.rb +64 -0
  93. data/lib/kafka/sasl/plain.rb +39 -0
  94. data/lib/kafka/sasl/scram.rb +177 -0
  95. data/lib/kafka/sasl_authenticator.rb +61 -0
  96. data/lib/kafka/snappy_codec.rb +25 -0
  97. data/lib/kafka/socket_with_timeout.rb +96 -0
  98. data/lib/kafka/ssl_context.rb +66 -0
  99. data/lib/kafka/ssl_socket_with_timeout.rb +187 -0
  100. data/lib/kafka/statsd.rb +296 -0
  101. data/lib/kafka/tagged_logger.rb +72 -0
  102. data/lib/kafka/transaction_manager.rb +261 -0
  103. data/lib/kafka/transaction_state_machine.rb +72 -0
  104. data/lib/kafka/version.rb +5 -0
  105. metadata +461 -0
data/lib/kafka/protocol/offset_commit_request.rb
@@ -0,0 +1,47 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class OffsetCommitRequest
+       # This value signals to the broker that its default configuration should be used.
+       DEFAULT_RETENTION_TIME = -1
+
+       def api_key
+         OFFSET_COMMIT_API
+       end
+
+       def api_version
+         2
+       end
+
+       def response_class
+         OffsetCommitResponse
+       end
+
+       def initialize(group_id:, generation_id:, member_id:, retention_time: DEFAULT_RETENTION_TIME, offsets:)
+         @group_id = group_id
+         @generation_id = generation_id
+         @member_id = member_id
+         @retention_time = retention_time
+         @offsets = offsets
+       end
+
+       def encode(encoder)
+         encoder.write_string(@group_id)
+         encoder.write_int32(@generation_id)
+         encoder.write_string(@member_id)
+         encoder.write_int64(@retention_time)
+
+         encoder.write_array(@offsets) do |topic, partitions|
+           encoder.write_string(topic)
+
+           encoder.write_array(partitions) do |partition, offset|
+             encoder.write_int32(partition)
+             encoder.write_int64(offset)
+             encoder.write_string(nil) # metadata
+           end
+         end
+       end
+     end
+   end
+ end
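For reference, a minimal sketch of how this request could be built and serialized by hand. The nested offsets shape ({ topic => { partition => offset } }) is inferred from the two write_array loops in #encode; the group, member, and topic names are illustrative, and the requires are assumptions:

    require "stringio"
    require "kafka"

    request = Kafka::Protocol::OffsetCommitRequest.new(
      group_id: "my-group",                 # consumer group committing the offsets
      generation_id: 1,                     # group generation from the JoinGroup handshake
      member_id: "member-1",                # this consumer's member id
      offsets: { "events" => { 0 => 42 } }  # commit offset 42 for events/0
    )

    buffer = StringIO.new
    request.encode(Kafka::Protocol::Encoder.new(buffer))
    buffer.string  # serialized request body, ready to be framed and sent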
data/lib/kafka/protocol/offset_commit_response.rb
@@ -0,0 +1,29 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class OffsetCommitResponse
+       attr_reader :topics
+
+       def initialize(topics:)
+         @topics = topics
+       end
+
+       def self.decode(decoder)
+         topics = decoder.array {
+           topic = decoder.string
+           partitions = decoder.array {
+             partition = decoder.int32
+             error_code = decoder.int16
+
+             [partition, error_code]
+           }
+
+           [topic, Hash[partitions]]
+         }
+
+         new(topics: Hash[topics])
+       end
+     end
+   end
+ end
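Decoding is the mirror image of the request: the response materializes as { topic => { partition => error_code } }. A hedged round-trip sketch using the gem's own Encoder and Decoder, assuming write_array and decoder.array are wire-symmetric (length prefix then elements); the payload values are illustrative:

    require "stringio"

    buffer = StringIO.new
    encoder = Kafka::Protocol::Encoder.new(buffer)
    # One topic ("events"), one partition (0) with error code 0 (no error).
    encoder.write_array([["events", [[0, 0]]]]) do |topic, partitions|
      encoder.write_string(topic)
      encoder.write_array(partitions) do |partition, error_code|
        encoder.write_int32(partition)
        encoder.write_int16(error_code)
      end
    end

    response = Kafka::Protocol::OffsetCommitResponse.decode(
      Kafka::Protocol::Decoder.from_string(buffer.string)
    )
    response.topics  # => { "events" => { 0 => 0 } }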
data/lib/kafka/protocol/offset_fetch_request.rb
@@ -0,0 +1,36 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class OffsetFetchRequest
+       def initialize(group_id:, topics:)
+         @group_id = group_id
+         @topics = topics
+       end
+
+       def api_key
+         OFFSET_FETCH_API
+       end
+
+       def api_version
+         1
+       end
+
+       def response_class
+         OffsetFetchResponse
+       end
+
+       def encode(encoder)
+         encoder.write_string(@group_id)
+
+         encoder.write_array(@topics) do |topic, partitions|
+           encoder.write_string(topic)
+
+           encoder.write_array(partitions) do |partition|
+             encoder.write_int32(partition)
+           end
+         end
+       end
+     end
+   end
+ end
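Here @topics maps each topic name to the list of partition ids whose committed offsets should be fetched, matching the nested write_array calls above. A small illustrative construction (names are assumptions):

    request = Kafka::Protocol::OffsetFetchRequest.new(
      group_id: "my-group",
      topics: { "events" => [0, 1, 2] }  # fetch committed offsets for three partitions
    )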
data/lib/kafka/protocol/offset_fetch_response.rb
@@ -0,0 +1,56 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class OffsetFetchResponse
+       class PartitionOffsetInfo
+         attr_reader :offset, :metadata, :error_code
+
+         def initialize(offset:, metadata:, error_code:)
+           @offset = offset
+           @metadata = metadata
+           @error_code = error_code
+         end
+       end
+
+       attr_reader :topics
+
+       def initialize(topics:)
+         @topics = topics
+       end
+
+       def offset_for(topic, partition)
+         offset_info = topics.fetch(topic).fetch(partition, nil)
+
+         if offset_info
+           Protocol.handle_error(offset_info.error_code)
+           offset_info.offset
+         else
+           -1
+         end
+       end
+
+       def self.decode(decoder)
+         topics = decoder.array {
+           topic = decoder.string
+
+           partitions = decoder.array {
+             partition = decoder.int32
+
+             info = PartitionOffsetInfo.new(
+               offset: decoder.int64,
+               metadata: decoder.string,
+               error_code: decoder.int16,
+             )
+
+             [partition, info]
+           }
+
+           [topic, Hash[partitions]]
+         }
+
+         new(topics: Hash[topics])
+       end
+     end
+   end
+ end
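Note the fallback in #offset_for: a missing partition entry yields -1, which callers can treat as "no committed offset". A sketch of both paths, assuming Protocol.handle_error is a no-op for error code 0 (values are illustrative):

    info = Kafka::Protocol::OffsetFetchResponse::PartitionOffsetInfo.new(
      offset: 42, metadata: nil, error_code: 0
    )
    response = Kafka::Protocol::OffsetFetchResponse.new(
      topics: { "events" => { 0 => info } }
    )

    response.offset_for("events", 0)  # => 42
    response.offset_for("events", 9)  # => -1, no offset committed for that partition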
data/lib/kafka/protocol/produce_request.rb
@@ -0,0 +1,92 @@
+ # frozen_string_literal: true
+
+ require "stringio"
+
+ module Kafka
+   module Protocol
+
+     # A produce request sends a message set to the server.
+     #
+     # ## API Specification
+     #
+     #     ProduceRequest => RequiredAcks Timeout [TopicName [Partition MessageSetSize MessageSet]]
+     #         RequiredAcks => int16
+     #         Timeout => int32
+     #         Partition => int32
+     #         MessageSetSize => int32
+     #
+     #     MessageSet => [Offset MessageSize Message]
+     #         Offset => int64
+     #         MessageSize => int32
+     #
+     #     Message => Crc MagicByte Attributes Key Value
+     #         Crc => int32
+     #         MagicByte => int8
+     #         Attributes => int8
+     #         Key => bytes
+     #         Value => bytes
+     #
+     class ProduceRequest
+       attr_reader :transactional_id, :required_acks, :timeout, :messages_for_topics, :compressor
+
+       # @param required_acks [Integer]
+       # @param timeout [Integer]
+       # @param messages_for_topics [Hash]
+       def initialize(transactional_id: nil, required_acks:, timeout:, messages_for_topics:, compressor: nil)
+         @transactional_id = transactional_id
+         @required_acks = required_acks
+         @timeout = timeout
+         @messages_for_topics = messages_for_topics
+         @compressor = compressor
+       end
+
+       def api_key
+         PRODUCE_API
+       end
+
+       def api_version
+         3
+       end
+
+       def response_class
+         requires_acks? ? Protocol::ProduceResponse : nil
+       end
+
+       # Whether this request requires any acknowledgements at all. If no acknowledgements
+       # are required, the server will not send back a response at all.
+       #
+       # @return [Boolean] true if acknowledgements are required, false otherwise.
+       def requires_acks?
+         @required_acks != 0
+       end
+
+       def encode(encoder)
+         encoder.write_string(@transactional_id)
+         encoder.write_int16(@required_acks)
+         encoder.write_int32(@timeout)
+
+         encoder.write_array(@messages_for_topics) do |topic, messages_for_partition|
+           encoder.write_string(topic)
+
+           encoder.write_array(messages_for_partition) do |partition, record_batch|
+             encoder.write_int32(partition)
+
+             record_batch.fulfill_relative_data
+             encoded_record_batch = compress(record_batch)
+             encoder.write_bytes(encoded_record_batch)
+           end
+         end
+       end
+
+       private
+
+       def compress(record_batch)
+         if @compressor.nil?
+           Protocol::Encoder.encode_with(record_batch)
+         else
+           @compressor.compress(record_batch)
+         end
+       end
+     end
+   end
+ end
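A hedged sketch of assembling a v3 produce request from a RecordBatch. The { topic => { partition => record_batch } } shape is inferred from the encode loops, and required_acks: -1 (wait for all in-sync replicas) follows the standard Kafka protocol convention; topic name and values are illustrative:

    batch = Kafka::Protocol::RecordBatch.new(
      records: [Kafka::Protocol::Record.new(value: "hello")]
    )

    request = Kafka::Protocol::ProduceRequest.new(
      required_acks: -1,   # -1 = all ISRs; 0 would make this fire-and-forget
      timeout: 10_000,     # broker-side timeout in milliseconds
      messages_for_topics: { "events" => { 0 => batch } }
    )

    request.requires_acks?  # => true
    request.response_class  # => Kafka::Protocol::ProduceResponse

With required_acks: 0, response_class returns nil, signalling to the connection layer that no reply should be awaited.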
data/lib/kafka/protocol/produce_response.rb
@@ -0,0 +1,63 @@
+ # frozen_string_literal: true
+
+ module Kafka
+   module Protocol
+     class ProduceResponse
+       class TopicInfo
+         attr_reader :topic, :partitions
+
+         def initialize(topic:, partitions:)
+           @topic = topic
+           @partitions = partitions
+         end
+       end
+
+       class PartitionInfo
+         attr_reader :partition, :error_code, :offset, :timestamp
+
+         def initialize(partition:, error_code:, offset:, timestamp:)
+           @partition = partition
+           @error_code = error_code
+           @offset = offset
+           @timestamp = timestamp
+         end
+       end
+
+       attr_reader :topics, :throttle_time_ms
+
+       def initialize(topics: [], throttle_time_ms: 0)
+         @topics = topics
+         @throttle_time_ms = throttle_time_ms
+       end
+
+       def each_partition
+         @topics.each do |topic_info|
+           topic_info.partitions.each do |partition_info|
+             yield topic_info, partition_info
+           end
+         end
+       end
+
+       def self.decode(decoder)
+         topics = decoder.array do
+           topic = decoder.string
+
+           partitions = decoder.array do
+             PartitionInfo.new(
+               partition: decoder.int32,
+               error_code: decoder.int16,
+               offset: decoder.int64,
+               timestamp: Time.at(decoder.int64 / 1000.0),
+             )
+           end
+
+           TopicInfo.new(topic: topic, partitions: partitions)
+         end
+
+         throttle_time_ms = decoder.int32
+
+         new(topics: topics, throttle_time_ms: throttle_time_ms)
+       end
+     end
+   end
+ end
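Each partition's append result carries its own error code, so callers typically walk the response with #each_partition. An illustrative traversal over a hand-built response (values are assumptions):

    partition = Kafka::Protocol::ProduceResponse::PartitionInfo.new(
      partition: 0, error_code: 0, offset: 100, timestamp: Time.now
    )
    topic = Kafka::Protocol::ProduceResponse::TopicInfo.new(
      topic: "events", partitions: [partition]
    )
    response = Kafka::Protocol::ProduceResponse.new(topics: [topic])

    response.each_partition do |topic_info, partition_info|
      Kafka::Protocol.handle_error(partition_info.error_code)  # raises on non-zero codes
      puts "#{topic_info.topic}/#{partition_info.partition} assigned offset #{partition_info.offset}"
    end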
data/lib/kafka/protocol/record.rb
@@ -0,0 +1,88 @@
+ module Kafka
+   module Protocol
+     class Record
+       attr_reader :key, :value, :headers, :attributes, :bytesize
+       attr_accessor :offset_delta, :timestamp_delta, :offset, :create_time, :is_control_record
+
+       def initialize(
+         key: nil,
+         value:,
+         headers: {},
+         attributes: 0,
+         offset_delta: 0,
+         offset: 0,
+         timestamp_delta: 0,
+         create_time: Time.now,
+         is_control_record: false
+       )
+         @key = key
+         @value = value
+         @headers = headers
+         @attributes = attributes
+
+         @offset_delta = offset_delta
+         @offset = offset
+         @timestamp_delta = timestamp_delta
+         @create_time = create_time
+         @is_control_record = is_control_record
+
+         @bytesize = @key.to_s.bytesize + @value.to_s.bytesize
+       end
+
+       def encode(encoder)
+         record_buffer = StringIO.new
+
+         record_encoder = Encoder.new(record_buffer)
+
+         record_encoder.write_int8(@attributes)
+         record_encoder.write_varint(@timestamp_delta)
+         record_encoder.write_varint(@offset_delta)
+
+         record_encoder.write_varint_string(@key)
+         record_encoder.write_varint_bytes(@value)
+
+         record_encoder.write_varint_array(@headers.to_a) do |header_key, header_value|
+           record_encoder.write_varint_string(header_key.to_s)
+           record_encoder.write_varint_bytes(header_value.to_s)
+         end
+
+         encoder.write_varint_bytes(record_buffer.string)
+       end
+
+       def ==(other)
+         offset_delta == other.offset_delta &&
+           timestamp_delta == other.timestamp_delta &&
+           offset == other.offset &&
+           is_control_record == other.is_control_record
+       end
+
+       def self.decode(decoder)
+         record_decoder = Decoder.from_string(decoder.varint_bytes)
+
+         attributes = record_decoder.int8
+         timestamp_delta = record_decoder.varint
+         offset_delta = record_decoder.varint
+
+         key = record_decoder.varint_string
+         value = record_decoder.varint_bytes
+
+         headers = {}
+         record_decoder.varint_array do
+           header_key = record_decoder.varint_string
+           header_value = record_decoder.varint_bytes
+
+           headers[header_key] = header_value
+         end
+
+         new(
+           key: key,
+           value: value,
+           headers: headers,
+           attributes: attributes,
+           offset_delta: offset_delta,
+           timestamp_delta: timestamp_delta
+         )
+       end
+     end
+   end
+ end
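Since a record is length-prefixed with a varint and its fields are varint-encoded too, #encode and .decode are exact mirrors. A round-trip sketch through a StringIO buffer (values are illustrative; requires are assumptions):

    require "stringio"
    require "kafka"

    record = Kafka::Protocol::Record.new(
      key: "id-1",
      value: "hello",
      headers: { "trace" => "abc" }
    )

    buffer = StringIO.new
    record.encode(Kafka::Protocol::Encoder.new(buffer))

    decoded = Kafka::Protocol::Record.decode(
      Kafka::Protocol::Decoder.from_string(buffer.string)
    )
    decoded.value    # => "hello"
    decoded.headers  # => { "trace" => "abc" }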
data/lib/kafka/protocol/record_batch.rb
@@ -0,0 +1,222 @@
+ require 'digest/crc32'
+ require 'kafka/protocol/record'
+
+ module Kafka
+   module Protocol
+     class RecordBatch
+       MAGIC_BYTE = 2
+       # The size of metadata before the real record data
+       RECORD_BATCH_OVERHEAD = 49
+       # Masks to extract information from attributes
+       CODEC_ID_MASK = 0b00000111
+       IN_TRANSACTION_MASK = 0b00010000
+       IS_CONTROL_BATCH_MASK = 0b00100000
+       TIMESTAMP_TYPE_MASK = 0b001000
+
+       attr_reader :records, :first_offset, :first_timestamp, :partition_leader_epoch, :in_transaction, :is_control_batch, :last_offset_delta, :max_timestamp, :producer_id, :producer_epoch, :first_sequence
+
+       attr_accessor :codec_id
+
+       def initialize(
+         records: [],
+         first_offset: 0,
+         first_timestamp: Time.now,
+         partition_leader_epoch: 0,
+         codec_id: 0,
+         in_transaction: false,
+         is_control_batch: false,
+         last_offset_delta: 0,
+         producer_id: -1,
+         producer_epoch: 0,
+         first_sequence: 0,
+         max_timestamp: Time.now
+       )
+         @records = Array(records)
+         @first_offset = first_offset
+         @first_timestamp = first_timestamp
+         @codec_id = codec_id
+
+         # Records verification
+         @last_offset_delta = last_offset_delta
+         @max_timestamp = max_timestamp
+
+         # Transaction information
+         @producer_id = producer_id
+         @producer_epoch = producer_epoch
+
+         @first_sequence = first_sequence
+         @partition_leader_epoch = partition_leader_epoch
+         @in_transaction = in_transaction
+         @is_control_batch = is_control_batch
+
+         mark_control_record
+       end
+
+       def size
+         @records.size
+       end
+
+       def last_offset
+         @first_offset + @last_offset_delta
+       end
+
+       def attributes
+         0x0000 | @codec_id |
+           (@in_transaction ? IN_TRANSACTION_MASK : 0x0) |
+           (@is_control_batch ? IS_CONTROL_BATCH_MASK : 0x0)
+       end
+
+       def encode(encoder)
+         encoder.write_int64(@first_offset)
+
+         record_batch_buffer = StringIO.new
+         record_batch_encoder = Encoder.new(record_batch_buffer)
+
+         record_batch_encoder.write_int32(@partition_leader_epoch)
+         record_batch_encoder.write_int8(MAGIC_BYTE)
+
+         body = encode_record_batch_body
+         crc = Digest::CRC32c.checksum(body)
+
+         record_batch_encoder.write_int32(crc)
+         record_batch_encoder.write(body)
+
+         encoder.write_bytes(record_batch_buffer.string)
+       end
+
+       def encode_record_batch_body
+         buffer = StringIO.new
+         encoder = Encoder.new(buffer)
+
+         encoder.write_int16(attributes)
+         encoder.write_int32(@last_offset_delta)
+         encoder.write_int64((@first_timestamp.to_f * 1000).to_i)
+         encoder.write_int64((@max_timestamp.to_f * 1000).to_i)
+
+         encoder.write_int64(@producer_id)
+         encoder.write_int16(@producer_epoch)
+         encoder.write_int32(@first_sequence)
+
+         encoder.write_int32(@records.length)
+
+         records_array = encode_record_array
+         if compressed?
+           codec = Compression.find_codec_by_id(@codec_id)
+           records_array = codec.compress(records_array)
+         end
+         encoder.write(records_array)
+
+         buffer.string
+       end
+
+       def encode_record_array
+         buffer = StringIO.new
+         encoder = Encoder.new(buffer)
+         @records.each do |record|
+           record.encode(encoder)
+         end
+         buffer.string
+       end
+
+       def compressed?
+         @codec_id != 0
+       end
+
+       def fulfill_relative_data
+         first_record = records.min_by { |record| record.create_time }
+         @first_timestamp = first_record.nil? ? Time.now : first_record.create_time
+
+         last_record = records.max_by { |record| record.create_time }
+         @max_timestamp = last_record.nil? ? Time.now : last_record.create_time
+
+         records.each_with_index do |record, index|
+           record.offset_delta = index
+           record.timestamp_delta = (record.create_time - first_timestamp).to_i
+         end
+         @last_offset_delta = records.length - 1
+       end
+
+       def ==(other)
+         records == other.records &&
+           first_offset == other.first_offset &&
+           partition_leader_epoch == other.partition_leader_epoch &&
+           in_transaction == other.in_transaction &&
+           is_control_batch == other.is_control_batch &&
+           last_offset_delta == other.last_offset_delta &&
+           producer_id == other.producer_id &&
+           producer_epoch == other.producer_epoch &&
+           first_sequence == other.first_sequence
+       end
+
+       def self.decode(decoder)
+         first_offset = decoder.int64
+
+         record_batch_raw = decoder.bytes
+         record_batch_decoder = Decoder.from_string(record_batch_raw)
+
+         partition_leader_epoch = record_batch_decoder.int32
+         # Currently, the magic byte is used to distinguish legacy MessageSet and
+         # RecordBatch. Therefore, we don't care about magic byte here yet.
+         _magic_byte = record_batch_decoder.int8
+         _crc = record_batch_decoder.int32
+
+         attributes = record_batch_decoder.int16
+         codec_id = attributes & CODEC_ID_MASK
+         in_transaction = (attributes & IN_TRANSACTION_MASK) > 0
+         is_control_batch = (attributes & IS_CONTROL_BATCH_MASK) > 0
+         log_append_time = (attributes & TIMESTAMP_TYPE_MASK) != 0
+
+         last_offset_delta = record_batch_decoder.int32
+         first_timestamp = Time.at(record_batch_decoder.int64 / 1000)
+         max_timestamp = Time.at(record_batch_decoder.int64 / 1000)
+
+         producer_id = record_batch_decoder.int64
+         producer_epoch = record_batch_decoder.int16
+         first_sequence = record_batch_decoder.int32
+
+         records_array_length = record_batch_decoder.int32
+         records_array_raw = record_batch_decoder.read(
+           record_batch_raw.size - RECORD_BATCH_OVERHEAD
+         )
+         if codec_id != 0
+           codec = Compression.find_codec_by_id(codec_id)
+           records_array_raw = codec.decompress(records_array_raw)
+         end
+
+         records_array_decoder = Decoder.from_string(records_array_raw)
+         records_array = []
+         until records_array_decoder.eof?
+           record = Record.decode(records_array_decoder)
+           record.offset = first_offset + record.offset_delta
+           record.create_time = log_append_time && max_timestamp ? max_timestamp : first_timestamp + record.timestamp_delta
+           records_array << record
+         end
+
+         raise InsufficientDataMessage if records_array.length != records_array_length
+
+         new(
+           records: records_array,
+           first_offset: first_offset,
+           first_timestamp: first_timestamp,
+           partition_leader_epoch: partition_leader_epoch,
+           in_transaction: in_transaction,
+           is_control_batch: is_control_batch,
+           last_offset_delta: last_offset_delta,
+           producer_id: producer_id,
+           producer_epoch: producer_epoch,
+           first_sequence: first_sequence,
+           max_timestamp: max_timestamp
+         )
+       rescue EOFError
+         raise InsufficientDataMessage, 'Partial trailing record detected!'
+       end
+
+       def mark_control_record
+         if in_transaction && is_control_batch
+           record = @records.first
+           record.is_control_record = true unless record.nil?
+         end
+       end
+     end
+   end
+ end
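The attributes int16 packs several fields: the codec id in the low three bits, the timestamp type at bit 3, and the transactional and control flags at bits 4 and 5, which is exactly what the masks above extract. A small worked example, assuming codec id 1 maps to gzip as in the Kafka spec:

    batch = Kafka::Protocol::RecordBatch.new(codec_id: 1, in_transaction: true)
    batch.attributes  # => 17 (0b00010001: gzip codec bit plus IN_TRANSACTION_MASK)

    # Decoding reverses the masking, as in RecordBatch.decode:
    batch.attributes & Kafka::Protocol::RecordBatch::CODEC_ID_MASK              # => 1
    (batch.attributes & Kafka::Protocol::RecordBatch::IN_TRANSACTION_MASK) > 0  # => true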