ruby-kafka-aws-iam 1.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.circleci/config.yml +393 -0
- data/.github/workflows/stale.yml +19 -0
- data/.gitignore +13 -0
- data/.readygo +1 -0
- data/.rspec +3 -0
- data/.rubocop.yml +44 -0
- data/.ruby-version +1 -0
- data/.yardopts +3 -0
- data/CHANGELOG.md +314 -0
- data/Gemfile +5 -0
- data/ISSUE_TEMPLATE.md +23 -0
- data/LICENSE.txt +176 -0
- data/Procfile +2 -0
- data/README.md +1356 -0
- data/Rakefile +8 -0
- data/benchmarks/message_encoding.rb +23 -0
- data/bin/console +8 -0
- data/bin/setup +5 -0
- data/docker-compose.yml +39 -0
- data/examples/consumer-group.rb +35 -0
- data/examples/firehose-consumer.rb +64 -0
- data/examples/firehose-producer.rb +54 -0
- data/examples/simple-consumer.rb +34 -0
- data/examples/simple-producer.rb +42 -0
- data/examples/ssl-producer.rb +44 -0
- data/lib/kafka/async_producer.rb +297 -0
- data/lib/kafka/broker.rb +217 -0
- data/lib/kafka/broker_info.rb +16 -0
- data/lib/kafka/broker_pool.rb +41 -0
- data/lib/kafka/broker_uri.rb +43 -0
- data/lib/kafka/client.rb +838 -0
- data/lib/kafka/cluster.rb +513 -0
- data/lib/kafka/compression.rb +45 -0
- data/lib/kafka/compressor.rb +86 -0
- data/lib/kafka/connection.rb +228 -0
- data/lib/kafka/connection_builder.rb +33 -0
- data/lib/kafka/consumer.rb +642 -0
- data/lib/kafka/consumer_group/assignor.rb +63 -0
- data/lib/kafka/consumer_group.rb +231 -0
- data/lib/kafka/crc32_hash.rb +15 -0
- data/lib/kafka/datadog.rb +420 -0
- data/lib/kafka/digest.rb +22 -0
- data/lib/kafka/fetch_operation.rb +115 -0
- data/lib/kafka/fetched_batch.rb +58 -0
- data/lib/kafka/fetched_batch_generator.rb +120 -0
- data/lib/kafka/fetched_message.rb +48 -0
- data/lib/kafka/fetched_offset_resolver.rb +48 -0
- data/lib/kafka/fetcher.rb +224 -0
- data/lib/kafka/gzip_codec.rb +34 -0
- data/lib/kafka/heartbeat.rb +25 -0
- data/lib/kafka/instrumenter.rb +38 -0
- data/lib/kafka/interceptors.rb +33 -0
- data/lib/kafka/lz4_codec.rb +27 -0
- data/lib/kafka/message_buffer.rb +87 -0
- data/lib/kafka/murmur2_hash.rb +17 -0
- data/lib/kafka/offset_manager.rb +259 -0
- data/lib/kafka/partitioner.rb +40 -0
- data/lib/kafka/pause.rb +92 -0
- data/lib/kafka/pending_message.rb +29 -0
- data/lib/kafka/pending_message_queue.rb +41 -0
- data/lib/kafka/produce_operation.rb +205 -0
- data/lib/kafka/producer.rb +528 -0
- data/lib/kafka/prometheus.rb +316 -0
- data/lib/kafka/protocol/add_offsets_to_txn_request.rb +29 -0
- data/lib/kafka/protocol/add_offsets_to_txn_response.rb +21 -0
- data/lib/kafka/protocol/add_partitions_to_txn_request.rb +34 -0
- data/lib/kafka/protocol/add_partitions_to_txn_response.rb +47 -0
- data/lib/kafka/protocol/alter_configs_request.rb +44 -0
- data/lib/kafka/protocol/alter_configs_response.rb +49 -0
- data/lib/kafka/protocol/api_versions_request.rb +21 -0
- data/lib/kafka/protocol/api_versions_response.rb +53 -0
- data/lib/kafka/protocol/consumer_group_protocol.rb +19 -0
- data/lib/kafka/protocol/create_partitions_request.rb +42 -0
- data/lib/kafka/protocol/create_partitions_response.rb +28 -0
- data/lib/kafka/protocol/create_topics_request.rb +45 -0
- data/lib/kafka/protocol/create_topics_response.rb +26 -0
- data/lib/kafka/protocol/decoder.rb +175 -0
- data/lib/kafka/protocol/delete_topics_request.rb +33 -0
- data/lib/kafka/protocol/delete_topics_response.rb +26 -0
- data/lib/kafka/protocol/describe_configs_request.rb +35 -0
- data/lib/kafka/protocol/describe_configs_response.rb +73 -0
- data/lib/kafka/protocol/describe_groups_request.rb +27 -0
- data/lib/kafka/protocol/describe_groups_response.rb +73 -0
- data/lib/kafka/protocol/encoder.rb +184 -0
- data/lib/kafka/protocol/end_txn_request.rb +29 -0
- data/lib/kafka/protocol/end_txn_response.rb +19 -0
- data/lib/kafka/protocol/fetch_request.rb +70 -0
- data/lib/kafka/protocol/fetch_response.rb +136 -0
- data/lib/kafka/protocol/find_coordinator_request.rb +29 -0
- data/lib/kafka/protocol/find_coordinator_response.rb +29 -0
- data/lib/kafka/protocol/heartbeat_request.rb +27 -0
- data/lib/kafka/protocol/heartbeat_response.rb +17 -0
- data/lib/kafka/protocol/init_producer_id_request.rb +26 -0
- data/lib/kafka/protocol/init_producer_id_response.rb +27 -0
- data/lib/kafka/protocol/join_group_request.rb +47 -0
- data/lib/kafka/protocol/join_group_response.rb +41 -0
- data/lib/kafka/protocol/leave_group_request.rb +25 -0
- data/lib/kafka/protocol/leave_group_response.rb +17 -0
- data/lib/kafka/protocol/list_groups_request.rb +23 -0
- data/lib/kafka/protocol/list_groups_response.rb +35 -0
- data/lib/kafka/protocol/list_offset_request.rb +53 -0
- data/lib/kafka/protocol/list_offset_response.rb +89 -0
- data/lib/kafka/protocol/member_assignment.rb +42 -0
- data/lib/kafka/protocol/message.rb +172 -0
- data/lib/kafka/protocol/message_set.rb +55 -0
- data/lib/kafka/protocol/metadata_request.rb +31 -0
- data/lib/kafka/protocol/metadata_response.rb +185 -0
- data/lib/kafka/protocol/offset_commit_request.rb +47 -0
- data/lib/kafka/protocol/offset_commit_response.rb +29 -0
- data/lib/kafka/protocol/offset_fetch_request.rb +38 -0
- data/lib/kafka/protocol/offset_fetch_response.rb +56 -0
- data/lib/kafka/protocol/produce_request.rb +94 -0
- data/lib/kafka/protocol/produce_response.rb +63 -0
- data/lib/kafka/protocol/record.rb +88 -0
- data/lib/kafka/protocol/record_batch.rb +223 -0
- data/lib/kafka/protocol/request_message.rb +26 -0
- data/lib/kafka/protocol/sasl_handshake_request.rb +33 -0
- data/lib/kafka/protocol/sasl_handshake_response.rb +28 -0
- data/lib/kafka/protocol/sync_group_request.rb +33 -0
- data/lib/kafka/protocol/sync_group_response.rb +26 -0
- data/lib/kafka/protocol/txn_offset_commit_request.rb +46 -0
- data/lib/kafka/protocol/txn_offset_commit_response.rb +47 -0
- data/lib/kafka/protocol.rb +225 -0
- data/lib/kafka/round_robin_assignment_strategy.rb +52 -0
- data/lib/kafka/sasl/awsmskiam.rb +128 -0
- data/lib/kafka/sasl/gssapi.rb +76 -0
- data/lib/kafka/sasl/oauth.rb +64 -0
- data/lib/kafka/sasl/plain.rb +39 -0
- data/lib/kafka/sasl/scram.rb +180 -0
- data/lib/kafka/sasl_authenticator.rb +73 -0
- data/lib/kafka/snappy_codec.rb +29 -0
- data/lib/kafka/socket_with_timeout.rb +96 -0
- data/lib/kafka/ssl_context.rb +66 -0
- data/lib/kafka/ssl_socket_with_timeout.rb +192 -0
- data/lib/kafka/statsd.rb +296 -0
- data/lib/kafka/tagged_logger.rb +77 -0
- data/lib/kafka/transaction_manager.rb +306 -0
- data/lib/kafka/transaction_state_machine.rb +72 -0
- data/lib/kafka/version.rb +5 -0
- data/lib/kafka/zstd_codec.rb +27 -0
- data/lib/kafka.rb +373 -0
- data/lib/ruby-kafka.rb +5 -0
- data/ruby-kafka.gemspec +54 -0
- metadata +520 -0
# Basic implementation of a tagged logger that matches the API of
# ActiveSupport::TaggedLogging.

require 'delegate'
require 'logger'

module Kafka
  # Wraps any Logger-like object and prefixes each log line with the
  # currently pushed tags, e.g. "[tag1] [tag2] message".
  class TaggedLogger < SimpleDelegator

    %i(debug info warn error).each do |level|
      define_method(level) do |msg_or_progname, &block|
        # With a block, defer to the wrapped logger untouched; otherwise
        # prepend the current tag prefix to the message.
        return super(msg_or_progname, &block) if block_given?
        super("#{tags_text}#{msg_or_progname}")
      end
    end

    # Pushes the given tags, yields self, and always pops them again —
    # even when the block raises.
    def tagged(*tags)
      added = push_tags(*tags)
      yield self
    ensure
      pop_tags(added.size)
    end

    # Adds tags for subsequent log lines; nil and empty tags are
    # discarded. Returns the array of tags actually added.
    def push_tags(*tags)
      accepted = tags.flatten.reject { |tag| tag.nil? || tag.empty? }
      current_tags.concat(accepted)
      accepted
    end

    # Removes the given number of most recently pushed tags.
    def pop_tags(size = 1)
      current_tags.pop(size)
    end

    # Drops every tag for the current thread.
    def clear_tags!
      current_tags.clear
    end

    # Tags are stored per-thread so concurrent threads don't interleave
    # each other's prefixes.
    def current_tags
      # We use our object ID here to avoid conflicting with other instances
      @thread_key ||= "kafka_tagged_logging_tags:#{object_id}".freeze
      Thread.current[@thread_key] ||= []
    end

    # Returns the "[a] [b] " prefix for the current tags, or nil when no
    # tags are set.
    def tags_text
      tags = current_tags
      tags.collect { |tag| "[#{tag}] " }.join if tags.any?
    end

    def self.new(logger_or_stream = nil)
      # don't keep wrapping the same logger over and over again
      return logger_or_stream if logger_or_stream.is_a?(TaggedLogger)
      super
    end

    # Accepts an existing Logger-like object (anything responding to the
    # four level methods), an IO/stream to build a Logger around, or nil
    # for a null logger.
    def initialize(logger_or_stream = nil)
      logger =
        if %w(info debug warn error).all? { |m| logger_or_stream.respond_to?(m) }
          logger_or_stream
        elsif logger_or_stream
          ::Logger.new(logger_or_stream)
        else
          ::Logger.new(nil)
        end
      super(logger)
    end

    def flush
      clear_tags!
      super if defined?(super)
    end
  end

end
# frozen_string_literal: true

require 'kafka/transaction_state_machine'

module Kafka
  # Coordinates the idempotent and transactional producer features:
  # obtains a producer id/epoch from the transaction coordinator, tracks
  # per-partition sequence numbers, and drives a transaction through its
  # lifecycle (init -> begin -> add partitions/offsets -> commit or abort).
  #
  # Fixes in this revision: corrected log-message typos
  # ("parition" -> "partition", "Commiting" -> "Committing").
  class TransactionManager
    DEFAULT_TRANSACTION_TIMEOUT = 60 # 60 seconds
    TRANSACTION_RESULT_COMMIT = true
    TRANSACTION_RESULT_ABORT = false

    attr_reader :producer_id, :producer_epoch, :transactional_id

    # @param cluster [Kafka::Cluster] used to look up coordinators
    # @param logger [Logger]
    # @param idempotent [Boolean] enable idempotent production
    # @param transactional [Boolean] enable transactional production
    # @param transactional_id [String, nil] required for transactional mode
    # @param transactional_timeout [Integer] transaction timeout in seconds
    def initialize(
      cluster:,
      logger:,
      idempotent: false,
      transactional: false,
      transactional_id: nil,
      transactional_timeout: DEFAULT_TRANSACTION_TIMEOUT
    )
      @cluster = cluster
      @logger = TaggedLogger.new(logger)

      @transactional = transactional
      @transactional_id = transactional_id
      @transactional_timeout = transactional_timeout
      @transaction_state = Kafka::TransactionStateMachine.new(logger: logger)
      # topic => { partition => true } for partitions already registered
      # with the coordinator for the current transaction.
      @transaction_partitions = {}

      # If transactional mode is enabled, idempotent must be enabled
      @idempotent = transactional || idempotent

      # -1 signals that no producer id has been fetched yet.
      @producer_id = -1
      @producer_epoch = 0

      # topic => { partition => next sequence number }
      @sequences = {}
    end

    def idempotent?
      @idempotent == true
    end

    def transactional?
      @transactional == true && !@transactional_id.nil?
    end

    # Fetches (or re-fetches, when +force+ is true) a producer id and
    # epoch from the transaction coordinator, resetting all sequences.
    def init_producer_id(force = false)
      return if @producer_id >= 0 && !force

      response = transaction_coordinator.init_producer_id(
        transactional_id: @transactional_id,
        transactional_timeout: @transactional_timeout
      )
      Protocol.handle_error(response.error_code)

      # Reset producer id
      @producer_id = response.producer_id
      @producer_epoch = response.producer_epoch

      # Reset sequence
      @sequences = {}

      @logger.debug "Current Producer ID is #{@producer_id} and Producer Epoch is #{@producer_epoch}"
    end

    # Returns the next sequence number to use for the given partition.
    def next_sequence_for(topic, partition)
      @sequences[topic] ||= {}
      @sequences[topic][partition] ||= 0
    end

    def update_sequence_for(topic, partition, sequence)
      @sequences[topic] ||= {}
      @sequences[topic][partition] = sequence
    end

    # Prepares the manager for its first transaction: forces a fresh
    # producer id and moves the state machine to READY.
    def init_transactions
      force_transactional!
      unless @transaction_state.uninitialized?
        @logger.warn("Transaction already initialized!")
        return
      end
      init_producer_id(true)
      @transaction_partitions = {}
      @transaction_state.transition_to!(TransactionStateMachine::READY)

      @logger.info "Transaction #{@transactional_id} is initialized, Producer ID: #{@producer_id} (Epoch #{@producer_epoch})"

      nil
    rescue
      @transaction_state.transition_to!(TransactionStateMachine::ERROR)
      raise
    end

    # Registers any partitions not yet part of the current transaction
    # with the transaction coordinator.
    #
    # @param topic_partitions [Hash{String => Enumerable<Integer>}]
    def add_partitions_to_transaction(topic_partitions)
      force_transactional!

      if @transaction_state.uninitialized?
        raise Kafka::InvalidTxnStateError, 'Transaction is uninitialized'
      end

      # Extract newly created partitions
      new_topic_partitions = {}
      topic_partitions.each do |topic, partitions|
        partitions.each do |partition|
          @transaction_partitions[topic] ||= {}
          if !@transaction_partitions[topic][partition]
            new_topic_partitions[topic] ||= []
            new_topic_partitions[topic] << partition

            @logger.info "Adding partition #{topic}/#{partition} to transaction #{@transactional_id}, Producer ID: #{@producer_id} (Epoch #{@producer_epoch})"
          end
        end
      end

      unless new_topic_partitions.empty?
        response = transaction_coordinator.add_partitions_to_txn(
          transactional_id: @transactional_id,
          producer_id: @producer_id,
          producer_epoch: @producer_epoch,
          topics: new_topic_partitions
        )

        # Update added topic partitions
        response.errors.each do |tp|
          tp.partitions.each do |p|
            Protocol.handle_error(p.error_code)
            @transaction_partitions[tp.topic] ||= {}
            @transaction_partitions[tp.topic][p.partition] = true
          end
        end
      end

      nil
    rescue
      @transaction_state.transition_to!(TransactionStateMachine::ERROR)
      raise
    end

    # Marks the start of a transaction; requires a prior init_transactions.
    def begin_transaction
      force_transactional!
      raise Kafka::InvalidTxnStateError, 'Transaction has already started' if @transaction_state.in_transaction?
      raise Kafka::InvalidTxnStateError, 'Transaction is not ready' unless @transaction_state.ready?
      @transaction_state.transition_to!(TransactionStateMachine::IN_TRANSACTION)

      @logger.info "Begin transaction #{@transactional_id}, Producer ID: #{@producer_id} (Epoch #{@producer_epoch})"

      nil
    rescue
      @transaction_state.transition_to!(TransactionStateMachine::ERROR)
      raise
    end

    # Asks the coordinator to commit the in-flight transaction, then
    # returns the manager to the READY state.
    def commit_transaction
      force_transactional!

      if @transaction_state.committing_transaction?
        @logger.warn("Transaction is being committed")
        return
      end

      unless @transaction_state.in_transaction?
        raise Kafka::InvalidTxnStateError, 'Transaction is not valid to commit'
      end

      @transaction_state.transition_to!(TransactionStateMachine::COMMITTING_TRANSACTION)

      @logger.info "Committing transaction #{@transactional_id}, Producer ID: #{@producer_id} (Epoch #{@producer_epoch})"

      response = transaction_coordinator.end_txn(
        transactional_id: @transactional_id,
        producer_id: @producer_id,
        producer_epoch: @producer_epoch,
        transaction_result: TRANSACTION_RESULT_COMMIT
      )
      Protocol.handle_error(response.error_code)

      @logger.info "Transaction #{@transactional_id} is committed, Producer ID: #{@producer_id} (Epoch #{@producer_epoch})"
      complete_transaction

      nil
    rescue
      @transaction_state.transition_to!(TransactionStateMachine::ERROR)
      raise
    end

    # Asks the coordinator to abort the in-flight transaction, then
    # returns the manager to the READY state.
    def abort_transaction
      force_transactional!

      if @transaction_state.aborting_transaction?
        @logger.warn("Transaction is being aborted")
        return
      end

      unless @transaction_state.in_transaction?
        @logger.warn('Aborting transaction that was never opened on brokers')
        return
      end

      @transaction_state.transition_to!(TransactionStateMachine::ABORTING_TRANSACTION)

      @logger.info "Aborting transaction #{@transactional_id}, Producer ID: #{@producer_id} (Epoch #{@producer_epoch})"

      response = transaction_coordinator.end_txn(
        transactional_id: @transactional_id,
        producer_id: @producer_id,
        producer_epoch: @producer_epoch,
        transaction_result: TRANSACTION_RESULT_ABORT
      )
      Protocol.handle_error(response.error_code)

      @logger.info "Transaction #{@transactional_id} is aborted, Producer ID: #{@producer_id} (Epoch #{@producer_epoch})"

      complete_transaction

      nil
    rescue
      @transaction_state.transition_to!(TransactionStateMachine::ERROR)
      raise
    end

    # Commits consumed offsets as part of the current transaction
    # (consume-transform-produce pattern).
    def send_offsets_to_txn(offsets:, group_id:)
      force_transactional!

      unless @transaction_state.in_transaction?
        raise Kafka::InvalidTxnStateError, 'Transaction is not valid to send offsets'
      end

      add_response = transaction_coordinator.add_offsets_to_txn(
        transactional_id: @transactional_id,
        producer_id: @producer_id,
        producer_epoch: @producer_epoch,
        group_id: group_id
      )
      Protocol.handle_error(add_response.error_code)

      send_response = group_coordinator(group_id: group_id).txn_offset_commit(
        transactional_id: @transactional_id,
        group_id: group_id,
        producer_id: @producer_id,
        producer_epoch: @producer_epoch,
        offsets: offsets
      )
      send_response.errors.each do |tp|
        tp.partitions.each do |partition|
          Protocol.handle_error(partition.error_code)
        end
      end

      nil
    rescue
      @transaction_state.transition_to!(TransactionStateMachine::ERROR)
      raise
    end

    def in_transaction?
      @transaction_state.in_transaction?
    end

    def error?
      @transaction_state.error?
    end

    def ready?
      @transaction_state.ready?
    end

    # Best-effort cleanup on shutdown: aborts a pending transaction, or
    # waits briefly for an in-flight commit/abort to finish.
    def close
      if in_transaction?
        @logger.warn("Aborting pending transaction ...")
        abort_transaction
      elsif @transaction_state.aborting_transaction? || @transaction_state.committing_transaction?
        @logger.warn("Transaction is finishing. Sleeping until finish!")
        sleep 5
      end
    end

    private

    # Raises unless transactional mode is correctly configured.
    def force_transactional!
      unless transactional?
        raise Kafka::InvalidTxnStateError, 'Please turn on transactional mode to use transaction'
      end

      if @transactional_id.nil? || @transactional_id.empty?
        raise Kafka::InvalidTxnStateError, 'Please provide a transaction_id to use transactional mode'
      end
    end

    def transaction_coordinator
      @cluster.get_transaction_coordinator(
        transactional_id: @transactional_id
      )
    end

    def group_coordinator(group_id:)
      @cluster.get_group_coordinator(
        group_id: group_id
      )
    end

    # Returns to READY and forgets the registered partitions.
    def complete_transaction
      @transaction_state.transition_to!(TransactionStateMachine::READY)
      @transaction_partitions = {}
    end
  end
end
# frozen_string_literal: true

module Kafka
  # Thread-safe finite state machine that tracks the lifecycle of a
  # producer transaction.
  #
  # Fix in this revision: the IN_TRANSACTION symbol value was misspelled
  # (:in_trasaction). The value is only referenced through the constant,
  # but it leaked into error/log messages, so it is corrected here. The
  # constants are also frozen.
  class TransactionStateMachine
    class InvalidTransitionError < StandardError; end
    class InvalidStateError < StandardError; end

    STATES = [
      UNINITIALIZED = :uninitialized,
      READY = :ready,
      IN_TRANSACTION = :in_transaction,
      COMMITTING_TRANSACTION = :committing_transaction,
      ABORTING_TRANSACTION = :aborting_transaction,
      ERROR = :error
    ].freeze

    # Maps each TARGET state to the list of states it may be entered FROM
    # (see the membership check in #transition_to!).
    TRANSITIONS = {
      UNINITIALIZED => [READY, ERROR],
      READY => [UNINITIALIZED, COMMITTING_TRANSACTION, ABORTING_TRANSACTION],
      IN_TRANSACTION => [READY],
      COMMITTING_TRANSACTION => [IN_TRANSACTION],
      ABORTING_TRANSACTION => [IN_TRANSACTION],
      # Any states can transition to error state
      ERROR => STATES
    }.freeze

    def initialize(logger:)
      @state = UNINITIALIZED
      @mutex = Mutex.new
      @logger = TaggedLogger.new(logger)
    end

    # Moves to +next_state+.
    #
    # @raise [InvalidStateError] when +next_state+ is not a known state
    # @raise [InvalidTransitionError] when the transition is not allowed
    #   from the current state
    def transition_to!(next_state)
      raise InvalidStateError unless STATES.include?(next_state)
      unless TRANSITIONS[next_state].include?(@state)
        raise InvalidTransitionError, "Could not transition from state '#{@state}' to state '#{next_state}'"
      end
      @logger.debug("Transaction state changed to '#{next_state}'!")
      @mutex.synchronize { @state = next_state }
    end

    def uninitialized?
      in_state?(UNINITIALIZED)
    end

    def ready?
      in_state?(READY)
    end

    def in_transaction?
      in_state?(IN_TRANSACTION)
    end

    def committing_transaction?
      in_state?(COMMITTING_TRANSACTION)
    end

    def aborting_transaction?
      in_state?(ABORTING_TRANSACTION)
    end

    def error?
      in_state?(ERROR)
    end

    private

    # Reads the current state under the mutex.
    def in_state?(state)
      @mutex.synchronize { @state == state }
    end
  end
end
# frozen_string_literal: true

module Kafka
  # Compression codec backed by the third-party `zstd-ruby` gem.
  class ZstdCodec
    # Codec id written into the message attributes for zstd.
    def codec_id
      4
    end

    # Minimum Produce API version required when sending zstd batches.
    def produce_api_min_version
      7
    end

    # Loads the gem lazily; raises a descriptive LoadError when the
    # `zstd-ruby` gem is not available.
    def load
      begin
        require "zstd-ruby"
      rescue LoadError
        raise LoadError, "using zstd compression requires adding a dependency on the `zstd-ruby` gem to your Gemfile."
      end
    end

    def compress(data)
      Zstd.compress(data)
    end

    def decompress(data)
      Zstd.decompress(data)
    end
  end
end