ruby-kafka-custom 0.7.7.26
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
- checksums.yaml +7 -0
- data/lib/kafka/async_producer.rb +279 -0
- data/lib/kafka/broker.rb +205 -0
- data/lib/kafka/broker_info.rb +16 -0
- data/lib/kafka/broker_pool.rb +41 -0
- data/lib/kafka/broker_uri.rb +43 -0
- data/lib/kafka/client.rb +754 -0
- data/lib/kafka/cluster.rb +455 -0
- data/lib/kafka/compression.rb +43 -0
- data/lib/kafka/compressor.rb +85 -0
- data/lib/kafka/connection.rb +220 -0
- data/lib/kafka/connection_builder.rb +33 -0
- data/lib/kafka/consumer.rb +592 -0
- data/lib/kafka/consumer_group.rb +208 -0
- data/lib/kafka/datadog.rb +413 -0
- data/lib/kafka/fetch_operation.rb +115 -0
- data/lib/kafka/fetched_batch.rb +54 -0
- data/lib/kafka/fetched_batch_generator.rb +117 -0
- data/lib/kafka/fetched_message.rb +47 -0
- data/lib/kafka/fetched_offset_resolver.rb +48 -0
- data/lib/kafka/fetcher.rb +221 -0
- data/lib/kafka/gzip_codec.rb +30 -0
- data/lib/kafka/heartbeat.rb +25 -0
- data/lib/kafka/instrumenter.rb +38 -0
- data/lib/kafka/lz4_codec.rb +23 -0
- data/lib/kafka/message_buffer.rb +87 -0
- data/lib/kafka/offset_manager.rb +248 -0
- data/lib/kafka/partitioner.rb +35 -0
- data/lib/kafka/pause.rb +92 -0
- data/lib/kafka/pending_message.rb +29 -0
- data/lib/kafka/pending_message_queue.rb +41 -0
- data/lib/kafka/produce_operation.rb +205 -0
- data/lib/kafka/producer.rb +504 -0
- data/lib/kafka/protocol.rb +217 -0
- data/lib/kafka/protocol/add_partitions_to_txn_request.rb +34 -0
- data/lib/kafka/protocol/add_partitions_to_txn_response.rb +47 -0
- data/lib/kafka/protocol/alter_configs_request.rb +44 -0
- data/lib/kafka/protocol/alter_configs_response.rb +49 -0
- data/lib/kafka/protocol/api_versions_request.rb +21 -0
- data/lib/kafka/protocol/api_versions_response.rb +53 -0
- data/lib/kafka/protocol/consumer_group_protocol.rb +19 -0
- data/lib/kafka/protocol/create_partitions_request.rb +42 -0
- data/lib/kafka/protocol/create_partitions_response.rb +28 -0
- data/lib/kafka/protocol/create_topics_request.rb +45 -0
- data/lib/kafka/protocol/create_topics_response.rb +26 -0
- data/lib/kafka/protocol/decoder.rb +175 -0
- data/lib/kafka/protocol/delete_topics_request.rb +33 -0
- data/lib/kafka/protocol/delete_topics_response.rb +26 -0
- data/lib/kafka/protocol/describe_configs_request.rb +35 -0
- data/lib/kafka/protocol/describe_configs_response.rb +73 -0
- data/lib/kafka/protocol/describe_groups_request.rb +27 -0
- data/lib/kafka/protocol/describe_groups_response.rb +73 -0
- data/lib/kafka/protocol/encoder.rb +184 -0
- data/lib/kafka/protocol/end_txn_request.rb +29 -0
- data/lib/kafka/protocol/end_txn_response.rb +19 -0
- data/lib/kafka/protocol/fetch_request.rb +70 -0
- data/lib/kafka/protocol/fetch_response.rb +136 -0
- data/lib/kafka/protocol/find_coordinator_request.rb +29 -0
- data/lib/kafka/protocol/find_coordinator_response.rb +29 -0
- data/lib/kafka/protocol/heartbeat_request.rb +27 -0
- data/lib/kafka/protocol/heartbeat_response.rb +17 -0
- data/lib/kafka/protocol/init_producer_id_request.rb +26 -0
- data/lib/kafka/protocol/init_producer_id_response.rb +27 -0
- data/lib/kafka/protocol/join_group_request.rb +41 -0
- data/lib/kafka/protocol/join_group_response.rb +33 -0
- data/lib/kafka/protocol/leave_group_request.rb +25 -0
- data/lib/kafka/protocol/leave_group_response.rb +17 -0
- data/lib/kafka/protocol/list_groups_request.rb +23 -0
- data/lib/kafka/protocol/list_groups_response.rb +35 -0
- data/lib/kafka/protocol/list_offset_request.rb +53 -0
- data/lib/kafka/protocol/list_offset_response.rb +89 -0
- data/lib/kafka/protocol/member_assignment.rb +42 -0
- data/lib/kafka/protocol/message.rb +172 -0
- data/lib/kafka/protocol/message_set.rb +55 -0
- data/lib/kafka/protocol/metadata_request.rb +31 -0
- data/lib/kafka/protocol/metadata_response.rb +185 -0
- data/lib/kafka/protocol/offset_commit_request.rb +47 -0
- data/lib/kafka/protocol/offset_commit_response.rb +29 -0
- data/lib/kafka/protocol/offset_fetch_request.rb +36 -0
- data/lib/kafka/protocol/offset_fetch_response.rb +56 -0
- data/lib/kafka/protocol/produce_request.rb +92 -0
- data/lib/kafka/protocol/produce_response.rb +63 -0
- data/lib/kafka/protocol/record.rb +88 -0
- data/lib/kafka/protocol/record_batch.rb +222 -0
- data/lib/kafka/protocol/request_message.rb +26 -0
- data/lib/kafka/protocol/sasl_handshake_request.rb +33 -0
- data/lib/kafka/protocol/sasl_handshake_response.rb +28 -0
- data/lib/kafka/protocol/sync_group_request.rb +33 -0
- data/lib/kafka/protocol/sync_group_response.rb +23 -0
- data/lib/kafka/round_robin_assignment_strategy.rb +54 -0
- data/lib/kafka/sasl/gssapi.rb +76 -0
- data/lib/kafka/sasl/oauth.rb +64 -0
- data/lib/kafka/sasl/plain.rb +39 -0
- data/lib/kafka/sasl/scram.rb +177 -0
- data/lib/kafka/sasl_authenticator.rb +61 -0
- data/lib/kafka/snappy_codec.rb +25 -0
- data/lib/kafka/socket_with_timeout.rb +96 -0
- data/lib/kafka/ssl_context.rb +66 -0
- data/lib/kafka/ssl_socket_with_timeout.rb +187 -0
- data/lib/kafka/statsd.rb +296 -0
- data/lib/kafka/tagged_logger.rb +72 -0
- data/lib/kafka/transaction_manager.rb +261 -0
- data/lib/kafka/transaction_state_machine.rb +72 -0
- data/lib/kafka/version.rb +5 -0
- metadata +461 -0
data/lib/kafka/compression.rb
@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+require "kafka/snappy_codec"
+require "kafka/gzip_codec"
+require "kafka/lz4_codec"
+
+module Kafka
+  module Compression
+    CODEC_NAMES = {
+      1 => :gzip,
+      2 => :snappy,
+      3 => :lz4,
+    }.freeze
+
+    CODECS = {
+      :gzip => GzipCodec.new,
+      :snappy => SnappyCodec.new,
+      :lz4 => LZ4Codec.new,
+    }.freeze
+
+    def self.codecs
+      CODECS.keys
+    end
+
+    def self.find_codec(name)
+      codec = CODECS.fetch(name) do
+        raise "Unknown compression codec #{name}"
+      end
+
+      codec.load
+
+      codec
+    end
+
+    def self.find_codec_by_id(codec_id)
+      codec_name = CODEC_NAMES.fetch(codec_id) do
+        raise "Unknown codec id #{codec_id}"
+      end
+
+      find_codec(codec_name)
+    end
+  end
+end
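For orientation (not part of the package diff): Kafka::Compression maps the numeric codec ids used on the wire (1 = gzip, 2 = snappy, 3 = lz4) to codec objects and loads the corresponding library on first lookup. A minimal sketch, assuming the relevant compression gems (e.g. snappy) are installed:

    require "kafka/compression"

    # Resolve a codec by the symbolic name used in producer configuration.
    # find_codec also calls #load, which requires the underlying library.
    codec = Kafka::Compression.find_codec(:snappy)

    # Resolve the numeric codec id carried in a message's attributes.
    Kafka::Compression.find_codec_by_id(1)  # => the gzip codec instance

    # Unknown names or ids raise a RuntimeError ("Unknown compression codec ...").
    Kafka::Compression.codecs  # => [:gzip, :snappy, :lz4]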
data/lib/kafka/compressor.rb
@@ -0,0 +1,85 @@
+# frozen_string_literal: true
+
+require "kafka/compression"
+
+module Kafka
+
+  # Compresses message sets using a specified codec.
+  #
+  # A message set is only compressed if its size meets the defined threshold.
+  #
+  # ## Instrumentation
+  #
+  # Whenever a message set is compressed, the notification
+  # `compress.compressor.kafka` will be emitted with the following payload:
+  #
+  # * `message_count` – the number of messages in the message set.
+  # * `uncompressed_bytesize` – the byte size of the original data.
+  # * `compressed_bytesize` – the byte size of the compressed data.
+  #
+  class Compressor
+
+    # @param codec_name [Symbol, nil]
+    # @param threshold [Integer] the minimum number of messages in a message set
+    #   that will trigger compression.
+    def initialize(codec_name: nil, threshold: 1, instrumenter:)
+      # Codec may be nil, in which case we won't compress.
+      @codec = codec_name && Compression.find_codec(codec_name)
+
+      @threshold = threshold
+      @instrumenter = instrumenter
+    end
+
+    # @param record_batch [Protocol::RecordBatch]
+    # @param offset [Integer] used to simulate broker behaviour in tests
+    # @return [Protocol::RecordBatch]
+    def compress(record_batch, offset: -1)
+      if record_batch.is_a?(Protocol::RecordBatch)
+        compress_record_batch(record_batch)
+      else
+        # Deprecated message set format
+        compress_message_set(record_batch, offset)
+      end
+    end
+
+    private
+
+    def compress_message_set(message_set, offset)
+      return message_set if @codec.nil? || message_set.size < @threshold
+
+      data = Protocol::Encoder.encode_with(message_set)
+      compressed_data = @codec.compress(data)
+
+      @instrumenter.instrument("compress.compressor") do |notification|
+        notification[:message_count] = message_set.size
+        notification[:uncompressed_bytesize] = data.bytesize
+        notification[:compressed_bytesize] = compressed_data.bytesize
+      end
+
+      wrapper_message = Protocol::Message.new(
+        value: compressed_data,
+        codec_id: @codec.codec_id,
+        offset: offset
+      )
+
+      Protocol::MessageSet.new(messages: [wrapper_message])
+    end
+
+    def compress_record_batch(record_batch)
+      if @codec.nil? || record_batch.size < @threshold
+        record_batch.codec_id = 0
+        return Protocol::Encoder.encode_with(record_batch)
+      end
+
+      record_batch.codec_id = @codec.codec_id
+      data = Protocol::Encoder.encode_with(record_batch)
+
+      @instrumenter.instrument("compress.compressor") do |notification|
+        notification[:message_count] = record_batch.size
+        notification[:compressed_bytesize] = data.bytesize
+      end
+
+      data
+    end
+  end
+end
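As a rough usage sketch (again, not part of the diff): a Compressor is normally built by the producer with a codec name, a message-count threshold and an instrumenter. The Kafka::Instrumenter constructor below is assumed to accept a default payload hash, as in mainline ruby-kafka; the commented call shows where a Protocol::RecordBatch built by the producer would be passed in.

    require "kafka"

    # Assumption: Kafka::Instrumenter.new accepts a default payload hash.
    instrumenter = Kafka::Instrumenter.new(client_id: "my-app")

    compressor = Kafka::Compressor.new(
      codec_name: :gzip,   # nil disables compression entirely
      threshold: 10,       # only compress sets/batches with at least 10 messages
      instrumenter: instrumenter,
    )

    # record_batch would be a Kafka::Protocol::RecordBatch assembled by the
    # producer; the return value is the encoded (and possibly compressed) data.
    # data = compressor.compress(record_batch)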
data/lib/kafka/connection.rb
@@ -0,0 +1,220 @@
+# frozen_string_literal: true
+
+require "stringio"
+require "kafka/socket_with_timeout"
+require "kafka/ssl_socket_with_timeout"
+require "kafka/protocol/request_message"
+require "kafka/protocol/encoder"
+require "kafka/protocol/decoder"
+
+module Kafka
+
+  # A connection to a single Kafka broker.
+  #
+  # Usually you'll need a separate connection to each broker in a cluster, since most
+  # requests must be directed specifically to the broker that is currently leader for
+  # the set of topic partitions you want to produce to or consume from.
+  #
+  # ## Instrumentation
+  #
+  # Connections emit a `request.connection.kafka` notification on each request. The following
+  # keys will be found in the payload:
+  #
+  # * `:api` — the name of the API being invoked.
+  # * `:request_size` — the number of bytes in the request.
+  # * `:response_size` — the number of bytes in the response.
+  #
+  # The notification also includes the duration of the request.
+  #
+  class Connection
+    SOCKET_TIMEOUT = 10
+    CONNECT_TIMEOUT = 10
+
+    # Time after which an idle connection will be reopened.
+    IDLE_TIMEOUT = 60 * 5
+
+    attr_reader :encoder
+    attr_reader :decoder
+
+    # Opens a connection to a Kafka broker.
+    #
+    # @param host [String] the hostname of the broker.
+    # @param port [Integer] the port of the broker.
+    # @param client_id [String] the client id is a user-specified string sent in each
+    #   request to help trace calls and should logically identify the application
+    #   making the request.
+    # @param logger [Logger] the logger used to log trace messages.
+    # @param connect_timeout [Integer] the socket timeout for connecting to the broker.
+    #   Default is 10 seconds.
+    # @param socket_timeout [Integer] the socket timeout for reading and writing to the
+    #   broker. Default is 10 seconds.
+    #
+    # @return [Connection] a new connection.
+    def initialize(host:, port:, client_id:, logger:, instrumenter:, connect_timeout: nil, socket_timeout: nil, ssl_context: nil)
+      @host, @port, @client_id = host, port, client_id
+      @logger = TaggedLogger.new(logger)
+      @instrumenter = instrumenter
+
+      @connect_timeout = connect_timeout || CONNECT_TIMEOUT
+      @socket_timeout = socket_timeout || SOCKET_TIMEOUT
+      @ssl_context = ssl_context
+    end
+
+    def to_s
+      "#{@host}:#{@port}"
+    end
+
+    def open?
+      !@socket.nil? && !@socket.closed?
+    end
+
+    def close
+      @logger.debug "Closing socket to #{to_s}"
+
+      @socket.close if @socket
+    end
+
+    # Sends a request over the connection.
+    #
+    # @param request [#encode, #response_class] the request that should be
+    #   encoded and written.
+    #
+    # @return [Object] the response.
+    def send_request(request)
+      api_name = Protocol.api_name(request.api_key)
+
+      # Default notification payload.
+      notification = {
+        broker_host: @host,
+        api: api_name,
+        request_size: 0,
+        response_size: 0,
+      }
+
+      raise IdleConnection if idle?
+
+      @logger.push_tags(api_name)
+      @instrumenter.instrument("request.connection", notification) do
+        open unless open?
+
+        @correlation_id += 1
+
+        @logger.debug "Sending #{api_name} API request #{@correlation_id} to #{to_s}"
+
+        write_request(request, notification)
+
+        response_class = request.response_class
+        response = wait_for_response(response_class, notification) unless response_class.nil?
+
+        @last_request = Time.now
+
+        response
+      end
+    rescue SystemCallError, EOFError, IOError => e
+      close
+
+      raise ConnectionError, "Connection error #{e.class}: #{e}"
+    ensure
+      @logger.pop_tags
+    end
+
+    private
+
+    def open
+      @logger.debug "Opening connection to #{@host}:#{@port} with client id #{@client_id}..."
+
+      if @ssl_context
+        @socket = SSLSocketWithTimeout.new(@host, @port, connect_timeout: @connect_timeout, timeout: @socket_timeout, ssl_context: @ssl_context)
+      else
+        @socket = SocketWithTimeout.new(@host, @port, connect_timeout: @connect_timeout, timeout: @socket_timeout)
+      end
+
+      @encoder = Kafka::Protocol::Encoder.new(@socket)
+      @decoder = Kafka::Protocol::Decoder.new(@socket)
+
+      # Correlation id is initialized to zero and bumped for each request.
+      @correlation_id = 0
+
+      @last_request = nil
+    rescue Errno::ETIMEDOUT => e
+      @logger.error "Timed out while trying to connect to #{self}: #{e}"
+      raise ConnectionError, e
+    rescue SocketError, Errno::ECONNREFUSED, Errno::EHOSTUNREACH => e
+      @logger.error "Failed to connect to #{self}: #{e}"
+      raise ConnectionError, e
+    end
+
+    def idle?
+      @last_request && @last_request < Time.now - IDLE_TIMEOUT
+    end
+
+    # Writes a request over the connection.
+    #
+    # @param request [#encode] the request that should be encoded and written.
+    #
+    # @return [nil]
+    def write_request(request, notification)
+      message = Kafka::Protocol::RequestMessage.new(
+        api_key: request.api_key,
+        api_version: request.respond_to?(:api_version) ? request.api_version : 0,
+        correlation_id: @correlation_id,
+        client_id: @client_id,
+        request: request,
+      )
+
+      data = Kafka::Protocol::Encoder.encode_with(message)
+      notification[:request_size] = data.bytesize
+
+      @encoder.write_bytes(data)
+
+      nil
+    rescue Errno::ETIMEDOUT
+      @logger.error "Timed out while writing request #{@correlation_id}"
+      raise
+    end
+
+    # Reads a response from the connection.
+    #
+    # @param response_class [#decode] an object that can decode the response from
+    #   a given Decoder.
+    #
+    # @return [nil]
+    def read_response(response_class, notification)
+      @logger.debug "Waiting for response #{@correlation_id} from #{to_s}"
+
+      data = @decoder.bytes
+      notification[:response_size] = data.bytesize
+
+      buffer = StringIO.new(data)
+      response_decoder = Kafka::Protocol::Decoder.new(buffer)
+
+      correlation_id = response_decoder.int32
+      response = response_class.decode(response_decoder)
+
+      @logger.debug "Received response #{correlation_id} from #{to_s}"
+
+      return correlation_id, response
+    rescue Errno::ETIMEDOUT
+      @logger.error "Timed out while waiting for response #{@correlation_id}"
+      raise
+    end
+
+    def wait_for_response(response_class, notification)
+      loop do
+        correlation_id, response = read_response(response_class, notification)
+
+        # There may have been a previous request that timed out before the client
+        # was able to read the response. In that case, the response will still be
+        # sitting in the socket waiting to be read. If the response we just read
+        # was to a previous request, we can safely skip it.
+        if correlation_id < @correlation_id
+          @logger.error "Received out-of-order response id #{correlation_id}, was expecting #{@correlation_id}"
+        elsif correlation_id > @correlation_id
+          raise Kafka::Error, "Correlation id mismatch: expected #{@correlation_id} but got #{correlation_id}"
+        else
+          return response
+        end
+      end
+    end
+  end
+end
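Connections are normally created through ConnectionBuilder (next file) rather than by hand, but the request/response flow above can be summarised: send_request accepts any object responding to #api_key, #encode and #response_class, wraps it in a RequestMessage tagged with the next correlation id, and matches the decoded response against that id, reopening the socket when the connection has been idle for more than IDLE_TIMEOUT. A hypothetical sketch, assuming a reachable broker and a no-argument Kafka::Protocol::ApiVersionsRequest constructor as in mainline ruby-kafka:

    require "kafka"
    require "logger"

    connection = Kafka::Connection.new(
      host: "kafka1",
      port: 9092,
      client_id: "my-app",
      logger: Logger.new($stdout),
      instrumenter: Kafka::Instrumenter.new(client_id: "my-app"),
      connect_timeout: 5,   # nil would fall back to CONNECT_TIMEOUT (10s)
      socket_timeout: 5,    # nil would fall back to SOCKET_TIMEOUT (10s)
    )

    # Any object responding to #api_key, #encode and #response_class works here;
    # ApiVersionsRequest is assumed to take no constructor arguments.
    request = Kafka::Protocol::ApiVersionsRequest.new
    response = connection.send_request(request)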
data/lib/kafka/connection_builder.rb
@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+module Kafka
+  class ConnectionBuilder
+    def initialize(client_id:, logger:, instrumenter:, connect_timeout:, socket_timeout:, ssl_context:, sasl_authenticator:)
+      @client_id = client_id
+      @logger = TaggedLogger.new(logger)
+      @instrumenter = instrumenter
+      @connect_timeout = connect_timeout
+      @socket_timeout = socket_timeout
+      @ssl_context = ssl_context
+      @sasl_authenticator = sasl_authenticator
+    end
+
+    def build_connection(host, port)
+      connection = Connection.new(
+        host: host,
+        port: port,
+        client_id: @client_id,
+        connect_timeout: @connect_timeout,
+        socket_timeout: @socket_timeout,
+        logger: @logger,
+        instrumenter: @instrumenter,
+        ssl_context: @ssl_context,
+      )
+
+      @sasl_authenticator.authenticate!(connection)
+
+      connection
+    end
+
+  end
+end
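ConnectionBuilder is the factory the cluster code uses so that every broker connection shares the same client id, timeouts, SSL context and SASL setup; only the host and port vary per call. A hedged sketch: the NoopAuthenticator below is a made-up stand-in for Kafka::SaslAuthenticator (whose real constructor takes the full set of SASL options), since build_connection only requires an object responding to #authenticate!.

    require "kafka"
    require "logger"

    # Made-up stand-in: the real Kafka::SaslAuthenticator performs a SASL
    # handshake; build_connection only calls #authenticate!(connection).
    class NoopAuthenticator
      def authenticate!(_connection); end
    end

    builder = Kafka::ConnectionBuilder.new(
      client_id: "my-app",
      logger: Logger.new($stdout),
      instrumenter: Kafka::Instrumenter.new(client_id: "my-app"),
      connect_timeout: 10,
      socket_timeout: 10,
      ssl_context: nil,                       # or an OpenSSL::SSL::SSLContext
      sasl_authenticator: NoopAuthenticator.new,
    )

    connection = builder.build_connection("kafka1", 9092)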
data/lib/kafka/consumer.rb
@@ -0,0 +1,592 @@
+# frozen_string_literal: true
+
+require "kafka/consumer_group"
+require "kafka/offset_manager"
+require "kafka/fetcher"
+require "kafka/pause"
+
+module Kafka
+
+  # A client that consumes messages from a Kafka cluster in coordination with
+  # other clients.
+  #
+  # A Consumer subscribes to one or more Kafka topics; all consumers with the
+  # same *group id* then agree on who should read from the individual topic
+  # partitions. When group members join or leave, the group synchronizes,
+  # making sure that all partitions are assigned to a single member, and that
+  # all members have some partitions to read from.
+  #
+  # ## Example
+  #
+  # A simple producer that simply writes the messages it consumes to the
+  # console.
+  #
+  #     require "kafka"
+  #
+  #     kafka = Kafka.new(["kafka1:9092", "kafka2:9092"])
+  #
+  #     # Create a new Consumer instance in the group `my-group`:
+  #     consumer = kafka.consumer(group_id: "my-group")
+  #
+  #     # Subscribe to a Kafka topic:
+  #     consumer.subscribe("messages")
+  #
+  #     # Loop forever, reading in messages from all topics that have been
+  #     # subscribed to.
+  #     consumer.each_message do |message|
+  #       puts message.topic
+  #       puts message.partition
+  #       puts message.key
+  #       puts message.headers
+  #       puts message.value
+  #       puts message.offset
+  #     end
+  #
+  class Consumer
+
+    def initialize(cluster:, logger:, instrumenter:, group:, fetcher:, offset_manager:, session_timeout:, heartbeat:)
+      @cluster = cluster
+      @logger = TaggedLogger.new(logger)
+      @instrumenter = instrumenter
+      @group = group
+      @offset_manager = offset_manager
+      @session_timeout = session_timeout
+      @fetcher = fetcher
+      @heartbeat = heartbeat
+
+      @pauses = Hash.new {|h, k|
+        h[k] = Hash.new {|h2, k2|
+          h2[k2] = Pause.new
+        }
+      }
+
+      # Whether or not the consumer is currently consuming messages.
+      @running = false
+
+      # Hash containing offsets for each topic and partition that has the
+      # automatically_mark_as_processed feature disabled. Offset manager is only active
+      # when everything is suppose to happen automatically. Otherwise we need to keep track of the
+      # offset manually in memory for all the time
+      # The key structure for this equals an array with topic and partition [topic, partition]
+      # The value is equal to the offset of the last message we've received
+      # @note It won't be updated in case user marks message as processed, because for the case
+      #   when user commits message other than last in a batch, this would make ruby-kafka refetch
+      #   some already consumed messages
+      @current_offsets = Hash.new { |h, k| h[k] = {} }
+    end
+
+    # Subscribes the consumer to a topic.
+    #
+    # Typically you either want to start reading messages from the very
+    # beginning of the topic's partitions or you simply want to wait for new
+    # messages to be written. In the former case, set `start_from_beginning`
+    # to true (the default); in the latter, set it to false.
+    #
+    # @param topic_or_regex [String, Regexp] subscribe to single topic with a string
+    #   or multiple topics matching a regex.
+    # @param default_offset [Symbol] whether to start from the beginning or the
+    #   end of the topic's partitions. Deprecated.
+    # @param start_from_beginning [Boolean] whether to start from the beginning
+    #   of the topic or just subscribe to new messages being produced. This
+    #   only applies when first consuming a topic partition – once the consumer
+    #   has checkpointed its progress, it will always resume from the last
+    #   checkpoint.
+    # @param max_bytes_per_partition [Integer] the maximum amount of data fetched
+    #   from a single partition at a time.
+    # @return [nil]
+    def subscribe(topic_or_regex, default_offset: nil, start_from_beginning: true, max_bytes_per_partition: 1048576)
+      default_offset ||= start_from_beginning ? :earliest : :latest
+
+      if topic_or_regex.is_a?(Regexp)
+        cluster_topics.select { |topic| topic =~ topic_or_regex }.each do |topic|
+          subscribe_to_topic(topic, default_offset, start_from_beginning, max_bytes_per_partition)
+        end
+      else
+        subscribe_to_topic(topic_or_regex, default_offset, start_from_beginning, max_bytes_per_partition)
+      end
+
+      nil
+    end
+
+    # Stop the consumer.
+    #
+    # The consumer will finish any in-progress work and shut down.
+    #
+    # @return [nil]
+    def stop
+      @running = false
+      @fetcher.stop
+      @cluster.disconnect
+    end
+
+    # Pause processing of a specific topic partition.
+    #
+    # When a specific message causes the processor code to fail, it can be a good
+    # idea to simply pause the partition until the error can be resolved, allowing
+    # the rest of the partitions to continue being processed.
+    #
+    # If the `timeout` argument is passed, the partition will automatically be
+    # resumed when the timeout expires. If `exponential_backoff` is enabled, each
+    # subsequent pause will cause the timeout to double until a message from the
+    # partition has been successfully processed.
+    #
+    # @param topic [String]
+    # @param partition [Integer]
+    # @param timeout [nil, Integer] the number of seconds to pause the partition for,
+    #   or `nil` if the partition should not be automatically resumed.
+    # @param max_timeout [nil, Integer] the maximum number of seconds to pause for,
+    #   or `nil` if no maximum should be enforced.
+    # @param exponential_backoff [Boolean] whether to enable exponential backoff.
+    # @return [nil]
+    def pause(topic, partition, timeout: nil, max_timeout: nil, exponential_backoff: false)
+      if max_timeout && !exponential_backoff
+        raise ArgumentError, "`max_timeout` only makes sense when `exponential_backoff` is enabled"
+      end
+
+      pause_for(topic, partition).pause!(
+        timeout: timeout,
+        max_timeout: max_timeout,
+        exponential_backoff: exponential_backoff,
+      )
+    end
+
+    # Resume processing of a topic partition.
+    #
+    # @see #pause
+    # @param topic [String]
+    # @param partition [Integer]
+    # @return [nil]
+    def resume(topic, partition)
+      pause_for(topic, partition).resume!
+
+      # During re-balancing we might have lost the paused partition. Check if partition is still in group before seek.
+      seek_to_next(topic, partition) if @group.assigned_to?(topic, partition)
+    end
+
+    # Whether the topic partition is currently paused.
+    #
+    # @see #pause
+    # @param topic [String]
+    # @param partition [Integer]
+    # @return [Boolean] true if the partition is paused, false otherwise.
+    def paused?(topic, partition)
+      pause = pause_for(topic, partition)
+      pause.paused? && !pause.expired?
+    end
+
+    # Fetches and enumerates the messages in the topics that the consumer group
+    # subscribes to.
+    #
+    # Each message is yielded to the provided block. If the block returns
+    # without raising an exception, the message will be considered successfully
+    # processed. At regular intervals the offset of the most recent successfully
+    # processed message in each partition will be committed to the Kafka
+    # offset store. If the consumer crashes or leaves the group, the group member
+    # that is tasked with taking over processing of these partitions will resume
+    # at the last committed offsets.
+    #
+    # @param min_bytes [Integer] the minimum number of bytes to read before
+    #   returning messages from each broker; if `max_wait_time` is reached, this
+    #   is ignored.
+    # @param max_bytes [Integer] the maximum number of bytes to read before
+    #   returning messages from each broker.
+    # @param max_wait_time [Integer, Float] the maximum duration of time to wait before
+    #   returning messages from each broker, in seconds.
+    # @param automatically_mark_as_processed [Boolean] whether to automatically
+    #   mark a message as successfully processed when the block returns
+    #   without an exception. Once marked successful, the offsets of processed
+    #   messages can be committed to Kafka.
+    # @yieldparam message [Kafka::FetchedMessage] a message fetched from Kafka.
+    # @raise [Kafka::ProcessingError] if there was an error processing a message.
+    #   The original exception will be returned by calling `#cause` on the
+    #   {Kafka::ProcessingError} instance.
+    # @return [nil]
+    def each_message(min_bytes: 1, max_bytes: 10485760, max_wait_time: 1, automatically_mark_as_processed: true)
+      @fetcher.configure(
+        min_bytes: min_bytes,
+        max_bytes: max_bytes,
+        max_wait_time: max_wait_time,
+      )
+
+      consumer_loop do
+        batches = fetch_batches
+
+        batches.each do |batch|
+          batch.messages.each do |message|
+            notification = {
+              topic: message.topic,
+              partition: message.partition,
+              offset: message.offset,
+              offset_lag: batch.highwater_mark_offset - message.offset - 1,
+              create_time: message.create_time,
+              key: message.key,
+              value: message.value,
+              headers: message.headers
+            }
+
+            # Instrument an event immediately so that subscribers don't have to wait until
+            # the block is completed.
+            @instrumenter.instrument("start_process_message.consumer", notification)
+
+            @instrumenter.instrument("process_message.consumer", notification) do
+              begin
+                yield message unless message.is_control_record
+                @current_offsets[message.topic][message.partition] = message.offset
+              rescue => e
+                location = "#{message.topic}/#{message.partition} at offset #{message.offset}"
+                backtrace = e.backtrace.join("\n")
+                @logger.error "Exception raised when processing #{location} -- #{e.class}: #{e}\n#{backtrace}"
+
+                raise ProcessingError.new(message.topic, message.partition, message.offset)
+              end
+            end
+
+            mark_message_as_processed(message) if automatically_mark_as_processed
+            @offset_manager.commit_offsets_if_necessary
+
+            trigger_heartbeat
+
+            return if shutting_down?
+          end
+
+          # We've successfully processed a batch from the partition, so we can clear
+          # the pause.
+          pause_for(batch.topic, batch.partition).reset!
+        end
+
+        # We may not have received any messages, but it's still a good idea to
+        # commit offsets if we've processed messages in the last set of batches.
+        # This also ensures the offsets are retained if we haven't read any messages
+        # since the offset retention period has elapsed.
+        @offset_manager.commit_offsets_if_necessary
+      end
+    end
+
+    # Fetches and enumerates the messages in the topics that the consumer group
+    # subscribes to.
+    #
+    # Each batch of messages is yielded to the provided block. If the block returns
+    # without raising an exception, the batch will be considered successfully
+    # processed. At regular intervals the offset of the most recent successfully
+    # processed message batch in each partition will be committed to the Kafka
+    # offset store. If the consumer crashes or leaves the group, the group member
+    # that is tasked with taking over processing of these partitions will resume
+    # at the last committed offsets.
+    #
+    # @param min_bytes [Integer] the minimum number of bytes to read before
+    #   returning messages from each broker; if `max_wait_time` is reached, this
+    #   is ignored.
+    # @param max_bytes [Integer] the maximum number of bytes to read before
+    #   returning messages from each broker.
+    # @param max_wait_time [Integer, Float] the maximum duration of time to wait before
+    #   returning messages from each broker, in seconds.
+    # @param automatically_mark_as_processed [Boolean] whether to automatically
+    #   mark a batch's messages as successfully processed when the block returns
+    #   without an exception. Once marked successful, the offsets of processed
+    #   messages can be committed to Kafka.
+    # @yieldparam batch [Kafka::FetchedBatch] a message batch fetched from Kafka.
+    # @raise [Kafka::ProcessingError] if there was an error processing a batch.
+    #   The original exception will be returned by calling `#cause` on the
+    #   {Kafka::ProcessingError} instance.
+    # @return [nil]
+    def each_batch(min_bytes: 1, max_bytes: 10485760, max_wait_time: 1, automatically_mark_as_processed: true)
+      @fetcher.configure(
+        min_bytes: min_bytes,
+        max_bytes: max_bytes,
+        max_wait_time: max_wait_time,
+      )
+
+      consumer_loop do
+        batches = fetch_batches
+
+        batches.each do |batch|
+          unless batch.empty?
+            raw_messages = batch.messages
+            batch.messages = raw_messages.reject(&:is_control_record)
+
+            notification = {
+              topic: batch.topic,
+              partition: batch.partition,
+              last_offset: batch.last_offset,
+              offset_lag: batch.offset_lag,
+              highwater_mark_offset: batch.highwater_mark_offset,
+              message_count: batch.messages.count,
+            }
+
+            # Instrument an event immediately so that subscribers don't have to wait until
+            # the block is completed.
+            @instrumenter.instrument("start_process_batch.consumer", notification)
+
+            @instrumenter.instrument("process_batch.consumer", notification) do
+              begin
+                yield batch
+                @current_offsets[batch.topic][batch.partition] = batch.last_offset unless batch.unknown_last_offset?
+              rescue => e
+                offset_range = (batch.first_offset..batch.last_offset || batch.highwater_mark_offset)
+                location = "#{batch.topic}/#{batch.partition} in offset range #{offset_range}"
+                backtrace = e.backtrace.join("\n")
+
+                @logger.error "Exception raised when processing #{location} -- #{e.class}: #{e}\n#{backtrace}"
+
+                raise ProcessingError.new(batch.topic, batch.partition, offset_range)
+              ensure
+                batch.messages = raw_messages
+              end
+            end
+            mark_message_as_processed(batch.messages.last) if automatically_mark_as_processed
+
+            # We've successfully processed a batch from the partition, so we can clear
+            # the pause.
+            pause_for(batch.topic, batch.partition).reset!
+          end
+
+          @offset_manager.commit_offsets_if_necessary
+
+          trigger_heartbeat
+
+          return if shutting_down?
+        end
+
+        # We may not have received any messages, but it's still a good idea to
+        # commit offsets if we've processed messages in the last set of batches.
+        # This also ensures the offsets are retained if we haven't read any messages
+        # since the offset retention period has elapsed.
+        @offset_manager.commit_offsets_if_necessary
+      end
+    end
+
+    # Move the consumer's position in a topic partition to the specified offset.
+    #
+    # Note that this has to be done prior to calling {#each_message} or {#each_batch}
+    # and only has an effect if the consumer is assigned the partition. Typically,
+    # you will want to do this in every consumer group member in order to make sure
+    # that the member that's assigned the partition knows where to start.
+    #
+    # @param topic [String]
+    # @param partition [Integer]
+    # @param offset [Integer]
+    # @return [nil]
+    def seek(topic, partition, offset)
+      @offset_manager.seek_to(topic, partition, offset)
+    end
+
+    def commit_offsets
+      @offset_manager.commit_offsets
+    end
+
+    def mark_message_as_processed(message)
+      @offset_manager.mark_as_processed(message.topic, message.partition, message.offset)
+    end
+
+    def trigger_heartbeat
+      @heartbeat.trigger
+    end
+
+    def trigger_heartbeat!
+      @heartbeat.trigger!
+    end
+
+    # Aliases for the external API compatibility
+    alias send_heartbeat_if_necessary trigger_heartbeat
+    alias send_heartbeat trigger_heartbeat!
+
+    private
+
+    def consumer_loop
+      @running = true
+      @logger.push_tags(@group.to_s)
+
+      @fetcher.start
+
+      while running?
+        begin
+          @instrumenter.instrument("loop.consumer") do
+            yield
+          end
+        rescue HeartbeatError
+          make_final_offsets_commit!
+          join_group if running?
+        rescue OffsetCommitError
+          join_group if running?
+        rescue RebalanceInProgress
+          @logger.warn "Group rebalance in progress, re-joining..."
+          join_group if running?
+        rescue FetchError, NotLeaderForPartition, UnknownTopicOrPartition
+          @cluster.mark_as_stale!
+        rescue LeaderNotAvailable => e
+          @logger.error "Leader not available; waiting 1s before retrying"
+          @cluster.mark_as_stale!
+          sleep 1
+        rescue ConnectionError => e
+          @logger.error "Connection error #{e.class}: #{e.message}"
+          @cluster.mark_as_stale!
+        rescue SignalException => e
+          @logger.warn "Received signal #{e.message}, shutting down"
+          @running = false
+        end
+      end
+    ensure
+      @fetcher.stop
+
+      # In order to quickly have the consumer group re-balance itself, it's
+      # important that members explicitly tell Kafka when they're leaving.
+      make_final_offsets_commit!
+      @group.leave rescue nil
+      @running = false
+      @logger.pop_tags
+    end
+
+    def make_final_offsets_commit!(attempts = 3)
+      @offset_manager.commit_offsets
+    rescue ConnectionError, OffsetCommitError, EOFError
+      # It's important to make sure final offsets commit is done
+      # As otherwise messages that have been processed after last auto-commit
+      # will be processed again and that may be huge amount of messages
+      return if attempts.zero?
+
+      @logger.error "Retrying to make final offsets commit (#{attempts} attempts left)"
+      sleep(0.1)
+      make_final_offsets_commit!(attempts - 1)
+    rescue Kafka::Error => e
+      @logger.error "Encountered error while shutting down; #{e.class}: #{e.message}"
+    end
+
+    def join_group
+      old_generation_id = @group.generation_id
+
+      @group.join
+
+      if old_generation_id && @group.generation_id != old_generation_id + 1
+        # We've been out of the group for at least an entire generation, no
+        # sense in trying to hold on to offset data
+        clear_current_offsets
+        @offset_manager.clear_offsets
+      else
+        # After rejoining the group we may have been assigned a new set of
+        # partitions. Keeping the old offset commits around forever would risk
+        # having the consumer go back and reprocess messages if it's assigned
+        # a partition it used to be assigned to way back. For that reason, we
+        # only keep commits for the partitions that we're still assigned.
+        clear_current_offsets(excluding: @group.assigned_partitions)
+        @offset_manager.clear_offsets_excluding(@group.assigned_partitions)
+      end
+
+      @fetcher.reset
+
+      @group.assigned_partitions.each do |topic, partitions|
+        partitions.each do |partition|
+          if paused?(topic, partition)
+            @logger.warn "Not fetching from #{topic}/#{partition} due to pause"
+          else
+            seek_to_next(topic, partition)
+          end
+        end
+      end
+    end
+
+    def seek_to_next(topic, partition)
+      # When automatic marking is off, the first poll needs to be based on the last committed
+      # offset from Kafka, that's why we fallback in case of nil (it may not be 0)
+      if @current_offsets[topic].key?(partition)
+        offset = @current_offsets[topic][partition] + 1
+      else
+        offset = @offset_manager.next_offset_for(topic, partition)
+      end
+
+      @fetcher.seek(topic, partition, offset)
+    end
+
+    def resume_paused_partitions!
+      @pauses.each do |topic, partitions|
+        partitions.each do |partition, pause|
+          @instrumenter.instrument("pause_status.consumer", {
+            topic: topic,
+            partition: partition,
+            duration: pause.pause_duration,
+          })
+
+          if pause.paused? && pause.expired?
+            @logger.info "Automatically resuming partition #{topic}/#{partition}, pause timeout expired"
+            resume(topic, partition)
+          end
+        end
+      end
+    end
+
+    def fetch_batches
+      # Return early if the consumer has been stopped.
+      return [] if shutting_down?
+
+      join_group unless @group.member?
+
+      trigger_heartbeat
+
+      resume_paused_partitions!
+
+      if !@fetcher.data?
+        @logger.debug "No batches to process"
+        sleep 2
+        []
+      else
+        tag, message = @fetcher.poll
+
+        case tag
+        when :batches
+          # make sure any old batches, fetched prior to the completion of a consumer group sync,
+          # are only processed if the batches are from brokers for which this broker is still responsible.
+          message.select { |batch| @group.assigned_to?(batch.topic, batch.partition) }
+        when :exception
+          raise message
+        end
+      end
+    rescue OffsetOutOfRange => e
+      @logger.error "Invalid offset #{e.offset} for #{e.topic}/#{e.partition}, resetting to default offset"
+
+      @offset_manager.seek_to_default(e.topic, e.partition)
+
+      retry
+    rescue ConnectionError => e
+      @logger.error "Connection error while fetching messages: #{e}"
+
+      raise FetchError, e
+    end
+
+    def pause_for(topic, partition)
+      @pauses[topic][partition]
+    end
+
+    def running?
+      @running
+    end
+
+    def shutting_down?
+      !running?
+    end
+
+    def clear_current_offsets(excluding: {})
+      @current_offsets.each do |topic, partitions|
+        partitions.keep_if do |partition, _|
+          excluding.fetch(topic, []).include?(partition)
+        end
+      end
+    end
+
+    def subscribe_to_topic(topic, default_offset, start_from_beginning, max_bytes_per_partition)
+      @group.subscribe(topic)
+      @offset_manager.set_default_offset(topic, default_offset)
+      @fetcher.subscribe(topic, max_bytes_per_partition: max_bytes_per_partition)
+    end
+
+    def cluster_topics
+      attempts = 0
+      begin
+        attempts += 1
+        @cluster.list_topics
+      rescue Kafka::ConnectionError
+        @cluster.mark_as_stale!
+        retry unless attempts > 1
+        raise
+      end
+    end
+  end
+end
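Beyond the each_message example embedded in the class documentation above, the batch API and the pause machinery defined in this file combine roughly as follows; handle is a stand-in for application code, and the error-handling pattern (pause the failing partition with exponential backoff) mirrors the behaviour described in the #pause documentation:

    require "kafka"

    kafka = Kafka.new(["kafka1:9092", "kafka2:9092"])
    consumer = kafka.consumer(group_id: "my-group")
    consumer.subscribe("events", start_from_beginning: false)

    # Stand-in for real processing logic.
    def handle(message)
      puts "#{message.topic}/#{message.partition} @ #{message.offset}: #{message.value}"
    end

    # Process whole batches and mark progress manually, so a crash mid-batch
    # replays the batch instead of silently skipping unprocessed messages.
    consumer.each_batch(automatically_mark_as_processed: false) do |batch|
      begin
        batch.messages.each do |message|
          handle(message)
          consumer.mark_message_as_processed(message)
        end
      rescue StandardError
        # Back off on the failing partition: 30s, doubling up to 10 minutes.
        consumer.pause(batch.topic, batch.partition,
                       timeout: 30, exponential_backoff: true, max_timeout: 600)
      end
    end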