ruby-kafka 0.7.8 → 0.7.9
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -1
- data/lib/kafka/fetched_batch_generator.rb +1 -1
- data/lib/kafka/prometheus.rb +317 -0
- data/lib/kafka/sasl/scram.rb +15 -12
- data/lib/kafka/ssl_context.rb +2 -1
- data/lib/kafka/tagged_logger.rb +24 -20
- data/lib/kafka/version.rb +1 -1
- data/ruby-kafka.gemspec +1 -0
- metadata +17 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9facc22cb8739a683d4be6ee8389f0e2c6e7a6530e17e855ee6333aabdf428f0
+  data.tar.gz: d0e979d9e2e0ccf776d7d062e807367ff6380fa4f2863ecf6c414f3a7a3dc1fa
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b339415a8ba234fe78d0fce1c36933f7d39e79561dfd296a1cdb580877f3e5ab7fe5c8119709458b5207f9dc9375638d7a1e93c5c24adc155841dca07b783e5b
+  data.tar.gz: b54e211a11096e98c6460958de7097c4293fdee3dac18e117f75e9ce2c1eeb6b749a8a8f2b5ba8e559ee75c4fa33a8a299c68e37bab9a1f7e16194d8dd8ef92f
data/CHANGELOG.md
CHANGED
@@ -2,7 +2,12 @@
 
 Changes and additions to the library will be listed here.
 
-##
+## 0.7.9
+
+- Fix SSL authentication for ruby < 2.4.0 (#742)
+- Add metrics for prometheus/client (#739)
+- Do not add nil message entries when ignoring old messages (#746)
+- Make SCRAM authentication thread-safe (#743)
 
 ## 0.7.8
 - Optionally verify hostname on SSL certs (#733)
data/lib/kafka/prometheus.rb
ADDED
@@ -0,0 +1,317 @@
+# frozen_string_literal: true
+
+#
+# Subscriber to ruby_kafka to report metrics to prometheus
+#
+# Usage:
+#   require "kafka/prometheus"
+#
+# Once the file has been required, no further configuration is needed, all operational
+# metrics are automatically emitted (Unless PROMETHEUS_NO_AUTO_START is set).
+#
+# By Peter Mustel, T2 Data AB
+#
+begin
+  require 'prometheus/client'
+rescue LoadError
+  warn 'In order to report Kafka client metrics to Prometheus you need to install the `prometheus-client` gem.'
+  raise
+end
+
+require 'active_support/subscriber'
+
+module Kafka
+  module Prometheus
+    SIZE_BUCKETS = [1, 10, 100, 1000, 10_000, 100_000, 1_000_000].freeze
+    LATENCY_BUCKETS = [0.0001, 0.001, 0.01, 0.1, 1.0, 10, 100, 1000].freeze
+    DELAY_BUCKETS = [1, 3, 10, 30, 100, 300, 1000, 3000, 10_000, 30_000].freeze
+
+    class << self
+      attr_accessor :registry
+
+      def start(registry = ::Prometheus::Client.registry)
+        @registry = registry
+        ConnectionSubscriber.attach_to 'connection.kafka'
+        ConsumerSubscriber.attach_to 'consumer.kafka'
+        ProducerSubscriber.attach_to 'producer.kafka'
+        AsyncProducerSubscriber.attach_to 'async_producer.kafka'
+        FetcherSubscriber.attach_to 'fetcher.kafka'
+      end
+    end
+
+    class ConnectionSubscriber < ActiveSupport::Subscriber
+      def initialize
+        super
+        @api_calls = Prometheus.registry.counter(:api_calls, 'Total calls')
+        @api_latency = Prometheus.registry.histogram(:api_latency, 'Latency', {}, LATENCY_BUCKETS)
+        @api_request_size = Prometheus.registry.histogram(:api_request_size, 'Request size', {}, SIZE_BUCKETS)
+        @api_response_size = Prometheus.registry.histogram(:api_response_size, 'Response size', {}, SIZE_BUCKETS)
+        @api_errors = Prometheus.registry.counter(:api_errors, 'Errors')
+      end
+
+      def request(event)
+        key = {
+          client: event.payload.fetch(:client_id),
+          api: event.payload.fetch(:api, 'unknown'),
+          broker: event.payload.fetch(:broker_host)
+        }
+        request_size = event.payload.fetch(:request_size, 0)
+        response_size = event.payload.fetch(:response_size, 0)
+
+        @api_calls.increment(key)
+        @api_latency.observe(key, event.duration)
+        @api_request_size.observe(key, request_size)
+        @api_response_size.observe(key, response_size)
+        @api_errors.increment(key) if event.payload.key?(:exception)
+      end
+    end
+
+    class ConsumerSubscriber < ActiveSupport::Subscriber
+      def initialize
+        super
+        @process_messages = Prometheus.registry.counter(:consumer_process_messages, 'Total messages')
+        @process_message_errors = Prometheus.registry.counter(:consumer_process_message_errors, 'Total errors')
+        @process_message_latency =
+          Prometheus.registry.histogram(:consumer_process_message_latency, 'Latency', {}, LATENCY_BUCKETS)
+        @offset_lag = Prometheus.registry.gauge(:consumer_offset_lag, 'Offset lag')
+        @time_lag = Prometheus.registry.gauge(:consumer_time_lag, 'Time lag of message')
+        @process_batch_errors = Prometheus.registry.counter(:consumer_process_batch_errors, 'Total errors in batch')
+        @process_batch_latency =
+          Prometheus.registry.histogram(:consumer_process_batch_latency, 'Latency in batch', {}, LATENCY_BUCKETS)
+        @batch_size = Prometheus.registry.histogram(:consumer_batch_size, 'Size of batch', {}, SIZE_BUCKETS)
+        @join_group = Prometheus.registry.histogram(:consumer_join_group, 'Time to join group', {}, DELAY_BUCKETS)
+        @join_group_errors = Prometheus.registry.counter(:consumer_join_group_errors, 'Total error in joining group')
+        @sync_group = Prometheus.registry.histogram(:consumer_sync_group, 'Time to sync group', {}, DELAY_BUCKETS)
+        @sync_group_errors = Prometheus.registry.counter(:consumer_sync_group_errors, 'Total error in syncing group')
+        @leave_group = Prometheus.registry.histogram(:consumer_leave_group, 'Time to leave group', {}, DELAY_BUCKETS)
+        @leave_group_errors = Prometheus.registry.counter(:consumer_leave_group_errors, 'Total error in leaving group')
+        @pause_duration = Prometheus.registry.gauge(:consumer_pause_duration, 'Pause duration')
+      end
+
+      def process_message(event)
+        key = {
+          client: event.payload.fetch(:client_id),
+          group_id: event.payload.fetch(:group_id),
+          topic: event.payload.fetch(:topic),
+          partition: event.payload.fetch(:partition)
+        }
+
+        offset_lag = event.payload.fetch(:offset_lag)
+        create_time = event.payload.fetch(:create_time)
+
+        time_lag = create_time && ((Time.now - create_time) * 1000).to_i
+
+        if event.payload.key?(:exception)
+          @process_message_errors.increment(key)
+        else
+          @process_message_latency.observe(key, event.duration)
+          @process_messages.increment(key)
+        end
+
+        @offset_lag.set(key, offset_lag)
+
+        # Not all messages have timestamps.
+        return unless time_lag
+
+        @time_lag.set(key, time_lag)
+      end
+
+      def process_batch(event)
+        key = {
+          client: event.payload.fetch(:client_id),
+          group_id: event.payload.fetch(:group_id),
+          topic: event.payload.fetch(:topic),
+          partition: event.payload.fetch(:partition)
+        }
+        message_count = event.payload.fetch(:message_count)
+
+        if event.payload.key?(:exception)
+          @process_batch_errors.increment(key)
+        else
+          @process_batch_latency.observe(key, event.duration)
+          @process_messages.increment(key, message_count)
+        end
+      end
+
+      def fetch_batch(event)
+        key = {
+          client: event.payload.fetch(:client_id),
+          group_id: event.payload.fetch(:group_id),
+          topic: event.payload.fetch(:topic),
+          partition: event.payload.fetch(:partition)
+        }
+        offset_lag = event.payload.fetch(:offset_lag)
+        batch_size = event.payload.fetch(:message_count)
+
+        @batch_size.observe(key, batch_size)
+        @offset_lag.set(key, offset_lag)
+      end
+
+      def join_group(event)
+        key = { client: event.payload.fetch(:client_id), group_id: event.payload.fetch(:group_id) }
+        @join_group.observe(key, event.duration)
+
+        @join_group_errors.increment(key) if event.payload.key?(:exception)
+      end
+
+      def sync_group(event)
+        key = { client: event.payload.fetch(:client_id), group_id: event.payload.fetch(:group_id) }
+        @sync_group.observe(key, event.duration)
+
+        @sync_group_errors.increment(key) if event.payload.key?(:exception)
+      end
+
+      def leave_group(event)
+        key = { client: event.payload.fetch(:client_id), group_id: event.payload.fetch(:group_id) }
+        @leave_group.observe(key, event.duration)
+
+        @leave_group_errors.increment(key) if event.payload.key?(:exception)
+      end
+
+      def pause_status(event)
+        key = {
+          client: event.payload.fetch(:client_id),
+          group_id: event.payload.fetch(:group_id),
+          topic: event.payload.fetch(:topic),
+          partition: event.payload.fetch(:partition)
+        }
+
+        duration = event.payload.fetch(:duration)
+        @pause_duration.set(key, duration)
+      end
+    end
+
+    class ProducerSubscriber < ActiveSupport::Subscriber
+      def initialize
+        super
+        @produce_messages = Prometheus.registry.counter(:producer_produced_messages, 'Produced messages total')
+        @produce_message_size =
+          Prometheus.registry.histogram(:producer_message_size, 'Message size', {}, SIZE_BUCKETS)
+        @buffer_size = Prometheus.registry.histogram(:producer_buffer_size, 'Buffer size', {}, SIZE_BUCKETS)
+        @buffer_fill_ratio = Prometheus.registry.histogram(:producer_buffer_fill_ratio, 'Buffer fill ratio')
+        @buffer_fill_percentage = Prometheus.registry.histogram(:producer_buffer_fill_percentage, 'Buffer fill percentage')
+        @produce_errors = Prometheus.registry.counter(:producer_produce_errors, 'Produce errors')
+        @deliver_errors = Prometheus.registry.counter(:producer_deliver_errors, 'Deliver error')
+        @deliver_latency =
+          Prometheus.registry.histogram(:producer_deliver_latency, 'Delivery latency', {}, LATENCY_BUCKETS)
+        @deliver_messages = Prometheus.registry.counter(:producer_deliver_messages, 'Total count of delivered messages')
+        @deliver_attempts = Prometheus.registry.histogram(:producer_deliver_attempts, 'Delivery attempts')
+        @ack_messages = Prometheus.registry.counter(:producer_ack_messages, 'Ack')
+        @ack_delay = Prometheus.registry.histogram(:producer_ack_delay, 'Ack delay', {}, LATENCY_BUCKETS)
+        @ack_errors = Prometheus.registry.counter(:producer_ack_errors, 'Ack errors')
+      end
+
+      def produce_message(event)
+        client = event.payload.fetch(:client_id)
+        key = { client: client, topic: event.payload.fetch(:topic) }
+
+        message_size = event.payload.fetch(:message_size)
+        buffer_size = event.payload.fetch(:buffer_size)
+        max_buffer_size = event.payload.fetch(:max_buffer_size)
+        buffer_fill_ratio = buffer_size.to_f / max_buffer_size.to_f
+        buffer_fill_percentage = buffer_fill_ratio * 100.0
+
+        # This gets us the write rate.
+        @produce_messages.increment(key)
+        @produce_message_size.observe(key, message_size)
+
+        # This gets us the avg/max buffer size per producer.
+        @buffer_size.observe({ client: client }, buffer_size)
+
+        # This gets us the avg/max buffer fill ratio per producer.
+        @buffer_fill_ratio.observe({ client: client }, buffer_fill_ratio)
+        @buffer_fill_percentage.observe({ client: client }, buffer_fill_percentage)
+      end
+
+      def buffer_overflow(event)
+        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
+        @produce_errors.increment(key)
+      end
+
+      def deliver_messages(event)
+        key = { client: event.payload.fetch(:client_id) }
+        message_count = event.payload.fetch(:delivered_message_count)
+        attempts = event.payload.fetch(:attempts)
+
+        @deliver_errors.increment(key) if event.payload.key?(:exception)
+        @deliver_latency.observe(key, event.duration)
+
+        # Messages delivered to Kafka:
+        @deliver_messages.increment(key, message_count)
+
+        # Number of attempts to deliver messages:
+        @deliver_attempts.observe(key, attempts)
+      end
+
+      def ack_message(event)
+        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
+
+        # Number of messages ACK'd for the topic.
+        @ack_messages.increment(key)
+
+        # Histogram of delay between a message being produced and it being ACK'd.
+        @ack_delay.observe(key, event.payload.fetch(:delay))
+      end
+
+      def topic_error(event)
+        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
+
+        @ack_errors.increment(key)
+      end
+    end
+
+    class AsyncProducerSubscriber < ActiveSupport::Subscriber
+      def initialize
+        super
+        @queue_size = Prometheus.registry.histogram(:async_producer_queue_size, 'Queue size', {}, SIZE_BUCKETS)
+        @queue_fill_ratio = Prometheus.registry.histogram(:async_producer_queue_fill_ratio, 'Queue fill ratio')
+        @produce_errors = Prometheus.registry.counter(:async_producer_produce_errors, 'Producer errors')
+        @dropped_messages = Prometheus.registry.counter(:async_producer_dropped_messages, 'Dropped messages')
+      end
+
+      def enqueue_message(event)
+        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
+
+        queue_size = event.payload.fetch(:queue_size)
+        max_queue_size = event.payload.fetch(:max_queue_size)
+        queue_fill_ratio = queue_size.to_f / max_queue_size.to_f
+
+        # This gets us the avg/max queue size per producer.
+        @queue_size.observe(key, queue_size)
+
+        # This gets us the avg/max queue fill ratio per producer.
+        @queue_fill_ratio.observe(key, queue_fill_ratio)
+      end
+
+      def buffer_overflow(event)
+        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
+        @produce_errors.increment(key)
+      end
+
+      def drop_messages(event)
+        key = { client: event.payload.fetch(:client_id) }
+        message_count = event.payload.fetch(:message_count)
+
+        @dropped_messages.increment(key, message_count)
+      end
+    end
+
+    class FetcherSubscriber < ActiveSupport::Subscriber
+      def initialize
+        super
+        @queue_size = Prometheus.registry.gauge(:fetcher_queue_size, 'Queue size')
+      end
+
+      def loop(event)
+        queue_size = event.payload.fetch(:queue_size)
+        client = event.payload.fetch(:client_id)
+        group_id = event.payload.fetch(:group_id)
+
+        @queue_size.set({ client: client, group_id: group_id }, queue_size)
+      end
+    end
+  end
+end
+
+# To enable testability, it is possible to skip the start until test time
+Kafka::Prometheus.start unless defined?(PROMETHEUS_NO_AUTO_START)
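
The file's own header comment above documents the intended wiring: requiring `kafka/prometheus` attaches the subscribers automatically unless `PROMETHEUS_NO_AUTO_START` is defined. Below is a minimal sketch of the opt-out path with a dedicated registry; the `config.ru` setup and the Rack exporter middleware are assumptions based on the standard prometheus-client gem (and activesupport must be available), they are not shipped by this release.

# config.ru -- a sketch only; adjust the exporter wiring to your own app.
PROMETHEUS_NO_AUTO_START = true   # defined before the require, so auto-start is skipped
require "kafka"
require "kafka/prometheus"
require "prometheus/middleware/exporter"  # from the prometheus-client gem (assumed available)

# Attach the ruby-kafka subscribers explicitly, against a dedicated registry.
registry = Prometheus::Client::Registry.new
Kafka::Prometheus.start(registry)

# Serve the collected metrics; the registry: option follows prometheus-client's middleware.
use Prometheus::Middleware::Exporter, registry: registry
run ->(_env) { [200, { "Content-Type" => "text/plain" }, ["ok"]] }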
data/lib/kafka/sasl/scram.rb
CHANGED
@@ -12,6 +12,7 @@ module Kafka
       }.freeze
 
       def initialize(username:, password:, mechanism: 'sha256', logger:)
+        @semaphore = Mutex.new
         @username = username
         @password = password
         @logger = TaggedLogger.new(logger)
@@ -35,22 +36,24 @@ module Kafka
         @logger.debug "Authenticating #{@username} with SASL #{@mechanism}"
 
         begin
-
-
-
+          @semaphore.synchronize do
+            msg = first_message
+            @logger.debug "Sending first client SASL SCRAM message: #{msg}"
+            encoder.write_bytes(msg)
 
-
-
+            @server_first_message = decoder.bytes
+            @logger.debug "Received first server SASL SCRAM message: #{@server_first_message}"
 
-
-
-
+            msg = final_message
+            @logger.debug "Sending final client SASL SCRAM message: #{msg}"
+            encoder.write_bytes(msg)
 
-
-
+            response = parse_response(decoder.bytes)
+            @logger.debug "Received last server SASL SCRAM message: #{response}"
 
-
-
+            raise FailedScramAuthentication, response['e'] if response['e']
+            raise FailedScramAuthentication, "Invalid server signature" if response['v'] != server_signature
+          end
         rescue EOFError => e
           raise FailedScramAuthentication, e.message
         end
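
For context on the change above: SCRAM authentication is a multi-step conversation (client first message, server first message, client final message, server final message), and the new `@semaphore.synchronize` block keeps those round trips from interleaving when several threads drive the same client. A rough sketch of that scenario follows; the broker address and credentials are placeholders, while the `sasl_scram_*` options and `deliver_message` follow ruby-kafka's documented client API.

require "kafka"

# One client shared by several threads; each delivery may trigger (re)authentication.
kafka = Kafka.new(["localhost:9092"],
                  sasl_scram_username: "user",       # placeholder
                  sasl_scram_password: "secret",     # placeholder
                  sasl_scram_mechanism: "sha256")

threads = 4.times.map do
  Thread.new do
    # With the mutex in Scram#authenticate!, the SCRAM exchange for one thread
    # completes before another thread can start its own on the same connection.
    kafka.deliver_message("ping", topic: "healthchecks")
  end
end
threads.each(&:join)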
data/lib/kafka/ssl_context.rb
CHANGED
@@ -55,7 +55,8 @@ module Kafka
         end
         ssl_context.cert_store = store
         ssl_context.verify_mode = OpenSSL::SSL::VERIFY_PEER
-
+        # Verify certificate hostname if supported (ruby >= 2.4.0)
+        ssl_context.verify_hostname = verify_hostname if ssl_context.respond_to?(:verify_hostname=)
       end
 
       ssl_context
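
The `respond_to?` guard above means hostname verification is only set when the underlying `OpenSSL::SSL::SSLContext` actually supports `verify_hostname=` (Ruby 2.4 and later), instead of raising `NoMethodError` during SSL authentication on older Rubies. A hedged sketch of the client option this feeds, with placeholder certificate paths (the `ssl_verify_hostname:` keyword is the 0.7.8 feature referenced in the changelog):

require "kafka"

kafka = Kafka.new(["broker1:9093"],
                  ssl_ca_cert: File.read("ca.crt"),              # placeholder paths
                  ssl_client_cert: File.read("client.crt"),
                  ssl_client_cert_key: File.read("client.key"),
                  ssl_verify_hostname: true)

# On ruby >= 2.4 the flag is applied to the SSLContext; on older Rubies the
# respond_to? guard simply skips it, so connections no longer fail outright.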
data/lib/kafka/tagged_logger.rb
CHANGED
@@ -1,13 +1,19 @@
-require 'forwardable'
-
 # Basic implementation of a tagged logger that matches the API of
 # ActiveSupport::TaggedLogging.
 
+require 'logger'
+
 module Kafka
-
+  class TaggedLogger < SimpleDelegator
 
-
-
+    %i(debug info warn error fatal).each do |method|
+      define_method method do |msg_or_progname, &block|
+        if block_given?
+          super(msg_or_progname, &block)
+        else
+          super("#{tags_text}#{msg_or_progname}")
+        end
+      end
     end
 
     def tagged(*tags)
@@ -44,23 +50,21 @@ module Kafka
       end
     end
 
-
-
-
-
-    delegate [:push_tags, :pop_tags, :clear_tags!] => :formatter
-
-    def self.new(logger)
-      logger ||= Logger.new(nil)
-      return logger if logger.respond_to?(:push_tags) # already included
-      # Ensure we set a default formatter so we aren't extending nil!
-      logger.formatter ||= Logger::Formatter.new
-      logger.formatter.extend TaggedFormatter
-      logger.extend(self)
+    def self.new(logger_or_stream = nil)
+      # don't keep wrapping the same logger over and over again
+      return logger_or_stream if logger_or_stream.is_a?(TaggedLogger)
+      super
     end
 
-    def
-
+    def initialize(logger_or_stream = nil)
+      logger = if logger_or_stream.is_a?(::Logger)
+                 logger_or_stream
+               elsif logger_or_stream
+                 ::Logger.new(logger_or_stream)
+               else
+                 ::Logger.new(nil)
+               end
+      super(logger)
     end
 
     def flush
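
The reworked `TaggedLogger` above now delegates to a real `Logger` (via `SimpleDelegator`) instead of extending one in place, and its constructor accepts a `Logger`, an IO stream, or `nil`. A short usage sketch based on the diff; the `tagged` block API mirrors `ActiveSupport::TaggedLogging`, as the file's own comment notes, and the exact tag prefix format is an assumption:

require "logger"
require "kafka"

stdout_logger = Kafka::TaggedLogger.new($stdout)             # wraps Logger.new($stdout)
reused_logger = Kafka::TaggedLogger.new(Logger.new(STDERR))  # delegates to the given Logger
same_logger   = Kafka::TaggedLogger.new(stdout_logger)       # already tagged, returned as-is
null_logger   = Kafka::TaggedLogger.new(nil)                 # silent Logger.new(nil)

stdout_logger.tagged("MyConsumer") do
  stdout_logger.info("starting")   # message is prefixed with the "MyConsumer" tag
end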
data/lib/kafka/version.rb
CHANGED
data/ruby-kafka.gemspec
CHANGED
@@ -44,6 +44,7 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency "rspec_junit_formatter", "0.2.2"
   spec.add_development_dependency "dogstatsd-ruby", ">= 3.0.0", "< 5.0.0"
   spec.add_development_dependency "statsd-ruby"
+  spec.add_development_dependency "prometheus-client"
   spec.add_development_dependency "ruby-prof"
   spec.add_development_dependency "timecop"
   spec.add_development_dependency "rubocop", "~> 0.49.1"
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-kafka
 version: !ruby/object:Gem::Version
-  version: 0.7.
+  version: 0.7.9
 platform: ruby
 authors:
 - Daniel Schierbeck
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2019-
+date: 2019-07-15 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: digest-crc
@@ -240,6 +240,20 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: prometheus-client
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: ruby-prof
   requirement: !ruby/object:Gem::Requirement
@@ -374,6 +388,7 @@ files:
 - lib/kafka/pending_message_queue.rb
 - lib/kafka/produce_operation.rb
 - lib/kafka/producer.rb
+- lib/kafka/prometheus.rb
 - lib/kafka/protocol.rb
 - lib/kafka/protocol/add_offsets_to_txn_request.rb
 - lib/kafka/protocol/add_offsets_to_txn_response.rb