ruby-kafka 0.7.10 → 1.0.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 5aa84a39ade791c3bce57152d426e95cb7d29575af3e8a9cb29fcded1c6cdad5
- data.tar.gz: 46be738fb1a2b71bbbd5d5d3b2fc95b705aef5740f666095eeaf40c9a6980fc4
+ metadata.gz: 8ebe10cb2c93b4387a14d069c4291751808abed924cbee6f4ff2f00770aaa1c2
+ data.tar.gz: 1e392b4f2b5137cd83bd191b5c629fd98b41697d389fb5c68c3100d03c3ff218
  SHA512:
- metadata.gz: 42e8059ab9b49e16a8e9c70e5739c9694d2142d5d440f8e2c18477cc151a24183536ae1548aed66a7e7ee6ea64c96ff2c9aacb63366ed92bd1612af6e52d8300
- data.tar.gz: 5008c803cc61fbea993f7f2a22ca7ac3ebb448f91cbdca40d540b9f4604dab8b4e38fcf4b20006a16641b5239e78d6045ccbec70168171219b716090ea72ba09
+ metadata.gz: 037a8fe1a7495f5e3d723e047392f68a1373e91f8a00337beb7ae6e053d031d6ab1b8597767a1b8b43399850e23493295180a24250e0c2f09b4f626316cd6915
+ data.tar.gz: 108d7160593e452f27c1e828438f4cd8c2c6c009f081a0ff949ca330f9487b948aad73cf23ba39e12adb0ed9f773c667cfeebaad1625b33770057b7b69b3dcb2
@@ -203,6 +203,70 @@ jobs:
       - run: bundle install --path vendor/bundle
       - run: bundle exec rspec --profile --tag functional spec/functional

+  kafka-2.3:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.12-2.3.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.3.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.3.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.4:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.12-2.4.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.4.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.4.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
 workflows:
   version: 2
   test:
@@ -214,3 +278,5 @@ workflows:
       - kafka-2.0
       - kafka-2.1
       - kafka-2.2
+      - kafka-2.3
+      - kafka-2.4
@@ -0,0 +1,19 @@
+ name: Mark stale issues and pull requests
+
+ on:
+   schedule:
+   - cron: "0 0 * * *"
+
+ jobs:
+   stale:
+
+     runs-on: ubuntu-latest
+
+     steps:
+     - uses: actions/stale@v1
+       with:
+         repo-token: ${{ secrets.GITHUB_TOKEN }}
+         stale-issue-message: 'Issue has been marked as stale due to a lack of activity.'
+         stale-pr-message: 'Pull request has been marked as stale due to a lack of activity.'
+         stale-issue-label: 'no-issue-activity'
+         stale-pr-label: 'no-pr-activity'
@@ -2,6 +2,13 @@

  Changes and additions to the library will be listed here.

+ ## Unreleased
+
+ ## 1.0.0
+
+ - Add client methods to manage configs (#759)
+ - Support Kafka 2.3 and 2.4.
+
  ## 0.7.10

  - Fix logger again (#762)
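To pick up this release, a Gemfile entry along the following lines works; the pessimistic constraint is an example, not something the gem mandates:

    # Gemfile
    gem "ruby-kafka", "~> 1.0"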
data/README.md CHANGED
@@ -113,6 +113,15 @@ Or install it yourself as:
      <td>Limited support</td>
      <td>Limited support</td>
    </tr>
+   <tr>
+     <th>Kafka 2.3</th>
+     <td>Limited support</td>
+     <td>Limited support</td>
+   </tr>
+     <th>Kafka 2.4</th>
+     <td>Limited support</td>
+     <td>Limited support</td>
+   </tr>
  </table>

  This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with the v0.5.x series. There's limited support for Kafka 0.8, and things should work with Kafka 0.11, although there may be performance issues due to changes in the protocol.
@@ -124,6 +133,9 @@ This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with t
  - **Kafka 1.0:** Everything that works with Kafka 0.11 should still work, but so far no features specific to Kafka 1.0 have been added.
  - **Kafka 2.0:** Everything that works with Kafka 1.0 should still work, but so far no features specific to Kafka 2.0 have been added.
  - **Kafka 2.1:** Everything that works with Kafka 2.0 should still work, but so far no features specific to Kafka 2.1 have been added.
+ - **Kafka 2.2:** Everything that works with Kafka 2.1 should still work, but so far no features specific to Kafka 2.2 have been added.
+ - **Kafka 2.3:** Everything that works with Kafka 2.2 should still work, but so far no features specific to Kafka 2.3 have been added.
+ - **Kafka 2.4:** Everything that works with Kafka 2.3 should still work, but so far no features specific to Kafka 2.4 have been added.

  This library requires Ruby 2.1 or higher.

@@ -103,6 +103,9 @@ module Kafka
    # @raise [BufferOverflow] if the message queue is full.
    # @return [nil]
    def produce(value, topic:, **options)
+     # We want to fail fast if `topic` isn't a String
+     topic = topic.to_str
+
      ensure_threads_running!

      if @queue.size >= @max_queue_size
@@ -138,6 +138,9 @@ module Kafka
    def deliver_message(value, key: nil, headers: {}, topic:, partition: nil, partition_key: nil, retries: 1)
      create_time = Time.now

+     # We want to fail fast if `topic` isn't a String
+     topic = topic.to_str
+
      message = PendingMessage.new(
        value: value,
        key: key,
@@ -337,6 +340,7 @@ module Kafka
    def consumer(
        group_id:,
        session_timeout: 30,
+       rebalance_timeout: 60,
        offset_commit_interval: 10,
        offset_commit_threshold: 0,
        heartbeat_interval: 10,
@@ -357,6 +361,7 @@ module Kafka
        logger: @logger,
        group_id: group_id,
        session_timeout: session_timeout,
+       rebalance_timeout: rebalance_timeout,
        retention_time: retention_time,
        instrumenter: instrumenter,
      )
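The new rebalance_timeout option defaults to 60 seconds and is forwarded to the consumer group (and, further down, to the JoinGroup request). A minimal sketch of setting it explicitly; the broker address, client id, group id and topic are placeholders:

    require "kafka"

    kafka = Kafka.new(["localhost:9092"], client_id: "my-app")

    consumer = kafka.consumer(
      group_id: "my-group",
      session_timeout: 30,
      rebalance_timeout: 60
    )

    consumer.subscribe("greetings")
    consumer.each_message do |message|
      puts message.value
    end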
@@ -530,6 +535,24 @@ module Kafka
        end
      end

+    # Describe broker configs
+    #
+    # @param broker_id [int] the id of the broker
+    # @param configs [Array] array of config keys.
+    # @return [Array<Kafka::Protocol::DescribeConfigsResponse::ConfigEntry>]
+    def describe_configs(broker_id, configs = [])
+      @cluster.describe_configs(broker_id, configs)
+    end
+
+    # Alter broker configs
+    #
+    # @param broker_id [int] the id of the broker
+    # @param configs [Array] array of config strings.
+    # @return [nil]
+    def alter_configs(broker_id, configs = [])
+      @cluster.alter_configs(broker_id, configs)
+    end
+
    # Creates a topic in the cluster.
    #
    # @example Creating a topic with log compaction
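A usage sketch for the new config methods; the broker id and config keys are placeholders, and the name/value readers on the returned entries are assumed from the response class named above. alter_configs takes the same broker id plus the config entries to change and returns nil.

    kafka = Kafka.new(["localhost:9092"], client_id: "admin")

    # Read two broker settings from broker 1.
    entries = kafka.describe_configs(1, ["log.retention.hours", "num.io.threads"])

    entries.each do |entry|
      # Assumes each ConfigEntry exposes #name and #value readers.
      puts "#{entry.name} = #{entry.value}"
    end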
@@ -615,6 +638,14 @@ module Kafka
      @cluster.describe_group(group_id)
    end

+    # Fetch all committed offsets for a consumer group
+    #
+    # @param group_id [String] the id of the consumer group
+    # @return [Hash<String, Hash<Integer, Kafka::Protocol::OffsetFetchResponse::PartitionOffsetInfo>>]
+    def fetch_group_offsets(group_id)
+      @cluster.fetch_group_offsets(group_id)
+    end
+
    # Create partitions for a topic.
    #
    # @param name [String] the name of the topic.
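A sketch of reading a group's committed offsets with the new method; the group id is a placeholder, and the #offset reader on each PartitionOffsetInfo is assumed from the response class in the signature above:

    offsets = kafka.fetch_group_offsets("my-group")

    offsets.each do |topic, partitions|
      partitions.each do |partition, info|
        puts "#{topic}/#{partition}: committed offset #{info.offset}"
      end
    end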
@@ -45,6 +45,10 @@ module Kafka
      new_topics = topics - @target_topics

      unless new_topics.empty?
+       if new_topics.any? { |topic| topic.nil? or topic.empty? }
+         raise ArgumentError, "Topic must not be nil or empty"
+       end
+
        @logger.info "New topics added to target list: #{new_topics.to_a.join(', ')}"

        @target_topics.merge(new_topics)
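With this guard, a nil or blank topic now fails fast with an ArgumentError instead of being sent to the cluster as part of a metadata request. A small sketch of what a caller sees; the topic name and partition count are placeholders:

    kafka.partitions_for("greetings") # => 3
    kafka.partitions_for("")          # raises ArgumentError, "Topic must not be nil or empty"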
@@ -139,6 +143,40 @@ module Kafka
        end
      end

+    def describe_configs(broker_id, configs = [])
+      options = {
+        resources: [[Kafka::Protocol::RESOURCE_TYPE_CLUSTER, broker_id.to_s, configs]]
+      }
+
+      info = cluster_info.brokers.find {|broker| broker.node_id == broker_id }
+      broker = @broker_pool.connect(info.host, info.port, node_id: info.node_id)
+
+      response = broker.describe_configs(**options)
+
+      response.resources.each do |resource|
+        Protocol.handle_error(resource.error_code, resource.error_message)
+      end
+
+      response.resources.first.configs
+    end
+
+    def alter_configs(broker_id, configs = [])
+      options = {
+        resources: [[Kafka::Protocol::RESOURCE_TYPE_CLUSTER, broker_id.to_s, configs]]
+      }
+
+      info = cluster_info.brokers.find {|broker| broker.node_id == broker_id }
+      broker = @broker_pool.connect(info.host, info.port, node_id: info.node_id)
+
+      response = broker.alter_configs(**options)
+
+      response.resources.each do |resource|
+        Protocol.handle_error(resource.error_code, resource.error_message)
+      end
+
+      nil
+    end
+
    def partitions_for(topic)
      add_target_topics([topic])
      refresh_metadata_if_necessary!
@@ -252,6 +290,20 @@ module Kafka
      group
    end

+    def fetch_group_offsets(group_id)
+      topics = get_group_coordinator(group_id: group_id)
+        .fetch_offsets(group_id: group_id, topics: nil)
+        .topics
+
+      topics.each do |_, partitions|
+        partitions.each do |_, response|
+          Protocol.handle_error(response.error_code)
+        end
+      end
+
+      topics
+    end
+
    def create_partitions_for(name, num_partitions:, timeout:)
      options = {
        topics: [[name, num_partitions, nil]],
@@ -58,6 +58,9 @@ module Kafka
      @connect_timeout = connect_timeout || CONNECT_TIMEOUT
      @socket_timeout = socket_timeout || SOCKET_TIMEOUT
      @ssl_context = ssl_context
+
+     @socket = nil
+     @last_request = nil
    end

    def to_s
@@ -7,11 +7,12 @@ module Kafka
  class ConsumerGroup
    attr_reader :assigned_partitions, :generation_id, :group_id

-   def initialize(cluster:, logger:, group_id:, session_timeout:, retention_time:, instrumenter:)
+   def initialize(cluster:, logger:, group_id:, session_timeout:, rebalance_timeout:, retention_time:, instrumenter:)
      @cluster = cluster
      @logger = TaggedLogger.new(logger)
      @group_id = group_id
      @session_timeout = session_timeout
+     @rebalance_timeout = rebalance_timeout
      @instrumenter = instrumenter
      @member_id = ""
      @generation_id = nil
@@ -140,7 +141,9 @@ module Kafka
      response = coordinator.join_group(
        group_id: @group_id,
        session_timeout: @session_timeout,
+       rebalance_timeout: @rebalance_timeout,
        member_id: @member_id,
+       topics: @topics,
      )

      Protocol.handle_error(response.error_code)
@@ -40,7 +40,7 @@ module Kafka
    end

    def host
-     @host ||= default_host
+     @host
    end

    def host=(host)
@@ -49,7 +49,7 @@ module Kafka
    end

    def port
-     @port ||= default_port
+     @port
    end

    def port=(port)
@@ -77,14 +77,6 @@ module Kafka

    private

-   def default_host
-     ::Datadog::Statsd.const_defined?(:Connection) ? ::Datadog::Statsd::Connection::DEFAULT_HOST : ::Datadog::Statsd::DEFAULT_HOST
-   end
-
-   def default_port
-     ::Datadog::Statsd.const_defined?(:Connection) ? ::Datadog::Statsd::Connection::DEFAULT_PORT : ::Datadog::Statsd::DEFAULT_PORT
-   end
-
    def clear
      @statsd && @statsd.close
      @statsd = nil
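With the hard-coded fallbacks removed, the reporter leaves host and port to dogstatsd-ruby's own defaults when they are unset (the gemspec change below requires dogstatsd-ruby >= 4.0). If the agent is not on the default address you still set it explicitly, as before; the values here are placeholders:

    require "kafka/datadog"

    Kafka::Datadog.host = "statsd.example.com"
    Kafka::Datadog.port = 8125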
@@ -17,6 +17,9 @@ module Kafka
      @commands = Queue.new
      @next_offsets = Hash.new { |h, k| h[k] = {} }

+     # We are only running when someone calls start.
+     @running = false
+
      # Long poll until at least this many bytes can be fetched.
      @min_bytes = 1

@@ -110,7 +113,7 @@ module Kafka
      elsif @queue.size < @max_queue_size
        step
      else
-       @logger.warn "Reached max fetcher queue size (#{@max_queue_size}), sleeping 1s"
+       @logger.info "Reached max fetcher queue size (#{@max_queue_size}), sleeping 1s"
        sleep 1
      end
    ensure
@@ -188,11 +188,14 @@ module Kafka
    # @raise [BufferOverflow] if the maximum buffer size has been reached.
    # @return [nil]
    def produce(value, key: nil, headers: {}, topic:, partition: nil, partition_key: nil, create_time: Time.now)
+     # We want to fail fast if `topic` isn't a String
+     topic = topic.to_str
+
      message = PendingMessage.new(
        value: value && value.to_s,
        key: key && key.to_s,
        headers: headers,
-       topic: topic.to_s,
+       topic: topic,
        partition: partition && Integer(partition),
        partition_key: partition_key && partition_key.to_s,
        create_time: create_time
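Because #to_str is only implemented by string-like objects, a symbol or other non-String topic now raises at the call site instead of being silently converted by #to_s. Sketch; the topic names are placeholders:

    producer = kafka.producer

    producer.produce("hello", topic: "greetings")  # accepted, as before
    producer.produce("hello", topic: :greetings)   # raises NoMethodError (Symbol has no #to_str)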
@@ -42,11 +42,11 @@ module Kafka
    class ConnectionSubscriber < ActiveSupport::Subscriber
      def initialize
        super
-       @api_calls = Prometheus.registry.counter(:api_calls, 'Total calls')
-       @api_latency = Prometheus.registry.histogram(:api_latency, 'Latency', {}, LATENCY_BUCKETS)
-       @api_request_size = Prometheus.registry.histogram(:api_request_size, 'Request size', {}, SIZE_BUCKETS)
-       @api_response_size = Prometheus.registry.histogram(:api_response_size, 'Response size', {}, SIZE_BUCKETS)
-       @api_errors = Prometheus.registry.counter(:api_errors, 'Errors')
+       @api_calls = Prometheus.registry.counter(:api_calls, docstring: 'Total calls', labels: [:client, :api, :broker])
+       @api_latency = Prometheus.registry.histogram(:api_latency, docstring: 'Latency', buckets: LATENCY_BUCKETS, labels: [:client, :api, :broker])
+       @api_request_size = Prometheus.registry.histogram(:api_request_size, docstring: 'Request size', buckets: SIZE_BUCKETS, labels: [:client, :api, :broker])
+       @api_response_size = Prometheus.registry.histogram(:api_response_size, docstring: 'Response size', buckets: SIZE_BUCKETS, labels: [:client, :api, :broker])
+       @api_errors = Prometheus.registry.counter(:api_errors, docstring: 'Errors', labels: [:client, :api, :broker])
      end

      def request(event)
@@ -58,34 +58,34 @@ module Kafka
        request_size = event.payload.fetch(:request_size, 0)
        response_size = event.payload.fetch(:response_size, 0)

-       @api_calls.increment(key)
-       @api_latency.observe(key, event.duration)
-       @api_request_size.observe(key, request_size)
-       @api_response_size.observe(key, response_size)
-       @api_errors.increment(key) if event.payload.key?(:exception)
+       @api_calls.increment(labels: key)
+       @api_latency.observe(event.duration, labels: key)
+       @api_request_size.observe(request_size, labels: key)
+       @api_response_size.observe(response_size, labels: key)
+       @api_errors.increment(labels: key) if event.payload.key?(:exception)
      end
    end

    class ConsumerSubscriber < ActiveSupport::Subscriber
      def initialize
        super
-       @process_messages = Prometheus.registry.counter(:consumer_process_messages, 'Total messages')
-       @process_message_errors = Prometheus.registry.counter(:consumer_process_message_errors, 'Total errors')
+       @process_messages = Prometheus.registry.counter(:consumer_process_messages, docstring: 'Total messages', labels: [:client, :group_id, :topic, :partition])
+       @process_message_errors = Prometheus.registry.counter(:consumer_process_message_errors, docstring: 'Total errors', labels: [:client, :group_id, :topic, :partition])
        @process_message_latency =
-         Prometheus.registry.histogram(:consumer_process_message_latency, 'Latency', {}, LATENCY_BUCKETS)
-       @offset_lag = Prometheus.registry.gauge(:consumer_offset_lag, 'Offset lag')
-       @time_lag = Prometheus.registry.gauge(:consumer_time_lag, 'Time lag of message')
-       @process_batch_errors = Prometheus.registry.counter(:consumer_process_batch_errors, 'Total errors in batch')
+         Prometheus.registry.histogram(:consumer_process_message_latency, docstring: 'Latency', buckets: LATENCY_BUCKETS, labels: [:client, :group_id, :topic, :partition])
+       @offset_lag = Prometheus.registry.gauge(:consumer_offset_lag, docstring: 'Offset lag', labels: [:client, :group_id, :topic, :partition])
+       @time_lag = Prometheus.registry.gauge(:consumer_time_lag, docstring: 'Time lag of message', labels: [:client, :group_id, :topic, :partition])
+       @process_batch_errors = Prometheus.registry.counter(:consumer_process_batch_errors, docstring: 'Total errors in batch', labels: [:client, :group_id, :topic, :partition])
        @process_batch_latency =
-         Prometheus.registry.histogram(:consumer_process_batch_latency, 'Latency in batch', {}, LATENCY_BUCKETS)
-       @batch_size = Prometheus.registry.histogram(:consumer_batch_size, 'Size of batch', {}, SIZE_BUCKETS)
-       @join_group = Prometheus.registry.histogram(:consumer_join_group, 'Time to join group', {}, DELAY_BUCKETS)
-       @join_group_errors = Prometheus.registry.counter(:consumer_join_group_errors, 'Total error in joining group')
-       @sync_group = Prometheus.registry.histogram(:consumer_sync_group, 'Time to sync group', {}, DELAY_BUCKETS)
-       @sync_group_errors = Prometheus.registry.counter(:consumer_sync_group_errors, 'Total error in syncing group')
-       @leave_group = Prometheus.registry.histogram(:consumer_leave_group, 'Time to leave group', {}, DELAY_BUCKETS)
-       @leave_group_errors = Prometheus.registry.counter(:consumer_leave_group_errors, 'Total error in leaving group')
-       @pause_duration = Prometheus.registry.gauge(:consumer_pause_duration, 'Pause duration')
+         Prometheus.registry.histogram(:consumer_process_batch_latency, docstring: 'Latency in batch', buckets: LATENCY_BUCKETS, labels: [:client, :group_id, :topic, :partition])
+       @batch_size = Prometheus.registry.histogram(:consumer_batch_size, docstring: 'Size of batch', buckets: SIZE_BUCKETS, labels: [:client, :group_id, :topic, :partition])
+       @join_group = Prometheus.registry.histogram(:consumer_join_group, docstring: 'Time to join group', buckets: DELAY_BUCKETS, labels: [:client, :group_id])
+       @join_group_errors = Prometheus.registry.counter(:consumer_join_group_errors, docstring: 'Total error in joining group', labels: [:client, :group_id])
+       @sync_group = Prometheus.registry.histogram(:consumer_sync_group, docstring: 'Time to sync group', buckets: DELAY_BUCKETS, labels: [:client, :group_id])
+       @sync_group_errors = Prometheus.registry.counter(:consumer_sync_group_errors, docstring: 'Total error in syncing group', labels: [:client, :group_id])
+       @leave_group = Prometheus.registry.histogram(:consumer_leave_group, docstring: 'Time to leave group', buckets: DELAY_BUCKETS, labels: [:client, :group_id])
+       @leave_group_errors = Prometheus.registry.counter(:consumer_leave_group_errors, docstring: 'Total error in leaving group', labels: [:client, :group_id])
+       @pause_duration = Prometheus.registry.gauge(:consumer_pause_duration, docstring: 'Pause duration', labels: [:client, :group_id, :topic, :partition])
      end

      def process_message(event)
@@ -102,18 +102,18 @@ module Kafka
        time_lag = create_time && ((Time.now - create_time) * 1000).to_i

        if event.payload.key?(:exception)
-         @process_message_errors.increment(key)
+         @process_message_errors.increment(labels: key)
        else
-         @process_message_latency.observe(key, event.duration)
-         @process_messages.increment(key)
+         @process_message_latency.observe(event.duration, labels: key)
+         @process_messages.increment(labels: key)
        end

-       @offset_lag.set(key, offset_lag)
+       @offset_lag.set(offset_lag, labels: key)

        # Not all messages have timestamps.
        return unless time_lag

-       @time_lag.set(key, time_lag)
+       @time_lag.set(time_lag, labels: key)
      end

      def process_batch(event)
@@ -126,10 +126,10 @@ module Kafka
        message_count = event.payload.fetch(:message_count)

        if event.payload.key?(:exception)
-         @process_batch_errors.increment(key)
+         @process_batch_errors.increment(labels: key)
        else
-         @process_batch_latency.observe(key, event.duration)
-         @process_messages.increment(key, message_count)
+         @process_batch_latency.observe(event.duration, labels: key)
+         @process_messages.increment(by: message_count, labels: key)
        end
      end

@@ -143,29 +143,29 @@ module Kafka
        offset_lag = event.payload.fetch(:offset_lag)
        batch_size = event.payload.fetch(:message_count)

-       @batch_size.observe(key, batch_size)
-       @offset_lag.set(key, offset_lag)
+       @batch_size.observe(batch_size, labels: key)
+       @offset_lag.set(offset_lag, labels: key)
      end

      def join_group(event)
        key = { client: event.payload.fetch(:client_id), group_id: event.payload.fetch(:group_id) }
-       @join_group.observe(key, event.duration)
+       @join_group.observe(event.duration, labels: key)

-       @join_group_errors.increment(key) if event.payload.key?(:exception)
+       @join_group_errors.increment(labels: key) if event.payload.key?(:exception)
      end

      def sync_group(event)
        key = { client: event.payload.fetch(:client_id), group_id: event.payload.fetch(:group_id) }
-       @sync_group.observe(key, event.duration)
+       @sync_group.observe(event.duration, labels: key)

-       @sync_group_errors.increment(key) if event.payload.key?(:exception)
+       @sync_group_errors.increment(labels: key) if event.payload.key?(:exception)
      end

      def leave_group(event)
        key = { client: event.payload.fetch(:client_id), group_id: event.payload.fetch(:group_id) }
-       @leave_group.observe(key, event.duration)
+       @leave_group.observe(event.duration, labels: key)

-       @leave_group_errors.increment(key) if event.payload.key?(:exception)
+       @leave_group_errors.increment(labels: key) if event.payload.key?(:exception)
      end

      def pause_status(event)
@@ -177,28 +177,28 @@ module Kafka
        }

        duration = event.payload.fetch(:duration)
-       @pause_duration.set(key, duration)
+       @pause_duration.set(duration, labels: key)
      end
    end

    class ProducerSubscriber < ActiveSupport::Subscriber
      def initialize
        super
-       @produce_messages = Prometheus.registry.counter(:producer_produced_messages, 'Produced messages total')
+       @produce_messages = Prometheus.registry.counter(:producer_produced_messages, docstring: 'Produced messages total', labels: [:client, :topic])
        @produce_message_size =
-         Prometheus.registry.histogram(:producer_message_size, 'Message size', {}, SIZE_BUCKETS)
-       @buffer_size = Prometheus.registry.histogram(:producer_buffer_size, 'Buffer size', {}, SIZE_BUCKETS)
-       @buffer_fill_ratio = Prometheus.registry.histogram(:producer_buffer_fill_ratio, 'Buffer fill ratio')
-       @buffer_fill_percentage = Prometheus.registry.histogram(:producer_buffer_fill_percentage, 'Buffer fill percentage')
-       @produce_errors = Prometheus.registry.counter(:producer_produce_errors, 'Produce errors')
-       @deliver_errors = Prometheus.registry.counter(:producer_deliver_errors, 'Deliver error')
+         Prometheus.registry.histogram(:producer_message_size, docstring: 'Message size', buckets: SIZE_BUCKETS, labels: [:client, :topic])
+       @buffer_size = Prometheus.registry.histogram(:producer_buffer_size, docstring: 'Buffer size', buckets: SIZE_BUCKETS, labels: [:client])
+       @buffer_fill_ratio = Prometheus.registry.histogram(:producer_buffer_fill_ratio, docstring: 'Buffer fill ratio', labels: [:client])
+       @buffer_fill_percentage = Prometheus.registry.histogram(:producer_buffer_fill_percentage, docstring: 'Buffer fill percentage', labels: [:client])
+       @produce_errors = Prometheus.registry.counter(:producer_produce_errors, docstring: 'Produce errors', labels: [:client, :topic])
+       @deliver_errors = Prometheus.registry.counter(:producer_deliver_errors, docstring: 'Deliver error', labels: [:client])
        @deliver_latency =
-         Prometheus.registry.histogram(:producer_deliver_latency, 'Delivery latency', {}, LATENCY_BUCKETS)
-       @deliver_messages = Prometheus.registry.counter(:producer_deliver_messages, 'Total count of delivered messages')
-       @deliver_attempts = Prometheus.registry.histogram(:producer_deliver_attempts, 'Delivery attempts')
-       @ack_messages = Prometheus.registry.counter(:producer_ack_messages, 'Ack')
-       @ack_delay = Prometheus.registry.histogram(:producer_ack_delay, 'Ack delay', {}, LATENCY_BUCKETS)
-       @ack_errors = Prometheus.registry.counter(:producer_ack_errors, 'Ack errors')
+         Prometheus.registry.histogram(:producer_deliver_latency, docstring: 'Delivery latency', buckets: LATENCY_BUCKETS, labels: [:client])
+       @deliver_messages = Prometheus.registry.counter(:producer_deliver_messages, docstring: 'Total count of delivered messages', labels: [:client])
+       @deliver_attempts = Prometheus.registry.histogram(:producer_deliver_attempts, docstring: 'Delivery attempts', labels: [:client])
+       @ack_messages = Prometheus.registry.counter(:producer_ack_messages, docstring: 'Ack', labels: [:client, :topic])
+       @ack_delay = Prometheus.registry.histogram(:producer_ack_delay, docstring: 'Ack delay', buckets: LATENCY_BUCKETS, labels: [:client, :topic])
+       @ack_errors = Prometheus.registry.counter(:producer_ack_errors, docstring: 'Ack errors', labels: [:client, :topic])
      end

      def produce_message(event)
@@ -212,20 +212,20 @@ module Kafka
        buffer_fill_percentage = buffer_fill_ratio * 100.0

        # This gets us the write rate.
-       @produce_messages.increment(key)
-       @produce_message_size.observe(key, message_size)
+       @produce_messages.increment(labels: key)
+       @produce_message_size.observe(message_size, labels: key)

        # This gets us the avg/max buffer size per producer.
-       @buffer_size.observe({ client: client }, buffer_size)
+       @buffer_size.observe(buffer_size, labels: { client: client })

        # This gets us the avg/max buffer fill ratio per producer.
-       @buffer_fill_ratio.observe({ client: client }, buffer_fill_ratio)
-       @buffer_fill_percentage.observe({ client: client }, buffer_fill_percentage)
+       @buffer_fill_ratio.observe(buffer_fill_ratio, labels: { client: client })
+       @buffer_fill_percentage.observe(buffer_fill_percentage, labels: { client: client })
      end

      def buffer_overflow(event)
        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
-       @produce_errors.increment(key)
+       @produce_errors.increment(labels: key)
      end

      def deliver_messages(event)
@@ -233,40 +233,40 @@ module Kafka
        message_count = event.payload.fetch(:delivered_message_count)
        attempts = event.payload.fetch(:attempts)

-       @deliver_errors.increment(key) if event.payload.key?(:exception)
-       @deliver_latency.observe(key, event.duration)
+       @deliver_errors.increment(labels: key) if event.payload.key?(:exception)
+       @deliver_latency.observe(event.duration, labels: key)

        # Messages delivered to Kafka:
-       @deliver_messages.increment(key, message_count)
+       @deliver_messages.increment(by: message_count, labels: key)

        # Number of attempts to deliver messages:
-       @deliver_attempts.observe(key, attempts)
+       @deliver_attempts.observe(attempts, labels: key)
      end

      def ack_message(event)
        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }

        # Number of messages ACK'd for the topic.
-       @ack_messages.increment(key)
+       @ack_messages.increment(labels: key)

        # Histogram of delay between a message being produced and it being ACK'd.
-       @ack_delay.observe(key, event.payload.fetch(:delay))
+       @ack_delay.observe(event.payload.fetch(:delay), labels: key)
      end

      def topic_error(event)
        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }

-       @ack_errors.increment(key)
+       @ack_errors.increment(labels: key)
      end
    end

    class AsyncProducerSubscriber < ActiveSupport::Subscriber
      def initialize
        super
-       @queue_size = Prometheus.registry.histogram(:async_producer_queue_size, 'Queue size', {}, SIZE_BUCKETS)
-       @queue_fill_ratio = Prometheus.registry.histogram(:async_producer_queue_fill_ratio, 'Queue fill ratio')
-       @produce_errors = Prometheus.registry.counter(:async_producer_produce_errors, 'Producer errors')
-       @dropped_messages = Prometheus.registry.counter(:async_producer_dropped_messages, 'Dropped messages')
+       @queue_size = Prometheus.registry.histogram(:async_producer_queue_size, docstring: 'Queue size', buckets: SIZE_BUCKETS, labels: [:client, :topic])
+       @queue_fill_ratio = Prometheus.registry.histogram(:async_producer_queue_fill_ratio, docstring: 'Queue fill ratio', labels: [:client, :topic])
+       @produce_errors = Prometheus.registry.counter(:async_producer_produce_errors, docstring: 'Producer errors', labels: [:client, :topic])
+       @dropped_messages = Prometheus.registry.counter(:async_producer_dropped_messages, docstring: 'Dropped messages', labels: [:client])
      end

      def enqueue_message(event)
@@ -277,29 +277,28 @@ module Kafka
        queue_fill_ratio = queue_size.to_f / max_queue_size.to_f

        # This gets us the avg/max queue size per producer.
-       @queue_size.observe(key, queue_size)
+       @queue_size.observe(queue_size, labels: key)

        # This gets us the avg/max queue fill ratio per producer.
-       @queue_fill_ratio.observe(key, queue_fill_ratio)
+       @queue_fill_ratio.observe(queue_fill_ratio, labels: key)
      end

      def buffer_overflow(event)
        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
-       @produce_errors.increment(key)
+       @produce_errors.increment(labels: key)
      end

      def drop_messages(event)
        key = { client: event.payload.fetch(:client_id) }
        message_count = event.payload.fetch(:message_count)
-
-       @dropped_messages.increment(key, message_count)
+       @dropped_messages.increment(by: message_count, labels: key)
      end
    end

    class FetcherSubscriber < ActiveSupport::Subscriber
      def initialize
        super
-       @queue_size = Prometheus.registry.gauge(:fetcher_queue_size, 'Queue size')
+       @queue_size = Prometheus.registry.gauge(:fetcher_queue_size, docstring: 'Queue size', labels: [:client, :group_id])
      end

      def loop(event)
@@ -307,7 +306,7 @@ module Kafka
        client = event.payload.fetch(:client_id)
        group_id = event.payload.fetch(:group_id)

-       @queue_size.set({ client: client, group_id: group_id }, queue_size)
+       @queue_size.set(queue_size, labels: { client: client, group_id: group_id })
      end
    end
  end
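These changes track the prometheus-client 0.10 API, in which metrics are registered with docstring:/labels: keywords and observations pass labels: as a keyword instead of a leading hash. A standalone sketch of the new calling convention; the metric names and label values are illustrative, not taken from this file:

    require "prometheus/client"

    registry = Prometheus::Client.registry

    calls   = registry.counter(:example_api_calls, docstring: "Total calls", labels: [:client, :api])
    latency = registry.histogram(:example_api_latency, docstring: "Latency", labels: [:client, :api])

    calls.increment(labels: { client: "my-app", api: "produce" })
    latency.observe(0.042, labels: { client: "my-app", api: "produce" })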
@@ -7,13 +7,14 @@ module Kafka
    class JoinGroupRequest
      PROTOCOL_TYPE = "consumer"

-     def initialize(group_id:, session_timeout:, member_id:, topics: [])
+     def initialize(group_id:, session_timeout:, rebalance_timeout:, member_id:, topics: [])
        @group_id = group_id
        @session_timeout = session_timeout * 1000 # Kafka wants ms.
+       @rebalance_timeout = rebalance_timeout * 1000 # Kafka wants ms.
        @member_id = member_id || ""
        @protocol_type = PROTOCOL_TYPE
        @group_protocols = {
-         "standard" => ConsumerGroupProtocol.new(topics: ["test-messages"]),
+         "roundrobin" => ConsumerGroupProtocol.new(topics: topics),
        }
      end

@@ -21,6 +22,10 @@ module Kafka
        JOIN_GROUP_API
      end

+     def api_version
+       1
+     end
+
      def response_class
        JoinGroupResponse
      end
@@ -28,6 +33,7 @@ module Kafka
      def encode(encoder)
        encoder.write_string(@group_id)
        encoder.write_int32(@session_timeout)
+       encoder.write_int32(@rebalance_timeout)
        encoder.write_string(@member_id)
        encoder.write_string(@protocol_type)

@@ -12,8 +12,10 @@ module Kafka
      OFFSET_FETCH_API
    end

+   # setting topics to nil fetches all offsets for a consumer group
+   # and that feature is only available in API version 2+
    def api_version
-     1
+     @topics.nil? ? 2 : 1
    end

    def response_class
@@ -1,6 +1,7 @@
  # Basic implementation of a tagged logger that matches the API of
  # ActiveSupport::TaggedLogging.

+ require 'delegate'
  require 'logger'

  module Kafka
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Kafka
-   VERSION = "0.7.10"
+   VERSION = "1.0.0"
  end
@@ -36,15 +36,15 @@ Gem::Specification.new do |spec|
    spec.add_development_dependency "dotenv"
    spec.add_development_dependency "docker-api"
    spec.add_development_dependency "rspec-benchmark"
-   spec.add_development_dependency "activesupport"
+   spec.add_development_dependency "activesupport", ">= 4.0", "< 6.1"
    spec.add_development_dependency "snappy"
    spec.add_development_dependency "extlz4"
    spec.add_development_dependency "zstd-ruby"
    spec.add_development_dependency "colored"
    spec.add_development_dependency "rspec_junit_formatter", "0.2.2"
-   spec.add_development_dependency "dogstatsd-ruby", ">= 3.0.0", "< 5.0.0"
+   spec.add_development_dependency "dogstatsd-ruby", ">= 4.0.0", "< 5.0.0"
    spec.add_development_dependency "statsd-ruby"
-   spec.add_development_dependency "prometheus-client"
+   spec.add_development_dependency "prometheus-client", "~> 0.10.0"
    spec.add_development_dependency "ruby-prof"
    spec.add_development_dependency "timecop"
    spec.add_development_dependency "rubocop", "~> 0.49.1"
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-kafka
  version: !ruby/object:Gem::Version
- version: 0.7.10
+ version: 1.0.0
  platform: ruby
  authors:
  - Daniel Schierbeck
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2019-08-07 00:00:00.000000000 Z
+ date: 2020-02-25 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: digest-crc
@@ -128,14 +128,20 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '0'
+ version: '4.0'
+ - - "<"
+ - !ruby/object:Gem::Version
+ version: '6.1'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '0'
+ version: '4.0'
+ - - "<"
+ - !ruby/object:Gem::Version
+ version: '6.1'
  - !ruby/object:Gem::Dependency
  name: snappy
  requirement: !ruby/object:Gem::Requirement
@@ -212,7 +218,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 3.0.0
+ version: 4.0.0
  - - "<"
  - !ruby/object:Gem::Version
  version: 5.0.0
@@ -222,7 +228,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 3.0.0
+ version: 4.0.0
  - - "<"
  - !ruby/object:Gem::Version
  version: 5.0.0
@@ -244,16 +250,16 @@ dependencies:
  name: prometheus-client
  requirement: !ruby/object:Gem::Requirement
  requirements:
- - - ">="
+ - - "~>"
  - !ruby/object:Gem::Version
- version: '0'
+ version: 0.10.0
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
- - - ">="
+ - - "~>"
  - !ruby/object:Gem::Version
- version: '0'
+ version: 0.10.0
  - !ruby/object:Gem::Dependency
  name: ruby-prof
  requirement: !ruby/object:Gem::Requirement
@@ -332,6 +338,7 @@ extensions: []
  extra_rdoc_files: []
  files:
  - ".circleci/config.yml"
+ - ".github/workflows/stale.yml"
  - ".gitignore"
  - ".readygo"
  - ".rspec"