ruby-kafka 0.7.4 → 0.7.5.beta1
- checksums.yaml +4 -4
- data/CHANGELOG.md +5 -0
- data/lib/kafka/client.rb +1 -0
- data/lib/kafka/datadog.rb +14 -1
- data/lib/kafka/fetcher.rb +3 -2
- data/lib/kafka/heartbeat.rb +8 -3
- data/lib/kafka/round_robin_assignment_strategy.rb +10 -7
- data/lib/kafka/statsd.rb +10 -1
- data/lib/kafka/version.rb +1 -1
- metadata +4 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: '0619c0e62d82ad058d2b8890e011725bbfd50f472f473c5ea460bcbdde689031'
+  data.tar.gz: 5e9a31e162be2adcb7898fe444916097faa24ea0721d794b402f110fdf37022d
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 41fa3e9d58f03213c0970fd4b4f94ba2479db67776f8b543f1bf65ea4a33078649afac066ff2c87d0269001ed3184d0cad78621cb12c0c2871c45cc7b581b160
+  data.tar.gz: 73c1347bdc0c4ffa354cea563b4e421993222de0908366a32daeea0bca280d5fc01e37b19dd3c5b7e3c22ce329bad643497c295be8b3536aa5162307346a32eb
data/CHANGELOG.md
CHANGED
@@ -3,6 +3,11 @@
 Changes and additions to the library will be listed here.
 
 ## Unreleased
+- Distribute partitions across consumer groups when there are few partitions per topic (#681)
+- Fix an issue where a consumer would fail to fetch any messages (#689)
+- Instrumentation for heartbeat event
+- Synchronously stop the fetcher to prevent race condition when processing commands
+- Instrument batch fetching (#694)
 
 ## 0.7.4
 - Fix wrong encoding calculation that leads to message corruption (#682, #680).
data/lib/kafka/client.rb
CHANGED
data/lib/kafka/datadog.rb
CHANGED
@@ -167,7 +167,6 @@ module Kafka
 
       def process_batch(event)
         offset = event.payload.fetch(:last_offset)
-        lag = event.payload.fetch(:offset_lag)
         messages = event.payload.fetch(:message_count)
 
         tags = {
@@ -185,6 +184,20 @@ module Kafka
         end
 
         gauge("consumer.offset", offset, tags: tags)
+      end
+
+      def fetch_batch(event)
+        lag = event.payload.fetch(:offset_lag)
+        batch_size = event.payload.fetch(:message_count)
+
+        tags = {
+          client: event.payload.fetch(:client_id),
+          group_id: event.payload.fetch(:group_id),
+          topic: event.payload.fetch(:topic),
+          partition: event.payload.fetch(:partition),
+        }
+
+        histogram("consumer.batch_size", batch_size, tags: tags)
         gauge("consumer.lag", lag, tags: tags)
       end
 
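The new fetch_batch reporter can be exercised directly through the notification layer. A minimal sketch, assuming ruby-kafka's usual <event>.consumer.kafka event naming, that the dogstatsd-ruby gem is installed, and using made-up client/group/topic names:

require "active_support/notifications"
require "kafka/datadog"  # attaches the Datadog consumer subscriber shown above

# Assumption: the batch-fetch event is published as "fetch_batch.consumer.kafka",
# so the subscriber's fetch_batch handler runs and reports consumer.batch_size
# (histogram) and consumer.lag (gauge) under the configured statsd namespace.
ActiveSupport::Notifications.instrument("fetch_batch.consumer.kafka",
  client_id: "my-app",    # hypothetical identifiers, for illustration only
  group_id: "my-group",
  topic: "events",
  partition: 0,
  offset_lag: 42,
  message_count: 100,
)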
data/lib/kafka/fetcher.rb
CHANGED
@@ -49,6 +49,8 @@ module Kafka
     def start
       return if @running
 
+      @running = true
+
       @thread = Thread.new do
         while @running
           loop
@@ -56,13 +58,12 @@ module Kafka
         @logger.info "Fetcher thread exited."
       end
       @thread.abort_on_exception = true
-
-      @running = true
     end
 
     def stop
       return unless @running
       @commands << [:stop, []]
+      @thread.join
     end
 
     def reset
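The ordering here is the point of the fix: @running is flipped before the worker thread starts its while @running loop, and stop now joins the thread so callers know the command loop has actually exited. A standalone sketch of the same start/stop contract (not the actual Fetcher class, just an illustration):

require "thread"

# Illustrative worker mirroring the patched contract: the running flag is set
# before the thread spawns, and #stop enqueues a command and then joins so the
# caller returns only after the loop has fully wound down.
class Worker
  def initialize
    @commands = Queue.new
    @running = false
  end

  def start
    return if @running

    @running = true

    @thread = Thread.new do
      while @running
        command, _args = @commands.deq
        @running = false if command == :stop
      end
    end
    @thread.abort_on_exception = true
  end

  def stop
    return unless @running
    @commands << [:stop, []]
    @thread.join
  end
end

worker = Worker.new
worker.start
worker.stop  # returns only after the worker thread has exited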
data/lib/kafka/heartbeat.rb
CHANGED
@@ -2,15 +2,20 @@
 
 module Kafka
   class Heartbeat
-    def initialize(group:, interval:)
+    def initialize(group:, interval:, instrumenter:)
       @group = group
       @interval = interval
       @last_heartbeat = Time.now
+      @instrumenter = instrumenter
     end
 
     def trigger!
-      @group.heartbeat
-      @last_heartbeat = Time.now
+      @instrumenter.instrument('heartbeat.consumer',
+                               group_id: @group.group_id,
+                               topic_partitions: @group.assigned_partitions) do
+        @group.heartbeat
+        @last_heartbeat = Time.now
+      end
     end
 
     def trigger
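With the heartbeat routed through the instrumenter, it can be observed like the other consumer events. A minimal sketch, assuming the event ultimately surfaces via ActiveSupport::Notifications under ruby-kafka's usual <event>.consumer.kafka naming:

require "active_support/notifications"

# Assumption: the 'heartbeat.consumer' event instrumented above is published
# as "heartbeat.consumer.kafka" once a client-level instrumenter is wired in.
ActiveSupport::Notifications.subscribe("heartbeat.consumer.kafka") do |*args|
  event = ActiveSupport::Notifications::Event.new(*args)
  group_id = event.payload.fetch(:group_id)
  partitions = event.payload.fetch(:topic_partitions)

  puts "heartbeat for #{group_id}: #{partitions.inspect} (#{event.duration.round(1)}ms)"
end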
data/lib/kafka/round_robin_assignment_strategy.rb
CHANGED
@@ -24,20 +24,23 @@ module Kafka
         group_assignment[member_id] = Protocol::MemberAssignment.new
       end
 
-      topics.
+      topic_partitions = topics.flat_map do |topic|
         begin
           partitions = @cluster.partitions_for(topic).map(&:partition_id)
         rescue UnknownTopicOrPartition
           raise UnknownTopicOrPartition, "unknown topic #{topic}"
         end
+        Array.new(partitions.count) { topic }.zip(partitions)
+      end
 
-
-
-
+      partitions_per_member = topic_partitions.group_by.with_index do |_, index|
+        index % members.count
+      end.values
 
-
-
-
+      members.zip(partitions_per_member).each do |member_id, member_partitions|
+        unless member_partitions.nil?
+          member_partitions.each do |topic, partition|
+            group_assignment[member_id].assign(topic, [partition])
          end
        end
      end
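The effect of the reworked strategy is easiest to see with a small worked example. The sketch below runs just the distribution logic from the new code against hypothetical topics and members:

# Distribution logic only, extracted for illustration (hypothetical data).
members = ["member-a", "member-b", "member-c"]
partitions_by_topic = { "greetings" => [0, 1], "farewells" => [0, 1] }

# Flatten every topic into [topic, partition] pairs.
topic_partitions = partitions_by_topic.flat_map do |topic, partitions|
  Array.new(partitions.count) { topic }.zip(partitions)
end
# => [["greetings", 0], ["greetings", 1], ["farewells", 0], ["farewells", 1]]

# Round-robin the flattened pairs across the members by index.
partitions_per_member = topic_partitions.group_by.with_index do |_, index|
  index % members.count
end.values

members.zip(partitions_per_member).each do |member_id, member_partitions|
  p [member_id, member_partitions]
end
# ["member-a", [["greetings", 0], ["farewells", 1]]]
# ["member-b", [["greetings", 1]]]
# ["member-c", [["farewells", 0]]]

With two partitions per topic and three members, flattening into topic/partition pairs before the round-robin lets the third member receive work as well, which is the "few partitions per topic" scenario the changelog entry (#681) targets.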
data/lib/kafka/statsd.rb
CHANGED
@@ -107,7 +107,6 @@ module Kafka
       end
 
       def process_batch(event)
-        lag = event.payload.fetch(:offset_lag)
         messages = event.payload.fetch(:message_count)
         client = event.payload.fetch(:client_id)
         group_id = event.payload.fetch(:group_id)
@@ -120,7 +119,17 @@ module Kafka
           timing("consumer.#{client}.#{group_id}.#{topic}.#{partition}.process_batch.latency", event.duration)
           count("consumer.#{client}.#{group_id}.#{topic}.#{partition}.messages", messages)
         end
+      end
+
+      def fetch_batch(event)
+        lag = event.payload.fetch(:offset_lag)
+        batch_size = event.payload.fetch(:message_count)
+        client = event.payload.fetch(:client_id)
+        group_id = event.payload.fetch(:group_id)
+        topic = event.payload.fetch(:topic)
+        partition = event.payload.fetch(:partition)
 
+        count("consumer.#{client}.#{group_id}.#{topic}.#{partition}.batch_size", batch_size)
         gauge("consumer.#{client}.#{group_id}.#{topic}.#{partition}.lag", lag)
       end
 
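The StatsD reporter encodes the same information into dotted metric keys rather than tags. For hypothetical client/group/topic/partition values, the new handler produces keys of this shape (relative to the configured namespace):

# Hypothetical values matching the payload keys fetched in fetch_batch above.
client, group_id, topic, partition = "my-app", "my-group", "events", 0

puts "consumer.#{client}.#{group_id}.#{topic}.#{partition}.batch_size"
# prints: consumer.my-app.my-group.events.0.batch_size  (counter, incremented by message_count)

puts "consumer.#{client}.#{group_id}.#{topic}.#{partition}.lag"
# prints: consumer.my-app.my-group.events.0.lag  (gauge, set to offset_lag)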
data/lib/kafka/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-kafka
 version: !ruby/object:Gem::Version
-  version: 0.7.4
+  version: 0.7.5.beta1
 platform: ruby
 authors:
 - Daniel Schierbeck
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2018-
+date: 2018-12-04 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: digest-crc
@@ -446,9 +446,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: 2.1.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - "
+  - - ">"
     - !ruby/object:Gem::Version
-      version:
+      version: 1.3.1
 requirements: []
 rubyforge_project:
 rubygems_version: 2.7.6