ruby-kafka 0.7.4 → 1.1.0
- checksums.yaml +4 -4
- data/.circleci/config.yml +168 -3
- data/.github/workflows/stale.yml +19 -0
- data/CHANGELOG.md +48 -0
- data/README.md +59 -0
- data/lib/kafka/async_producer.rb +30 -9
- data/lib/kafka/broker.rb +13 -1
- data/lib/kafka/broker_pool.rb +1 -1
- data/lib/kafka/client.rb +63 -6
- data/lib/kafka/cluster.rb +53 -1
- data/lib/kafka/compression.rb +13 -11
- data/lib/kafka/compressor.rb +1 -0
- data/lib/kafka/connection.rb +7 -1
- data/lib/kafka/connection_builder.rb +1 -1
- data/lib/kafka/consumer.rb +98 -17
- data/lib/kafka/consumer_group.rb +20 -2
- data/lib/kafka/datadog.rb +32 -12
- data/lib/kafka/fetch_operation.rb +1 -1
- data/lib/kafka/fetched_batch.rb +5 -1
- data/lib/kafka/fetched_batch_generator.rb +5 -2
- data/lib/kafka/fetched_message.rb +1 -0
- data/lib/kafka/fetched_offset_resolver.rb +1 -1
- data/lib/kafka/fetcher.rb +13 -6
- data/lib/kafka/gzip_codec.rb +4 -0
- data/lib/kafka/heartbeat.rb +8 -3
- data/lib/kafka/lz4_codec.rb +4 -0
- data/lib/kafka/offset_manager.rb +13 -2
- data/lib/kafka/produce_operation.rb +1 -1
- data/lib/kafka/producer.rb +33 -8
- data/lib/kafka/prometheus.rb +316 -0
- data/lib/kafka/protocol/add_offsets_to_txn_request.rb +29 -0
- data/lib/kafka/protocol/add_offsets_to_txn_response.rb +19 -0
- data/lib/kafka/protocol/join_group_request.rb +8 -2
- data/lib/kafka/protocol/metadata_response.rb +1 -1
- data/lib/kafka/protocol/offset_fetch_request.rb +3 -1
- data/lib/kafka/protocol/produce_request.rb +3 -1
- data/lib/kafka/protocol/record_batch.rb +7 -4
- data/lib/kafka/protocol/sasl_handshake_request.rb +1 -1
- data/lib/kafka/protocol/txn_offset_commit_request.rb +46 -0
- data/lib/kafka/protocol/txn_offset_commit_response.rb +18 -0
- data/lib/kafka/protocol.rb +8 -0
- data/lib/kafka/round_robin_assignment_strategy.rb +10 -7
- data/lib/kafka/sasl/gssapi.rb +1 -1
- data/lib/kafka/sasl/oauth.rb +64 -0
- data/lib/kafka/sasl/plain.rb +1 -1
- data/lib/kafka/sasl/scram.rb +16 -13
- data/lib/kafka/sasl_authenticator.rb +10 -3
- data/lib/kafka/snappy_codec.rb +4 -0
- data/lib/kafka/ssl_context.rb +5 -1
- data/lib/kafka/ssl_socket_with_timeout.rb +1 -0
- data/lib/kafka/statsd.rb +10 -1
- data/lib/kafka/tagged_logger.rb +77 -0
- data/lib/kafka/transaction_manager.rb +26 -1
- data/lib/kafka/transaction_state_machine.rb +1 -1
- data/lib/kafka/version.rb +1 -1
- data/lib/kafka/zstd_codec.rb +27 -0
- data/lib/kafka.rb +4 -0
- data/ruby-kafka.gemspec +5 -3
- metadata +50 -7
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 22b958f3d452f35c3c6084a5b8b790f370b96069e87ef5974838e5ea4b6945e1
+  data.tar.gz: eb6d704dbaace9c13a99bc93924d8a6b84ee5eb686c9c863a81d085ffeb7e92d
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 22c1d59bcdd42849849122b559f0d161653a5cfa492ffb36f28a875b109444d670a76bc62ba0856ae2ebce95fd10abce9e16834928fc7fe6eb2ee006827d307c
+  data.tar.gz: 3a51df6b1d40e1edbd96c06f3473319f0ac1b072b040b676a8e1d980fbaad114ad0248a7670cb7ae0a51a2c79ac390f2f4cfe8f65eedb5dd55be020c6011bc18
data/.circleci/config.yml
CHANGED
@@ -113,21 +113,181 @@ jobs:
         environment:
           LOG_LEVEL: DEBUG
       - image: wurstmeister/zookeeper
-      - image: wurstmeister/kafka:2.11-2.0.
+      - image: wurstmeister/kafka:2.11-2.0.1
         environment:
           KAFKA_ADVERTISED_HOST_NAME: localhost
           KAFKA_ADVERTISED_PORT: 9092
           KAFKA_PORT: 9092
           KAFKA_ZOOKEEPER_CONNECT: localhost:2181
           KAFKA_DELETE_TOPIC_ENABLE: true
-      - image: wurstmeister/kafka:2.11-2.0.
+      - image: wurstmeister/kafka:2.11-2.0.1
         environment:
           KAFKA_ADVERTISED_HOST_NAME: localhost
           KAFKA_ADVERTISED_PORT: 9093
           KAFKA_PORT: 9093
           KAFKA_ZOOKEEPER_CONNECT: localhost:2181
           KAFKA_DELETE_TOPIC_ENABLE: true
-      - image: wurstmeister/kafka:2.11-2.0.
+      - image: wurstmeister/kafka:2.11-2.0.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.1:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.12-2.1.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.1.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.1.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.2:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.12-2.2.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.2.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.2.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.3:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.12-2.3.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.3.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.3.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.4:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.12-2.4.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.4.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.4.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.5:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.12-2.5.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.5.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.5.0
         environment:
           KAFKA_ADVERTISED_HOST_NAME: localhost
           KAFKA_ADVERTISED_PORT: 9094
@@ -148,3 +308,8 @@ workflows:
       - kafka-1.0.0
       - kafka-1.1
       - kafka-2.0
+      - kafka-2.1
+      - kafka-2.2
+      - kafka-2.3
+      - kafka-2.4
+      - kafka-2.5
data/.github/workflows/stale.yml
ADDED
@@ -0,0 +1,19 @@
+name: Mark stale issues and pull requests
+
+on:
+  schedule:
+  - cron: "0 0 * * *"
+
+jobs:
+  stale:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/stale@v1
+      with:
+        repo-token: ${{ secrets.GITHUB_TOKEN }}
+        stale-issue-message: 'Issue has been marked as stale due to a lack of activity.'
+        stale-pr-message: 'Pull request has been marked as stale due to a lack of activity.'
+        stale-issue-label: 'no-issue-activity'
+        stale-pr-label: 'no-pr-activity'
data/CHANGELOG.md
CHANGED
@@ -4,6 +4,54 @@ Changes and additions to the library will be listed here.
 
 ## Unreleased
 
+## 1.1.0
+
+- Extra sanity checking when marking offsets as processed (#824).
+- Make `verify_hostname` settable for SSL contexts (#828).
+- Instrument `create_time` from last message in batch (#811).
+- Add client function for fetching topic replica count (#822).
+- Allow consumers to refresh the topic lists (#818).
+- Disconnect after leaving a group (#817).
+- Use `max_wait_time` as the sleep instead of hardcoded 2 seconds (#825).
+
+## 1.0.0
+
+- Add client methods to manage configs (#759)
+- Support Kafka 2.3 and 2.4.
+
+## 0.7.10
+
+- Fix logger again (#762)
+
+## 0.7.9
+
+- Fix SSL authentication for ruby < 2.4.0 (#742)
+- Add metrics for prometheus/client (#739)
+- Do not add nil message entries when ignoring old messages (#746)
+- Scram authentication thread save (#743)
+
+## 0.7.8
+- Optionally verify hostname on SSL certs (#733)
+
+## 0.7.7
+- Producer send offsets in transaction (#723)
+- Support zstd compression (#724)
+- Verify SSL Certificates (#730)
+
+## 0.7.6
+- Introduce regex matching in `Consumer#subscribe` (#700)
+- Only rejoin group on error if we're not in shutdown mode (#711)
+- Use `maxTimestamp` for `logAppendTime` timestamps (#706)
+- Async producer limit number of retries (#708)
+- Support SASL OAuthBearer Authentication (#710)
+
+## 0.7.5
+- Distribute partitions across consumer groups when there are few partitions per topic (#681)
+- Fix an issue where a consumer would fail to fetch any messages (#689)
+- Instrumentation for heartbeat event
+- Synchronously stop the fetcher to prevent race condition when processing commands
+- Instrument batch fetching (#694)
+
 ## 0.7.4
 - Fix wrong encoding calculation that leads to message corruption (#682, #680).
 - Change the log level of the 'Committing offsets' message to debug (#640).
data/README.md
CHANGED
@@ -98,6 +98,36 @@ Or install it yourself as:
     <td>Limited support</td>
     <td>Limited support</td>
   </tr>
+  <tr>
+    <th>Kafka 2.0</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
+  <tr>
+    <th>Kafka 2.1</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
+  <tr>
+    <th>Kafka 2.2</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
+  <tr>
+    <th>Kafka 2.3</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
+  <tr>
+    <th>Kafka 2.4</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
+  <tr>
+    <th>Kafka 2.5</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
 </table>
 
 This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with the v0.5.x series. There's limited support for Kafka 0.8, and things should work with Kafka 0.11, although there may be performance issues due to changes in the protocol.
@@ -107,6 +137,12 @@ This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with t
 - **Kafka 0.10:** Full support for the Producer and Consumer API in ruby-kafka v0.5.x. Note that you _must_ run version 0.10.1 or higher of Kafka due to limitations in 0.10.0.
 - **Kafka 0.11:** Full support for Producer API, limited support for Consumer API in ruby-kafka v0.7.x. New features in 0.11.x includes new Record Batch format, idempotent and transactional production. The missing feature is dirty reading of Consumer API.
 - **Kafka 1.0:** Everything that works with Kafka 0.11 should still work, but so far no features specific to Kafka 1.0 have been added.
+- **Kafka 2.0:** Everything that works with Kafka 1.0 should still work, but so far no features specific to Kafka 2.0 have been added.
+- **Kafka 2.1:** Everything that works with Kafka 2.0 should still work, but so far no features specific to Kafka 2.1 have been added.
+- **Kafka 2.2:** Everything that works with Kafka 2.1 should still work, but so far no features specific to Kafka 2.2 have been added.
+- **Kafka 2.3:** Everything that works with Kafka 2.2 should still work, but so far no features specific to Kafka 2.3 have been added.
+- **Kafka 2.4:** Everything that works with Kafka 2.3 should still work, but so far no features specific to Kafka 2.4 have been added.
+- **Kafka 2.5:** Everything that works with Kafka 2.4 should still work, but so far no features specific to Kafka 2.5 have been added.
 
 This library requires Ruby 2.1 or higher.
 
@@ -412,6 +448,7 @@ Compression is enabled by passing the `compression_codec` parameter to `#produce
 * `:snappy` for [Snappy](http://google.github.io/snappy/) compression.
 * `:gzip` for [gzip](https://en.wikipedia.org/wiki/Gzip) compression.
 * `:lz4` for [LZ4](https://en.wikipedia.org/wiki/LZ4_(compression_algorithm)) compression.
+* `:zstd` for [zstd](https://facebook.github.io/zstd/) compression.
 
 By default, all message sets will be compressed if you specify a compression codec. To increase the compression threshold, set `compression_threshold` to an integer value higher than one.
 
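The new `:zstd` codec slots into the existing producer options. A minimal sketch, assuming a zstd codec library is available on the client and the brokers are new enough to speak zstd; the broker and topic names are placeholders:

```ruby
require "kafka"

kafka = Kafka.new(["kafka1:9092"], client_id: "my-application")

producer = kafka.producer(
  compression_codec: :zstd,   # compress message sets with zstd
  compression_threshold: 10   # only compress once 10+ messages are buffered
)

producer.produce("hello", topic: "greetings")
producer.deliver_messages
producer.shutdown
```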
@@ -915,6 +952,8 @@ This configures the store to look up CA certificates from the system default cer
 
 In order to authenticate the client to the cluster, you need to pass in a certificate and key created for the client and trusted by the brokers.
 
+**NOTE**: You can disable hostname validation by passing `verify_hostname: false`.
+
 ```ruby
 kafka = Kafka.new(
   ["kafka1:9092"],
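The NOTE above refers to the SSL context setting; at the client level, the client.rb diff further down exposes the same switch as the `ssl_verify_hostname` keyword on `Kafka.new`. A hedged sketch with placeholder certificate paths:

```ruby
kafka = Kafka.new(
  ["kafka1:9092"],
  ssl_ca_cert: File.read("my_ca_cert.pem"),
  ssl_client_cert: File.read("my_client_cert.pem"),
  ssl_client_cert_key: File.read("my_client_cert_key.pem"),
  ssl_verify_hostname: false  # skip matching the cert's domains against the host
)
```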
@@ -976,6 +1015,26 @@ kafka = Kafka.new(
 )
 ```
 
+##### OAUTHBEARER
+This mechanism is supported in kafka >= 2.0.0 as of [KIP-255](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876)
+
+In order to authenticate using OAUTHBEARER, you must set the client with an instance of a class that implements a `token` method (the interface is described in [Kafka::Sasl::OAuth](lib/kafka/sasl/oauth.rb)) which returns an ID/Access token.
+
+Optionally, the client may implement an `extensions` method that returns a map of key-value pairs. These can be sent with the SASL/OAUTHBEARER initial client response. This is only supported in kafka >= 2.1.0.
+
+```ruby
+class TokenProvider
+  def token
+    "some_id_token"
+  end
+end
+# ...
+client = Kafka.new(
+  ["kafka1:9092"],
+  sasl_oauth_token_provider: TokenProvider.new
+)
+```
+
 ### Topic management
 
 In addition to producing and consuming messages, ruby-kafka supports managing Kafka topics and their configurations. See [the Kafka documentation](https://kafka.apache.org/documentation/#topicconfigs) for a full list of topic configuration keys.
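Returning to the OAUTHBEARER section above: the added README text mentions the optional `extensions` method without showing it, so here is a hedged sketch of a provider that implements both methods; the extension key and all values are invented placeholders:

```ruby
class TokenProviderWithExtensions
  # The ID/Access token used for the OAUTHBEARER handshake.
  def token
    "some_id_token"
  end

  # Optional key-value pairs sent with the initial client response.
  # Requires Kafka >= 2.1.0 on the broker side.
  def extensions
    { "traceId" => "123" }
  end
end

client = Kafka.new(
  ["kafka1:9092"],
  sasl_oauth_token_provider: TokenProviderWithExtensions.new
)
```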
data/lib/kafka/async_producer.rb
CHANGED
@@ -72,7 +72,7 @@ module Kafka
     # @param delivery_interval [Integer] if greater than zero, the number of
     #   seconds between automatic message deliveries.
     #
-    def initialize(sync_producer:, max_queue_size: 1000, delivery_threshold: 0, delivery_interval: 0, instrumenter:, logger:)
+    def initialize(sync_producer:, max_queue_size: 1000, delivery_threshold: 0, delivery_interval: 0, max_retries: -1, retry_backoff: 0, instrumenter:, logger:)
       raise ArgumentError unless max_queue_size > 0
       raise ArgumentError unless delivery_threshold >= 0
       raise ArgumentError unless delivery_interval >= 0
@@ -80,14 +80,16 @@ module Kafka
       @queue = Queue.new
       @max_queue_size = max_queue_size
       @instrumenter = instrumenter
-      @logger = logger
+      @logger = TaggedLogger.new(logger)
 
       @worker = Worker.new(
         queue: @queue,
         producer: sync_producer,
         delivery_threshold: delivery_threshold,
+        max_retries: max_retries,
+        retry_backoff: retry_backoff,
         instrumenter: instrumenter,
-        logger: logger
+        logger: logger
       )
 
       # The timer will no-op if the delivery interval is zero.
@@ -101,6 +103,9 @@ module Kafka
     # @raise [BufferOverflow] if the message queue is full.
     # @return [nil]
     def produce(value, topic:, **options)
+      # We want to fail fast if `topic` isn't a String
+      topic = topic.to_str
+
       ensure_threads_running!
 
       if @queue.size >= @max_queue_size
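The `to_str` call is what makes this fail fast: unlike `to_s`, only genuinely string-like objects implement it, so passing a symbol or other non-String raises immediately instead of enqueueing a bad message. A plain-Ruby illustration:

```ruby
"greetings".to_str  # => "greetings"
:greetings.to_str   # => raises NoMethodError, since Symbol does not implement #to_str
```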
@@ -184,15 +189,18 @@ module Kafka
     end
 
     class Worker
-      def initialize(queue:, producer:, delivery_threshold:, instrumenter:, logger:)
+      def initialize(queue:, producer:, delivery_threshold:, max_retries: -1, retry_backoff: 0, instrumenter:, logger:)
         @queue = queue
         @producer = producer
         @delivery_threshold = delivery_threshold
+        @max_retries = max_retries
+        @retry_backoff = retry_backoff
         @instrumenter = instrumenter
-        @logger = logger
+        @logger = TaggedLogger.new(logger)
       end
 
       def run
+        @logger.push_tags(@producer.to_s)
         @logger.info "Starting async producer in the background..."
 
         loop do
@@ -233,15 +241,28 @@ module Kafka
         @logger.error "Async producer crashed!"
       ensure
         @producer.shutdown
+        @logger.pop_tags
       end
 
       private
 
       def produce(*args)
-
-
-
-
+        retries = 0
+        begin
+          @producer.produce(*args)
+        rescue BufferOverflow => e
+          deliver_messages
+          if @max_retries == -1
+            retry
+          elsif retries < @max_retries
+            retries += 1
+            sleep @retry_backoff**retries
+            retry
+          else
+            @logger.error("Failed to asynchronously produce messages due to BufferOverflow")
+            @instrumenter.instrument("error.async_producer", { error: e })
+          end
+        end
       end
 
       def deliver_messages
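Taken together, these hunks implement "Async producer limit number of retries (#708)" from the changelog: on `BufferOverflow` the worker flushes the queue and retries, sleeping `retry_backoff**retries` seconds between attempts, and gives up (logging and instrumenting the error) once `max_retries` is exceeded. The default of `-1` keeps the old retry-forever behaviour. A usage sketch with placeholder broker and topic names:

```ruby
kafka = Kafka.new(["kafka1:9092"], client_id: "my-application")

producer = kafka.async_producer(
  delivery_interval: 10,  # deliver buffered messages every 10 seconds
  max_queue_size: 1000,
  max_retries: 3,         # give up after 3 retries on BufferOverflow
  retry_backoff: 2        # sleep 2, 4, then 8 seconds between attempts
)

producer.produce("hello", topic: "greetings")
producer.shutdown
```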
data/lib/kafka/broker.rb
CHANGED
@@ -12,7 +12,7 @@ module Kafka
       @host = host
       @port = port
       @node_id = node_id
-      @logger = logger
+      @logger = TaggedLogger.new(logger)
     end
 
     def address_match?(host, port)
@@ -182,6 +182,18 @@ module Kafka
       send_request(request)
     end
 
+    def add_offsets_to_txn(**options)
+      request = Protocol::AddOffsetsToTxnRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def txn_offset_commit(**options)
+      request = Protocol::TxnOffsetCommitRequest.new(**options)
+
+      send_request(request)
+    end
+
     private
 
     def send_request(request)
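These two broker calls are the wire-level half of "Producer send offsets in transaction (#723)" from the changelog. Applications reach them through the transactional producer; the consume-transform-produce sketch below assumes a `send_offsets_to_transaction(batch:, group_id:)` method on the producer, inferred from the feature name rather than shown in this diff:

```ruby
kafka = Kafka.new(["kafka1:9092"], client_id: "my-application")

producer = kafka.producer(transactional: true, transactional_id: "my-txn-id")
producer.init_transactions

consumer = kafka.consumer(group_id: "my-group")
consumer.subscribe("input-topic")

consumer.each_batch(automatically_mark_as_processed: false) do |batch|
  producer.transaction do
    batch.messages.each do |message|
      producer.produce(message.value.upcase, topic: "output-topic")
    end
    # Commit the consumed offsets atomically with the produced messages;
    # under the hood this drives AddOffsetsToTxn / TxnOffsetCommit (assumed API).
    producer.send_offsets_to_transaction(batch: batch, group_id: "my-group")
  end
end
```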
data/lib/kafka/broker_pool.rb
CHANGED
data/lib/kafka/client.rb
CHANGED
@@ -14,6 +14,7 @@ require "kafka/fetch_operation"
 require "kafka/connection_builder"
 require "kafka/instrumenter"
 require "kafka/sasl_authenticator"
+require "kafka/tagged_logger"
 
 module Kafka
   class Client
@@ -61,14 +62,21 @@ module Kafka
     #
     # @param sasl_over_ssl [Boolean] whether to enforce SSL with SASL
     #
+    # @param sasl_oauth_token_provider [Object, nil] OAuthBearer Token Provider instance that
+    #   implements method token. See {Sasl::OAuth#initialize}
+    #
+    # @param verify_hostname [Boolean, true] whether to verify that the host serving
+    #   the SSL certificate and the signing chain of the certificate have the correct domains
+    #   based on the CA certificate
+    #
     # @return [Client]
     def initialize(seed_brokers:, client_id: "ruby-kafka", logger: nil, connect_timeout: nil, socket_timeout: nil,
                    ssl_ca_cert_file_path: nil, ssl_ca_cert: nil, ssl_client_cert: nil, ssl_client_cert_key: nil,
                    ssl_client_cert_key_password: nil, ssl_client_cert_chain: nil, sasl_gssapi_principal: nil,
                    sasl_gssapi_keytab: nil, sasl_plain_authzid: '', sasl_plain_username: nil, sasl_plain_password: nil,
                    sasl_scram_username: nil, sasl_scram_password: nil, sasl_scram_mechanism: nil,
-                   sasl_over_ssl: true, ssl_ca_certs_from_system: false)
-      @logger =
+                   sasl_over_ssl: true, ssl_ca_certs_from_system: false, sasl_oauth_token_provider: nil, ssl_verify_hostname: true)
+      @logger = TaggedLogger.new(logger)
       @instrumenter = Instrumenter.new(client_id: client_id)
       @seed_brokers = normalize_seed_brokers(seed_brokers)
 
@@ -80,6 +88,7 @@ module Kafka
         client_cert_key_password: ssl_client_cert_key_password,
         client_cert_chain: ssl_client_cert_chain,
         ca_certs_from_system: ssl_ca_certs_from_system,
+        verify_hostname: ssl_verify_hostname
       )
 
       sasl_authenticator = SaslAuthenticator.new(
@@ -91,6 +100,7 @@ module Kafka
         sasl_scram_username: sasl_scram_username,
         sasl_scram_password: sasl_scram_password,
         sasl_scram_mechanism: sasl_scram_mechanism,
+        sasl_oauth_token_provider: sasl_oauth_token_provider,
         logger: @logger
       )
 
@@ -132,6 +142,9 @@ module Kafka
     def deliver_message(value, key: nil, headers: {}, topic:, partition: nil, partition_key: nil, retries: 1)
       create_time = Time.now
 
+      # We want to fail fast if `topic` isn't a String
+      topic = topic.to_str
+
       message = PendingMessage.new(
         value: value,
         key: key,
@@ -228,8 +241,8 @@ module Kafka
     # result in {BufferOverflow} being raised.
     #
     # @param compression_codec [Symbol, nil] the name of the compression codec to
-    #   use, or nil if no compression should be performed. Valid codecs: `:snappy
-    #
+    #   use, or nil if no compression should be performed. Valid codecs: `:snappy`,
+    #   `:gzip`, `:lz4`, `:zstd`
     #
     # @param compression_threshold [Integer] the number of messages that needs to
     #   be in a message set before it should be compressed. Note that message sets
@@ -295,7 +308,7 @@ module Kafka
     #
     # @see AsyncProducer
     # @return [AsyncProducer]
-    def async_producer(delivery_interval: 0, delivery_threshold: 0, max_queue_size: 1000, **options)
+    def async_producer(delivery_interval: 0, delivery_threshold: 0, max_queue_size: 1000, max_retries: -1, retry_backoff: 0, **options)
       sync_producer = producer(**options)
 
       AsyncProducer.new(
@@ -303,6 +316,8 @@ module Kafka
         delivery_interval: delivery_interval,
         delivery_threshold: delivery_threshold,
         max_queue_size: max_queue_size,
+        max_retries: max_retries,
+        retry_backoff: retry_backoff,
         instrumenter: @instrumenter,
         logger: @logger,
       )
@@ -325,15 +340,20 @@ module Kafka
     # @param fetcher_max_queue_size [Integer] max number of items in the fetch queue that
     #   are stored for further processing. Note, that each item in the queue represents a
     #   response from a single broker.
+    # @param refresh_topic_interval [Integer] interval of refreshing the topic list.
+    #   If it is 0, the topic list won't be refreshed (default)
+    #   If it is n (n > 0), the topic list will be refreshed every n seconds
     # @return [Consumer]
     def consumer(
         group_id:,
         session_timeout: 30,
+        rebalance_timeout: 60,
         offset_commit_interval: 10,
         offset_commit_threshold: 0,
         heartbeat_interval: 10,
         offset_retention_time: nil,
-        fetcher_max_queue_size: 100
+        fetcher_max_queue_size: 100,
+        refresh_topic_interval: 0
       )
       cluster = initialize_cluster
 
@@ -349,6 +369,7 @@ module Kafka
         logger: @logger,
         group_id: group_id,
         session_timeout: session_timeout,
+        rebalance_timeout: rebalance_timeout,
         retention_time: retention_time,
         instrumenter: instrumenter,
       )
@@ -374,6 +395,7 @@ module Kafka
       heartbeat = Heartbeat.new(
         group: group,
         interval: heartbeat_interval,
+        instrumenter: instrumenter
       )
 
       Consumer.new(
@@ -385,6 +407,7 @@ module Kafka
         fetcher: fetcher,
         session_timeout: session_timeout,
         heartbeat: heartbeat,
+        refresh_topic_interval: refresh_topic_interval
       )
     end
 
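A usage sketch of the new consumer knobs from the hunks above (`rebalance_timeout` and `refresh_topic_interval`); the broker, group, and topic names are placeholders:

```ruby
kafka = Kafka.new(["kafka1:9092"], client_id: "my-application")

consumer = kafka.consumer(
  group_id: "my-group",
  rebalance_timeout: 120,      # give members more time to rejoin on rebalance
  refresh_topic_interval: 30   # re-fetch the topic list every 30 seconds
)

# Combined with a regex subscription, periodic refreshing lets the consumer
# pick up newly created topics that match the pattern.
consumer.subscribe(/^events-/)

consumer.each_message do |message|
  puts "#{message.topic}/#{message.partition}@#{message.offset}: #{message.value}"
end
```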
@@ -521,6 +544,24 @@ module Kafka
       end
     end
 
+    # Describe broker configs
+    #
+    # @param broker_id [int] the id of the broker
+    # @param configs [Array] array of config keys.
+    # @return [Array<Kafka::Protocol::DescribeConfigsResponse::ConfigEntry>]
+    def describe_configs(broker_id, configs = [])
+      @cluster.describe_configs(broker_id, configs)
+    end
+
+    # Alter broker configs
+    #
+    # @param broker_id [int] the id of the broker
+    # @param configs [Array] array of config strings.
+    # @return [nil]
+    def alter_configs(broker_id, configs = [])
+      @cluster.alter_configs(broker_id, configs)
+    end
+
     # Creates a topic in the cluster.
     #
     # @example Creating a topic with log compaction
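The changelog's "Add client methods to manage configs (#759)" surfaces here. A hedged usage sketch: the config key is only an example, and the exact entry shape expected by `alter_configs` (assumed to be name/value pairs here) is not shown in this diff:

```ruby
kafka = Kafka.new(["kafka1:9092"], client_id: "my-application")

# Read selected config entries from broker 1.
kafka.describe_configs(1, ["log.retention.hours"]).each do |entry|
  puts "#{entry.name} = #{entry.value}"  # ConfigEntry accessors assumed
end

# Update a broker config (entry shape assumed).
kafka.alter_configs(1, [{ name: "log.retention.hours", value: "72" }])
```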
@@ -606,6 +647,14 @@ module Kafka
       @cluster.describe_group(group_id)
     end
 
+    # Fetch all committed offsets for a consumer group
+    #
+    # @param group_id [String] the id of the consumer group
+    # @return [Hash<String, Hash<Integer, Kafka::Protocol::OffsetFetchResponse::PartitionOffsetInfo>>]
+    def fetch_group_offsets(group_id)
+      @cluster.fetch_group_offsets(group_id)
+    end
+
     # Create partitions for a topic.
     #
     # @param name [String] the name of the topic.
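Given the documented return shape (topic => partition => `PartitionOffsetInfo`), a hedged sketch of reading a group's committed offsets; the `offset` accessor on `PartitionOffsetInfo` is assumed from the type name:

```ruby
kafka = Kafka.new(["kafka1:9092"], client_id: "my-application")

kafka.fetch_group_offsets("my-group").each do |topic, partitions|
  partitions.each do |partition, info|
    puts "#{topic}/#{partition}: committed offset #{info.offset}"  # accessor assumed
  end
end
```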
@@ -654,6 +703,14 @@ module Kafka
       @cluster.partitions_for(topic).count
     end
 
+    # Counts the number of replicas for a topic's partition
+    #
+    # @param topic [String]
+    # @return [Integer] the number of replica nodes for the topic's partition
+    def replica_count_for(topic)
+      @cluster.partitions_for(topic).first.replicas.count
+    end
+
     # Retrieve the offset of the last message in a partition. If there are no
     # messages in the partition -1 is returned.
     #