logstash-integration-kafka 10.1.0-java → 10.2.0-java
- checksums.yaml +4 -4
- data/CHANGELOG.md +3 -0
- data/docs/index.asciidoc +1 -1
- data/docs/input-kafka.asciidoc +84 -77
- data/docs/output-kafka.asciidoc +21 -17
- data/lib/logstash/inputs/kafka.rb +49 -51
- data/lib/logstash/outputs/kafka.rb +14 -15
- data/logstash-integration-kafka.gemspec +1 -1
- data/spec/unit/inputs/kafka_spec.rb +50 -0
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 88d87b3c5b9e443c0bc4ba8064b6316243f24bb0c0d6ce150ca0a19dd25ecb2a
+  data.tar.gz: 5fc09468e8cdc7094bc6099c24d70b1c19bc339c3ec3dfa3562e0c389fa85753
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4e0fc4586ca91967e9f8afa4a2eef95b7a5f8cd13f3b6689b4ace12b64d3ea628bc5112735d54edcb20c0fe1c6debac1e4d32983e1555eb9a84d380ab5d68a4e
+  data.tar.gz: 10ae655222832b9b5f6b9d8aa155d0907da91e38f04c7ad74c949d7bf4c8ff5d222bde6b2e1779eca5bd4bebb99aaf54701c8e2fddd4cb5a3cff47fabe9831fe
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,6 @@
+## 10.2.0
+- Changed: config defaults to be aligned with Kafka client defaults [#30](https://github.com/logstash-plugins/logstash-integration-kafka/pull/30)
+
 ## 10.1.0
 - updated kafka client (and its dependencies) to version 2.4.1 ([#16](https://github.com/logstash-plugins/logstash-integration-kafka/pull/16))
 - added the input `client_rack` parameter to enable support for follower fetching
data/docs/index.asciidoc
CHANGED
@@ -26,6 +26,6 @@ The Kafka Integration Plugin provides integrated plugins for working with the ht
 - {logstash-ref}/plugins-inputs-kafka.html[Kafka Input Plugin]
 - {logstash-ref}/plugins-outputs-kafka.html[Kafka Output Plugin]
 
-This plugin uses Kafka Client 2.
+This plugin uses Kafka Client 2.4. For broker compatibility, see the official https://cwiki.apache.org/confluence/display/KAFKA/Compatibility+Matrix[Kafka compatibility reference]. If the linked compatibility wiki is not up-to-date, please contact Kafka support/community to confirm compatibility.
 
 :no_codec!:
data/docs/input-kafka.asciidoc
CHANGED
@@ -46,9 +46,9 @@ the same `group_id`.
 Ideally you should have as many threads as the number of partitions for a perfect balance --
 more threads than partitions means that some threads will be idle
 
-For more information see
+For more information see https://kafka.apache.org/24/documentation.html#theconsumer
 
-Kafka consumer configuration:
+Kafka consumer configuration: https://kafka.apache.org/24/documentation.html#consumerconfigs
 
 ==== Metadata fields
 
@@ -71,45 +71,48 @@ inserted into your original event, you'll have to use the `mutate` filter to man
 
 This plugin supports these configuration options plus the <<plugins-{type}s-{plugin}-common-options>> described later.
 
-NOTE: Some of these options map to a Kafka option.
+NOTE: Some of these options map to a Kafka option. Defaults usually reflect the Kafka default setting,
+and might change if Kafka's consumer defaults change.
+See the https://kafka.apache.org/24/documentation for more details.
 
 [cols="<,<,<",options="header",]
 |=======================================================================
 |Setting |Input type|Required
-| <<plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<<
+| <<plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-auto_offset_reset>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-bootstrap_servers>> |<<string,string>>|No
-| <<plugins-{type}s-{plugin}-check_crcs>> |<<
+| <<plugins-{type}s-{plugin}-check_crcs>> |<<boolean,boolean>>|No
 | <<plugins-{type}s-{plugin}-client_id>> |<<string,string>>|No
-| <<plugins-{type}s-{plugin}-
+| <<plugins-{type}s-{plugin}-client_rack>> |<<string,string>>|No
+| <<plugins-{type}s-{plugin}-connections_max_idle_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-consumer_threads>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-decorate_events>> |<<boolean,boolean>>|No
-| <<plugins-{type}s-{plugin}-enable_auto_commit>> |<<
+| <<plugins-{type}s-{plugin}-enable_auto_commit>> |<<boolean,boolean>>|No
 | <<plugins-{type}s-{plugin}-exclude_internal_topics>> |<<string,string>>|No
-| <<plugins-{type}s-{plugin}-fetch_max_bytes>> |<<
-| <<plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<<
-| <<plugins-{type}s-{plugin}-fetch_min_bytes>> |<<
+| <<plugins-{type}s-{plugin}-fetch_max_bytes>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-fetch_min_bytes>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-group_id>> |<<string,string>>|No
-| <<plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<<
+| <<plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
 | <<plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No
 | <<plugins-{type}s-{plugin}-key_deserializer_class>> |<<string,string>>|No
-| <<plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<<
-| <<plugins-{type}s-{plugin}-max_poll_interval_ms>> |<<
-| <<plugins-{type}s-{plugin}-max_poll_records>> |<<
-| <<plugins-{type}s-{plugin}-metadata_max_age_ms>> |<<
+| <<plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-max_poll_interval_ms>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-max_poll_records>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-metadata_max_age_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-partition_assignment_strategy>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-poll_timeout_ms>> |<<number,number>>|No
-| <<plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<
-| <<plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<
-| <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<
-| <<plugins-{type}s-{plugin}-retry_backoff_ms>> |<<
+| <<plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-sasl_jaas_config>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-sasl_mechanism>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-security_protocol>> |<<string,string>>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No
-| <<plugins-{type}s-{plugin}-send_buffer_bytes>> |<<
-| <<plugins-{type}s-{plugin}-session_timeout_ms>> |<<
+| <<plugins-{type}s-{plugin}-send_buffer_bytes>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-session_timeout_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-ssl_endpoint_identification_algorithm>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-ssl_key_password>> |<<password,password>>|No
 | <<plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No
@@ -121,7 +124,6 @@ NOTE: Some of these options map to a Kafka option. See the https://kafka.apache.
 | <<plugins-{type}s-{plugin}-topics>> |<<array,array>>|No
 | <<plugins-{type}s-{plugin}-topics_pattern>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-value_deserializer_class>> |<<string,string>>|No
-| <<plugins-{type}s-{plugin}-client_rack>> |<<string,string>>|No
 |=======================================================================
 
 Also see <<plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
@@ -132,8 +134,8 @@ input plugins.
 [id="plugins-{type}s-{plugin}-auto_commit_interval_ms"]
 ===== `auto_commit_interval_ms`
 
-* Value type is <<
-* Default value is `
+* Value type is <<number,number>>
+* Default value is `5000`.
 
 The frequency in milliseconds that the consumer offsets are committed to Kafka.
 
@@ -165,12 +167,12 @@ case a server is down).
 [id="plugins-{type}s-{plugin}-check_crcs"]
 ===== `check_crcs`
 
-* Value type is <<
-*
+* Value type is <<boolean,boolean>>
+* Default value is `true`
 
-Automatically check the CRC32 of the records consumed.
-corruption to the messages occurred.
-disabled in cases seeking extreme performance.
+Automatically check the CRC32 of the records consumed.
+This ensures no on-the-wire or on-disk corruption to the messages occurred.
+This check adds some overhead, so it may be disabled in cases seeking extreme performance.
 
 [id="plugins-{type}s-{plugin}-client_id"]
 ===== `client_id`
@@ -182,12 +184,25 @@ The id string to pass to the server when making requests. The purpose of this
 is to be able to track the source of requests beyond just ip/port by allowing
 a logical application name to be included.
 
-[id="plugins-{type}s-{plugin}-
-===== `
+[id="plugins-{type}s-{plugin}-client_rack"]
+===== `client_rack`
 
 * Value type is <<string,string>>
 * There is no default value for this setting.
 
+A rack identifier for the Kafka consumer.
+Used to select the physically closest rack for the consumer to read from.
+The setting corresponds with Kafka's `broker.rack` configuration.
+
+NOTE: Available only for Kafka 2.4.0 and higher. See
+https://cwiki.apache.org/confluence/display/KAFKA/KIP-392%3A+Allow+consumers+to+fetch+from+closest+replica[KIP-392].
+
+[id="plugins-{type}s-{plugin}-connections_max_idle_ms"]
+===== `connections_max_idle_ms`
+
+* Value type is <<number,number>>
+* Default value is `540000` milliseconds (9 minutes).
+
 Close idle connections after the number of milliseconds specified by this config.
 
 [id="plugins-{type}s-{plugin}-consumer_threads"]
@@ -217,8 +232,8 @@ This will add a field named `kafka` to the logstash event containing the followi
 [id="plugins-{type}s-{plugin}-enable_auto_commit"]
 ===== `enable_auto_commit`
 
-* Value type is <<
-* Default value is `
+* Value type is <<boolean,boolean>>
+* Default value is `true`
 
 This committed offset will be used when the process fails as the position from
 which the consumption will begin.
@@ -239,8 +254,8 @@ If set to true the only way to receive records from an internal topic is subscri
 [id="plugins-{type}s-{plugin}-fetch_max_bytes"]
 ===== `fetch_max_bytes`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `52428800` (50MB)
 
 The maximum amount of data the server should return for a fetch request. This is not an
 absolute maximum, if the first message in the first non-empty partition of the fetch is larger
@@ -249,8 +264,8 @@ than this value, the message will still be returned to ensure that the consumer
 [id="plugins-{type}s-{plugin}-fetch_max_wait_ms"]
 ===== `fetch_max_wait_ms`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `500` milliseconds.
 
 The maximum amount of time the server will block before answering the fetch request if
 there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
@@ -259,7 +274,7 @@ should be less than or equal to the timeout used in `poll_timeout_ms`
 [id="plugins-{type}s-{plugin}-fetch_min_bytes"]
 ===== `fetch_min_bytes`
 
-* Value type is <<
+* Value type is <<number,number>>
 * There is no default value for this setting.
 
 The minimum amount of data the server should return for a fetch request. If insufficient
|
 [id="plugins-{type}s-{plugin}-heartbeat_interval_ms"]
 ===== `heartbeat_interval_ms`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `3000` milliseconds (3 seconds).
 
 The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
 that the consumer's session stays active and to facilitate rebalancing when new
@@ -330,8 +345,8 @@ Java Class used to deserialize the record's key
 [id="plugins-{type}s-{plugin}-max_partition_fetch_bytes"]
 ===== `max_partition_fetch_bytes`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `1048576` (1MB).
 
 The maximum amount of data per-partition the server will return. The maximum total memory used for a
 request will be `#partitions * max.partition.fetch.bytes`. This size must be at least
@@ -342,28 +357,28 @@ to fetch a large message on a certain partition.
 [id="plugins-{type}s-{plugin}-max_poll_interval_ms"]
 ===== `max_poll_interval_ms`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `300000` milliseconds (5 minutes).
 
 The maximum delay between invocations of poll() when using consumer group management. This places
 an upper bound on the amount of time that the consumer can be idle before fetching more records.
 If poll() is not called before expiration of this timeout, then the consumer is considered failed and
 the group will rebalance in order to reassign the partitions to another member.
-The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms
+The value of the configuration `request_timeout_ms` must always be larger than `max_poll_interval_ms`. ???
 
 [id="plugins-{type}s-{plugin}-max_poll_records"]
 ===== `max_poll_records`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `500`.
 
 The maximum number of records returned in a single call to poll().
 
 [id="plugins-{type}s-{plugin}-metadata_max_age_ms"]
 ===== `metadata_max_age_ms`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `300000` milliseconds (5 minutes).
 
 The period of time in milliseconds after which we force a refresh of metadata even if
 we haven't seen any partition leadership changes to proactively discover any new brokers or partitions
@@ -389,23 +404,28 @@ implementations.
 ===== `poll_timeout_ms`
 
 * Value type is <<number,number>>
-* Default value is `100`
+* Default value is `100` milliseconds.
+
+Time Kafka consumer will wait to receive new messages from topics.
 
-
+After subscribing to a set of topics, the Kafka consumer automatically joins the group when polling.
+The plugin poll-ing in a loop ensures consumer liveness.
+Underneath the covers, Kafka client sends periodic heartbeats to the server.
+The timeout specified the time to block waiting for input on each poll.
 
 [id="plugins-{type}s-{plugin}-receive_buffer_bytes"]
 ===== `receive_buffer_bytes`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `32768` (32KB).
 
 The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
 
 [id="plugins-{type}s-{plugin}-reconnect_backoff_ms"]
 ===== `reconnect_backoff_ms`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `50` milliseconds.
 
 The amount of time to wait before attempting to reconnect to a given host.
 This avoids repeatedly connecting to a host in a tight loop.
@@ -414,8 +434,8 @@ This backoff applies to all requests sent by the consumer to the broker.
 [id="plugins-{type}s-{plugin}-request_timeout_ms"]
 ===== `request_timeout_ms`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `40000` milliseconds (40 seconds).
 
 The configuration controls the maximum amount of time the client will wait
 for the response of a request. If the response is not received before the timeout
@@ -425,8 +445,8 @@ retries are exhausted.
 [id="plugins-{type}s-{plugin}-retry_backoff_ms"]
 ===== `retry_backoff_ms`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `100` milliseconds.
 
 The amount of time to wait before attempting to retry a failed fetch request
 to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
@@ -479,16 +499,16 @@ Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SA
 [id="plugins-{type}s-{plugin}-send_buffer_bytes"]
 ===== `send_buffer_bytes`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `131072` (128KB).
 
 The size of the TCP send buffer (SO_SNDBUF) to use when sending data
 
 [id="plugins-{type}s-{plugin}-session_timeout_ms"]
 ===== `session_timeout_ms`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `10000` milliseconds (10 seconds).
 
 The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead
 and a rebalance operation is triggered for the group identified by `group_id`
@@ -548,7 +568,7 @@ The JKS truststore path to validate the Kafka broker's certificate.
 * Value type is <<password,password>>
 * There is no default value for this setting.
 
-The truststore password
+The truststore password.
 
 [id="plugins-{type}s-{plugin}-ssl_truststore_type"]
 ===== `ssl_truststore_type`
@@ -583,19 +603,6 @@ The topics configuration will be ignored when using this configuration.
 
 Java Class used to deserialize the record's value
 
-
-[id="plugins-{type}s-{plugin}-client_rack"]
-===== `client_rack`
-
-* Value type is <<string,string>>
-* There is no default value for this setting.
-
-A rack identifier for the Kafka consumer.
-Used to select the physically closest rack for the consumer to read from.
-The setting corresponds with Kafka's `broker.rack` configuration.
-
-NOTE: Only available for Kafka 2.4.0 and higher; see https://cwiki.apache.org/confluence/display/KAFKA/KIP-392%3A+Allow+consumers+to+fetch+from+closest+replica[KIP-392].
-
 [id="plugins-{type}s-{plugin}-common-options"]
 include::{include_path}/{type}.asciidoc[]
 
data/docs/output-kafka.asciidoc
CHANGED
@@ -47,15 +47,19 @@ If you want the full content of your events to be sent as json, you should set t
 }
 }
 
-For more information see
+For more information see https://kafka.apache.org/24/documentation.html#theproducer
 
-Kafka producer configuration:
+Kafka producer configuration: https://kafka.apache.org/24/documentation.html#producerconfigs
 
 [id="plugins-{type}s-{plugin}-options"]
 ==== Kafka Output Configuration Options
 
 This plugin supports the following configuration options plus the <<plugins-{type}s-{plugin}-common-options>> described later.
 
+NOTE: Some of these options map to a Kafka option. Defaults usually reflect the Kafka default setting,
+and might change if Kafka's producer defaults change.
+See the https://kafka.apache.org/24/documentation for more details.
+
 [cols="<,<,<",options="header",]
 |=======================================================================
 |Setting |Input type|Required
@@ -76,7 +80,7 @@ This plugin supports the following configuration options plus the <<plugins-{typ
 | <<plugins-{type}s-{plugin}-partitioner>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
-| <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<
+| <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-retries>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-sasl_jaas_config>> |<<string,string>>|No
@@ -119,7 +123,7 @@ acks=all, This means the leader will wait for the full set of in-sync replicas t
 ===== `batch_size`
 
 * Value type is <<number,number>>
-* Default value is `16384
+* Default value is `16384`.
 
 The producer will attempt to batch records together into fewer requests whenever multiple
 records are being sent to the same partition. This helps performance on both the client
@@ -141,7 +145,7 @@ subset of brokers.
 ===== `buffer_memory`
 
 * Value type is <<number,number>>
-* Default value is `33554432`
+* Default value is `33554432` (32MB).
 
 The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
 
@@ -221,7 +225,7 @@ to allow other records to be sent so that the sends can be batched together.
 ===== `max_request_size`
 
 * Value type is <<number,number>>
-* Default value is `1048576`
+* Default value is `1048576` (1MB).
 
 The maximum size of a request
 
@@ -231,23 +235,23 @@ The maximum size of a request
 * Value type is <<string,string>>
 * There is no default value for this setting.
 
-The key for the message
+The key for the message.
 
 [id="plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"]
 ===== `metadata_fetch_timeout_ms`
 
 * Value type is <<number,number>>
-* Default value is `60000`
+* Default value is `60000` milliseconds (60 seconds).
 
-
+The timeout setting for initial metadata request to fetch topic metadata.
 
 [id="plugins-{type}s-{plugin}-metadata_max_age_ms"]
 ===== `metadata_max_age_ms`
 
 * Value type is <<number,number>>
-* Default value is `300000`
+* Default value is `300000` milliseconds (5 minutes).
 
-
+The max time in milliseconds before a metadata refresh is forced.
 
 [id="plugins-{type}s-{plugin}-partitioner"]
 ===== `partitioner`
@@ -268,7 +272,7 @@ Available options for choosing a partitioning strategy are as follows:
 ===== `receive_buffer_bytes`
 
 * Value type is <<number,number>>
-* Default value is `32768`
+* Default value is `32768` (32KB).
 
 The size of the TCP receive buffer to use when reading data
 
@@ -276,15 +280,15 @@ The size of the TCP receive buffer to use when reading data
 ===== `reconnect_backoff_ms`
 
 * Value type is <<number,number>>
-* Default value is `
+* Default value is `50`.
 
 The amount of time to wait before attempting to reconnect to a given host when a connection fails.
 
 [id="plugins-{type}s-{plugin}-request_timeout_ms"]
 ===== `request_timeout_ms`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `40000` milliseconds (40 seconds).
 
 The configuration controls the maximum amount of time the client will wait
 for the response of a request. If the response is not received before the timeout
@@ -311,7 +315,7 @@ A value less than zero is a configuration error.
 ===== `retry_backoff_ms`
 
 * Value type is <<number,number>>
-* Default value is `100`
+* Default value is `100` milliseconds.
 
 The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
 
@@ -364,7 +368,7 @@ Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SA
 ===== `send_buffer_bytes`
 
 * Value type is <<number,number>>
-* Default value is `131072`
+* Default value is `131072` (128KB).
 
 The size of the TCP send buffer to use when sending data.
 
data/lib/logstash/inputs/kafka.rb
CHANGED
@@ -53,7 +53,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
 default :codec, 'plain'
 
 # The frequency in milliseconds that the consumer offsets are committed to Kafka.
-config :auto_commit_interval_ms, :validate => :
+config :auto_commit_interval_ms, :validate => :number, :default => 5000 # Kafka default
 # What to do when there is no initial offset in Kafka or if an offset is out of range:
 #
 # * earliest: automatically reset the offset to the earliest offset
@@ -70,35 +70,35 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
 # Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk
 # corruption to the messages occurred. This check adds some overhead, so it may be
 # disabled in cases seeking extreme performance.
-config :check_crcs, :validate => :
+config :check_crcs, :validate => :boolean, :default => true
 # The id string to pass to the server when making requests. The purpose of this
 # is to be able to track the source of requests beyond just ip/port by allowing
 # a logical application name to be included.
 config :client_id, :validate => :string, :default => "logstash"
 # Close idle connections after the number of milliseconds specified by this config.
-config :connections_max_idle_ms, :validate => :
+config :connections_max_idle_ms, :validate => :number, :default => 540_000 # (9m) Kafka default
 # Ideally you should have as many threads as the number of partitions for a perfect
 # balance — more threads than partitions means that some threads will be idle
 config :consumer_threads, :validate => :number, :default => 1
 # If true, periodically commit to Kafka the offsets of messages already returned by the consumer.
 # This committed offset will be used when the process fails as the position from
 # which the consumption will begin.
-config :enable_auto_commit, :validate => :
+config :enable_auto_commit, :validate => :boolean, :default => true
 # Whether records from internal topics (such as offsets) should be exposed to the consumer.
 # If set to true the only way to receive records from an internal topic is subscribing to it.
 config :exclude_internal_topics, :validate => :string
 # The maximum amount of data the server should return for a fetch request. This is not an
 # absolute maximum, if the first message in the first non-empty partition of the fetch is larger
 # than this value, the message will still be returned to ensure that the consumer can make progress.
-config :fetch_max_bytes, :validate => :
+config :fetch_max_bytes, :validate => :number, :default => 52_428_800 # (50MB) Kafka default
 # The maximum amount of time the server will block before answering the fetch request if
 # there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
 # should be less than or equal to the timeout used in `poll_timeout_ms`
-config :fetch_max_wait_ms, :validate => :
+config :fetch_max_wait_ms, :validate => :number, :default => 500 # Kafka default
 # The minimum amount of data the server should return for a fetch request. If insufficient
 # data is available the request will wait for that much data to accumulate
 # before answering the request.
-config :fetch_min_bytes, :validate => :
+config :fetch_min_bytes, :validate => :number
 # The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber
 # that happens to be made up of multiple processors. Messages in a topic will be distributed to all
 # Logstash instances with the same `group_id`
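A side note on the rewritten defaults in the hunk above (this note is not part of the diff): Ruby ignores underscores inside numeric literals, so forms such as 52_428_800 are a readability change only, not a change of the effective default value. A quick check in any Ruby console:

  52_428_800 == 52428800  # => true
  540_000 == 540000       # => true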
@@ -108,50 +108,50 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
 # consumers join or leave the group. The value must be set lower than
 # `session.timeout.ms`, but typically should be set no higher than 1/3 of that value.
 # It can be adjusted even lower to control the expected time for normal rebalances.
-config :heartbeat_interval_ms, :validate => :
+config :heartbeat_interval_ms, :validate => :number, :default => 3000 # Kafka default
 # Java Class used to deserialize the record's key
 config :key_deserializer_class, :validate => :string, :default => "org.apache.kafka.common.serialization.StringDeserializer"
 # The maximum delay between invocations of poll() when using consumer group management. This places
 # an upper bound on the amount of time that the consumer can be idle before fetching more records.
 # If poll() is not called before expiration of this timeout, then the consumer is considered failed and
 # the group will rebalance in order to reassign the partitions to another member.
-
-config :max_poll_interval_ms, :validate => :string
+config :max_poll_interval_ms, :validate => :number, :default => 300_000 # (5m) Kafka default
 # The maximum amount of data per-partition the server will return. The maximum total memory used for a
 # request will be <code>#partitions * max.partition.fetch.bytes</code>. This size must be at least
 # as large as the maximum message size the server allows or else it is possible for the producer to
 # send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying
 # to fetch a large message on a certain partition.
-config :max_partition_fetch_bytes, :validate => :
+config :max_partition_fetch_bytes, :validate => :number, :default => 1_048_576 # (1MB) Kafka default
 # The maximum number of records returned in a single call to poll().
-config :max_poll_records, :validate => :
+config :max_poll_records, :validate => :number, :default => 500 # Kafka default
 # The period of time in milliseconds after which we force a refresh of metadata even if
 # we haven't seen any partition leadership changes to proactively discover any new brokers or partitions
-config :metadata_max_age_ms, :validate => :
+config :metadata_max_age_ms, :validate => :number, :default => 300_000 # (5m) Kafka default
 # The name of the partition assignment strategy that the client uses to distribute
 # partition ownership amongst consumer instances, supported options are `range`,
 # `round_robin`, `sticky` and `cooperative_sticky`
 # (for backwards compatibility setting the class name directly is supported).
 config :partition_assignment_strategy, :validate => :string
 # The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
-
-
+# If the value is `-1`, the OS default will be used.
+config :receive_buffer_bytes, :validate => :number, :default => 32_768 # (32KB) Kafka default
+# The base amount of time to wait before attempting to reconnect to a given host.
 # This avoids repeatedly connecting to a host in a tight loop.
-# This backoff applies to all
-config :reconnect_backoff_ms, :validate => :
-# The configuration controls the maximum amount of time the client will wait
-#
-#
-#
-config :request_timeout_ms, :validate => :string
+# This backoff applies to all connection attempts by the client to a broker.
+config :reconnect_backoff_ms, :validate => :number, :default => 50 # Kafka default
+# The configuration controls the maximum amount of time the client will wait for the response of a request.
+# If the response is not received before the timeout elapses the client will resend the request if necessary
+# or fail the request if retries are exhausted.
+config :request_timeout_ms, :validate => :number, :default => 40_000 # Kafka default
 # The amount of time to wait before attempting to retry a failed fetch request
 # to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
-config :retry_backoff_ms, :validate => :
-# The size of the TCP send buffer (SO_SNDBUF) to use when sending data
-
+config :retry_backoff_ms, :validate => :number, :default => 100 # Kafka default
+# The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
+# If the value is -1, the OS default will be used.
+config :send_buffer_bytes, :validate => :number, :default => 131_072 # (128KB) Kafka default
 # The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead
 # and a rebalance operation is triggered for the group identified by `group_id`
-config :session_timeout_ms, :validate => :
+config :session_timeout_ms, :validate => :number, :default => 10_000 # (10s) Kafka default
 # Java Class used to deserialize the record's value
 config :value_deserializer_class, :validate => :string, :default => "org.apache.kafka.common.serialization.StringDeserializer"
 # A list of topics to subscribe to, defaults to ["logstash"].
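The hunk above carries most of the 10.2.0 change: options that were previously declared with a `:string` validator (or with no explicit default) now use `:number`/`:boolean` validators and spell out the Kafka client defaults. As a rough, hypothetical illustration of the same Logstash plugin DSL (the class and plugin name below are made up for this note and are not part of the gem):

  require "logstash/inputs/base"

  class LogStash::Inputs::KafkaLikeExample < LogStash::Inputs::Base
    config_name "kafka_like_example"
    # :number accepts 25000 or "25000" from the pipeline config and exposes a reader,
    # so later code can call .to_s when handing the value to the Java client.
    config :session_timeout_ms, :validate => :number, :default => 10_000
    config :enable_auto_commit, :validate => :boolean, :default => true

    def register; end
    def run(queue); end
  end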
@@ -276,9 +276,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
 end
 end
 # Manual offset commit
-if @enable_auto_commit
-consumer.commitSync
-end
+consumer.commitSync if @enable_auto_commit.eql?(false)
 end
 rescue org.apache.kafka.common.errors.WakeupException => e
 raise e if !stop?
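The one-line change above fixes the commit condition: `commitSync` is now called only when auto-commit is disabled, i.e. when the plugin itself is responsible for committing offsets; with `enable_auto_commit => true` the Kafka client already commits on its own every `auto_commit_interval_ms`. A condensed, hypothetical view of the surrounding poll loop (helper names such as `to_event` are placeholders, not the plugin's real methods):

  until stop?
    records = consumer.poll(poll_timeout_ms)              # may return an empty batch
    records.each { |record| queue << to_event(record) }   # push decoded events downstream
    # commit manually only when the Kafka client is not auto-committing
    consumer.commitSync if @enable_auto_commit.eql?(false)
  end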
@@ -294,31 +292,31 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
 props = java.util.Properties.new
 kafka = org.apache.kafka.clients.consumer.ConsumerConfig
 
-props.put(kafka::AUTO_COMMIT_INTERVAL_MS_CONFIG, auto_commit_interval_ms)
+props.put(kafka::AUTO_COMMIT_INTERVAL_MS_CONFIG, auto_commit_interval_ms.to_s) unless auto_commit_interval_ms.nil?
 props.put(kafka::AUTO_OFFSET_RESET_CONFIG, auto_offset_reset) unless auto_offset_reset.nil?
 props.put(kafka::BOOTSTRAP_SERVERS_CONFIG, bootstrap_servers)
-props.put(kafka::CHECK_CRCS_CONFIG, check_crcs) unless check_crcs.nil?
+props.put(kafka::CHECK_CRCS_CONFIG, check_crcs.to_s) unless check_crcs.nil?
 props.put(kafka::CLIENT_ID_CONFIG, client_id)
-props.put(kafka::CONNECTIONS_MAX_IDLE_MS_CONFIG, connections_max_idle_ms) unless connections_max_idle_ms.nil?
-props.put(kafka::ENABLE_AUTO_COMMIT_CONFIG, enable_auto_commit)
+props.put(kafka::CONNECTIONS_MAX_IDLE_MS_CONFIG, connections_max_idle_ms.to_s) unless connections_max_idle_ms.nil?
+props.put(kafka::ENABLE_AUTO_COMMIT_CONFIG, enable_auto_commit.to_s)
 props.put(kafka::EXCLUDE_INTERNAL_TOPICS_CONFIG, exclude_internal_topics) unless exclude_internal_topics.nil?
-props.put(kafka::FETCH_MAX_BYTES_CONFIG, fetch_max_bytes) unless fetch_max_bytes.nil?
-props.put(kafka::FETCH_MAX_WAIT_MS_CONFIG, fetch_max_wait_ms) unless fetch_max_wait_ms.nil?
-props.put(kafka::FETCH_MIN_BYTES_CONFIG, fetch_min_bytes) unless fetch_min_bytes.nil?
+props.put(kafka::FETCH_MAX_BYTES_CONFIG, fetch_max_bytes.to_s) unless fetch_max_bytes.nil?
+props.put(kafka::FETCH_MAX_WAIT_MS_CONFIG, fetch_max_wait_ms.to_s) unless fetch_max_wait_ms.nil?
+props.put(kafka::FETCH_MIN_BYTES_CONFIG, fetch_min_bytes.to_s) unless fetch_min_bytes.nil?
 props.put(kafka::GROUP_ID_CONFIG, group_id)
-props.put(kafka::HEARTBEAT_INTERVAL_MS_CONFIG, heartbeat_interval_ms) unless heartbeat_interval_ms.nil?
+props.put(kafka::HEARTBEAT_INTERVAL_MS_CONFIG, heartbeat_interval_ms.to_s) unless heartbeat_interval_ms.nil?
 props.put(kafka::KEY_DESERIALIZER_CLASS_CONFIG, key_deserializer_class)
-props.put(kafka::MAX_PARTITION_FETCH_BYTES_CONFIG, max_partition_fetch_bytes) unless max_partition_fetch_bytes.nil?
-props.put(kafka::MAX_POLL_RECORDS_CONFIG, max_poll_records) unless max_poll_records.nil?
-props.put(kafka::MAX_POLL_INTERVAL_MS_CONFIG, max_poll_interval_ms) unless max_poll_interval_ms.nil?
-props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms) unless metadata_max_age_ms.nil?
+props.put(kafka::MAX_PARTITION_FETCH_BYTES_CONFIG, max_partition_fetch_bytes.to_s) unless max_partition_fetch_bytes.nil?
+props.put(kafka::MAX_POLL_RECORDS_CONFIG, max_poll_records.to_s) unless max_poll_records.nil?
+props.put(kafka::MAX_POLL_INTERVAL_MS_CONFIG, max_poll_interval_ms.to_s) unless max_poll_interval_ms.nil?
+props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms.to_s) unless metadata_max_age_ms.nil?
 props.put(kafka::PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partition_assignment_strategy_class) unless partition_assignment_strategy.nil?
-props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes) unless receive_buffer_bytes.nil?
-props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms) unless reconnect_backoff_ms.nil?
-props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms) unless request_timeout_ms.nil?
-props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms) unless retry_backoff_ms.nil?
-props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes) unless send_buffer_bytes.nil?
-props.put(kafka::SESSION_TIMEOUT_MS_CONFIG, session_timeout_ms) unless session_timeout_ms.nil?
+props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
+props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms.to_s) unless reconnect_backoff_ms.nil?
+props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms.to_s) unless request_timeout_ms.nil?
+props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s) unless retry_backoff_ms.nil?
+props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes.to_s) unless send_buffer_bytes.nil?
+props.put(kafka::SESSION_TIMEOUT_MS_CONFIG, session_timeout_ms.to_s) unless session_timeout_ms.nil?
 props.put(kafka::VALUE_DESERIALIZER_CLASS_CONFIG, value_deserializer_class)
 props.put(kafka::CLIENT_RACK_CONFIG, client_rack) unless client_rack.nil?
 
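The pattern above (appending `.to_s` before each `props.put`) exists because these options are now numbers and booleans on the Ruby side, while the Java consumer is handed a `java.util.Properties` object that the plugin fills with string values. A stripped-down sketch of the same idea, runnable only under JRuby; the hash below is illustrative and not the plugin's full option set:

  # JRuby only: java.util.Properties is reachable through the Java integration.
  props = java.util.Properties.new
  { 'session.timeout.ms' => 10_000, 'enable.auto.commit' => true, 'client.rack' => nil }.each do |name, value|
    props.put(name, value.to_s) unless value.nil?  # skip unset options, stringify the rest
  end
  props.get('session.timeout.ms')  # => "10000"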
@@ -374,15 +372,15 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
 end
 
 def set_sasl_config(props)
-java.lang.System.setProperty("java.security.auth.login.config",jaas_path) unless jaas_path.nil?
-java.lang.System.setProperty("java.security.krb5.conf",kerberos_config) unless kerberos_config.nil?
+java.lang.System.setProperty("java.security.auth.login.config", jaas_path) unless jaas_path.nil?
+java.lang.System.setProperty("java.security.krb5.conf", kerberos_config) unless kerberos_config.nil?
 
-props.put("sasl.mechanism",sasl_mechanism)
+props.put("sasl.mechanism", sasl_mechanism)
 if sasl_mechanism == "GSSAPI" && sasl_kerberos_service_name.nil?
 raise LogStash::ConfigurationError, "sasl_kerberos_service_name must be specified when SASL mechanism is GSSAPI"
 end
 
-props.put("sasl.kerberos.service.name",sasl_kerberos_service_name) unless sasl_kerberos_service_name.nil?
+props.put("sasl.kerberos.service.name", sasl_kerberos_service_name) unless sasl_kerberos_service_name.nil?
 props.put("sasl.jaas.config", sasl_jaas_config) unless sasl_jaas_config.nil?
 end
 end #class LogStash::Inputs::Kafka
data/lib/logstash/outputs/kafka.rb
CHANGED
@@ -67,7 +67,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
 # The producer will attempt to batch records together into fewer requests whenever multiple
 # records are being sent to the same partition. This helps performance on both the client
 # and the server. This configuration controls the default batch size in bytes.
-config :batch_size, :validate => :number, :default =>
+config :batch_size, :validate => :number, :default => 16_384 # Kafka default
 # This is for bootstrapping and the producer will only use it for getting metadata (topics,
 # partitions and replicas). The socket connections for sending the actual data will be
 # established based on the broker information returned in the metadata. The format is
@@ -75,7 +75,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
 # subset of brokers.
 config :bootstrap_servers, :validate => :string, :default => 'localhost:9092'
 # The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
-config :buffer_memory, :validate => :number, :default =>
+config :buffer_memory, :validate => :number, :default => 33_554_432 # (32M) Kafka default
 # The compression type for all data generated by the producer.
 # The default is none (i.e. no compression). Valid values are none, gzip, or snappy.
 config :compression_type, :validate => ["none", "gzip", "snappy", "lz4"], :default => "none"
@@ -92,26 +92,26 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
 # This setting accomplishes this by adding a small amount of artificial delay—that is,
 # rather than immediately sending out a record the producer will wait for up to the given delay
 # to allow other records to be sent so that the sends can be batched together.
-config :linger_ms, :validate => :number, :default => 0
+config :linger_ms, :validate => :number, :default => 0 # Kafka default
 # The maximum size of a request
-config :max_request_size, :validate => :number, :default =>
+config :max_request_size, :validate => :number, :default => 1_048_576 # (1MB) Kafka default
 # The key for the message
 config :message_key, :validate => :string
 # the timeout setting for initial metadata request to fetch topic metadata.
-config :metadata_fetch_timeout_ms, :validate => :number, :default =>
+config :metadata_fetch_timeout_ms, :validate => :number, :default => 60_000
 # the max time in milliseconds before a metadata refresh is forced.
-config :metadata_max_age_ms, :validate => :number, :default =>
+config :metadata_max_age_ms, :validate => :number, :default => 300_000 # (5m) Kafka default
 # Partitioner to use - can be `default`, `uniform_sticky`, `round_robin` or a fully qualified class name of a custom partitioner.
 config :partitioner, :validate => :string
 # The size of the TCP receive buffer to use when reading data
-config :receive_buffer_bytes, :validate => :number, :default =>
+config :receive_buffer_bytes, :validate => :number, :default => 32_768 # (32KB) Kafka default
 # The amount of time to wait before attempting to reconnect to a given host when a connection fails.
-config :reconnect_backoff_ms, :validate => :number, :default =>
+config :reconnect_backoff_ms, :validate => :number, :default => 50 # Kafka default
 # The configuration controls the maximum amount of time the client will wait
 # for the response of a request. If the response is not received before the timeout
 # elapses the client will resend the request if necessary or fail the request if
 # retries are exhausted.
-config :request_timeout_ms, :validate => :
+config :request_timeout_ms, :validate => :number, :default => 40_000 # (40s) Kafka default
 # The default retry behavior is to retry until successful. To prevent data loss,
 # the use of this setting is discouraged.
 #
@@ -122,9 +122,9 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
 # A value less than zero is a configuration error.
 config :retries, :validate => :number
 # The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
-config :retry_backoff_ms, :validate => :number, :default => 100
+config :retry_backoff_ms, :validate => :number, :default => 100 # Kafka default
 # The size of the TCP send buffer to use when sending data.
-config :send_buffer_bytes, :validate => :number, :default =>
+config :send_buffer_bytes, :validate => :number, :default => 131_072 # (128KB) Kafka default
 # The truststore type.
 config :ssl_truststore_type, :validate => :string
 # The JKS truststore path to validate the Kafka broker's certificate.
@@ -322,14 +322,14 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
 props.put(kafka::KEY_SERIALIZER_CLASS_CONFIG, key_serializer)
 props.put(kafka::LINGER_MS_CONFIG, linger_ms.to_s)
 props.put(kafka::MAX_REQUEST_SIZE_CONFIG, max_request_size.to_s)
-props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms) unless metadata_max_age_ms.nil?
+props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms.to_s) unless metadata_max_age_ms.nil?
 unless partitioner.nil?
 props.put(kafka::PARTITIONER_CLASS_CONFIG, partitioner = partitioner_class)
 logger.debug('producer configured using partitioner', :partitioner_class => partitioner)
 end
 props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
-props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms) unless reconnect_backoff_ms.nil?
-props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms) unless request_timeout_ms.nil?
+props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms.to_s) unless reconnect_backoff_ms.nil?
+props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms.to_s) unless request_timeout_ms.nil?
 props.put(kafka::RETRIES_CONFIG, retries.to_s) unless retries.nil?
 props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s)
 props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes.to_s)
@@ -346,7 +346,6 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
 set_sasl_config(props)
 end
 
-
 org.apache.kafka.clients.producer.KafkaProducer.new(props)
 rescue => e
 logger.error("Unable to create Kafka producer from given configuration",
data/logstash-integration-kafka.gemspec
CHANGED
@@ -1,6 +1,6 @@
 Gem::Specification.new do |s|
 s.name = 'logstash-integration-kafka'
-s.version = '10.
+s.version = '10.2.0'
 s.licenses = ['Apache-2.0']
 s.summary = "Integration with Kafka - input and output plugins"
 s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline "+
data/spec/unit/inputs/kafka_spec.rb
CHANGED
@@ -48,4 +48,54 @@ describe LogStash::Inputs::Kafka do
 expect( subject.send(:create_consumer, 'sample_client-0') ).to be kafka_client
 end
 end
+
+context 'string integer config' do
+let(:config) { super.merge('session_timeout_ms' => '25000', 'max_poll_interval_ms' => '345000') }
+
+it "sets integer values" do
+expect(org.apache.kafka.clients.consumer.KafkaConsumer).
+to receive(:new).with(hash_including('session.timeout.ms' => '25000', 'max.poll.interval.ms' => '345000')).
+and_return kafka_client = double('kafka-consumer')
+
+expect( subject.send(:create_consumer, 'sample_client-1') ).to be kafka_client
+end
+end
+
+context 'integer config' do
+let(:config) { super.merge('session_timeout_ms' => 25200, 'max_poll_interval_ms' => 123_000) }
+
+it "sets integer values" do
+expect(org.apache.kafka.clients.consumer.KafkaConsumer).
+to receive(:new).with(hash_including('session.timeout.ms' => '25200', 'max.poll.interval.ms' => '123000')).
+and_return kafka_client = double('kafka-consumer')
+
+expect( subject.send(:create_consumer, 'sample_client-2') ).to be kafka_client
+end
+end
+
+context 'string boolean config' do
+let(:config) { super.merge('enable_auto_commit' => 'false', 'check_crcs' => 'true') }
+
+it "sets parameters" do
+expect(org.apache.kafka.clients.consumer.KafkaConsumer).
+to receive(:new).with(hash_including('enable.auto.commit' => 'false', 'check.crcs' => 'true')).
+and_return kafka_client = double('kafka-consumer')
+
+expect( subject.send(:create_consumer, 'sample_client-3') ).to be kafka_client
+expect( subject.enable_auto_commit ).to be false
+end
+end
+
+context 'boolean config' do
+let(:config) { super.merge('enable_auto_commit' => true, 'check_crcs' => false) }
+
+it "sets parameters" do
+expect(org.apache.kafka.clients.consumer.KafkaConsumer).
+to receive(:new).with(hash_including('enable.auto.commit' => 'true', 'check.crcs' => 'false')).
+and_return kafka_client = double('kafka-consumer')
+
+expect( subject.send(:create_consumer, 'sample_client-4') ).to be kafka_client
+expect( subject.enable_auto_commit ).to be true
+end
+end
 end
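The four new contexts above check the same behaviour from both directions: whether an option arrives from the pipeline configuration as a String or as a native Integer/Boolean, the consumer properties end up with the same string value. Illustration only, not taken from the spec file (both calls assume a loaded Logstash environment and rely on the option defaults for everything else):

  # Both instances should yield 'session.timeout.ms' => '25000' in the
  # java.util.Properties built by create_consumer.
  LogStash::Inputs::Kafka.new('session_timeout_ms' => 25_000)
  LogStash::Inputs::Kafka.new('session_timeout_ms' => '25000')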
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-integration-kafka
 version: !ruby/object:Gem::Version
-version: 10.
+version: 10.2.0
 platform: java
 authors:
 - Elastic
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2020-
+date: 2020-04-30 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
 requirement: !ruby/object:Gem::Requirement