logstash-input-kafka 6.2.7 → 6.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: adf059fe901952446cc470414f3bb55003b44aeb
4
- data.tar.gz: af25a62efc3f16b61742956de53e162fa19669c4
3
+ metadata.gz: 67d7e39487de32bc46086f71dc728d0b73a8d51d
4
+ data.tar.gz: 3e95c206a4142c3c90aa814e33d534edf5a6ce94
5
5
  SHA512:
6
- metadata.gz: d8646d1e9f74ba8ff431897ec9be8eb6dded0dc86f4bbec135d7eb8b4e03f68f62bbc4b6fc1b2de1fedea874847d4b46e86e4f6fc4064db1c84cc50941f3ee31
7
- data.tar.gz: c3918567af917e00203ae5a8b49855e2939ecdd6f132b4c25ae6c97abeaadac5454b31117ab61ef9103ae753d76c131abe73e1509a153eeea8448d20769994b3
6
+ metadata.gz: aba387cb731bc155f245f6fd913b4cdd41b0cd9ef39468f392412fb4424b9474bf01f4156465115eb94860d1a22cf8c60a2dcae81be699b30a0d91ce5ba4bb70
7
+ data.tar.gz: aaebae3fb793842fca36c652c3de82812bbb6811a82d7d3ad6e29ebb3c9de38ebf14fd40e6eb5c14f37ac89b39d244de4991bcb6c605de055810da71094212d7
data/CHANGELOG.md CHANGED
@@ -1,3 +1,6 @@
1
+ ## 6.3.0
2
+ - Upgrade Kafka client to version 0.10.2.1
3
+
1
4
  ## 6.2.7
2
5
  - Fix NPE when SASL_SSL+PLAIN (no Kerberos) is specified.
3
6
 
data/docs/index.asciidoc ADDED
@@ -0,0 +1,551 @@
1
+ :plugin: kafka
2
+ :type: input
3
+
4
+ ///////////////////////////////////////////
5
+ START - GENERATED VARIABLES, DO NOT EDIT!
6
+ ///////////////////////////////////////////
7
+ :version: %VERSION%
8
+ :release_date: %RELEASE_DATE%
9
+ :changelog_url: %CHANGELOG_URL%
10
+ :include_path: ../../../logstash/docs/include
11
+ ///////////////////////////////////////////
12
+ END - GENERATED VARIABLES, DO NOT EDIT!
13
+ ///////////////////////////////////////////
14
+
15
+ [id="plugins-{type}-{plugin}"]
16
+
17
+ === Kafka
18
+
19
+ include::{include_path}/plugin_header.asciidoc[]
20
+
21
+ ==== Description
22
+
23
+ This input will read events from a Kafka topic. It uses the 0.10 version of
24
+ the consumer API provided by Kafka to read messages from the broker.
25
+
26
+ Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
27
+ of Logstash and the Kafka input plugin:
28
+
29
+ [options="header"]
30
+ |==========================================================
31
+ |Kafka Client Version |Logstash Version |Plugin Version |Why?
32
+ |0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
33
+ |0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
34
+ |0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
35
+ |0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
36
+ |0.10.1.x |2.4.x - 5.x.x | 6.x.x |
37
+ |==========================================================
38
+
39
+ NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
40
+ upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
41
+ is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
42
+
43
+ This input supports connecting to Kafka over:
44
+
45
+ * SSL (requires plugin version 3.0.0 or later)
46
+ * Kerberos SASL (requires plugin version 5.1.0 or later)
47
+
48
+ By default security is disabled but can be turned on as needed.
49
+
50
+ The Logstash Kafka consumer handles group management and uses the default offset management
51
+ strategy using Kafka topics.
52
+
53
+ Logstash instances by default form a single logical group to subscribe to Kafka topics.
54
+ Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
55
+ you could run multiple Logstash instances with the same `group_id` to spread the load across
56
+ physical machines. Messages in a topic will be distributed to all Logstash instances with
57
+ the same `group_id`.
58
+
59
+ Ideally you should have as many threads as the number of partitions for a perfect balance --
60
+ more threads than partitions means that some threads will be idle.
61
+
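+ For example, a minimal pipeline that spreads consumption of a hypothetical `app-logs` topic across
+ four consumer threads (the servers, topic name, and thread count below are illustrative, not defaults):
+
+ [source,ruby]
+ ----------------------------------
+ input {
+   kafka {
+     bootstrap_servers => "kafka1:9092,kafka2:9092"
+     topics => ["app-logs"]
+     group_id => "logstash"
+     auto_offset_reset => "earliest"
+     consumer_threads => 4
+   }
+ }
+ ----------------------------------
+
+ Running the same configuration on several Logstash instances makes them share the `logstash` consumer
+ group, so Kafka distributes the topic's partitions across all of them.
+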
62
+ For more information see http://kafka.apache.org/documentation.html#theconsumer
63
+
64
+ Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
65
+
66
+
67
+ [id="plugins-{type}s-{plugin}-options"]
68
+ ==== Kafka Input Configuration Options
69
+
70
+ This plugin supports the following configuration options plus the <<plugins-{type}s-common-options>> described later.
71
+
72
+ [cols="<,<,<",options="header",]
73
+ |=======================================================================
74
+ |Setting |Input type|Required
75
+ | <<plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<<string,string>>|No
76
+ | <<plugins-{type}s-{plugin}-auto_offset_reset>> |<<string,string>>|No
77
+ | <<plugins-{type}s-{plugin}-bootstrap_servers>> |<<string,string>>|No
78
+ | <<plugins-{type}s-{plugin}-check_crcs>> |<<string,string>>|No
79
+ | <<plugins-{type}s-{plugin}-client_id>> |<<string,string>>|No
80
+ | <<plugins-{type}s-{plugin}-connections_max_idle_ms>> |<<string,string>>|No
81
+ | <<plugins-{type}s-{plugin}-consumer_threads>> |<<number,number>>|No
82
+ | <<plugins-{type}s-{plugin}-decorate_events>> |<<boolean,boolean>>|No
83
+ | <<plugins-{type}s-{plugin}-enable_auto_commit>> |<<string,string>>|No
84
+ | <<plugins-{type}s-{plugin}-exclude_internal_topics>> |<<string,string>>|No
85
+ | <<plugins-{type}s-{plugin}-fetch_max_bytes>> |<<string,string>>|No
86
+ | <<plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<<string,string>>|No
87
+ | <<plugins-{type}s-{plugin}-fetch_min_bytes>> |<<string,string>>|No
88
+ | <<plugins-{type}s-{plugin}-group_id>> |<<string,string>>|No
89
+ | <<plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<<string,string>>|No
90
+ | <<plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
91
+ | <<plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No
92
+ | <<plugins-{type}s-{plugin}-key_deserializer_class>> |<<string,string>>|No
93
+ | <<plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<<string,string>>|No
94
+ | <<plugins-{type}s-{plugin}-max_poll_interval_ms>> |<<string,string>>|No
95
+ | <<plugins-{type}s-{plugin}-max_poll_records>> |<<string,string>>|No
96
+ | <<plugins-{type}s-{plugin}-metadata_max_age_ms>> |<<string,string>>|No
97
+ | <<plugins-{type}s-{plugin}-partition_assignment_strategy>> |<<string,string>>|No
98
+ | <<plugins-{type}s-{plugin}-poll_timeout_ms>> |<<number,number>>|No
99
+ | <<plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<string,string>>|No
100
+ | <<plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<string,string>>|No
101
+ | <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<string,string>>|No
102
+ | <<plugins-{type}s-{plugin}-retry_backoff_ms>> |<<string,string>>|No
103
+ | <<plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<<string,string>>|No
104
+ | <<plugins-{type}s-{plugin}-sasl_mechanism>> |<<string,string>>|No
105
+ | <<plugins-{type}s-{plugin}-security_protocol>> |<<string,string>>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No
106
+ | <<plugins-{type}s-{plugin}-send_buffer_bytes>> |<<string,string>>|No
107
+ | <<plugins-{type}s-{plugin}-session_timeout_ms>> |<<string,string>>|No
108
+ | <<plugins-{type}s-{plugin}-ssl_key_password>> |<<password,password>>|No
109
+ | <<plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No
110
+ | <<plugins-{type}s-{plugin}-ssl_keystore_password>> |<<password,password>>|No
111
+ | <<plugins-{type}s-{plugin}-ssl_keystore_type>> |<<string,string>>|No
112
+ | <<plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No
113
+ | <<plugins-{type}s-{plugin}-ssl_truststore_password>> |<<password,password>>|No
114
+ | <<plugins-{type}s-{plugin}-ssl_truststore_type>> |<<string,string>>|No
115
+ | <<plugins-{type}s-{plugin}-topics>> |<<array,array>>|No
116
+ | <<plugins-{type}s-{plugin}-topics_pattern>> |<<string,string>>|No
117
+ | <<plugins-{type}s-{plugin}-value_deserializer_class>> |<<string,string>>|No
118
+ |=======================================================================
119
+
120
+ Also see <<plugins-{type}s-common-options>> for a list of options supported by all
121
+ input plugins.
122
+
123
+ &nbsp;
124
+
125
+ [id="plugins-{type}s-{plugin}-auto_commit_interval_ms"]
126
+ ===== `auto_commit_interval_ms`
127
+
128
+ * Value type is <<string,string>>
129
+ * Default value is `"5000"`
130
+
131
+ The frequency in milliseconds that the consumer offsets are committed to Kafka.
132
+
133
+ [id="plugins-{type}s-{plugin}-auto_offset_reset"]
134
+ ===== `auto_offset_reset`
135
+
136
+ * Value type is <<string,string>>
137
+ * There is no default value for this setting.
138
+
139
+ What to do when there is no initial offset in Kafka or if an offset is out of range:
140
+
141
+ * earliest: automatically reset the offset to the earliest offset
142
+ * latest: automatically reset the offset to the latest offset
143
+ * none: throw exception to the consumer if no previous offset is found for the consumer's group
144
+ * anything else: throw exception to the consumer.
145
+
146
+ [id="plugins-{type}s-{plugin}-bootstrap_servers"]
147
+ ===== `bootstrap_servers`
148
+
149
+ * Value type is <<string,string>>
150
+ * Default value is `"localhost:9092"`
151
+
152
+ A list of URLs of Kafka instances to use for establishing the initial connection to the cluster.
153
+ This list should be in the form of `host1:port1,host2:port2`. These URLs are just used
154
+ for the initial connection to discover the full cluster membership (which may change dynamically)
155
+ so this list need not contain the full set of servers (you may want more than one, though, in
156
+ case a server is down).
157
+
158
+ [id="plugins-{type}s-{plugin}-check_crcs"]
159
+ ===== `check_crcs`
160
+
161
+ * Value type is <<string,string>>
162
+ * There is no default value for this setting.
163
+
164
+ Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk
165
+ corruption to the messages occurred. This check adds some overhead, so it may be
166
+ disabled in cases seeking extreme performance.
167
+
168
+ [id="plugins-{type}s-{plugin}-client_id"]
169
+ ===== `client_id`
170
+
171
+ * Value type is <<string,string>>
172
+ * Default value is `"logstash"`
173
+
174
+ The id string to pass to the server when making requests. The purpose of this
175
+ is to be able to track the source of requests beyond just ip/port by allowing
176
+ a logical application name to be included.
177
+
178
+ [id="plugins-{type}s-{plugin}-connections_max_idle_ms"]
179
+ ===== `connections_max_idle_ms`
180
+
181
+ * Value type is <<string,string>>
182
+ * There is no default value for this setting.
183
+
184
+ Close idle connections after the number of milliseconds specified by this config.
185
+
186
+ [id="plugins-{type}s-{plugin}-consumer_threads"]
187
+ ===== `consumer_threads`
188
+
189
+ * Value type is <<number,number>>
190
+ * Default value is `1`
191
+
192
+ Ideally you should have as many threads as the number of partitions for a perfect
193
+ balance -- more threads than partitions means that some threads will be idle.
194
+
195
+ [id="plugins-{type}s-{plugin}-decorate_events"]
196
+ ===== `decorate_events`
197
+
198
+ * Value type is <<boolean,boolean>>
199
+ * Default value is `false`
200
+
201
+ Option to add Kafka metadata like topic, message size to the event.
202
+ This will add a field named `kafka` to the logstash event containing the following attributes:
203
+ `topic`: The topic this message is associated with
204
+ `consumer_group`: The consumer group used to read in this event
205
+ `partition`: The partition this message is associated with
206
+ `offset`: The offset from the partition this message is associated with
207
+ `key`: A ByteBuffer containing the message key
208
+
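+ As an illustration (the output plugin and index name are hypothetical), the added metadata can then be
+ referenced anywhere else in the pipeline through the `kafka` field:
+
+ [source,ruby]
+ ----------------------------------
+ input {
+   kafka {
+     topics => ["logstash"]
+     decorate_events => true
+   }
+ }
+ output {
+   elasticsearch {
+     index => "kafka-%{[kafka][topic]}"
+   }
+ }
+ ----------------------------------
+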
209
+ [id="plugins-{type}s-{plugin}-enable_auto_commit"]
210
+ ===== `enable_auto_commit`
211
+
212
+ * Value type is <<string,string>>
213
+ * Default value is `"true"`
214
+
215
+ If true, periodically commit to Kafka the offsets of messages already returned by the consumer.
216
+ This committed offset will be used when the process fails as the position from
217
+ which the consumption will begin.
218
+
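+ A sketch of disabling automatic commits (as of this release the plugin then calls `commitSync` after
+ each polled batch has been pushed to the queue; see the change to `lib/logstash/inputs/kafka.rb`
+ later in this diff):
+
+ [source,ruby]
+ ----------------------------------
+ input {
+   kafka {
+     topics => ["logstash"]
+     enable_auto_commit => "false"
+   }
+ }
+ ----------------------------------
+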
219
+ [id="plugins-{type}s-{plugin}-exclude_internal_topics"]
220
+ ===== `exclude_internal_topics`
221
+
222
+ * Value type is <<string,string>>
223
+ * There is no default value for this setting.
224
+
225
+ Whether records from internal topics (such as offsets) should be exposed to the consumer.
226
+ If set to true, the only way to receive records from an internal topic is to subscribe to it.
227
+
228
+ [id="plugins-{type}s-{plugin}-fetch_max_bytes"]
229
+ ===== `fetch_max_bytes`
230
+
231
+ * Value type is <<string,string>>
232
+ * There is no default value for this setting.
233
+
234
+ The maximum amount of data the server should return for a fetch request. This is not an
235
+ absolute maximum; if the first message in the first non-empty partition of the fetch is larger
236
+ than this value, the message will still be returned to ensure that the consumer can make progress.
237
+
238
+ [id="plugins-{type}s-{plugin}-fetch_max_wait_ms"]
239
+ ===== `fetch_max_wait_ms`
240
+
241
+ * Value type is <<string,string>>
242
+ * There is no default value for this setting.
243
+
244
+ The maximum amount of time the server will block before answering the fetch request if
245
+ there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
246
+ should be less than or equal to the timeout used in `poll_timeout_ms`.
247
+
248
+ [id="plugins-{type}s-{plugin}-fetch_min_bytes"]
249
+ ===== `fetch_min_bytes`
250
+
251
+ * Value type is <<string,string>>
252
+ * There is no default value for this setting.
253
+
254
+ The minimum amount of data the server should return for a fetch request. If insufficient
255
+ data is available the request will wait for that much data to accumulate
256
+ before answering the request.
257
+
258
+ [id="plugins-{type}s-{plugin}-group_id"]
259
+ ===== `group_id`
260
+
261
+ * Value type is <<string,string>>
262
+ * Default value is `"logstash"`
263
+
264
+ The identifier of the group this consumer belongs to. A consumer group is a single logical subscriber
265
+ that happens to be made up of multiple processors. Messages in a topic will be distributed to all
266
+ Logstash instances with the same `group_id`.
267
+
268
+ [id="plugins-{type}s-{plugin}-heartbeat_interval_ms"]
269
+ ===== `heartbeat_interval_ms`
270
+
271
+ * Value type is <<string,string>>
272
+ * There is no default value for this setting.
273
+
274
+ The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
275
+ that the consumer's session stays active and to facilitate rebalancing when new
276
+ consumers join or leave the group. The value must be set lower than
277
+ `session.timeout.ms`, but typically should be set no higher than 1/3 of that value.
278
+ It can be adjusted even lower to control the expected time for normal rebalances.
279
+
280
+ [id="plugins-{type}s-{plugin}-jaas_path"]
281
+ ===== `jaas_path`
282
+
283
+ * Value type is <<path,path>>
284
+ * There is no default value for this setting.
285
+
286
+ The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
287
+ services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client:
288
+ [source,java]
289
+ ----------------------------------
290
+ KafkaClient {
291
+ com.sun.security.auth.module.Krb5LoginModule required
292
+ useTicketCache=true
293
+ renewTicket=true
294
+ serviceName="kafka";
295
+ };
296
+ ----------------------------------
297
+
298
+ Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
299
+ to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same
300
+ `jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
301
+ different JVM instances.
302
+
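+ For example (all paths and the Kerberos service name below are placeholders, not defaults), the JAAS
+ and krb5 files are wired into the input like this:
+
+ [source,ruby]
+ ----------------------------------
+ input {
+   kafka {
+     topics => ["logstash"]
+     security_protocol => "SASL_PLAINTEXT"
+     sasl_kerberos_service_name => "kafka"
+     jaas_path => "/etc/logstash/kafka_client_jaas.conf"
+     kerberos_config => "/etc/krb5.conf"
+   }
+ }
+ ----------------------------------
+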
303
+ [id="plugins-{type}s-{plugin}-kerberos_config"]
304
+ ===== `kerberos_config`
305
+
306
+ * Value type is <<path,path>>
307
+ * There is no default value for this setting.
308
+
309
+ Optional path to the Kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
310
+
311
+ [id="plugins-{type}s-{plugin}-key_deserializer_class"]
312
+ ===== `key_deserializer_class`
313
+
314
+ * Value type is <<string,string>>
315
+ * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
316
+
317
+ Java class used to deserialize the record's key.
318
+
319
+ [id="plugins-{type}s-{plugin}-max_partition_fetch_bytes"]
320
+ ===== `max_partition_fetch_bytes`
321
+
322
+ * Value type is <<string,string>>
323
+ * There is no default value for this setting.
324
+
325
+ The maximum amount of data per-partition the server will return. The maximum total memory used for a
326
+ request will be `#partitions * max.partition.fetch.bytes`. This size must be at least
327
+ as large as the maximum message size the server allows or else it is possible for the producer to
328
+ send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying
329
+ to fetch a large message on a certain partition.
330
+
331
+ [id="plugins-{type}s-{plugin}-max_poll_interval_ms"]
332
+ ===== `max_poll_interval_ms`
333
+
334
+ * Value type is <<string,string>>
335
+ * There is no default value for this setting.
336
+
337
+ The maximum delay between invocations of poll() when using consumer group management. This places
338
+ an upper bound on the amount of time that the consumer can be idle before fetching more records.
339
+ If poll() is not called before expiration of this timeout, then the consumer is considered failed and
340
+ the group will rebalance in order to reassign the partitions to another member.
341
+ The value of the configuration `request_timeout_ms` must always be larger than `max_poll_interval_ms`.
342
+
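+ For example (the values below are illustrative only), a pipeline that processes each batch slowly
+ might raise both settings while keeping `request_timeout_ms` the larger of the two:
+
+ [source,ruby]
+ ----------------------------------
+ input {
+   kafka {
+     topics => ["logstash"]
+     max_poll_interval_ms => "300000"   # up to 5 minutes between poll() calls
+     request_timeout_ms => "305000"     # must stay larger than max_poll_interval_ms
+   }
+ }
+ ----------------------------------
+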
343
+ [id="plugins-{type}s-{plugin}-max_poll_records"]
344
+ ===== `max_poll_records`
345
+
346
+ * Value type is <<string,string>>
347
+ * There is no default value for this setting.
348
+
349
+ The maximum number of records returned in a single call to poll().
350
+
351
+ [id="plugins-{type}s-{plugin}-metadata_max_age_ms"]
352
+ ===== `metadata_max_age_ms`
353
+
354
+ * Value type is <<string,string>>
355
+ * There is no default value for this setting.
356
+
357
+ The period of time in milliseconds after which we force a refresh of metadata even if
358
+ we haven't seen any partition leadership changes, to proactively discover any new brokers or partitions.
359
+
360
+ [id="plugins-{type}s-{plugin}-partition_assignment_strategy"]
361
+ ===== `partition_assignment_strategy`
362
+
363
+ * Value type is <<string,string>>
364
+ * There is no default value for this setting.
365
+
366
+ The class name of the partition assignment strategy that the client will use to distribute
367
+ partition ownership amongst consumer instances.
368
+
369
+ [id="plugins-{type}s-{plugin}-poll_timeout_ms"]
370
+ ===== `poll_timeout_ms`
371
+
372
+ * Value type is <<number,number>>
373
+ * Default value is `100`
374
+
375
+ Time the Kafka consumer will wait to receive new messages from topics.
376
+
377
+ [id="plugins-{type}s-{plugin}-receive_buffer_bytes"]
378
+ ===== `receive_buffer_bytes`
379
+
380
+ * Value type is <<string,string>>
381
+ * There is no default value for this setting.
382
+
383
+ The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
384
+
385
+ [id="plugins-{type}s-{plugin}-reconnect_backoff_ms"]
386
+ ===== `reconnect_backoff_ms`
387
+
388
+ * Value type is <<string,string>>
389
+ * There is no default value for this setting.
390
+
391
+ The amount of time to wait before attempting to reconnect to a given host.
392
+ This avoids repeatedly connecting to a host in a tight loop.
393
+ This backoff applies to all requests sent by the consumer to the broker.
394
+
395
+ [id="plugins-{type}s-{plugin}-request_timeout_ms"]
396
+ ===== `request_timeout_ms`
397
+
398
+ * Value type is <<string,string>>
399
+ * There is no default value for this setting.
400
+
401
+ The configuration controls the maximum amount of time the client will wait
402
+ for the response of a request. If the response is not received before the timeout
403
+ elapses the client will resend the request if necessary or fail the request if
404
+ retries are exhausted.
405
+
406
+ [id="plugins-{type}s-{plugin}-retry_backoff_ms"]
407
+ ===== `retry_backoff_ms`
408
+
409
+ * Value type is <<string,string>>
410
+ * There is no default value for this setting.
411
+
412
+ The amount of time to wait before attempting to retry a failed fetch request
413
+ to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
414
+
415
+ [id="plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
416
+ ===== `sasl_kerberos_service_name`
417
+
418
+ * Value type is <<string,string>>
419
+ * There is no default value for this setting.
420
+
421
+ The Kerberos principal name that the Kafka broker runs as.
422
+ This can be defined either in Kafka's JAAS config or in Kafka's config.
423
+
424
+ [id="plugins-{type}s-{plugin}-sasl_mechanism"]
425
+ ===== `sasl_mechanism`
426
+
427
+ * Value type is <<string,string>>
428
+ * Default value is `"GSSAPI"`
429
+
430
+ http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
431
+ This may be any mechanism for which a security provider is available.
432
+ GSSAPI is the default mechanism.
433
+
434
+ [id="plugins-{type}s-{plugin}-security_protocol"]
435
+ ===== `security_protocol`
436
+
437
+ * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
438
+ * Default value is `"PLAINTEXT"`
439
+
440
+ Security protocol to use, which can be any of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
441
+
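+ As an illustrative sketch (the truststore path and password are placeholders), an SSL-secured consumer
+ combines this setting with the `ssl_*` options described below:
+
+ [source,ruby]
+ ----------------------------------
+ input {
+   kafka {
+     topics => ["logstash"]
+     security_protocol => "SSL"
+     ssl_truststore_location => "/etc/logstash/client.truststore.jks"
+     ssl_truststore_password => "changeit"
+   }
+ }
+ ----------------------------------
+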
442
+ [id="plugins-{type}s-{plugin}-send_buffer_bytes"]
443
+ ===== `send_buffer_bytes`
444
+
445
+ * Value type is <<string,string>>
446
+ * There is no default value for this setting.
447
+
448
+ The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
449
+
450
+ [id="plugins-{type}s-{plugin}-session_timeout_ms"]
451
+ ===== `session_timeout_ms`
452
+
453
+ * Value type is <<string,string>>
454
+ * There is no default value for this setting.
455
+
456
+ The timeout after which, if no poll is made (see `poll_timeout_ms`), the consumer is marked dead
457
+ and a rebalance operation is triggered for the group identified by `group_id`.
458
+
459
+ [id="plugins-{type}s-{plugin}-ssl"]
460
+ ===== `ssl` (DEPRECATED)
461
+
462
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
463
+ * Value type is <<boolean,boolean>>
464
+ * Default value is `false`
465
+
466
+ Enable SSL/TLS secured communication to Kafka broker.
467
+
468
+ [id="plugins-{type}s-{plugin}-ssl_key_password"]
469
+ ===== `ssl_key_password`
470
+
471
+ * Value type is <<password,password>>
472
+ * There is no default value for this setting.
473
+
474
+ The password of the private key in the key store file.
475
+
476
+ [id="plugins-{type}s-{plugin}-ssl_keystore_location"]
477
+ ===== `ssl_keystore_location`
478
+
479
+ * Value type is <<path,path>>
480
+ * There is no default value for this setting.
481
+
482
+ If client authentication is required, this setting stores the keystore path.
483
+
484
+ [id="plugins-{type}s-{plugin}-ssl_keystore_password"]
485
+ ===== `ssl_keystore_password`
486
+
487
+ * Value type is <<password,password>>
488
+ * There is no default value for this setting.
489
+
490
+ If client authentication is required, this setting stores the keystore password.
491
+
492
+ [id="plugins-{type}s-{plugin}-ssl_keystore_type"]
493
+ ===== `ssl_keystore_type`
494
+
495
+ * Value type is <<string,string>>
496
+ * There is no default value for this setting.
497
+
498
+ The keystore type.
499
+
500
+ [id="plugins-{type}s-{plugin}-ssl_truststore_location"]
501
+ ===== `ssl_truststore_location`
502
+
503
+ * Value type is <<path,path>>
504
+ * There is no default value for this setting.
505
+
506
+ The JKS truststore path to validate the Kafka broker's certificate.
507
+
508
+ [id="plugins-{type}s-{plugin}-ssl_truststore_password"]
509
+ ===== `ssl_truststore_password`
510
+
511
+ * Value type is <<password,password>>
512
+ * There is no default value for this setting.
513
+
514
+ The truststore password.
515
+
516
+ [id="plugins-{type}s-{plugin}-ssl_truststore_type"]
517
+ ===== `ssl_truststore_type`
518
+
519
+ * Value type is <<string,string>>
520
+ * There is no default value for this setting.
521
+
522
+ The truststore type.
523
+
524
+ [id="plugins-{type}s-{plugin}-topics"]
525
+ ===== `topics`
526
+
527
+ * Value type is <<array,array>>
528
+ * Default value is `["logstash"]`
529
+
530
+ A list of topics to subscribe to, defaults to ["logstash"].
531
+
532
+ [id="plugins-{type}s-{plugin}-topics_pattern"]
533
+ ===== `topics_pattern`
534
+
535
+ * Value type is <<string,string>>
536
+ * There is no default value for this setting.
537
+
538
+ A topic regex pattern to subscribe to.
539
+ The topics configuration will be ignored when using this configuration.
540
+
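+ A short sketch (the pattern is only an example) that subscribes to every topic matching a prefix:
+
+ [source,ruby]
+ ----------------------------------
+ input {
+   kafka {
+     topics_pattern => "app-.*"
+     group_id => "logstash"
+   }
+ }
+ ----------------------------------
+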
541
+ [id="plugins-{type}s-{plugin}-value_deserializer_class"]
542
+ ===== `value_deserializer_class`
543
+
544
+ * Value type is <<string,string>>
545
+ * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
546
+
547
+ Java class used to deserialize the record's value.
548
+
549
+
550
+
551
+ include::{include_path}/{type}.asciidoc[]
data/lib/logstash/inputs/kafka.rb CHANGED
@@ -61,7 +61,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
61
61
  # * none: throw exception to the consumer if no previous offset is found for the consumer's group
62
62
  # * anything else: throw exception to the consumer.
63
63
  config :auto_offset_reset, :validate => :string
64
- # A list of URLs to use for establishing the initial connection to the cluster.
64
+ # A list of URLs of Kafka instances to use for establishing the initial connection to the cluster.
65
65
  # This list should be in the form of `host1:port1,host2:port2` These urls are just used
66
66
  # for the initial connection to discover the full cluster membership (which may change dynamically)
67
67
  # so this list need not contain the full set of servers (you may want more than one, though, in
@@ -261,6 +261,10 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
261
261
  logstash_queue << event
262
262
  end
263
263
  end
264
+ # Manual offset commit
265
+ if @enable_auto_commit == "false"
266
+ consumer.commitSync
267
+ end
264
268
  end
265
269
  rescue org.apache.kafka.common.errors.WakeupException => e
266
270
  raise e if !stop?
data/logstash-input-kafka.gemspec CHANGED
@@ -1,6 +1,6 @@
1
1
  Gem::Specification.new do |s|
2
2
  s.name = 'logstash-input-kafka'
3
- s.version = '6.2.7'
3
+ s.version = '6.3.0'
4
4
  s.licenses = ['Apache License (2.0)']
5
5
  s.summary = 'This input will read events from a Kafka topic. It uses the high level consumer API provided by Kafka to read messages from the broker'
6
6
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
@@ -10,7 +10,7 @@ Gem::Specification.new do |s|
10
10
  s.require_paths = ['lib']
11
11
 
12
12
  # Files
13
- s.files = Dir['lib/**/*.rb','spec/**/*','vendor/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.TXT']
13
+ s.files = Dir["lib/**/*","spec/**/*","*.gemspec","*.md","CONTRIBUTORS","Gemfile","LICENSE","NOTICE.TXT", "vendor/jar-dependencies/**/*.jar", "vendor/jar-dependencies/**/*.rb", "VERSION", "docs/**/*"]
14
14
 
15
15
  # Tests
16
16
  s.test_files = s.files.grep(%r{^(test|spec|features)/})
@@ -18,7 +18,7 @@ Gem::Specification.new do |s|
18
18
  # Special flag to let us know this is actually a logstash plugin
19
19
  s.metadata = { 'logstash_plugin' => 'true', 'group' => 'input'}
20
20
 
21
- s.requirements << "jar 'org.apache.kafka:kafka-clients', '0.10.1.1'"
21
+ s.requirements << "jar 'org.apache.kafka:kafka-clients', '0.10.2.1'"
22
22
  s.requirements << "jar 'org.slf4j:slf4j-log4j12', '1.7.21'"
23
23
  s.requirements << "jar 'org.apache.logging.log4j:log4j-1.2-api', '2.6.2'"
24
24
 
data/spec/integration/inputs/kafka_spec.rb CHANGED
@@ -17,6 +17,7 @@ describe "inputs/kafka", :integration => true do
17
17
  let(:lz4_config) { { 'topics' => ['logstash_topic_lz4'], 'codec' => 'plain', 'group_id' => group_id_1, 'auto_offset_reset' => 'earliest'} }
18
18
  let(:pattern_config) { { 'topics_pattern' => 'logstash_topic_.*', 'group_id' => group_id_2, 'codec' => 'plain', 'auto_offset_reset' => 'earliest'} }
19
19
  let(:decorate_config) { { 'topics' => ['logstash_topic_plain'], 'codec' => 'plain', 'group_id' => group_id_3, 'auto_offset_reset' => 'earliest', 'decorate_events' => true} }
20
+ let(:manual_commit_config) { { 'topics' => ['logstash_topic_plain'], 'codec' => 'plain', 'group_id' => group_id_4, 'auto_offset_reset' => 'earliest', 'enable_auto_commit' => 'false'} }
20
21
  let(:timeout_seconds) { 120 }
21
22
  let(:num_events) { 103 }
22
23
 
@@ -109,4 +110,23 @@ describe "inputs/kafka", :integration => true do
109
110
  expect(event.get("kafka")["consumer_group"]).to eq(group_id_3)
110
111
  end
111
112
  end
113
+
114
+ describe "#kafka-offset-commit" do
115
+ def thread_it(kafka_input, queue)
116
+ Thread.new do
117
+ begin
118
+ kafka_input.run(queue)
119
+ end
120
+ end
121
+ end
122
+
123
+ it "should manually commit offsets" do
124
+ kafka_input = LogStash::Inputs::Kafka.new(manual_commit_config)
125
+ queue = Array.new
126
+ t = thread_it(kafka_input, queue)
127
+ t.run
128
+ wait(timeout_seconds).for { queue.length }.to eq(num_events)
129
+ expect(queue.length).to eq(num_events)
130
+ end
131
+ end
112
132
  end
metadata CHANGED
@@ -1,39 +1,30 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: logstash-input-kafka
3
3
  version: !ruby/object:Gem::Version
4
- version: 6.2.7
4
+ version: 6.3.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Elasticsearch
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2017-04-09 00:00:00.000000000 Z
11
+ date: 2017-05-11 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
- name: jar-dependencies
15
- version_requirements: !ruby/object:Gem::Requirement
16
- requirements:
17
- - - "~>"
18
- - !ruby/object:Gem::Version
19
- version: 0.3.2
20
14
  requirement: !ruby/object:Gem::Requirement
21
15
  requirements:
22
16
  - - "~>"
23
17
  - !ruby/object:Gem::Version
24
18
  version: 0.3.2
19
+ name: jar-dependencies
25
20
  prerelease: false
26
21
  type: :development
27
- - !ruby/object:Gem::Dependency
28
- name: logstash-core-plugin-api
29
22
  version_requirements: !ruby/object:Gem::Requirement
30
23
  requirements:
31
- - - ">="
32
- - !ruby/object:Gem::Version
33
- version: '1.60'
34
- - - "<="
24
+ - - "~>"
35
25
  - !ruby/object:Gem::Version
36
- version: '2.99'
26
+ version: 0.3.2
27
+ - !ruby/object:Gem::Dependency
37
28
  requirement: !ruby/object:Gem::Requirement
38
29
  requirements:
39
30
  - - ">="
@@ -42,46 +33,46 @@ dependencies:
42
33
  - - "<="
43
34
  - !ruby/object:Gem::Version
44
35
  version: '2.99'
36
+ name: logstash-core-plugin-api
45
37
  prerelease: false
46
38
  type: :runtime
47
- - !ruby/object:Gem::Dependency
48
- name: logstash-codec-json
49
39
  version_requirements: !ruby/object:Gem::Requirement
50
40
  requirements:
51
41
  - - ">="
52
42
  - !ruby/object:Gem::Version
53
- version: '0'
43
+ version: '1.60'
44
+ - - "<="
45
+ - !ruby/object:Gem::Version
46
+ version: '2.99'
47
+ - !ruby/object:Gem::Dependency
54
48
  requirement: !ruby/object:Gem::Requirement
55
49
  requirements:
56
50
  - - ">="
57
51
  - !ruby/object:Gem::Version
58
52
  version: '0'
53
+ name: logstash-codec-json
59
54
  prerelease: false
60
55
  type: :runtime
61
- - !ruby/object:Gem::Dependency
62
- name: logstash-codec-plain
63
56
  version_requirements: !ruby/object:Gem::Requirement
64
57
  requirements:
65
58
  - - ">="
66
59
  - !ruby/object:Gem::Version
67
60
  version: '0'
61
+ - !ruby/object:Gem::Dependency
68
62
  requirement: !ruby/object:Gem::Requirement
69
63
  requirements:
70
64
  - - ">="
71
65
  - !ruby/object:Gem::Version
72
66
  version: '0'
67
+ name: logstash-codec-plain
73
68
  prerelease: false
74
69
  type: :runtime
75
- - !ruby/object:Gem::Dependency
76
- name: stud
77
70
  version_requirements: !ruby/object:Gem::Requirement
78
71
  requirements:
79
72
  - - ">="
80
73
  - !ruby/object:Gem::Version
81
- version: 0.0.22
82
- - - "<"
83
- - !ruby/object:Gem::Version
84
- version: 0.1.0
74
+ version: '0'
75
+ - !ruby/object:Gem::Dependency
85
76
  requirement: !ruby/object:Gem::Requirement
86
77
  requirements:
87
78
  - - ">="
@@ -90,36 +81,45 @@ dependencies:
90
81
  - - "<"
91
82
  - !ruby/object:Gem::Version
92
83
  version: 0.1.0
84
+ name: stud
93
85
  prerelease: false
94
86
  type: :runtime
95
- - !ruby/object:Gem::Dependency
96
- name: logstash-devutils
97
87
  version_requirements: !ruby/object:Gem::Requirement
98
88
  requirements:
99
89
  - - ">="
100
90
  - !ruby/object:Gem::Version
101
- version: '0'
91
+ version: 0.0.22
92
+ - - "<"
93
+ - !ruby/object:Gem::Version
94
+ version: 0.1.0
95
+ - !ruby/object:Gem::Dependency
102
96
  requirement: !ruby/object:Gem::Requirement
103
97
  requirements:
104
98
  - - ">="
105
99
  - !ruby/object:Gem::Version
106
100
  version: '0'
101
+ name: logstash-devutils
107
102
  prerelease: false
108
103
  type: :development
109
- - !ruby/object:Gem::Dependency
110
- name: rspec-wait
111
104
  version_requirements: !ruby/object:Gem::Requirement
112
105
  requirements:
113
106
  - - ">="
114
107
  - !ruby/object:Gem::Version
115
108
  version: '0'
109
+ - !ruby/object:Gem::Dependency
116
110
  requirement: !ruby/object:Gem::Requirement
117
111
  requirements:
118
112
  - - ">="
119
113
  - !ruby/object:Gem::Version
120
114
  version: '0'
115
+ name: rspec-wait
121
116
  prerelease: false
122
117
  type: :development
118
+ version_requirements: !ruby/object:Gem::Requirement
119
+ requirements:
120
+ - - ">="
121
+ - !ruby/object:Gem::Version
122
+ version: '0'
123
123
  description: This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program
124
124
  email: info@elastic.co
125
125
  executables: []
@@ -133,12 +133,22 @@ files:
133
133
  - LICENSE
134
134
  - NOTICE.TXT
135
135
  - README.md
136
+ - docs/index.asciidoc
137
+ - lib/log4j/log4j/1.2.17/log4j-1.2.17.jar
136
138
  - lib/logstash-input-kafka_jars.rb
137
139
  - lib/logstash/inputs/kafka.rb
140
+ - lib/net/jpountz/lz4/lz4/1.3.0/lz4-1.3.0.jar
141
+ - lib/org/apache/kafka/kafka-clients/0.10.2.1/kafka-clients-0.10.2.1.jar
142
+ - lib/org/apache/logging/log4j/log4j-1.2-api/2.6.2/log4j-1.2-api-2.6.2.jar
143
+ - lib/org/apache/logging/log4j/log4j-api/2.6.2/log4j-api-2.6.2.jar
144
+ - lib/org/apache/logging/log4j/log4j-core/2.6.2/log4j-core-2.6.2.jar
145
+ - lib/org/slf4j/slf4j-api/1.7.21/slf4j-api-1.7.21.jar
146
+ - lib/org/slf4j/slf4j-log4j12/1.7.21/slf4j-log4j12-1.7.21.jar
147
+ - lib/org/xerial/snappy/snappy-java/1.1.2.6/snappy-java-1.1.2.6.jar
138
148
  - logstash-input-kafka.gemspec
139
149
  - spec/integration/inputs/kafka_spec.rb
140
150
  - spec/unit/inputs/kafka_spec.rb
141
- - vendor/jar-dependencies/runtime-jars/kafka-clients-0.10.1.1.jar
151
+ - vendor/jar-dependencies/runtime-jars/kafka-clients-0.10.2.1.jar
142
152
  - vendor/jar-dependencies/runtime-jars/log4j-1.2-api-2.6.2.jar
143
153
  - vendor/jar-dependencies/runtime-jars/log4j-1.2.17.jar
144
154
  - vendor/jar-dependencies/runtime-jars/log4j-api-2.6.2.jar
@@ -168,7 +178,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
168
178
  - !ruby/object:Gem::Version
169
179
  version: '0'
170
180
  requirements:
171
- - jar 'org.apache.kafka:kafka-clients', '0.10.1.1'
181
+ - jar 'org.apache.kafka:kafka-clients', '0.10.2.1'
172
182
  - jar 'org.slf4j:slf4j-log4j12', '1.7.21'
173
183
  - jar 'org.apache.logging.log4j:log4j-1.2-api', '2.6.2'
174
184
  rubyforge_project: