logstash-integration-kafka 10.0.0-java → 10.4.0-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/CHANGELOG.md +24 -2
- data/CONTRIBUTORS +2 -0
- data/LICENSE +199 -10
- data/docs/index.asciidoc +4 -1
- data/docs/input-kafka.asciidoc +122 -71
- data/docs/output-kafka.asciidoc +50 -18
- data/lib/logstash-integration-kafka_jars.rb +3 -3
- data/lib/logstash/inputs/kafka.rb +90 -54
- data/lib/logstash/outputs/kafka.rb +59 -32
- data/logstash-integration-kafka.gemspec +3 -3
- data/spec/integration/inputs/kafka_spec.rb +81 -112
- data/spec/integration/outputs/kafka_spec.rb +89 -72
- data/spec/unit/inputs/kafka_spec.rb +63 -1
- data/spec/unit/outputs/kafka_spec.rb +26 -5
- data/vendor/jar-dependencies/com/github/luben/zstd-jni/1.4.3-1/zstd-jni-1.4.3-1.jar +0 -0
- data/vendor/jar-dependencies/org/apache/kafka/kafka-clients/2.4.1/kafka-clients-2.4.1.jar +0 -0
- data/vendor/jar-dependencies/org/slf4j/slf4j-api/1.7.28/slf4j-api-1.7.28.jar +0 -0
- metadata +9 -9
- data/vendor/jar-dependencies/com/github/luben/zstd-jni/1.4.2-1/zstd-jni-1.4.2-1.jar +0 -0
- data/vendor/jar-dependencies/org/apache/kafka/kafka-clients/2.3.0/kafka-clients-2.3.0.jar +0 -0
- data/vendor/jar-dependencies/org/slf4j/slf4j-api/1.7.26/slf4j-api-1.7.26.jar +0 -0
data/docs/output-kafka.asciidoc
CHANGED
@@ -23,7 +23,7 @@ include::{include_path}/plugin_header.asciidoc[]
 
 Write events to a Kafka topic.
 
-This plugin uses Kafka Client 2.
+This plugin uses Kafka Client 2.3.0. For broker compatibility, see the official https://cwiki.apache.org/confluence/display/KAFKA/Compatibility+Matrix[Kafka compatibility reference]. If the linked compatibility wiki is not up-to-date, please contact Kafka support/community to confirm compatibility.
 
 If you require features not yet available in this plugin (including client version upgrades), please file an issue with details about what you need.
 
@@ -47,15 +47,19 @@ If you want the full content of your events to be sent as json, you should set t
       }
     }
 
-For more information see
+For more information see https://kafka.apache.org/24/documentation.html#theproducer
 
-Kafka producer configuration:
+Kafka producer configuration: https://kafka.apache.org/24/documentation.html#producerconfigs
 
 [id="plugins-{type}s-{plugin}-options"]
 ==== Kafka Output Configuration Options
 
 This plugin supports the following configuration options plus the <<plugins-{type}s-{plugin}-common-options>> described later.
 
+NOTE: Some of these options map to a Kafka option. Defaults usually reflect the Kafka default setting,
+and might change if Kafka's producer defaults change.
+See the https://kafka.apache.org/24/documentation for more details.
+
 [cols="<,<,<",options="header",]
 |=======================================================================
 |Setting |Input type|Required
@@ -63,6 +67,7 @@ This plugin supports the following configuration options plus the <<plugins-{type}s-{plugin}-common-options>>
 | <<plugins-{type}s-{plugin}-batch_size>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-bootstrap_servers>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-buffer_memory>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-client_dns_lookup>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-client_id>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-compression_type>> |<<string,string>>, one of `["none", "gzip", "snappy", "lz4"]`|No
 | <<plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
@@ -73,9 +78,10 @@ This plugin supports the following configuration options plus the <<plugins-{type}s-{plugin}-common-options>>
 | <<plugins-{type}s-{plugin}-message_key>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-metadata_max_age_ms>> |<<number,number>>|No
+| <<plugins-{type}s-{plugin}-partitioner>> |<<string,string>>|No
 | <<plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
-| <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<
+| <<plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-retries>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
 | <<plugins-{type}s-{plugin}-sasl_jaas_config>> |<<string,string>>|No
@@ -118,7 +124,7 @@ acks=all, This means the leader will wait for the full set of in-sync replicas t
 ===== `batch_size`
 
 * Value type is <<number,number>>
-* Default value is `16384
+* Default value is `16384`.
 
 The producer will attempt to batch records together into fewer requests whenever multiple
 records are being sent to the same partition. This helps performance on both the client
@@ -140,10 +146,21 @@ subset of brokers.
 ===== `buffer_memory`
 
 * Value type is <<number,number>>
-* Default value is `33554432`
+* Default value is `33554432` (32MB).
 
 The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
 
+[id="plugins-{type}s-{plugin}-client_dns_lookup"]
+===== `client_dns_lookup`
+
+* Value type is <<string,string>>
+* Default value is `"default"`
+
+How DNS lookups should be done. If set to `use_all_dns_ips`, when the lookup returns multiple
+IP addresses for a hostname, they will all be attempted to connect to before failing the
+connection. If the value is `resolve_canonical_bootstrap_servers_only` each entry will be
+resolved and expanded into a list of canonical names.
+
 [id="plugins-{type}s-{plugin}-client_id"]
 ===== `client_id`
 
@@ -220,7 +237,7 @@ to allow other records to be sent so that the sends can be batched together.
 ===== `max_request_size`
 
 * Value type is <<number,number>>
-* Default value is `1048576`
+* Default value is `1048576` (1MB).
 
 The maximum size of a request
 
@@ -230,29 +247,44 @@ The maximum size of a request
 * Value type is <<string,string>>
 * There is no default value for this setting.
 
-The key for the message
+The key for the message.
 
 [id="plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"]
 ===== `metadata_fetch_timeout_ms`
 
 * Value type is <<number,number>>
-* Default value is `60000`
+* Default value is `60000` milliseconds (60 seconds).
 
-
+The timeout setting for initial metadata request to fetch topic metadata.
 
 [id="plugins-{type}s-{plugin}-metadata_max_age_ms"]
 ===== `metadata_max_age_ms`
 
 * Value type is <<number,number>>
-* Default value is `300000`
+* Default value is `300000` milliseconds (5 minutes).
+
+The max time in milliseconds before a metadata refresh is forced.
+
+[id="plugins-{type}s-{plugin}-partitioner"]
+===== `partitioner`
 
-
+* Value type is <<string,string>>
+* There is no default value for this setting.
+
+The default behavior is to hash the `message_key` of an event to get the partition.
+When no message key is present, the plugin picks a partition in a round-robin fashion.
+
+Available options for choosing a partitioning strategy are as follows:
+
+* `default` use the default partitioner as described above
+* `round_robin` distributes writes to all partitions equally, regardless of `message_key`
+* `uniform_sticky` sticks to a partition for the duration of a batch than randomly picks a new one
 
 [id="plugins-{type}s-{plugin}-receive_buffer_bytes"]
 ===== `receive_buffer_bytes`
 
 * Value type is <<number,number>>
-* Default value is `32768`
+* Default value is `32768` (32KB).
 
 The size of the TCP receive buffer to use when reading data
 
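To make the `partitioner` entry in the hunk above concrete: with the `default` strategy the producer derives the partition from a hash of the event's `message_key`, so a given key always lands on the same partition, while keyless events are spread in a round-robin fashion. A rough Ruby sketch of the keyed case only (illustrative; Kafka's actual DefaultPartitioner uses a murmur2 hash, not Ruby's built-in one):

    # Rough sketch of the `default` strategy; not Kafka's murmur2 implementation.
    def partition_for(message_key, partition_count)
      message_key.hash.abs % partition_count
    end

    partition_for("user-42", 6) # => the same partition for every event carrying this key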
@@ -260,15 +292,15 @@ The size of the TCP receive buffer to use when reading data
 ===== `reconnect_backoff_ms`
 
 * Value type is <<number,number>>
-* Default value is `
+* Default value is `50`.
 
 The amount of time to wait before attempting to reconnect to a given host when a connection fails.
 
 [id="plugins-{type}s-{plugin}-request_timeout_ms"]
 ===== `request_timeout_ms`
 
-* Value type is <<
-*
+* Value type is <<number,number>>
+* Default value is `40000` milliseconds (40 seconds).
 
 The configuration controls the maximum amount of time the client will wait
 for the response of a request. If the response is not received before the timeout
@@ -295,7 +327,7 @@ A value less than zero is a configuration error.
 ===== `retry_backoff_ms`
 
 * Value type is <<number,number>>
-* Default value is `100`
+* Default value is `100` milliseconds.
 
 The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
 
@@ -348,7 +380,7 @@ Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SA
 ===== `send_buffer_bytes`
 
 * Value type is <<number,number>>
-* Default value is `131072`
+* Default value is `131072` (128KB).
 
 The size of the TCP send buffer to use when sending data.
 
data/lib/logstash-integration-kafka_jars.rb
CHANGED
@@ -1,8 +1,8 @@
 # AUTOGENERATED BY THE GRADLE SCRIPT. DO NOT EDIT.
 
 require 'jar_dependencies'
-require_jar('org.apache.kafka', 'kafka-clients', '2.3.0')
-require_jar('com.github.luben', 'zstd-jni', '1.4.2-1')
-require_jar('org.slf4j', 'slf4j-api', '1.7.26')
+require_jar('org.apache.kafka', 'kafka-clients', '2.4.1')
+require_jar('com.github.luben', 'zstd-jni', '1.4.3-1')
+require_jar('org.slf4j', 'slf4j-api', '1.7.28')
 require_jar('org.lz4', 'lz4-java', '1.6.0')
 require_jar('org.xerial.snappy', 'snappy-java', '1.1.7.3')
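These `require_jar` calls load the vendored jars listed in the file summary above (data/vendor/jar-dependencies/...). A hedged sketch of the path convention they resolve to under the jar-dependencies layout this gem ships; `vendored_jar_path` is an illustrative helper, not part of the package:

    # Maps (group, artifact, version) to the vendored jar location, e.g.
    # vendor/jar-dependencies/org/apache/kafka/kafka-clients/2.4.1/kafka-clients-2.4.1.jar
    def vendored_jar_path(vendor_root, group, artifact, version)
      File.join(vendor_root, 'jar-dependencies', *group.split('.'),
                artifact, version, "#{artifact}-#{version}.jar")
    end

    puts vendored_jar_path('vendor', 'org.apache.kafka', 'kafka-clients', '2.4.1')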
data/lib/logstash/inputs/kafka.rb
CHANGED
@@ -53,7 +53,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
   default :codec, 'plain'
 
   # The frequency in milliseconds that the consumer offsets are committed to Kafka.
-  config :auto_commit_interval_ms, :validate => :
+  config :auto_commit_interval_ms, :validate => :number, :default => 5000 # Kafka default
   # What to do when there is no initial offset in Kafka or if an offset is out of range:
   #
   # * earliest: automatically reset the offset to the earliest offset
@@ -70,35 +70,40 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
   # Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk
   # corruption to the messages occurred. This check adds some overhead, so it may be
   # disabled in cases seeking extreme performance.
-  config :check_crcs, :validate => :
+  config :check_crcs, :validate => :boolean, :default => true
+  # How DNS lookups should be done. If set to `use_all_dns_ips`, when the lookup returns multiple
+  # IP addresses for a hostname, they will all be attempted to connect to before failing the
+  # connection. If the value is `resolve_canonical_bootstrap_servers_only` each entry will be
+  # resolved and expanded into a list of canonical names.
+  config :client_dns_lookup, :validate => ["default", "use_all_dns_ips", "resolve_canonical_bootstrap_servers_only"], :default => "default"
   # The id string to pass to the server when making requests. The purpose of this
   # is to be able to track the source of requests beyond just ip/port by allowing
   # a logical application name to be included.
   config :client_id, :validate => :string, :default => "logstash"
   # Close idle connections after the number of milliseconds specified by this config.
-  config :connections_max_idle_ms, :validate => :
+  config :connections_max_idle_ms, :validate => :number, :default => 540_000 # (9m) Kafka default
   # Ideally you should have as many threads as the number of partitions for a perfect
   # balance — more threads than partitions means that some threads will be idle
   config :consumer_threads, :validate => :number, :default => 1
   # If true, periodically commit to Kafka the offsets of messages already returned by the consumer.
   # This committed offset will be used when the process fails as the position from
   # which the consumption will begin.
-  config :enable_auto_commit, :validate => :
+  config :enable_auto_commit, :validate => :boolean, :default => true
   # Whether records from internal topics (such as offsets) should be exposed to the consumer.
   # If set to true the only way to receive records from an internal topic is subscribing to it.
   config :exclude_internal_topics, :validate => :string
   # The maximum amount of data the server should return for a fetch request. This is not an
   # absolute maximum, if the first message in the first non-empty partition of the fetch is larger
   # than this value, the message will still be returned to ensure that the consumer can make progress.
-  config :fetch_max_bytes, :validate => :
+  config :fetch_max_bytes, :validate => :number, :default => 52_428_800 # (50MB) Kafka default
   # The maximum amount of time the server will block before answering the fetch request if
   # there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
   # should be less than or equal to the timeout used in `poll_timeout_ms`
-  config :fetch_max_wait_ms, :validate => :
+  config :fetch_max_wait_ms, :validate => :number, :default => 500 # Kafka default
   # The minimum amount of data the server should return for a fetch request. If insufficient
   # data is available the request will wait for that much data to accumulate
   # before answering the request.
-  config :fetch_min_bytes, :validate => :
+  config :fetch_min_bytes, :validate => :number
   # The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber
   # that happens to be made up of multiple processors. Messages in a topic will be distributed to all
   # Logstash instances with the same `group_id`
@@ -108,48 +113,55 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
   # consumers join or leave the group. The value must be set lower than
   # `session.timeout.ms`, but typically should be set no higher than 1/3 of that value.
   # It can be adjusted even lower to control the expected time for normal rebalances.
-  config :heartbeat_interval_ms, :validate => :
+  config :heartbeat_interval_ms, :validate => :number, :default => 3000 # Kafka default
+  # Controls how to read messages written transactionally. If set to read_committed, consumer.poll()
+  # will only return transactional messages which have been committed. If set to read_uncommitted'
+  # (the default), consumer.poll() will return all messages, even transactional messages which have
+  # been aborted. Non-transactional messages will be returned unconditionally in either mode.
+  config :isolation_level, :validate => ["read_uncommitted", "read_committed"], :default => "read_uncommitted" # Kafka default
   # Java Class used to deserialize the record's key
   config :key_deserializer_class, :validate => :string, :default => "org.apache.kafka.common.serialization.StringDeserializer"
   # The maximum delay between invocations of poll() when using consumer group management. This places
   # an upper bound on the amount of time that the consumer can be idle before fetching more records.
   # If poll() is not called before expiration of this timeout, then the consumer is considered failed and
   # the group will rebalance in order to reassign the partitions to another member.
-
-  config :max_poll_interval_ms, :validate => :string
+  config :max_poll_interval_ms, :validate => :number, :default => 300_000 # (5m) Kafka default
   # The maximum amount of data per-partition the server will return. The maximum total memory used for a
   # request will be <code>#partitions * max.partition.fetch.bytes</code>. This size must be at least
   # as large as the maximum message size the server allows or else it is possible for the producer to
   # send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying
   # to fetch a large message on a certain partition.
-  config :max_partition_fetch_bytes, :validate => :
+  config :max_partition_fetch_bytes, :validate => :number, :default => 1_048_576 # (1MB) Kafka default
   # The maximum number of records returned in a single call to poll().
-  config :max_poll_records, :validate => :
+  config :max_poll_records, :validate => :number, :default => 500 # Kafka default
   # The period of time in milliseconds after which we force a refresh of metadata even if
   # we haven't seen any partition leadership changes to proactively discover any new brokers or partitions
-  config :metadata_max_age_ms, :validate => :
-  # The
-  # partition ownership amongst consumer instances
+  config :metadata_max_age_ms, :validate => :number, :default => 300_000 # (5m) Kafka default
+  # The name of the partition assignment strategy that the client uses to distribute
+  # partition ownership amongst consumer instances, supported options are `range`,
+  # `round_robin`, `sticky` and `cooperative_sticky`
+  # (for backwards compatibility setting the class name directly is supported).
   config :partition_assignment_strategy, :validate => :string
   # The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
-
-
+  # If the value is `-1`, the OS default will be used.
+  config :receive_buffer_bytes, :validate => :number, :default => 32_768 # (32KB) Kafka default
+  # The base amount of time to wait before attempting to reconnect to a given host.
   # This avoids repeatedly connecting to a host in a tight loop.
-  # This backoff applies to all
-  config :reconnect_backoff_ms, :validate => :
-  # The configuration controls the maximum amount of time the client will wait
-  #
-  #
-  #
-  config :request_timeout_ms, :validate => :string
+  # This backoff applies to all connection attempts by the client to a broker.
+  config :reconnect_backoff_ms, :validate => :number, :default => 50 # Kafka default
+  # The configuration controls the maximum amount of time the client will wait for the response of a request.
+  # If the response is not received before the timeout elapses the client will resend the request if necessary
+  # or fail the request if retries are exhausted.
+  config :request_timeout_ms, :validate => :number, :default => 40_000 # Kafka default
   # The amount of time to wait before attempting to retry a failed fetch request
   # to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
-  config :retry_backoff_ms, :validate => :
-  # The size of the TCP send buffer (SO_SNDBUF) to use when sending data
-
+  config :retry_backoff_ms, :validate => :number, :default => 100 # Kafka default
+  # The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
+  # If the value is -1, the OS default will be used.
+  config :send_buffer_bytes, :validate => :number, :default => 131_072 # (128KB) Kafka default
   # The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead
   # and a rebalance operation is triggered for the group identified by `group_id`
-  config :session_timeout_ms, :validate => :
+  config :session_timeout_ms, :validate => :number, :default => 10_000 # (10s) Kafka default
   # Java Class used to deserialize the record's value
   config :value_deserializer_class, :validate => :string, :default => "org.apache.kafka.common.serialization.StringDeserializer"
   # A list of topics to subscribe to, defaults to ["logstash"].
@@ -159,6 +171,11 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
   config :topics_pattern, :validate => :string
   # Time kafka consumer will wait to receive new messages from topics
   config :poll_timeout_ms, :validate => :number, :default => 100
+  # The rack id string to pass to the server when making requests. This is used
+  # as a selector for a rack, region, or datacenter. Corresponds to the broker.rack parameter
+  # in the broker configuration.
+  # Only has an effect in combination with brokers with Kafka 2.4+ with the broker.rack setting. Ignored otherwise.
+  config :client_rack, :validate => :string
   # The truststore type.
   config :ssl_truststore_type, :validate => :string
   # The JKS truststore path to validate the Kafka broker's certificate.
@@ -269,9 +286,7 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
           end
         end
         # Manual offset commit
-        if @enable_auto_commit
-          consumer.commitSync
-        end
+        consumer.commitSync if @enable_auto_commit.eql?(false)
       end
     rescue org.apache.kafka.common.errors.WakeupException => e
       raise e if !stop?
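The removed block committed offsets when auto-commit was enabled, duplicating the client's own periodic commits; the new guard commits manually only when `enable_auto_commit => false`. A simplified sketch of the poll/commit cycle this implies (not the plugin's actual thread_runner; `consumer`, `poll_timeout_ms` and `queue` are assumed to exist):

    # Assumes a configured org.apache.kafka.clients.consumer.KafkaConsumer under JRuby.
    loop do
      records = consumer.poll(java.time.Duration.ofMillis(poll_timeout_ms))
      records.each { |record| queue << record.value } # decoding/decorating omitted
      # With auto-commit enabled the client commits on its own interval;
      # otherwise commit once the polled batch has been handed off.
      consumer.commitSync if @enable_auto_commit.eql?(false)
    end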
@@ -287,32 +302,35 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
     props = java.util.Properties.new
     kafka = org.apache.kafka.clients.consumer.ConsumerConfig
 
-    props.put(kafka::AUTO_COMMIT_INTERVAL_MS_CONFIG, auto_commit_interval_ms)
+    props.put(kafka::AUTO_COMMIT_INTERVAL_MS_CONFIG, auto_commit_interval_ms.to_s) unless auto_commit_interval_ms.nil?
     props.put(kafka::AUTO_OFFSET_RESET_CONFIG, auto_offset_reset) unless auto_offset_reset.nil?
     props.put(kafka::BOOTSTRAP_SERVERS_CONFIG, bootstrap_servers)
-    props.put(kafka::CHECK_CRCS_CONFIG, check_crcs) unless check_crcs.nil?
+    props.put(kafka::CHECK_CRCS_CONFIG, check_crcs.to_s) unless check_crcs.nil?
+    props.put(kafka::CLIENT_DNS_LOOKUP_CONFIG, client_dns_lookup)
     props.put(kafka::CLIENT_ID_CONFIG, client_id)
-    props.put(kafka::CONNECTIONS_MAX_IDLE_MS_CONFIG, connections_max_idle_ms) unless connections_max_idle_ms.nil?
-    props.put(kafka::ENABLE_AUTO_COMMIT_CONFIG, enable_auto_commit)
+    props.put(kafka::CONNECTIONS_MAX_IDLE_MS_CONFIG, connections_max_idle_ms.to_s) unless connections_max_idle_ms.nil?
+    props.put(kafka::ENABLE_AUTO_COMMIT_CONFIG, enable_auto_commit.to_s)
     props.put(kafka::EXCLUDE_INTERNAL_TOPICS_CONFIG, exclude_internal_topics) unless exclude_internal_topics.nil?
-    props.put(kafka::FETCH_MAX_BYTES_CONFIG, fetch_max_bytes) unless fetch_max_bytes.nil?
-    props.put(kafka::FETCH_MAX_WAIT_MS_CONFIG, fetch_max_wait_ms) unless fetch_max_wait_ms.nil?
-    props.put(kafka::FETCH_MIN_BYTES_CONFIG, fetch_min_bytes) unless fetch_min_bytes.nil?
+    props.put(kafka::FETCH_MAX_BYTES_CONFIG, fetch_max_bytes.to_s) unless fetch_max_bytes.nil?
+    props.put(kafka::FETCH_MAX_WAIT_MS_CONFIG, fetch_max_wait_ms.to_s) unless fetch_max_wait_ms.nil?
+    props.put(kafka::FETCH_MIN_BYTES_CONFIG, fetch_min_bytes.to_s) unless fetch_min_bytes.nil?
     props.put(kafka::GROUP_ID_CONFIG, group_id)
-    props.put(kafka::HEARTBEAT_INTERVAL_MS_CONFIG, heartbeat_interval_ms) unless heartbeat_interval_ms.nil?
+    props.put(kafka::HEARTBEAT_INTERVAL_MS_CONFIG, heartbeat_interval_ms.to_s) unless heartbeat_interval_ms.nil?
+    props.put(kafka::ISOLATION_LEVEL_CONFIG, isolation_level)
     props.put(kafka::KEY_DESERIALIZER_CLASS_CONFIG, key_deserializer_class)
-    props.put(kafka::MAX_PARTITION_FETCH_BYTES_CONFIG, max_partition_fetch_bytes) unless max_partition_fetch_bytes.nil?
-    props.put(kafka::MAX_POLL_RECORDS_CONFIG, max_poll_records) unless max_poll_records.nil?
-    props.put(kafka::MAX_POLL_INTERVAL_MS_CONFIG, max_poll_interval_ms) unless max_poll_interval_ms.nil?
-    props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms) unless metadata_max_age_ms.nil?
-    props.put(kafka::PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
-    props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes) unless receive_buffer_bytes.nil?
-    props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms) unless reconnect_backoff_ms.nil?
-    props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms) unless request_timeout_ms.nil?
-    props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms) unless retry_backoff_ms.nil?
-    props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes) unless send_buffer_bytes.nil?
-    props.put(kafka::SESSION_TIMEOUT_MS_CONFIG, session_timeout_ms) unless session_timeout_ms.nil?
+    props.put(kafka::MAX_PARTITION_FETCH_BYTES_CONFIG, max_partition_fetch_bytes.to_s) unless max_partition_fetch_bytes.nil?
+    props.put(kafka::MAX_POLL_RECORDS_CONFIG, max_poll_records.to_s) unless max_poll_records.nil?
+    props.put(kafka::MAX_POLL_INTERVAL_MS_CONFIG, max_poll_interval_ms.to_s) unless max_poll_interval_ms.nil?
+    props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms.to_s) unless metadata_max_age_ms.nil?
+    props.put(kafka::PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partition_assignment_strategy_class) unless partition_assignment_strategy.nil?
+    props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
+    props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms.to_s) unless reconnect_backoff_ms.nil?
+    props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms.to_s) unless request_timeout_ms.nil?
+    props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s) unless retry_backoff_ms.nil?
+    props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes.to_s) unless send_buffer_bytes.nil?
+    props.put(kafka::SESSION_TIMEOUT_MS_CONFIG, session_timeout_ms.to_s) unless session_timeout_ms.nil?
     props.put(kafka::VALUE_DESERIALIZER_CLASS_CONFIG, value_deserializer_class)
+    props.put(kafka::CLIENT_RACK_CONFIG, client_rack) unless client_rack.nil?
 
     props.put("security.protocol", security_protocol) unless security_protocol.nil?
 
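The pattern in this hunk: settings that are now declared as `:number` or `:boolean` are stringified with `.to_s` before being placed into the `java.util.Properties` handed to the Kafka client, since the client's config parser always accepts string values. A standalone JRuby sketch of that coercion (setting names and values are illustrative only):

    require 'java'

    props = java.util.Properties.new
    props.put('fetch.max.bytes', 52_428_800.to_s) # stored as "52428800"
    props.put('enable.auto.commit', true.to_s)    # stored as "true"
    puts props.get('fetch.max.bytes')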
@@ -334,6 +352,24 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
     end
   end
 
+  def partition_assignment_strategy_class
+    case partition_assignment_strategy
+    when 'range'
+      'org.apache.kafka.clients.consumer.RangeAssignor'
+    when 'round_robin'
+      'org.apache.kafka.clients.consumer.RoundRobinAssignor'
+    when 'sticky'
+      'org.apache.kafka.clients.consumer.StickyAssignor'
+    when 'cooperative_sticky'
+      'org.apache.kafka.clients.consumer.CooperativeStickyAssignor'
+    else
+      unless partition_assignment_strategy.index('.')
+        raise LogStash::ConfigurationError, "unsupported partition_assignment_strategy: #{partition_assignment_strategy.inspect}"
+      end
+      partition_assignment_strategy # assume a fully qualified class-name
+    end
+  end
+
   def set_trustore_keystore_config(props)
     props.put("ssl.truststore.type", ssl_truststore_type) unless ssl_truststore_type.nil?
     props.put("ssl.truststore.location", ssl_truststore_location) unless ssl_truststore_location.nil?
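For reference, the helper added above resolves the documented short names to the stock Kafka assignor classes and passes dotted names through unchanged; unrecognised bare names raise a configuration error. A standalone sketch mirroring that mapping (not the plugin method itself):

    ASSIGNORS = {
      'range'              => 'org.apache.kafka.clients.consumer.RangeAssignor',
      'round_robin'        => 'org.apache.kafka.clients.consumer.RoundRobinAssignor',
      'sticky'             => 'org.apache.kafka.clients.consumer.StickyAssignor',
      'cooperative_sticky' => 'org.apache.kafka.clients.consumer.CooperativeStickyAssignor'
    }.freeze

    def resolve_assignor(name)
      ASSIGNORS.fetch(name) do
        raise ArgumentError, "unsupported partition_assignment_strategy: #{name.inspect}" unless name.include?('.')
        name # treated as an already fully qualified class name
      end
    end

    p resolve_assignor('sticky')                 # => "org.apache.kafka.clients.consumer.StickyAssignor"
    p resolve_assignor('com.example.MyAssignor') # => "com.example.MyAssignor"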
@@ -348,15 +384,15 @@ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
   end
 
   def set_sasl_config(props)
-    java.lang.System.setProperty("java.security.auth.login.config",jaas_path) unless jaas_path.nil?
-    java.lang.System.setProperty("java.security.krb5.conf",kerberos_config) unless kerberos_config.nil?
+    java.lang.System.setProperty("java.security.auth.login.config", jaas_path) unless jaas_path.nil?
+    java.lang.System.setProperty("java.security.krb5.conf", kerberos_config) unless kerberos_config.nil?
 
-    props.put("sasl.mechanism",sasl_mechanism)
+    props.put("sasl.mechanism", sasl_mechanism)
     if sasl_mechanism == "GSSAPI" && sasl_kerberos_service_name.nil?
       raise LogStash::ConfigurationError, "sasl_kerberos_service_name must be specified when SASL mechanism is GSSAPI"
     end
 
-    props.put("sasl.kerberos.service.name",sasl_kerberos_service_name) unless sasl_kerberos_service_name.nil?
+    props.put("sasl.kerberos.service.name", sasl_kerberos_service_name) unless sasl_kerberos_service_name.nil?
     props.put("sasl.jaas.config", sasl_jaas_config) unless sasl_jaas_config.nil?
   end
 end #class LogStash::Inputs::Kafka
data/lib/logstash/outputs/kafka.rb
CHANGED
@@ -3,8 +3,6 @@ require 'logstash/outputs/base'
 require 'java'
 require 'logstash-integration-kafka_jars.rb'
 
-java_import org.apache.kafka.clients.producer.ProducerRecord
-
 # Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on
 # the broker.
 #
@@ -49,6 +47,9 @@ java_import org.apache.kafka.clients.producer.ProducerRecord
 #
 # Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs
 class LogStash::Outputs::Kafka < LogStash::Outputs::Base
+
+  java_import org.apache.kafka.clients.producer.ProducerRecord
+
   declare_threadsafe!
 
   config_name 'kafka'
@@ -66,7 +67,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   # The producer will attempt to batch records together into fewer requests whenever multiple
   # records are being sent to the same partition. This helps performance on both the client
   # and the server. This configuration controls the default batch size in bytes.
-  config :batch_size, :validate => :number, :default =>
+  config :batch_size, :validate => :number, :default => 16_384 # Kafka default
   # This is for bootstrapping and the producer will only use it for getting metadata (topics,
   # partitions and replicas). The socket connections for sending the actual data will be
   # established based on the broker information returned in the metadata. The format is
@@ -74,10 +75,15 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   # subset of brokers.
   config :bootstrap_servers, :validate => :string, :default => 'localhost:9092'
   # The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
-  config :buffer_memory, :validate => :number, :default =>
+  config :buffer_memory, :validate => :number, :default => 33_554_432 # (32M) Kafka default
   # The compression type for all data generated by the producer.
   # The default is none (i.e. no compression). Valid values are none, gzip, or snappy.
   config :compression_type, :validate => ["none", "gzip", "snappy", "lz4"], :default => "none"
+  # How DNS lookups should be done. If set to `use_all_dns_ips`, when the lookup returns multiple
+  # IP addresses for a hostname, they will all be attempted to connect to before failing the
+  # connection. If the value is `resolve_canonical_bootstrap_servers_only` each entry will be
+  # resolved and expanded into a list of canonical names.
+  config :client_dns_lookup, :validate => ["default", "use_all_dns_ips", "resolve_canonical_bootstrap_servers_only"], :default => "default"
   # The id string to pass to the server when making requests.
   # The purpose of this is to be able to track the source of requests beyond just
   # ip/port by allowing a logical application name to be included with the request
@@ -91,24 +97,26 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   # This setting accomplishes this by adding a small amount of artificial delay—that is,
   # rather than immediately sending out a record the producer will wait for up to the given delay
   # to allow other records to be sent so that the sends can be batched together.
-  config :linger_ms, :validate => :number, :default => 0
+  config :linger_ms, :validate => :number, :default => 0 # Kafka default
   # The maximum size of a request
-  config :max_request_size, :validate => :number, :default =>
+  config :max_request_size, :validate => :number, :default => 1_048_576 # (1MB) Kafka default
   # The key for the message
   config :message_key, :validate => :string
   # the timeout setting for initial metadata request to fetch topic metadata.
-  config :metadata_fetch_timeout_ms, :validate => :number, :default =>
+  config :metadata_fetch_timeout_ms, :validate => :number, :default => 60_000
   # the max time in milliseconds before a metadata refresh is forced.
-  config :metadata_max_age_ms, :validate => :number, :default =>
+  config :metadata_max_age_ms, :validate => :number, :default => 300_000 # (5m) Kafka default
+  # Partitioner to use - can be `default`, `uniform_sticky`, `round_robin` or a fully qualified class name of a custom partitioner.
+  config :partitioner, :validate => :string
   # The size of the TCP receive buffer to use when reading data
-  config :receive_buffer_bytes, :validate => :number, :default =>
+  config :receive_buffer_bytes, :validate => :number, :default => 32_768 # (32KB) Kafka default
   # The amount of time to wait before attempting to reconnect to a given host when a connection fails.
-  config :reconnect_backoff_ms, :validate => :number, :default =>
+  config :reconnect_backoff_ms, :validate => :number, :default => 50 # Kafka default
   # The configuration controls the maximum amount of time the client will wait
   # for the response of a request. If the response is not received before the timeout
   # elapses the client will resend the request if necessary or fail the request if
   # retries are exhausted.
-  config :request_timeout_ms, :validate => :
+  config :request_timeout_ms, :validate => :number, :default => 40_000 # (40s) Kafka default
   # The default retry behavior is to retry until successful. To prevent data loss,
   # the use of this setting is discouraged.
   #
@@ -119,9 +127,9 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   # A value less than zero is a configuration error.
   config :retries, :validate => :number
   # The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
-  config :retry_backoff_ms, :validate => :number, :default => 100
+  config :retry_backoff_ms, :validate => :number, :default => 100 # Kafka default
   # The size of the TCP send buffer to use when sending data.
-  config :send_buffer_bytes, :validate => :number, :default =>
+  config :send_buffer_bytes, :validate => :number, :default => 131_072 # (128KB) Kafka default
   # The truststore type.
   config :ssl_truststore_type, :validate => :string
   # The JKS truststore path to validate the Kafka broker's certificate.
@@ -183,7 +191,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
         raise ConfigurationError, "A negative retry count (#{@retries}) is not valid. Must be a value >= 0"
       end
 
-
+      logger.warn("Kafka output is configured with finite retry. This instructs Logstash to LOSE DATA after a set number of send attempts fails. If you do not want to lose data if Kafka is down, then you must remove the retry setting.", :retries => @retries)
     end
 
 
@@ -201,8 +209,6 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
     end
   end
 
-  # def register
-
   def prepare(record)
     # This output is threadsafe, so we need to keep a batch per thread.
     @thread_batch_map[Thread.current].add(record)
@@ -268,7 +274,7 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
         result = future.get()
       rescue => e
         # TODO(sissel): Add metric to count failures, possibly by exception type.
-        logger.warn("
+        logger.warn("producer send failed", :exception => e.class, :message => e.message)
         failures << batch[i]
       end
     end
@@ -302,10 +308,9 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
     end
     prepare(record)
   rescue LogStash::ShutdownSignal
-
+    logger.debug('producer received shutdown signal')
   rescue => e
-
-      :exception => e)
+    logger.warn('producer threw exception, restarting', :exception => e.class, :message => e.message)
   end
 
   def create_producer
@@ -318,14 +323,19 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
     props.put(kafka::BOOTSTRAP_SERVERS_CONFIG, bootstrap_servers)
     props.put(kafka::BUFFER_MEMORY_CONFIG, buffer_memory.to_s)
     props.put(kafka::COMPRESSION_TYPE_CONFIG, compression_type)
+    props.put(kafka::CLIENT_DNS_LOOKUP_CONFIG, client_dns_lookup)
     props.put(kafka::CLIENT_ID_CONFIG, client_id) unless client_id.nil?
     props.put(kafka::KEY_SERIALIZER_CLASS_CONFIG, key_serializer)
     props.put(kafka::LINGER_MS_CONFIG, linger_ms.to_s)
     props.put(kafka::MAX_REQUEST_SIZE_CONFIG, max_request_size.to_s)
-    props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms) unless metadata_max_age_ms.nil?
+    props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms.to_s) unless metadata_max_age_ms.nil?
+    unless partitioner.nil?
+      props.put(kafka::PARTITIONER_CLASS_CONFIG, partitioner = partitioner_class)
+      logger.debug('producer configured using partitioner', :partitioner_class => partitioner)
+    end
     props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes.to_s) unless receive_buffer_bytes.nil?
-    props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms) unless reconnect_backoff_ms.nil?
-    props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms) unless request_timeout_ms.nil?
+    props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms.to_s) unless reconnect_backoff_ms.nil?
+    props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms.to_s) unless request_timeout_ms.nil?
     props.put(kafka::RETRIES_CONFIG, retries.to_s) unless retries.nil?
     props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s)
     props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes.to_s)
@@ -342,7 +352,6 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
       set_sasl_config(props)
     end
 
-
     org.apache.kafka.clients.producer.KafkaProducer.new(props)
   rescue => e
     logger.error("Unable to create Kafka producer from given configuration",
@@ -352,13 +361,31 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
     end
   end
 
+  def partitioner_class
+    case partitioner
+    when 'round_robin'
+      'org.apache.kafka.clients.producer.RoundRobinPartitioner'
+    when 'uniform_sticky'
+      'org.apache.kafka.clients.producer.UniformStickyPartitioner'
+    when 'default'
+      'org.apache.kafka.clients.producer.internals.DefaultPartitioner'
+    else
+      unless partitioner.index('.')
+        raise LogStash::ConfigurationError, "unsupported partitioner: #{partitioner.inspect}"
+      end
+      partitioner # assume a fully qualified class-name
+    end
+  end
+
   def set_trustore_keystore_config(props)
-
-
+    unless ssl_endpoint_identification_algorithm.to_s.strip.empty?
+      if ssl_truststore_location.nil?
+        raise LogStash::ConfigurationError, "ssl_truststore_location must be set when SSL is enabled"
+      end
+      props.put("ssl.truststore.type", ssl_truststore_type) unless ssl_truststore_type.nil?
+      props.put("ssl.truststore.location", ssl_truststore_location)
+      props.put("ssl.truststore.password", ssl_truststore_password.value) unless ssl_truststore_password.nil?
     end
-    props.put("ssl.truststore.type", ssl_truststore_type) unless ssl_truststore_type.nil?
-    props.put("ssl.truststore.location", ssl_truststore_location)
-    props.put("ssl.truststore.password", ssl_truststore_password.value) unless ssl_truststore_password.nil?
 
     # Client auth stuff
     props.put("ssl.keystore.type", ssl_keystore_type) unless ssl_keystore_type.nil?
@@ -369,15 +396,15 @@ class LogStash::Outputs::Kafka < LogStash::Outputs::Base
   end
 
   def set_sasl_config(props)
-    java.lang.System.setProperty("java.security.auth.login.config",jaas_path) unless jaas_path.nil?
-    java.lang.System.setProperty("java.security.krb5.conf",kerberos_config) unless kerberos_config.nil?
+    java.lang.System.setProperty("java.security.auth.login.config", jaas_path) unless jaas_path.nil?
+    java.lang.System.setProperty("java.security.krb5.conf", kerberos_config) unless kerberos_config.nil?
 
     props.put("sasl.mechanism",sasl_mechanism)
     if sasl_mechanism == "GSSAPI" && sasl_kerberos_service_name.nil?
       raise LogStash::ConfigurationError, "sasl_kerberos_service_name must be specified when SASL mechanism is GSSAPI"
     end
 
-    props.put("sasl.kerberos.service.name",sasl_kerberos_service_name) unless sasl_kerberos_service_name.nil?
+    props.put("sasl.kerberos.service.name", sasl_kerberos_service_name) unless sasl_kerberos_service_name.nil?
     props.put("sasl.jaas.config", sasl_jaas_config) unless sasl_jaas_config.nil?
   end
 
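Both `set_sasl_config` implementations pass an inline `sasl_jaas_config` string straight through to the Kafka client. As a hedged illustration of what such a value looks like for SASL/PLAIN (placeholder credentials, not taken from this package):

    require 'java'

    # A typical inline JAAS entry for SASL/PLAIN.
    sasl_jaas_config = 'org.apache.kafka.common.security.plain.PlainLoginModule required ' \
                       'username="alice" password="alice-secret";'

    props = java.util.Properties.new
    props.put('security.protocol', 'SASL_SSL')
    props.put('sasl.mechanism', 'PLAIN')
    props.put('sasl.jaas.config', sasl_jaas_config)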