logstash-input-logQueue 0.1.0-java

checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA256:
+   metadata.gz: 3980db697ae53398657f65157acf4679ec1434a17958a4a7461eb1d29abefebf
+   data.tar.gz: c39ad8c4ce6428b146b55f408d90049f314ae04030a65ba9e71ac84f3f3cd287
+ SHA512:
+   metadata.gz: 9f4be28ed5b312b39d71f008322efd009f2aae375614593df454bae9d7a253d1872a4fa478353001307bdb2df5780eff67e8f680a6082052a71cc380bb6cda85
+   data.tar.gz: 9be081df11d0026acafdf8817c84701d5421aef1fc5e94fa643fb35c3d85c915bfffa17d52efc2c2d0619a2c34d242b7d69379969023e0ed5f9b9554ce3e964b
data/CHANGELOG.md ADDED
@@ -0,0 +1,2 @@
+ ## 0.1.0
+ - Plugin created with the logstash plugin generator
data/CONTRIBUTORS ADDED
@@ -0,0 +1,10 @@
+ The following is a list of people who have contributed ideas, code, bug
+ reports, or in general have helped logstash along its way.
+
+ Contributors:
+ * -
+
+ Note: If you've sent us patches, bug reports, or otherwise contributed to
+ Logstash, and you aren't on the list above and want to be, please let us know
+ and we'll make sure you're here. Contributions from folks like you are what make
+ open source awesome.
data/Gemfile ADDED
@@ -0,0 +1,13 @@
+ source 'https://rubygems.org'
+
+ gemspec
+
+ logstash_path = ENV["LOGSTASH_PATH"] || "../../logstash"
+ use_logstash_source = ENV["LOGSTASH_SOURCE"] && ENV["LOGSTASH_SOURCE"].to_s == "1"
+
+ if Dir.exist?(logstash_path) && use_logstash_source
+   gem 'logstash-core', :path => "#{logstash_path}/logstash-core"
+   gem 'logstash-core-plugin-api', :path => "#{logstash_path}/logstash-core-plugin-api"
+ end
+
+ gem 'java', '~> 0.0.2'
data/LICENSE ADDED
@@ -0,0 +1,13 @@
+ Copyright (c) 2012–2016 Elasticsearch <http://www.elastic.co>
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
data/NOTICE.TXT ADDED
@@ -0,0 +1,5 @@
+ Elasticsearch
+ Copyright 2012-2015 Elasticsearch
+
+ This product includes software developed by The Apache Software
+ Foundation (http://www.apache.org/).
data/lib/logstash/inputs/logQueue.rb ADDED
@@ -0,0 +1,363 @@
+ # encoding: utf-8
+ require "logstash/inputs/base"
+ require "stud/interval"
+ require "socket" # for Socket.gethostname
+ require 'java'
+ require_relative '../../org/apache/kafka/kafka-clients/2.3.0/kafka-clients-2.3.0.jar'
+ require_relative '../../org/slf4j/slf4j-api/1.7.26/slf4j-api-1.7.26.jar'
+ # This input will read events from a Kafka topic. It uses the 0.10 version of
+ # the consumer API provided by Kafka to read messages from the broker.
+ #
+ # Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+ # of Logstash and the Kafka input plugin:
+ #
+ # [options="header"]
+ # |==========================================================
+ # |Kafka Client Version |Logstash Version |Plugin Version |Why?
+ # |0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+ # |0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+ # |0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+ # |0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
+ # |0.10.1.x |2.4.x - 5.x.x | 6.x.x |
+ # |==========================================================
+ #
+ # NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
+ # upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+ # is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
+ #
+ # This input supports connecting to Kafka over:
+ #
+ # * SSL (requires plugin version 3.0.0 or later)
+ # * Kerberos SASL (requires plugin version 5.1.0 or later)
+ #
+ # By default security is disabled but can be turned on as needed.
+ #
+ # The Logstash Kafka consumer handles group management and uses the default offset management
+ # strategy using Kafka topics.
+ #
+ # Logstash instances by default form a single logical group to subscribe to Kafka topics.
+ # Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
+ # you could run multiple Logstash instances with the same `group_id` to spread the load across
+ # physical machines. Messages in a topic will be distributed to all Logstash instances with
+ # the same `group_id`.
+ #
+ # Ideally you should have as many threads as the number of partitions for a perfect balance --
+ # more threads than partitions means that some threads will be idle.
+ #
+ # For more information see http://kafka.apache.org/documentation.html#theconsumer
+ #
+ # Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
+ #
+ class LogStash::Inputs::Kafka < LogStash::Inputs::Base
+   config_name 'logQueue'
+
+   default :codec, 'plain'
+
+   # The frequency in milliseconds that the consumer offsets are committed to Kafka.
+   config :auto_commit_interval_ms, :validate => :string, :default => "5000"
+   # What to do when there is no initial offset in Kafka or if an offset is out of range:
+   #
+   # * earliest: automatically reset the offset to the earliest offset
+   # * latest: automatically reset the offset to the latest offset
+   # * none: throw exception to the consumer if no previous offset is found for the consumer's group
+   # * anything else: throw exception to the consumer.
+   config :auto_offset_reset, :validate => :string
+   # A list of URLs of Kafka instances to use for establishing the initial connection to the cluster.
+   # This list should be in the form of `host1:port1,host2:port2`. These URLs are only used
+   # for the initial connection to discover the full cluster membership (which may change dynamically),
+   # so this list need not contain the full set of servers (you may want more than one, though, in
+   # case a server is down).
+   config :bootstrap_servers, :validate => :string, :default => "localhost:9092"
+   # Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk
+   # corruption to the messages occurred. This check adds some overhead, so it may be
+   # disabled in cases seeking extreme performance.
+   config :check_crcs, :validate => :string
+   # The id string to pass to the server when making requests. The purpose of this
+   # is to be able to track the source of requests beyond just ip/port by allowing
+   # a logical application name to be included.
+   config :client_id, :validate => :string, :default => "logstash"
+   # Close idle connections after the number of milliseconds specified by this config.
+   config :connections_max_idle_ms, :validate => :string
+   # Ideally you should have as many threads as the number of partitions for a perfect
+   # balance — more threads than partitions means that some threads will be idle
+   config :consumer_threads, :validate => :number, :default => 1
+   # If true, periodically commit to Kafka the offsets of messages already returned by the consumer.
+   # This committed offset will be used when the process fails as the position from
+   # which the consumption will begin.
+   config :enable_auto_commit, :validate => :string, :default => "true"
+   # Whether records from internal topics (such as offsets) should be exposed to the consumer.
+   # If set to true the only way to receive records from an internal topic is subscribing to it.
+   config :exclude_internal_topics, :validate => :string
+   # The maximum amount of data the server should return for a fetch request. This is not an
+   # absolute maximum; if the first message in the first non-empty partition of the fetch is larger
+   # than this value, the message will still be returned to ensure that the consumer can make progress.
+   config :fetch_max_bytes, :validate => :string
+   # The maximum amount of time the server will block before answering the fetch request if
+   # there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
+   # should be less than or equal to the timeout used in `poll_timeout_ms`.
+   config :fetch_max_wait_ms, :validate => :string
+   # The minimum amount of data the server should return for a fetch request. If insufficient
+   # data is available the request will wait for that much data to accumulate
+   # before answering the request.
+   config :fetch_min_bytes, :validate => :string
+   # The identifier of the group this consumer belongs to. A consumer group is a single logical subscriber
+   # that happens to be made up of multiple processors. Messages in a topic will be distributed to all
+   # Logstash instances with the same `group_id`.
+   config :group_id, :validate => :string, :default => "logstash"
+   # The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
+   # that the consumer's session stays active and to facilitate rebalancing when new
+   # consumers join or leave the group. The value must be set lower than
+   # `session.timeout.ms`, but typically should be set no higher than 1/3 of that value.
+   # It can be adjusted even lower to control the expected time for normal rebalances.
+   config :heartbeat_interval_ms, :validate => :string
+   # Java class used to deserialize the record's key.
+   config :key_deserializer_class, :validate => :string, :default => "org.apache.kafka.common.serialization.StringDeserializer"
+   # The maximum delay between invocations of poll() when using consumer group management. This places
+   # an upper bound on the amount of time that the consumer can be idle before fetching more records.
+   # If poll() is not called before expiration of this timeout, then the consumer is considered failed and
+   # the group will rebalance in order to reassign the partitions to another member.
+   # The value of the configuration `request_timeout_ms` must always be larger than `max_poll_interval_ms`.
+   config :max_poll_interval_ms, :validate => :string
+   # The maximum amount of data per-partition the server will return. The maximum total memory used for a
+   # request will be <code>#partitions * max.partition.fetch.bytes</code>. This size must be at least
+   # as large as the maximum message size the server allows or else it is possible for the producer to
+   # send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying
+   # to fetch a large message on a certain partition.
+   config :max_partition_fetch_bytes, :validate => :string
+   # The maximum number of records returned in a single call to poll().
+   config :max_poll_records, :validate => :string
+   # The period of time in milliseconds after which we force a refresh of metadata even if
+   # we haven't seen any partition leadership changes, to proactively discover any new brokers or partitions.
+   config :metadata_max_age_ms, :validate => :string
+   # The class name of the partition assignment strategy that the client will use to distribute
+   # partition ownership amongst consumer instances.
+   config :partition_assignment_strategy, :validate => :string
+   # The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
+   config :receive_buffer_bytes, :validate => :string
+   # The amount of time to wait before attempting to reconnect to a given host.
+   # This avoids repeatedly connecting to a host in a tight loop.
+   # This backoff applies to all requests sent by the consumer to the broker.
+   config :reconnect_backoff_ms, :validate => :string
+   # The configuration controls the maximum amount of time the client will wait
+   # for the response of a request. If the response is not received before the timeout
+   # elapses, the client will resend the request if necessary or fail the request if
+   # retries are exhausted.
+   config :request_timeout_ms, :validate => :string
+   # The amount of time to wait before attempting to retry a failed fetch request
+   # to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
+   config :retry_backoff_ms, :validate => :string
+   # The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
+   config :send_buffer_bytes, :validate => :string
+   # The timeout after which, if `poll_timeout_ms` is not invoked, the consumer is marked dead
+   # and a rebalance operation is triggered for the group identified by `group_id`.
+   config :session_timeout_ms, :validate => :string
+   # Java class used to deserialize the record's value.
+   config :value_deserializer_class, :validate => :string, :default => "org.apache.kafka.common.serialization.StringDeserializer"
+   # A list of topics to subscribe to, defaults to ["logstash"].
+   config :topics, :validate => :array, :default => ["logstash"]
+   # A topic regex pattern to subscribe to.
+   # The topics configuration will be ignored when using this configuration.
+   config :topics_pattern, :validate => :string
+   # Time the Kafka consumer will wait to receive new messages from topics.
+   config :poll_timeout_ms, :validate => :number, :default => 100
+   # The truststore type.
+   config :ssl_truststore_type, :validate => :string
+   # The JKS truststore path to validate the Kafka broker's certificate.
+   config :ssl_truststore_location, :validate => :path
+   # The truststore password.
+   config :ssl_truststore_password, :validate => :password
+   # The keystore type.
+   config :ssl_keystore_type, :validate => :string
+   # If client authentication is required, this setting stores the keystore path.
+   config :ssl_keystore_location, :validate => :path
+   # If client authentication is required, this setting stores the keystore password.
+   config :ssl_keystore_password, :validate => :password
+   # The password of the private key in the key store file.
+   config :ssl_key_password, :validate => :password
+   # Algorithm to use when verifying the host. Set to "" to disable.
+   config :ssl_endpoint_identification_algorithm, :validate => :string, :default => 'https'
+   # Security protocol to use, which can be either of PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
+   config :security_protocol, :validate => ["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"], :default => "PLAINTEXT"
+   # http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
+   # This may be any mechanism for which a security provider is available.
+   # GSSAPI is the default mechanism.
+   config :sasl_mechanism, :validate => :string, :default => "GSSAPI"
+   # The Kerberos principal name that Kafka broker runs as.
+   # This can be defined either in Kafka's JAAS config or in Kafka's config.
+   config :sasl_kerberos_service_name, :validate => :string
+   # The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
+   # services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client:
+   # [source,java]
+   # ----------------------------------
+   # KafkaClient {
+   #   com.sun.security.auth.module.Krb5LoginModule required
+   #   useTicketCache=true
+   #   renewTicket=true
+   #   serviceName="kafka";
+   # };
+   # ----------------------------------
+   #
+   # Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
+   # to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same
+   # `jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
+   # different JVM instances.
+   config :jaas_path, :validate => :path
+   # JAAS configuration settings. This allows the JAAS config to be a part of the plugin configuration and allows for a different JAAS configuration per plugin config.
+   config :sasl_jaas_config, :validate => :string
+   # Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+   config :kerberos_config, :validate => :path
+   # Option to add Kafka metadata like topic and message size to the event.
+   # This will add fields under `[@metadata][kafka]` on the Logstash event containing the following attributes:
+   # `topic`: The topic this message is associated with
+   # `consumer_group`: The consumer group used to read in this event
+   # `partition`: The partition this message is associated with
+   # `offset`: The offset from the partition this message is associated with
+   # `key`: A ByteBuffer containing the message key
+   # `timestamp`: The timestamp of this message
+   config :decorate_events, :validate => :boolean, :default => false
+
+
+   public
+   def register
+     @runner_threads = []
+   end # def register
+
+   public
+   def run(logstash_queue)
+     @runner_consumers = consumer_threads.times.map { |i| create_consumer("#{client_id}-#{i}") }
+     @runner_threads = @runner_consumers.map { |consumer| thread_runner(logstash_queue, consumer) }
+     @runner_threads.each { |t| t.join }
+   end # def run
+
+   public
+   def stop
+     # if we have consumers, wake them up to unblock our runner threads
+     @runner_consumers && @runner_consumers.each(&:wakeup)
+   end
+
+   public
+   def kafka_consumers
+     @runner_consumers
+   end
+
+   private
+   def thread_runner(logstash_queue, consumer)
+     Thread.new do
+       begin
+         unless @topics_pattern.nil?
+           nooplistener = org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener.new
+           pattern = java.util.regex.Pattern.compile(@topics_pattern)
+           consumer.subscribe(pattern, nooplistener)
+         else
+           consumer.subscribe(topics)
+         end
+         codec_instance = @codec.clone
+         while !stop?
+           records = consumer.poll(poll_timeout_ms)
+           next unless records.count > 0
+           for record in records do
+             codec_instance.decode(record.value.to_s) do |event|
+               decorate(event)
+               if @decorate_events
+                 event.set("[@metadata][kafka][topic]", record.topic)
+                 event.set("[@metadata][kafka][consumer_group]", @group_id)
+                 event.set("[@metadata][kafka][partition]", record.partition)
+                 event.set("[@metadata][kafka][offset]", record.offset)
+                 event.set("[@metadata][kafka][key]", record.key)
+                 event.set("[@metadata][kafka][timestamp]", record.timestamp)
+               end
+               logstash_queue << event
+             end
+           end
+           # Manual offset commit
+           if @enable_auto_commit == "false"
+             consumer.commitSync
+           end
+         end
+       rescue org.apache.kafka.common.errors.WakeupException => e
+         raise e if !stop?
+       ensure
+         consumer.close
+       end
+     end
+   end
+
+   private
+   def create_consumer(client_id)
+     begin
+       props = java.util.Properties.new
+       kafka = org.apache.kafka.clients.consumer.ConsumerConfig
+
+       props.put(kafka::AUTO_COMMIT_INTERVAL_MS_CONFIG, auto_commit_interval_ms)
+       props.put(kafka::AUTO_OFFSET_RESET_CONFIG, auto_offset_reset) unless auto_offset_reset.nil?
+       props.put(kafka::BOOTSTRAP_SERVERS_CONFIG, bootstrap_servers)
+       props.put(kafka::CHECK_CRCS_CONFIG, check_crcs) unless check_crcs.nil?
+       props.put(kafka::CLIENT_ID_CONFIG, client_id)
+       props.put(kafka::CONNECTIONS_MAX_IDLE_MS_CONFIG, connections_max_idle_ms) unless connections_max_idle_ms.nil?
+       props.put(kafka::ENABLE_AUTO_COMMIT_CONFIG, enable_auto_commit)
+       props.put(kafka::EXCLUDE_INTERNAL_TOPICS_CONFIG, exclude_internal_topics) unless exclude_internal_topics.nil?
+       props.put(kafka::FETCH_MAX_BYTES_CONFIG, fetch_max_bytes) unless fetch_max_bytes.nil?
+       props.put(kafka::FETCH_MAX_WAIT_MS_CONFIG, fetch_max_wait_ms) unless fetch_max_wait_ms.nil?
+       props.put(kafka::FETCH_MIN_BYTES_CONFIG, fetch_min_bytes) unless fetch_min_bytes.nil?
+       props.put(kafka::GROUP_ID_CONFIG, group_id)
+       props.put(kafka::HEARTBEAT_INTERVAL_MS_CONFIG, heartbeat_interval_ms) unless heartbeat_interval_ms.nil?
+       props.put(kafka::KEY_DESERIALIZER_CLASS_CONFIG, key_deserializer_class)
+       props.put(kafka::MAX_PARTITION_FETCH_BYTES_CONFIG, max_partition_fetch_bytes) unless max_partition_fetch_bytes.nil?
+       props.put(kafka::MAX_POLL_RECORDS_CONFIG, max_poll_records) unless max_poll_records.nil?
+       props.put(kafka::MAX_POLL_INTERVAL_MS_CONFIG, max_poll_interval_ms) unless max_poll_interval_ms.nil?
+       props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms) unless metadata_max_age_ms.nil?
+       props.put(kafka::PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partition_assignment_strategy) unless partition_assignment_strategy.nil?
+       props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes) unless receive_buffer_bytes.nil?
+       props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms) unless reconnect_backoff_ms.nil?
+       props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms) unless request_timeout_ms.nil?
+       props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms) unless retry_backoff_ms.nil?
+       props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes) unless send_buffer_bytes.nil?
+       props.put(kafka::SESSION_TIMEOUT_MS_CONFIG, session_timeout_ms) unless session_timeout_ms.nil?
+       props.put(kafka::VALUE_DESERIALIZER_CLASS_CONFIG, value_deserializer_class)
+
+       props.put("security.protocol", security_protocol) unless security_protocol.nil?
+
+       if security_protocol == "SSL"
+         set_trustore_keystore_config(props)
+       elsif security_protocol == "SASL_PLAINTEXT"
+         set_sasl_config(props)
+       elsif security_protocol == "SASL_SSL"
+         set_trustore_keystore_config(props)
+         set_sasl_config(props)
+       end
+
+       org.apache.kafka.clients.consumer.KafkaConsumer.new(props)
+     rescue => e
+       logger.error("Unable to create Kafka consumer from given configuration",
+                    :kafka_error_message => e,
+                    :cause => e.respond_to?(:getCause) ? e.getCause() : nil)
+       raise e
+     end
+   end
+
+   def set_trustore_keystore_config(props)
+     props.put("ssl.truststore.type", ssl_truststore_type) unless ssl_truststore_type.nil?
+     props.put("ssl.truststore.location", ssl_truststore_location) unless ssl_truststore_location.nil?
+     props.put("ssl.truststore.password", ssl_truststore_password.value) unless ssl_truststore_password.nil?
+
+     # Client auth stuff
+     props.put("ssl.keystore.type", ssl_keystore_type) unless ssl_keystore_type.nil?
+     props.put("ssl.key.password", ssl_key_password.value) unless ssl_key_password.nil?
+     props.put("ssl.keystore.location", ssl_keystore_location) unless ssl_keystore_location.nil?
+     props.put("ssl.keystore.password", ssl_keystore_password.value) unless ssl_keystore_password.nil?
+     props.put("ssl.endpoint.identification.algorithm", ssl_endpoint_identification_algorithm) unless ssl_endpoint_identification_algorithm.nil?
+   end
+
+   def set_sasl_config(props)
+     java.lang.System.setProperty("java.security.auth.login.config", jaas_path) unless jaas_path.nil?
+     java.lang.System.setProperty("java.security.krb5.conf", kerberos_config) unless kerberos_config.nil?
+
+     props.put("sasl.mechanism", sasl_mechanism)
+     if sasl_mechanism == "GSSAPI" && sasl_kerberos_service_name.nil?
+       raise LogStash::ConfigurationError, "sasl_kerberos_service_name must be specified when SASL mechanism is GSSAPI"
+     end
+
+     props.put("sasl.kerberos.service.name", sasl_kerberos_service_name) unless sasl_kerberos_service_name.nil?
+     props.put("sasl.jaas.config", sasl_jaas_config) unless sasl_jaas_config.nil?
+   end
+ end # class LogStash::Inputs::Kafka
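
The plugin registers under the config name `logQueue` but otherwise behaves like the stock Kafka input: `register` prepares the thread list, and `run(queue)` starts one consumer per `consumer_threads`, pushes decoded events onto the queue, and blocks until the input is stopped. A minimal sketch of driving it outside a full pipeline; it assumes a JRuby/Logstash runtime with this gem's `lib` directory on the load path, a reachable broker at localhost:9092, and uses the plugin's documented defaults:

```ruby
# Minimal sketch: instantiate and run the logQueue input by hand.
require "logstash/inputs/logQueue"

input = LogStash::Inputs::Kafka.new(
  "bootstrap_servers" => "localhost:9092",
  "topics"            => ["logstash"],
  "group_id"          => "logstash",
  "consumer_threads"  => 1,
  "decorate_events"   => true   # adds [@metadata][kafka][...] fields to each event
)
input.register

queue  = Queue.new                          # stands in for the Logstash pipeline queue
runner = Thread.new { input.run(queue) }    # run blocks until the input is stopped

# ... read events from `queue` ...

input.do_stop   # sets the stop flag and calls stop, which wakes the Kafka consumers
runner.join
```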
data/lib/logstash-input-logQueue_jars.rb ADDED
@@ -0,0 +1,18 @@
+ # this is a generated file, to avoid over-writing it just delete this comment
+ begin
+   require 'jar_dependencies'
+ rescue LoadError
+   require 'com/github/luben/zstd-jni/1.4.0-1/zstd-jni-1.4.0-1.jar'
+   require 'org/xerial/snappy/snappy-java/1.1.7.3/snappy-java-1.1.7.3.jar'
+   require 'org/apache/kafka/kafka-clients/2.3.0/kafka-clients-2.3.0.jar'
+   require 'org/slf4j/slf4j-api/1.7.26/slf4j-api-1.7.26.jar'
+   require 'org/lz4/lz4-java/1.6.0/lz4-java-1.6.0.jar'
+ end
+
+ if defined? Jars
+   require_jar 'com.github.luben', 'zstd-jni', '1.4.0-1'
+   require_jar 'org.xerial.snappy', 'snappy-java', '1.1.7.3'
+   require_jar 'org.apache.kafka', 'kafka-clients', '2.3.0'
+   require_jar 'org.slf4j', 'slf4j-api', '1.7.26'
+   require_jar 'org.lz4', 'lz4-java', '1.6.0'
+ end
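
This generated file puts the bundled jars on the JRuby load path, either through jar-dependencies' `require_jar` or, when that gem is missing, by requiring the vendored jar files directly. A small sketch of what that enables, assuming a JRuby runtime with the gem's `lib` directory on the load path:

```ruby
# Sketch: once the jars file has been required, classes from kafka-clients-2.3.0.jar
# are resolvable through JRuby's Java integration.
require "java"
require "logstash-input-logQueue_jars"

consumer_config = org.apache.kafka.clients.consumer.ConsumerConfig
puts consumer_config::BOOTSTRAP_SERVERS_CONFIG   # => "bootstrap.servers"
```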
data/logstash-input-logQueue.gemspec ADDED
@@ -0,0 +1,26 @@
+ Gem::Specification.new do |s|
+   s.name = 'logstash-input-logQueue'
+   s.version = '0.1.0'
+   s.licenses = ['Apache-2.0']
+   s.summary = 'logstash-input-plugin'
+   s.description = 'logstash-input-plugin'
+   s.authors = ['sdx']
+   s.email = 'sdx.com.cn@qq.com'
+   s.require_paths = ['lib']
+   s.platform = 'java'
+   # Files
+   s.files = Dir['lib/**/*','spec/**/*','vendor/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.TXT']
+   # Tests
+   s.test_files = s.files.grep(%r{^(test|spec|features)/})
+
+   # Special flag to let us know this is actually a logstash plugin
+   s.metadata = { "logstash_plugin" => "true", "logstash_group" => "input" }
+
+   # Gem dependencies
+   s.add_runtime_dependency "logstash-core-plugin-api", "~> 2.0"
+   s.add_runtime_dependency 'logstash-codec-plain'
+   s.add_runtime_dependency 'stud', '>= 0.0.22'
+   s.add_development_dependency 'logstash-devutils', '>= 0.0.16'
+   s.requirements << "jar 'org.apache.kafka:kafka-clients', '2.3.0'"
+   s.add_runtime_dependency 'jar-dependencies'
+ end
data/spec/inputs/test_spec.rb ADDED
@@ -0,0 +1,11 @@
+ # encoding: utf-8
+ require "logstash/devutils/rspec/spec_helper"
+ require "logstash/inputs/logQueue"
+
+ describe LogStash::Inputs::Kafka do
+
+   it_behaves_like "an interruptible input plugin" do
+     let(:config) { { "bootstrap_servers" => "localhost:9092", "topics" => ["logstash"] } }
+   end
+
+ end
metadata ADDED
@@ -0,0 +1,130 @@
+ --- !ruby/object:Gem::Specification
+ name: logstash-input-logQueue
+ version: !ruby/object:Gem::Version
+   version: 0.1.0
+ platform: java
+ authors:
+ - sdx
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2022-07-21 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: logstash-core-plugin-api
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '2.0'
+ - !ruby/object:Gem::Dependency
+   name: logstash-codec-plain
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: stud
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: 0.0.22
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: 0.0.22
+ - !ruby/object:Gem::Dependency
+   name: logstash-devutils
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: 0.0.16
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: 0.0.16
+ - !ruby/object:Gem::Dependency
+   name: jar-dependencies
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+ description: logstash-input-plugin
+ email: sdx.com.cn@qq.com
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - CHANGELOG.md
+ - CONTRIBUTORS
+ - Gemfile
+ - LICENSE
+ - NOTICE.TXT
+ - lib/com/github/luben/zstd-jni/1.4.0-1/zstd-jni-1.4.0-1.jar
+ - lib/logstash-input-logQueue_jars.rb
+ - lib/logstash/inputs/logQueue.rb
+ - lib/org/apache/kafka/kafka-clients/2.3.0/kafka-clients-2.3.0.jar
+ - lib/org/lz4/lz4-java/1.6.0/lz4-java-1.6.0.jar
+ - lib/org/slf4j/slf4j-api/1.7.26/slf4j-api-1.7.26.jar
+ - lib/org/xerial/snappy/snappy-java/1.1.7.3/snappy-java-1.1.7.3.jar
+ - logstash-input-logQueue.gemspec
+ - spec/inputs/test_spec.rb
+ homepage:
+ licenses:
+ - Apache-2.0
+ metadata:
+   logstash_plugin: 'true'
+   logstash_group: input
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements:
+ - jar 'org.apache.kafka:kafka-clients', '2.3.0'
+ rubygems_version: 3.1.4
+ signing_key:
+ specification_version: 4
+ summary: logstash-input-plugin
+ test_files:
+ - spec/inputs/test_spec.rb