logstash-input-logQueue 0.1.0-java → 3.0.2-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 3980db697ae53398657f65157acf4679ec1434a17958a4a7461eb1d29abefebf
- data.tar.gz: c39ad8c4ce6428b146b55f408d90049f314ae04030a65ba9e71ac84f3f3cd287
+ metadata.gz: 690d35be9e8405f94f00c203c36115f3486487542be8e5cb86d0f8a6f8c84643
+ data.tar.gz: c1a1f5d4ba58080d7d35606dc1a3338769e2e511ee9e2ed6f0ad40d66915ca2a
  SHA512:
- metadata.gz: 9f4be28ed5b312b39d71f008322efd009f2aae375614593df454bae9d7a253d1872a4fa478353001307bdb2df5780eff67e8f680a6082052a71cc380bb6cda85
- data.tar.gz: 9be081df11d0026acafdf8817c84701d5421aef1fc5e94fa643fb35c3d85c915bfffa17d52efc2c2d0619a2c34d242b7d69379969023e0ed5f9b9554ce3e964b
+ metadata.gz: 5498a21456ef1882151b53ada4bbc19809c69fb59a37713cd696ee71b1c6c12e82d25075d00e48347bfa21714ae6edbd5cfa6341f20738414a0d7175323a0570
+ data.tar.gz: cfc30519b6b871700104194ea045839ca08edefe8620cc9cb166f380663f7a4e75c465d87f2fbc86b92bf899275d5827a2c60e6ef8947f9ea981475ed1b7a84d
lib/logstash/inputs/logQueue.rb CHANGED
@@ -1,363 +1,42 @@
  # encoding: utf-8
  require "logstash/inputs/base"
+ require "logstash/namespace"
  require "stud/interval"
  require "socket" # for Socket.gethostname
- require 'java'
- require_relative '../../org/apache/kafka/kafka-clients/2.3.0/kafka-clients-2.3.0.jar'
- require_relative '../../org/slf4j/slf4j-api/1.7.26/slf4j-api-1.7.26.jar'
- # This input will read events from a Kafka topic. It uses the 0.10 version of
- # the consumer API provided by Kafka to read messages from the broker.
- #
- # Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
- # of Logstash and the Kafka input plugin:
- #
- # [options="header"]
- # |==========================================================
- # |Kafka Client Version |Logstash Version |Plugin Version |Why?
- # |0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
- # |0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
- # |0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
- # |0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
- # |0.10.1.x |2.4.x - 5.x.x | 6.x.x |
- # |==========================================================
- #
- # NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should
- # upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
- # is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
- #
- # This input supports connecting to Kafka over:
- #
- # * SSL (requires plugin version 3.0.0 or later)
- # * Kerberos SASL (requires plugin version 5.1.0 or later)
- #
- # By default security is disabled but can be turned on as needed.
- #
- # The Logstash Kafka consumer handles group management and uses the default offset management
- # strategy using Kafka topics.
- #
- # Logstash instances by default form a single logical group to subscribe to Kafka topics
- # Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
- # you could run multiple Logstash instances with the same `group_id` to spread the load across
- # physical machines. Messages in a topic will be distributed to all Logstash instances with
- # the same `group_id`.
- #
- # Ideally you should have as many threads as the number of partitions for a perfect balance --
- # more threads than partitions means that some threads will be idle
- #
- # For more information see http://kafka.apache.org/documentation.html#theconsumer
- #
- # Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
+
+ # Generate a repeating message.
  #
- class LogStash::Inputs::Kafka < LogStash::Inputs::Base
- config_name 'logQueue'
+ # This plugin is intended only as a logQueue example.
 
- default :codec, 'plain'
+ class LogStash::Inputs::logQueue < LogStash::Inputs::Base
+ config_name "logQueue"
 
- # The frequency in milliseconds that the consumer offsets are committed to Kafka.
- config :auto_commit_interval_ms, :validate => :string, :default => "5000"
- # What to do when there is no initial offset in Kafka or if an offset is out of range:
- #
- # * earliest: automatically reset the offset to the earliest offset
- # * latest: automatically reset the offset to the latest offset
- # * none: throw exception to the consumer if no previous offset is found for the consumer's group
- # * anything else: throw exception to the consumer.
- config :auto_offset_reset, :validate => :string
- # A list of URLs of Kafka instances to use for establishing the initial connection to the cluster.
- # This list should be in the form of `host1:port1,host2:port2` These urls are just used
- # for the initial connection to discover the full cluster membership (which may change dynamically)
- # so this list need not contain the full set of servers (you may want more than one, though, in
- # case a server is down).
- config :bootstrap_servers, :validate => :string, :default => "localhost:9092"
- # Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk
- # corruption to the messages occurred. This check adds some overhead, so it may be
- # disabled in cases seeking extreme performance.
- config :check_crcs, :validate => :string
- # The id string to pass to the server when making requests. The purpose of this
- # is to be able to track the source of requests beyond just ip/port by allowing
- # a logical application name to be included.
- config :client_id, :validate => :string, :default => "logstash"
- # Close idle connections after the number of milliseconds specified by this config.
- config :connections_max_idle_ms, :validate => :string
- # Ideally you should have as many threads as the number of partitions for a perfect
- # balance — more threads than partitions means that some threads will be idle
- config :consumer_threads, :validate => :number, :default => 1
- # If true, periodically commit to Kafka the offsets of messages already returned by the consumer.
- # This committed offset will be used when the process fails as the position from
- # which the consumption will begin.
- config :enable_auto_commit, :validate => :string, :default => "true"
- # Whether records from internal topics (such as offsets) should be exposed to the consumer.
- # If set to true the only way to receive records from an internal topic is subscribing to it.
- config :exclude_internal_topics, :validate => :string
- # The maximum amount of data the server should return for a fetch request. This is not an
- # absolute maximum, if the first message in the first non-empty partition of the fetch is larger
- # than this value, the message will still be returned to ensure that the consumer can make progress.
- config :fetch_max_bytes, :validate => :string
- # The maximum amount of time the server will block before answering the fetch request if
- # there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
- # should be less than or equal to the timeout used in `poll_timeout_ms`
- config :fetch_max_wait_ms, :validate => :string
- # The minimum amount of data the server should return for a fetch request. If insufficient
- # data is available the request will wait for that much data to accumulate
- # before answering the request.
- config :fetch_min_bytes, :validate => :string
- # The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber
- # that happens to be made up of multiple processors. Messages in a topic will be distributed to all
- # Logstash instances with the same `group_id`
- config :group_id, :validate => :string, :default => "logstash"
- # The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
- # that the consumer's session stays active and to facilitate rebalancing when new
- # consumers join or leave the group. The value must be set lower than
- # `session.timeout.ms`, but typically should be set no higher than 1/3 of that value.
- # It can be adjusted even lower to control the expected time for normal rebalances.
- config :heartbeat_interval_ms, :validate => :string
- # Java Class used to deserialize the record's key
- config :key_deserializer_class, :validate => :string, :default => "org.apache.kafka.common.serialization.StringDeserializer"
- # The maximum delay between invocations of poll() when using consumer group management. This places
- # an upper bound on the amount of time that the consumer can be idle before fetching more records.
- # If poll() is not called before expiration of this timeout, then the consumer is considered failed and
- # the group will rebalance in order to reassign the partitions to another member.
- # The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms
- config :max_poll_interval_ms, :validate => :string
- # The maximum amount of data per-partition the server will return. The maximum total memory used for a
- # request will be <code>#partitions * max.partition.fetch.bytes</code>. This size must be at least
- # as large as the maximum message size the server allows or else it is possible for the producer to
- # send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying
- # to fetch a large message on a certain partition.
- config :max_partition_fetch_bytes, :validate => :string
- # The maximum number of records returned in a single call to poll().
- config :max_poll_records, :validate => :string
- # The period of time in milliseconds after which we force a refresh of metadata even if
- # we haven't seen any partition leadership changes to proactively discover any new brokers or partitions
- config :metadata_max_age_ms, :validate => :string
- # The class name of the partition assignment strategy that the client will use to distribute
- # partition ownership amongst consumer instances
- config :partition_assignment_strategy, :validate => :string
- # The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
- config :receive_buffer_bytes, :validate => :string
- # The amount of time to wait before attempting to reconnect to a given host.
- # This avoids repeatedly connecting to a host in a tight loop.
- # This backoff applies to all requests sent by the consumer to the broker.
- config :reconnect_backoff_ms, :validate => :string
- # The configuration controls the maximum amount of time the client will wait
- # for the response of a request. If the response is not received before the timeout
- # elapses the client will resend the request if necessary or fail the request if
- # retries are exhausted.
- config :request_timeout_ms, :validate => :string
- # The amount of time to wait before attempting to retry a failed fetch request
- # to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
- config :retry_backoff_ms, :validate => :string
- # The size of the TCP send buffer (SO_SNDBUF) to use when sending data
- config :send_buffer_bytes, :validate => :string
- # The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead
- # and a rebalance operation is triggered for the group identified by `group_id`
- config :session_timeout_ms, :validate => :string
- # Java Class used to deserialize the record's value
- config :value_deserializer_class, :validate => :string, :default => "org.apache.kafka.common.serialization.StringDeserializer"
- # A list of topics to subscribe to, defaults to ["logstash"].
- config :topics, :validate => :array, :default => ["logstash"]
- # A topic regex pattern to subscribe to.
- # The topics configuration will be ignored when using this configuration.
- config :topics_pattern, :validate => :string
- # Time kafka consumer will wait to receive new messages from topics
- config :poll_timeout_ms, :validate => :number, :default => 100
- # The truststore type.
- config :ssl_truststore_type, :validate => :string
- # The JKS truststore path to validate the Kafka broker's certificate.
- config :ssl_truststore_location, :validate => :path
- # The truststore password
- config :ssl_truststore_password, :validate => :password
- # The keystore type.
- config :ssl_keystore_type, :validate => :string
- # If client authentication is required, this setting stores the keystore path.
- config :ssl_keystore_location, :validate => :path
- # If client authentication is required, this setting stores the keystore password
- config :ssl_keystore_password, :validate => :password
- # The password of the private key in the key store file.
- config :ssl_key_password, :validate => :password
- # Algorithm to use when verifying host. Set to "" to disable
- config :ssl_endpoint_identification_algorithm, :validate => :string, :default => 'https'
- # Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL
- config :security_protocol, :validate => ["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"], :default => "PLAINTEXT"
- # http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
- # This may be any mechanism for which a security provider is available.
- # GSSAPI is the default mechanism.
- config :sasl_mechanism, :validate => :string, :default => "GSSAPI"
- # The Kerberos principal name that Kafka broker runs as.
- # This can be defined either in Kafka's JAAS config or in Kafka's config.
- config :sasl_kerberos_service_name, :validate => :string
- # The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
- # services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client:
- # [source,java]
- # ----------------------------------
- # KafkaClient {
- # com.sun.security.auth.module.Krb5LoginModule required
- # useTicketCache=true
- # renewTicket=true
- # serviceName="kafka";
- # };
- # ----------------------------------
- #
- # Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
- # to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same
- # `jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
- # different JVM instances.
- config :jaas_path, :validate => :path
- # JAAS configuration settings. This allows JAAS config to be a part of the plugin configuration and allows for different JAAS configuration per each plugin config.
- config :sasl_jaas_config, :validate => :string
- # Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
- config :kerberos_config, :validate => :path
- # Option to add Kafka metadata like topic, message size to the event.
- # This will add a field named `kafka` to the logstash event containing the following attributes:
- # `topic`: The topic this message is associated with
- # `consumer_group`: The consumer group used to read in this event
- # `partition`: The partition this message is associated with
- # `offset`: The offset from the partition this message is associated with
- # `key`: A ByteBuffer containing the message key
- # `timestamp`: The timestamp of this message
- config :decorate_events, :validate => :boolean, :default => false
+ # If undefined, Logstash will complain, even if codec is unused.
+ default :codec, "plain"
+
+ # The message string to use in the event.
+ config :message, :validate => :string, :default => "Hello World!"
 
+ # Set how frequently messages should be sent.
+ #
+ # The default, `1`, means send a message every second.
+ config :interval, :validate => :number, :default => 1
 
  public
  def register
- @runner_threads = []
+ @host = Socket.gethostname
  end # def register
 
- public
- def run(logstash_queue)
- @runner_consumers = consumer_threads.times.map { |i| create_consumer("#{client_id}-#{i}") }
- @runner_threads = @runner_consumers.map { |consumer| thread_runner(logstash_queue, consumer) }
- @runner_threads.each { |t| t.join }
+ def run(queue)
+ # we can abort the loop if stop? becomes true
+ while !stop?
+ event = LogStash::Event.new("message" => @message, "host" => @host)
+ decorate(event)
+ queue << event
+ Stud.stoppable_sleep(@interval) { stop? }
+ end # loop
  end # def run
 
- public
  def stop
- # if we have consumers, wake them up to unblock our runner threads
- @runner_consumers && @runner_consumers.each(&:wakeup)
- end
-
- public
- def kafka_consumers
- @runner_consumers
- end
-
- private
- def thread_runner(logstash_queue, consumer)
- Thread.new do
- begin
- unless @topics_pattern.nil?
- nooplistener = org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener.new
- pattern = java.util.regex.Pattern.compile(@topics_pattern)
- consumer.subscribe(pattern, nooplistener)
- else
- consumer.subscribe(topics);
- end
- codec_instance = @codec.clone
- while !stop?
- records = consumer.poll(poll_timeout_ms)
- next unless records.count > 0
- for record in records do
- codec_instance.decode(record.value.to_s) do |event|
- decorate(event)
- if @decorate_events
- event.set("[@metadata][kafka][topic]", record.topic)
- event.set("[@metadata][kafka][consumer_group]", @group_id)
- event.set("[@metadata][kafka][partition]", record.partition)
- event.set("[@metadata][kafka][offset]", record.offset)
- event.set("[@metadata][kafka][key]", record.key)
- event.set("[@metadata][kafka][timestamp]", record.timestamp)
- end
- logstash_queue << event
- end
- end
- # Manual offset commit
- if @enable_auto_commit == "false"
- consumer.commitSync
- end
- end
- rescue org.apache.kafka.common.errors.WakeupException => e
- raise e if !stop?
- ensure
- consumer.close
- end
- end
- end
-
- private
- def create_consumer(client_id)
- begin
- props = java.util.Properties.new
- kafka = org.apache.kafka.clients.consumer.ConsumerConfig
-
- props.put(kafka::AUTO_COMMIT_INTERVAL_MS_CONFIG, auto_commit_interval_ms)
- props.put(kafka::AUTO_OFFSET_RESET_CONFIG, auto_offset_reset) unless auto_offset_reset.nil?
- props.put(kafka::BOOTSTRAP_SERVERS_CONFIG, bootstrap_servers)
- props.put(kafka::CHECK_CRCS_CONFIG, check_crcs) unless check_crcs.nil?
- props.put(kafka::CLIENT_ID_CONFIG, client_id)
- props.put(kafka::CONNECTIONS_MAX_IDLE_MS_CONFIG, connections_max_idle_ms) unless connections_max_idle_ms.nil?
- props.put(kafka::ENABLE_AUTO_COMMIT_CONFIG, enable_auto_commit)
- props.put(kafka::EXCLUDE_INTERNAL_TOPICS_CONFIG, exclude_internal_topics) unless exclude_internal_topics.nil?
- props.put(kafka::FETCH_MAX_BYTES_CONFIG, fetch_max_bytes) unless fetch_max_bytes.nil?
- props.put(kafka::FETCH_MAX_WAIT_MS_CONFIG, fetch_max_wait_ms) unless fetch_max_wait_ms.nil?
- props.put(kafka::FETCH_MIN_BYTES_CONFIG, fetch_min_bytes) unless fetch_min_bytes.nil?
- props.put(kafka::GROUP_ID_CONFIG, group_id)
- props.put(kafka::HEARTBEAT_INTERVAL_MS_CONFIG, heartbeat_interval_ms) unless heartbeat_interval_ms.nil?
- props.put(kafka::KEY_DESERIALIZER_CLASS_CONFIG, key_deserializer_class)
- props.put(kafka::MAX_PARTITION_FETCH_BYTES_CONFIG, max_partition_fetch_bytes) unless max_partition_fetch_bytes.nil?
- props.put(kafka::MAX_POLL_RECORDS_CONFIG, max_poll_records) unless max_poll_records.nil?
- props.put(kafka::MAX_POLL_INTERVAL_MS_CONFIG, max_poll_interval_ms) unless max_poll_interval_ms.nil?
- props.put(kafka::METADATA_MAX_AGE_CONFIG, metadata_max_age_ms) unless metadata_max_age_ms.nil?
- props.put(kafka::PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partition_assignment_strategy) unless partition_assignment_strategy.nil?
- props.put(kafka::RECEIVE_BUFFER_CONFIG, receive_buffer_bytes) unless receive_buffer_bytes.nil?
- props.put(kafka::RECONNECT_BACKOFF_MS_CONFIG, reconnect_backoff_ms) unless reconnect_backoff_ms.nil?
- props.put(kafka::REQUEST_TIMEOUT_MS_CONFIG, request_timeout_ms) unless request_timeout_ms.nil?
- props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms) unless retry_backoff_ms.nil?
- props.put(kafka::SEND_BUFFER_CONFIG, send_buffer_bytes) unless send_buffer_bytes.nil?
- props.put(kafka::SESSION_TIMEOUT_MS_CONFIG, session_timeout_ms) unless session_timeout_ms.nil?
- props.put(kafka::VALUE_DESERIALIZER_CLASS_CONFIG, value_deserializer_class)
-
- props.put("security.protocol", security_protocol) unless security_protocol.nil?
-
- if security_protocol == "SSL"
- set_trustore_keystore_config(props)
- elsif security_protocol == "SASL_PLAINTEXT"
- set_sasl_config(props)
- elsif security_protocol == "SASL_SSL"
- set_trustore_keystore_config(props)
- set_sasl_config(props)
- end
-
- org.apache.kafka.clients.consumer.KafkaConsumer.new(props)
- rescue => e
- logger.error("Unable to create Kafka consumer from given configuration",
- :kafka_error_message => e,
- :cause => e.respond_to?(:getCause) ? e.getCause() : nil)
- raise e
- end
- end
-
- def set_trustore_keystore_config(props)
- props.put("ssl.truststore.type", ssl_truststore_type) unless ssl_truststore_type.nil?
- props.put("ssl.truststore.location", ssl_truststore_location) unless ssl_truststore_location.nil?
- props.put("ssl.truststore.password", ssl_truststore_password.value) unless ssl_truststore_password.nil?
-
- # Client auth stuff
- props.put("ssl.keystore.type", ssl_keystore_type) unless ssl_keystore_type.nil?
- props.put("ssl.key.password", ssl_key_password.value) unless ssl_key_password.nil?
- props.put("ssl.keystore.location", ssl_keystore_location) unless ssl_keystore_location.nil?
- props.put("ssl.keystore.password", ssl_keystore_password.value) unless ssl_keystore_password.nil?
- props.put("ssl.endpoint.identification.algorithm", ssl_endpoint_identification_algorithm) unless ssl_endpoint_identification_algorithm.nil?
- end
-
- def set_sasl_config(props)
- java.lang.System.setProperty("java.security.auth.login.config",jaas_path) unless jaas_path.nil?
- java.lang.System.setProperty("java.security.krb5.conf",kerberos_config) unless kerberos_config.nil?
-
- props.put("sasl.mechanism",sasl_mechanism)
- if sasl_mechanism == "GSSAPI" && sasl_kerberos_service_name.nil?
- raise LogStash::ConfigurationError, "sasl_kerberos_service_name must be specified when SASL mechanism is GSSAPI"
- end
-
- props.put("sasl.kerberos.service.name",sasl_kerberos_service_name) unless sasl_kerberos_service_name.nil?
- props.put("sasl.jaas.config", sasl_jaas_config) unless sasl_jaas_config.nil?
  end
- end #class LogStash::Inputs::Kafka
+ end # class LogStash::Inputs::logQueue
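
For context, the rewritten input can be exercised with a minimal pipeline configuration such as the sketch below. This is illustrative only: the `stdout` output and the `interval => 5` value are assumptions for the example and are not part of this gem; `message` and `interval` are the two options the new plugin actually defines.

[source,ruby]
----------------------------------
input {
  logQueue {
    # Optional; defaults to "Hello World!"
    message  => "heartbeat"
    # Optional; defaults to 1 (one event per second)
    interval => 5
  }
}
output {
  # Print each generated event for inspection (example only)
  stdout { codec => rubydebug }
}
----------------------------------

With this configuration the input emits one event every five seconds containing the `message` field and the emitting host name.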
logstash-input-logQueue.gemspec CHANGED
@@ -1,11 +1,11 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-input-logQueue'
- s.version = '0.1.0'
+ s.version = '3.0.2'
  s.licenses = ['Apache-2.0']
  s.summary = 'logstash-input-plugin'
  s.description = 'logstash-input-plugin'
- s.authors = ['sdx']
- s.email = 'sdx.com.cn@qq.com'
+ s.authors = ["demo"]
+ s.email = 'demo'
  s.require_paths = ['lib']
  s.platform = 'java'
  # Files
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-input-logQueue
  version: !ruby/object:Gem::Version
- version: 0.1.0
+ version: 3.0.2
  platform: java
  authors:
- - sdx
+ - demo
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-07-21 00:00:00.000000000 Z
+ date: 2022-07-30 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: logstash-core-plugin-api
@@ -81,7 +81,7 @@ dependencies:
  - !ruby/object:Gem::Version
  version: '0'
  description: logstash-input-plugin
- email: sdx.com.cn@qq.com
+ email: demo
  executables: []
  extensions: []
  extra_rdoc_files: []
@@ -91,13 +91,8 @@ files:
  - Gemfile
  - LICENSE
  - NOTICE.TXT
- - lib/com/github/luben/zstd-jni/1.4.0-1/zstd-jni-1.4.0-1.jar
  - lib/logstash-input-logQueue_jars.rb
  - lib/logstash/inputs/logQueue.rb
- - lib/org/apache/kafka/kafka-clients/2.3.0/kafka-clients-2.3.0.jar
- - lib/org/lz4/lz4-java/1.6.0/lz4-java-1.6.0.jar
- - lib/org/slf4j/slf4j-api/1.7.26/slf4j-api-1.7.26.jar
- - lib/org/xerial/snappy/snappy-java/1.1.7.3/snappy-java-1.1.7.3.jar
  - logstash-input-logQueue.gemspec
  - spec/inputs/test_spec.rb
  homepage: