logstash-output-elasticsearch 1.1.0-java → 2.0.0.beta4-java
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.gitignore +1 -0
- data/CHANGELOG.md +10 -3
- data/README.md +4 -4
- data/lib/logstash/outputs/elasticsearch/http_client.rb +144 -0
- data/lib/logstash/outputs/elasticsearch.rb +93 -319
- data/logstash-output-elasticsearch.gemspec +1 -3
- data/spec/es_spec_helper.rb +38 -34
- data/spec/integration/outputs/create_spec.rb +56 -0
- data/spec/integration/outputs/index_spec.rb +5 -7
- data/spec/integration/outputs/retry_spec.rb +118 -126
- data/spec/integration/outputs/routing_spec.rb +5 -33
- data/spec/integration/outputs/secure_spec.rb +4 -9
- data/spec/integration/outputs/templates_spec.rb +85 -91
- data/spec/integration/outputs/update_spec.rb +41 -46
- data/spec/unit/outputs/elasticsearch/protocol_spec.rb +45 -36
- data/spec/unit/outputs/elasticsearch_proxy_spec.rb +3 -4
- data/spec/unit/outputs/elasticsearch_spec.rb +2 -151
- data/spec/unit/outputs/elasticsearch_ssl_spec.rb +38 -63
- metadata +67 -101
- data/lib/logstash/outputs/elasticsearch/protocol.rb +0 -333
- data/lib/logstash-output-elasticsearch_jars.rb +0 -5
- data/spec/integration/outputs/elasticsearch/node_spec.rb +0 -36
- data/spec/integration/outputs/transport_create_spec.rb +0 -94
- data/vendor/jar-dependencies/runtime-jars/antlr-runtime-3.5.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/asm-4.1.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/asm-commons-4.1.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/elasticsearch-1.7.0.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-analyzers-common-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-core-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-grouping-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-highlighter-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-join-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-memory-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-misc-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-queries-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-queryparser-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-sandbox-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-spatial-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-suggest-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/spatial4j-0.4.1.jar +0 -0
data/lib/logstash/outputs/elasticsearch.rb:

@@ -8,32 +8,20 @@ require "stud/buffer"
 require "socket" # for Socket.gethostname
 require "thread" # for safe queueing
 require "uri" # for escaping user input
-require
+require "logstash/outputs/elasticsearch/http_client"

 # This output lets you store logs in Elasticsearch and is the most recommended
 # output for Logstash. If you plan on using the Kibana web interface, you'll
-#
+# want to use this output.
 #
-#
+# This output only speaks the HTTP, which is the preferred protocol for interacting with Elasticsearch. By default
+# Elasticsearch exposes HTTP on port 9200.
 #
-#
-#
-#
-# * Create an `elasticsearch.yml` file in the $PWD of the Logstash process
-# * Pass in es.* java properties (`java -Des.node.foo=` or `ruby -J-Des.node.foo=`)
-#
-# With the default `protocol` setting ("node"), this plugin will join your
-# Elasticsearch cluster as a client node, so it will show up in Elasticsearch's
-# cluster status.
+# We strongly encourage the use of HTTP over the node protocol. It is just as
+# fast and far easier to administer. For those wishing to use the java protocol please see the 'elasticsearch_java' gem.
 #
 # You can learn more about Elasticsearch at <https://www.elastic.co/products/elasticsearch>
 #
-# ==== Operational Notes
-#
-# If using the default `protocol` setting ("node"), your firewalls might need
-# to permit port 9300 in *both* directions (from Logstash to Elasticsearch, and
-# Elasticsearch to Logstash)
-#
 # ==== Retry Policy
 #
 # By default all bulk requests to ES are synchronous. Not all events in the bulk requests
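Taken together, the rewritten header comments describe an HTTP-only plugin that targets Elasticsearch's REST API on port 9200. A minimal configuration sketch of what that implies for a pipeline (the address is illustrative, not taken from the package):

```
output {
  elasticsearch {
    # 2.0 speaks only HTTP; Elasticsearch exposes it on port 9200 by default
    hosts => ["127.0.0.1:9200"]
  }
}
```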
@@ -125,90 +113,24 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # This can be dynamic using the `%{foo}` syntax.
   config :routing, :validate => :string

-  #
-  # for discovery when using `node` or `transport` protocols.
-  # By default, it looks for a cluster named 'elasticsearch'.
-  config :cluster, :validate => :string
-
-  # For the `node` protocol, if you do not specify `host`, it will attempt to use
-  # multicast discovery to connect to Elasticsearch. If http://www.elastic.co/guide/en/elasticsearch/guide/current/_important_configuration_changes.html#_prefer_unicast_over_multicast[multicast is disabled] in Elasticsearch,
-  # you must include the hostname or IP address of the host(s) to use for Elasticsearch unicast discovery.
-  # Remember the `node` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-transport.html#modules-transport[transport] address (eg. 9300, not 9200).
-  # `"127.0.0.1"`
-  # `["127.0.0.1:9300","127.0.0.2:9300"]`
-  # When setting hosts for `node` protocol, it is important to confirm that at least one non-client
-  # node is listed in the `host` list. Also keep in mind that the `host` parameter when used with
-  # the `node` protocol is for *discovery purposes only* (not for load balancing). When multiple hosts
-  # are specified, it will contact the first host to see if it can use it to discover the cluster. If not,
-  # then it will contact the second host in the list and so forth. With the `node` protocol,
-  # Logstash will join the Elasticsearch cluster as a node client (which has a copy of the cluster
-  # state) and this node client is the one that will automatically handle the load balancing of requests
-  # across data nodes in the cluster.
-  # If you are looking for a high availability setup, our recommendation is to use the `transport` protocol (below),
-  # set up multiple http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[client nodes] and list the client nodes in the `host` parameter.
-  #
-  # For the `transport` protocol, it will load balance requests across the hosts specified in the `host` parameter.
-  # Remember the `transport` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-transport.html#modules-transport[transport] address (eg. 9300, not 9200).
-  # `"127.0.0.1"`
-  # `["127.0.0.1:9300","127.0.0.2:9300"]`
-  # There is also a `sniffing` option (see below) that can be used with the transport protocol to instruct it to use the host to sniff for
-  # "alive" nodes in the cluster and automatically use it as the hosts list (but will skip the dedicated master nodes).
-  # If you do not use the sniffing option, it is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `host` list
-  # to prevent Logstash from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes.
-  #
-  # For the `http` protocol, it will load balance requests across the hosts specified in the `host` parameter.
+  # Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `host` parameter.
   # Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300).
   # `"127.0.0.1"`
   # `["127.0.0.1:9200","127.0.0.2:9200"]`
   # It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `host` list
   # to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes.

-  config :
-
-  # The port for Elasticsearch transport to use.
-  #
-  # If you do not set this, the following defaults are used:
-  # * `protocol => http` - port 9200
-  # * `protocol => transport` - port 9300-9305
-  # * `protocol => node` - port 9300-9305
-  config :port, :validate => :string
+  config :hosts, :validate => :array

-  #
-  config :
-
-  # This is only valid for the 'node' protocol.
-  #
-  # The port for the node to listen on.
-  config :bind_port, :validate => :number
-
-  # Run the Elasticsearch server embedded in this process.
-  # This option is useful if you want to run a single Logstash process that
-  # handles log processing and indexing; it saves you from needing to run
-  # a separate Elasticsearch process. An example use case is
-  # proof-of-concept testing.
-  # WARNING: This is not recommended for production use!
-  config :embedded, :validate => :boolean, :default => false
-
-  # If you are running the embedded Elasticsearch server, you can set the http
-  # port it listens on here; it is not common to need this setting changed from
-  # default.
-  config :embedded_http_port, :validate => :string, :default => "9200-9300"
-
-  # This setting no longer does anything. It exists to keep config validation
-  # from failing. It will be removed in future versions.
-  config :max_inflight_requests, :validate => :number, :default => 50, :deprecated => true
-
-  # The node name Elasticsearch will use when joining a cluster.
-  #
-  # By default, this is generated internally by the ES client.
-  config :node_name, :validate => :string
+  # You can set the remote port as part of the host, or explicitly here as well
+  config :port, :validate => :string, :default => 9200

   # This plugin uses the bulk index api for improved indexing performance.
   # To make efficient bulk api calls, we will buffer a certain number of
   # events before flushing that out to Elasticsearch. This setting
   # controls how many events will be buffered before sending a batch
   # of events.
-  config :flush_size, :validate => :number, :default =>
+  config :flush_size, :validate => :number, :default => 500

   # The amount of time since last flush before a flush is forced.
   #
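This hunk is the heart of the 2.0 configuration change: the `cluster`, node/transport discovery, `bind_*`, `embedded*`, `node_name` and protocol-specific `port` options are removed, while `hosts` (array), a `port` default of 9200, and a `flush_size` default of 500 are added. A hedged before/after sketch of an equivalent output block (addresses are illustrative):

```
# 1.x style (options removed above)
output {
  elasticsearch {
    protocol => "http"
    host     => ["127.0.0.1"]
    port     => "9200"
  }
}

# 2.0 style: hosts may carry the port; `port` remains as an explicit fallback
output {
  elasticsearch {
    hosts      => ["127.0.0.1:9200", "127.0.0.2:9200"]
    flush_size => 500
  }
}
```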
@@ -221,32 +143,6 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # near-real-time.
   config :idle_flush_time, :validate => :number, :default => 1

-  # Choose the protocol used to talk to Elasticsearch.
-  #
-  # The 'node' protocol (default) will connect to the cluster as a normal Elasticsearch
-  # node (but will not store data). If you use the `node` protocol, you must permit
-  # bidirectional communication on the port 9300 (or whichever port you have
-  # configured).
-  #
-  # If you do not specify the `host` parameter, it will use multicast for http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html[Elasticsearch discovery]. While this may work in a test/dev environment where multicast is enabled in
-  # Elasticsearch, we strongly recommend http://www.elastic.co/guide/en/elasticsearch/guide/current/_important_configuration_changes.html#_prefer_unicast_over_multicast[disabling multicast]
-  # in Elasticsearch. To connect to an Elasticsearch cluster with multicast disabled,
-  # you must include the `host` parameter (see relevant section above).
-  #
-  # The 'transport' protocol will connect to the host you specify and will
-  # not show up as a 'node' in the Elasticsearch cluster. This is useful
-  # in situations where you cannot permit connections outbound from the
-  # Elasticsearch cluster to this Logstash server.
-  #
-  # The 'http' protocol will use the Elasticsearch REST/HTTP interface to talk
-  # to elasticsearch.
-  #
-  # All protocols will use bulk requests when talking to Elasticsearch.
-  #
-  # The default `protocol` setting under java/jruby is "node". The default
-  # `protocol` on non-java rubies is "http"
-  config :protocol, :validate => [ "node", "transport", "http" ]
-
   # The Elasticsearch action to perform. Valid actions are: `index`, `delete`.
   #
   # Use of this setting *REQUIRES* you also configure the `document_id` setting
@@ -259,10 +155,9 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # - create: indexes a document, fails if a document by that id already exists in the index.
   # - update: updates a document by id
   # following action is not supported by HTTP protocol
-  # - create_unless_exists: creates a document, fails if no id is provided
   #
   # For more details on actions, check out the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
-  config :action, :validate =>
+  config :action, :validate => %w(index delete create update), :default => "index"

   # Username and password (only valid when protocol is HTTP; this setting works with HTTP or HTTPS auth)
   config :user, :validate => :string
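With the validation now spelled out as `%w(index delete create update)`, the old `create_unless_exists` action is no longer accepted. Per the surrounding doc comments, non-default actions still require `document_id`; a short sketch (the field name is illustrative):

```
output {
  elasticsearch {
    action      => "update"
    document_id => "%{my_id}"   # required for delete/update; "my_id" is a placeholder field
  }
}
```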
@@ -299,10 +194,16 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # Set the truststore password
   config :keystore_password, :validate => :password

-  # Enable cluster sniffing
+  # Enable cluster sniffing
   # Asks host for the list of all cluster nodes and adds them to the hosts list
+  # Will return ALL nodes with HTTP enabled (including master nodes!). If you use
+  # this with master nodes, you probably want to disable HTTP on them by setting
+  # `http.enabled` to false in their elasticsearch.yml.
   config :sniffing, :validate => :boolean, :default => false

+  # How long to wait, in seconds, between sniffing attempts
+  config :sniffing_delay, :validate => :number, :default => 30
+
   # Set max retry for each event
   config :max_retries, :validate => :number, :default => 3

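The sniffing options added here can be exercised as below; note the hunk's own warning that sniffing returns every HTTP-enabled node, master nodes included (values are illustrative):

```
output {
  elasticsearch {
    sniffing       => true
    sniffing_delay => 30   # seconds between sniff attempts (the new default)
  }
}
```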
@@ -326,14 +227,9 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # create a new document with this parameter as json string if document_id doesn't exists
   config :upsert, :validate => :string, :default => ""

-
-  # Set the timeout for network operations and requests sent Elasticsearch. If
-  # a timeout occurs, the request will be retried.
-  config :timeout, :validate => :number
-
   public
   def register
-    @
+    @hosts = Array(@hosts)
     # retry-specific variables
     @retry_flush_mutex = Mutex.new
     @retry_teardown_requested = Concurrent::AtomicBoolean.new(false)
@@ -341,67 +237,25 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
     @retry_queue_needs_flushing = ConditionVariable.new
     @retry_queue_not_full = ConditionVariable.new
     @retry_queue = Queue.new
+    @submit_mutex = Mutex.new

     client_settings = {}
+    common_options = {
+      :client_settings => client_settings,
+      :sniffing => @sniffing,
+      :sniffing_delay => @sniffing_delay
+    }

+    client_settings[:path] = "/#{@path}/".gsub(/\/+/, "/") # Normalize slashes
+    @logger.debug? && @logger.debug("Normalizing http path", :path => @path, :normalized => client_settings[:path])

-    if @
-      @protocol = LogStash::Environment.jruby? ? "node" : "http"
-    end
-
-    if @protocol == "http"
-      if @action == "create_unless_exists"
-        raise(LogStash::ConfigurationError, "action => 'create_unless_exists' is not supported under the HTTP protocol");
-      end
-
-      client_settings[:path] = "/#{@path}/".gsub(/\/+/, "/") # Normalize slashes
-      @logger.debug? && @logger.debug("Normalizing http path", :path => @path, :normalized => client_settings[:path])
-    end
-
-    if ["node", "transport"].include?(@protocol)
-      # Node or TransportClient; requires JRuby
-      raise(LogStash::PluginLoadingError, "This configuration requires JRuby. If you are not using JRuby, you must set 'protocol' to 'http'. For example: output { elasticsearch { protocol => \"http\" } }") unless LogStash::Environment.jruby?
-
-      client_settings["cluster.name"] = @cluster if @cluster
-      client_settings["network.host"] = @bind_host if @bind_host
-      client_settings["transport.tcp.port"] = @bind_port if @bind_port
-      client_settings["client.transport.sniff"] = @sniffing
-
-      if @node_name
-        client_settings["node.name"] = @node_name
-      else
-        client_settings["node.name"] = "logstash-#{Socket.gethostname}-#{$$}-#{object_id}"
-      end
-
-      @@plugins.each do |plugin|
-        name = plugin.name.split('-')[-1]
-        client_settings.merge!(LogStash::Outputs::ElasticSearch.const_get(name.capitalize).create_client_config(self))
-      end
-    end
-
-    require "logstash/outputs/elasticsearch/protocol"
-
-    if @port.nil?
-      @port = case @protocol
-        when "http"; "9200"
-        when "transport", "node"; "9300-9305"
-      end
-    end
-
-    if @host.nil? && @protocol != "node" # node can use zen discovery
+    if @hosts.nil? || @hosts.empty?
       @logger.info("No 'host' set in elasticsearch output. Defaulting to localhost")
-      @
+      @hosts = ["localhost"]
     end

     client_settings.merge! setup_ssl()
     client_settings.merge! setup_proxy()
-
-    common_options = {
-      :protocol => @protocol,
-      :client_settings => client_settings
-    }
-
-    common_options[:timeout] = @timeout if @timeout
     common_options.merge! setup_basic_auth()

     # Update API setup
@@ -411,61 +265,22 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
     }
     common_options.merge! update_options if @action == 'update'

-
-
-
-      when "node"
-        LogStash::Outputs::Elasticsearch::Protocols::NodeClient
-      when /http/
-        LogStash::Outputs::Elasticsearch::Protocols::HTTPClient
-    end
-
-    if @embedded
-      raise(LogStash::ConfigurationError, "The 'embedded => true' setting is only valid for the elasticsearch output under JRuby. You are running #{RUBY_DESCRIPTION}") unless LogStash::Environment.jruby?
-      @logger.warn("The 'embedded => true' setting is enabled. This is not recommended for production use!!!")
-      # LogStash::Environment.load_elasticsearch_jars!
-
-      # Default @host with embedded to localhost. This should help avoid
-      # newbies tripping on ubuntu and other distros that have a default
-      # firewall that blocks multicast.
-      @host ||= ["localhost"]
-
-      # Start Elasticsearch local.
-      start_local_elasticsearch
-    end
-
-    @client = Array.new
-
-    if protocol == "node" || @host.nil? # if @protocol is "node" or @host is not set
-      options = { :host => @host, :port => @port }.merge(common_options)
-      @client = [client_class.new(options)]
-    else # if @protocol in ["transport","http"]
-      @client = @host.map do |host|
-        (_host,_port) = host.split ":"
-        options = { :host => _host, :port => _port || @port }.merge(common_options)
-        @logger.info "Create client to elasticsearch server on #{_host}:#{_port}"
-        client_class.new(options)
-      end # @host.map
-    end
+    @client = LogStash::Outputs::Elasticsearch::HttpClient.new(
+      common_options.merge(:hosts => @hosts, :port => @port)
+    )

     if @manage_template
-
-
-
-
-
-
-
-      end
-      end # for @client loop
-    end # if @manage_templates
+      begin
+        @logger.info("Automatic template management enabled", :manage_template => @manage_template.to_s)
+        @client.template_install(@template_name, get_template, @template_overwrite)
+      rescue => e
+        @logger.error("Failed to install template: #{e.message}")
+      end
+    end

-    @logger.info("New Elasticsearch output", :
-      :host => @host, :port => @port, :embedded => @embedded,
-      :protocol => @protocol)
+    @logger.info("New Elasticsearch output", :hosts => @hosts, :port => @port)

     @client_idx = 0
-    @current_client = @client[@client_idx]

     buffer_initialize(
       :max_items => @flush_size,
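Template management is now handled through the single HTTP client via `template_install`, rather than once per protocol-specific client. The related options visible in this diff (`manage_template`, `template_name`, `template_overwrite`) combine roughly as follows (a sketch; values are illustrative):

```
output {
  elasticsearch {
    manage_template    => true
    template_overwrite => true   # overwrite an existing template of the same name
  }
}
```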
@@ -488,7 +303,6 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
     end
   end # def register

-
   public
   def get_template
     if @template.nil?
@@ -538,29 +352,27 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   end # def receive

   public
-  #
-  #
-  # # Stud::Buffer flush thread and from our own retry thread.
+  # The submit method can be called from both the
+  # Stud::Buffer flush thread and from our own retry thread.
   def submit(actions)
-
-
-
-    bulk_response = @
-
-
-
-
-
-
-
-
-
-
-
-        @logger.warn "failed action with response of #{resp_code}, dropping action: #{action}"
+    @submit_mutex.synchronize do
+      es_actions = actions.map { |a, doc, event| [a, doc, event.to_hash] }
+
+      bulk_response = @client.bulk(es_actions)
+
+      if bulk_response["errors"]
+        actions_with_responses = actions.zip(bulk_response['statuses'])
+        actions_to_retry = []
+        actions_with_responses.each do |action, resp_code|
+          if RETRYABLE_CODES.include?(resp_code)
+            @logger.warn "retrying failed action with response code: #{resp_code}"
+            actions_to_retry << action
+          elsif not SUCCESS_CODES.include?(resp_code)
+            @logger.warn "failed action with response of #{resp_code}, dropping action: #{action}"
+          end
         end
+        retry_push(actions_to_retry) unless actions_to_retry.empty?
       end
-    retry_push(actions_to_retry) unless actions_to_retry.empty?
     end
   end

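The rewritten `submit` classifies each bulk status against `RETRYABLE_CODES` and `SUCCESS_CODES` (constants defined elsewhere in the file, not shown in this hunk) and re-queues only the retryable actions, all under `@submit_mutex`. The user-facing knobs that feed this path are the buffering and retry settings declared earlier in the diff; an illustrative tuning sketch (values are placeholders, not recommendations):

```
output {
  elasticsearch {
    flush_size      => 500   # events buffered per bulk request (the new default)
    idle_flush_time => 1     # seconds before a partial batch is flushed
    max_retries     => 3     # per-event retry limit for retryable bulk responses
  }
}
```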
@@ -570,22 +382,37 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   def flush(actions, teardown = false)
     begin
      submit(actions)
+    rescue Manticore::SocketException => e
+      # If we can't even connect to the server let's just print out the URL (:hosts is actually a URL)
+      # and let the user sort it out from there
+      @logger.error(
+        "Attempted to send a bulk request to Elasticsearch configured at '#{@client.client_options[:hosts]}',"+
+        " but Elasticsearch appears to be unreachable or down!",
+        :client_config => @client.client_options,
+        :error_message => e.message
+      )
+      @logger.debug("Failed actions for last bad bulk request!", :actions => actions)
     rescue => e
-
+      # For all other errors print out full connection issues
+      @logger.error(
+        "Attempted to send a bulk request to Elasticsearch configured at '#{@client.client_options[:hosts]}'," +
+        " but an error occurred and it failed! Are you sure you can reach elasticsearch from this machine using " +
+        "the configuration provided?",
+        :client_config => @client.client_options,
+        :error_message => e.message,
+        :error_class => e.class.name,
+        :backtrace => e.backtrace
+      )
+
+      @logger.debug("Failed actions for last bad bulk request!", :actions => actions)
+
       raise e
-    ensure
-      unless @protocol == "node"
-        @logger.debug? and @logger.debug "Shifting current elasticsearch client"
-        shift_client
-      end
     end
   end # def flush

   public
   def teardown
-
-    File.delete(@truststore)
-    end
+    @client.stop_sniffing!

     @retry_teardown_requested.make_true
     # First, make sure retry_timer_thread is stopped
@@ -607,36 +434,10 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
      retry_flush
   end

-  protected
-  def start_local_elasticsearch
-    @logger.info("Starting embedded Elasticsearch local node.")
-    builder = org.elasticsearch.node.NodeBuilder.nodeBuilder
-    # Disable 'local only' - LOGSTASH-277
-    #builder.local(true)
-    builder.settings.put("cluster.name", @cluster) if @cluster
-    builder.settings.put("node.name", @node_name) if @node_name
-    builder.settings.put("network.host", @bind_host) if @bind_host
-    builder.settings.put("http.port", @embedded_http_port)
-
-    @embedded_elasticsearch = builder.node
-    @embedded_elasticsearch.start
-  end # def start_local_elasticsearch
-
-  protected
-  def shift_client
-    @client_idx = (@client_idx+1) % @client.length
-    @current_client = @client[@client_idx]
-    @logger.debug? and @logger.debug("Switched current elasticsearch client to ##{@client_idx} at #{@host[@client_idx]}")
-  end
-
   private
   def setup_proxy
     return {} unless @proxy

-    if @protocol != "http"
-      raise(LogStash::ConfigurationError, "Proxy is not supported for '#{@protocol}'. Change the protocol to 'http' if you need HTTP proxy.")
-    end
-
     # Symbolize keys
     proxy = if @proxy.is_a?(Hash)
       Hash[@proxy.map {|k,v| [k.to_sym, v]}]
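`setup_proxy` now applies unconditionally (the protocol guard is gone) and symbolizes the keys when a settings hash is given. A sketch of pointing the output at a proxy; the URL is a placeholder, and whether a plain string or a hash is passed is an assumption based on the Hash check shown above:

```
output {
  elasticsearch {
    proxy => "http://127.0.0.1:3128"   # illustrative; a hash of proxy settings is also handled
  }
}
```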
@@ -652,19 +453,19 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   private
   def setup_ssl
     return {} unless @ssl
-
-      raise(LogStash::ConfigurationError, "SSL is not supported for '#{@protocol}'. Change the protocol to 'http' if you need SSL.")
-    end
-    @protocol = "https"
+
     if @cacert && @truststore
       raise(LogStash::ConfigurationError, "Use either \"cacert\" or \"truststore\" when configuring the CA certificate") if @truststore
     end
+
     ssl_options = {}
-
-
+
+    if @cacert
+      ssl_options[:ca_file] = @cacert
     elsif @truststore
       ssl_options[:truststore_password] = @truststore_password.value if @truststore_password
     end
+
     ssl_options[:truststore] = @truststore if @truststore
     if @keystore
       ssl_options[:keystore] = @keystore
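`setup_ssl` no longer flips a `protocol` setting; it simply builds the SSL options, accepting either `cacert` or a `truststore` (but not both), while `setup_basic_auth` (next hunk) URI-escapes the credentials. A combined sketch, with paths and credentials as placeholders:

```
output {
  elasticsearch {
    ssl      => true
    cacert   => "/path/to/ca.crt"   # use either cacert or truststore, not both
    user     => "logstash"
    password => "changeme"
  }
}
```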
@@ -685,37 +486,10 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   def setup_basic_auth
     return {} unless @user && @password

-
-
-
-
-    }
-    else
-      raise(LogStash::ConfigurationError, "User and password parameters are not supported for '#{@protocol}'. Change the protocol to 'http' if you need them.")
-    end
-  end
-
-  private
-  def generate_jks cert_path
-
-    require 'securerandom'
-    require 'tempfile'
-    require 'java'
-    import java.io.FileInputStream
-    import java.io.FileOutputStream
-    import java.security.KeyStore
-    import java.security.cert.CertificateFactory
-
-    jks = java.io.File.createTempFile("cert", ".jks")
-
-    ks = KeyStore.getInstance "JKS"
-    ks.load nil, nil
-    cf = CertificateFactory.getInstance "X.509"
-    cert = cf.generateCertificate FileInputStream.new(cert_path)
-    ks.setCertificateEntry "cacert", cert
-    pwd = SecureRandom.urlsafe_base64(9)
-    ks.store FileOutputStream.new(jks), pwd.to_java.toCharArray
-    [jks.path, pwd]
+    {
+      :user => ::URI.escape(@user, "@:"),
+      :password => ::URI.escape(@password.value, "@:")
+    }
   end

   private
data/logstash-output-elasticsearch.gemspec:

@@ -1,7 +1,7 @@
 Gem::Specification.new do |s|

   s.name = 'logstash-output-elasticsearch'
-  s.version = '
+  s.version = '2.0.0.beta4'
   s.licenses = ['apache-2.0']
   s.summary = "Logstash Output to Elasticsearch"
   s.description = "Output events to elasticsearch"
@@ -29,7 +29,6 @@ Gem::Specification.new do |s|
   s.add_development_dependency 'ftw', '~> 0.0.42'
   s.add_development_dependency 'logstash-input-generator'

-
   if RUBY_PLATFORM == 'java'
     s.platform = RUBY_PLATFORM
     s.add_runtime_dependency "manticore", '~> 0.4.2'
@@ -37,5 +36,4 @@ Gem::Specification.new do |s|

   s.add_development_dependency 'logstash-devutils'
   s.add_development_dependency 'longshoreman'
-  s.add_development_dependency 'flores'
 end