logstash-output-elasticsearch-leprechaun-fork 1.0.8
- checksums.yaml +7 -0
- data/.gitignore +5 -0
- data/CHANGELOG.md +30 -0
- data/CONTRIBUTORS +31 -0
- data/Gemfile +3 -0
- data/LICENSE +13 -0
- data/NOTICE.TXT +5 -0
- data/README.md +98 -0
- data/Rakefile +1 -0
- data/lib/logstash-output-elasticsearch_jars.rb +5 -0
- data/lib/logstash/outputs/elasticsearch.rb +784 -0
- data/lib/logstash/outputs/elasticsearch/elasticsearch-template.json +41 -0
- data/lib/logstash/outputs/elasticsearch/protocol.rb +339 -0
- data/logstash-output-elasticsearch.gemspec +40 -0
- data/spec/es_spec_helper.rb +65 -0
- data/spec/integration/outputs/elasticsearch/node_spec.rb +36 -0
- data/spec/integration/outputs/index_spec.rb +90 -0
- data/spec/integration/outputs/retry_spec.rb +156 -0
- data/spec/integration/outputs/routing_spec.rb +114 -0
- data/spec/integration/outputs/secure_spec.rb +113 -0
- data/spec/integration/outputs/templates_spec.rb +97 -0
- data/spec/integration/outputs/transport_create_spec.rb +94 -0
- data/spec/integration/outputs/update_spec.rb +87 -0
- data/spec/unit/outputs/elasticsearch/protocol_spec.rb +54 -0
- data/spec/unit/outputs/elasticsearch_proxy_spec.rb +59 -0
- data/spec/unit/outputs/elasticsearch_spec.rb +183 -0
- data/spec/unit/outputs/elasticsearch_ssl_spec.rb +82 -0
- data/vendor/jar-dependencies/runtime-jars/antlr-runtime-3.5.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/asm-4.1.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/asm-commons-4.1.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/elasticsearch-1.7.0.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-analyzers-common-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-core-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-grouping-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-highlighter-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-join-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-memory-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-misc-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-queries-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-queryparser-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-sandbox-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-spatial-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/lucene-suggest-4.10.4.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/spatial4j-0.4.1.jar +0 -0
- metadata +246 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA1:
  metadata.gz: daa3edafdf0128a562b57a1767682b0eda55a1f9
  data.tar.gz: 2e940294b8cea6a77a25f3a37c549ce4caebad6d
SHA512:
  metadata.gz: 11ef24ef78b7fda70665f96ab2c40632279781471e97023009b948b8579fc82ed060b01382ca60eac95220ccb2f23a531a043f071e12605d32e765937acee112
  data.tar.gz: 49db1911287ffc040d90e4509d36dec512cbbb072819f0e76eec755ba4831b826fe4f2e10bffddd58f7137cd27786d2ef1603d5f3f39b91b23573c0ac55a79be
data/CHANGELOG.md
ADDED
@@ -0,0 +1,30 @@
## 1.0.7
- Add update API support

## 1.0.6
- Fix warning about Concurrent lib deprecation

## 1.0.4
- Update to Elasticsearch 1.7

## 1.0.3
- Add HTTP proxy support

## 1.0.2
- Upgrade Manticore HTTP Client

## 1.0.1
- Allow client certificates

## 0.2.9
- Add 'path' parameter for ES HTTP hosts behind a proxy on a subpath

## 0.2.8 (June 12, 2015)
- Add option to enable and disable SSL certificate verification during handshake (#160)
- Doc improvements for clarifying round robin behavior using hosts config

## 0.2.7 (May 28, 2015)
- Bump es-ruby version to 1.0.10

## 0.2.6 (May 28, 2015)
- Disable timeouts when using http protocol which would cause bulk requests to fail (#103)
data/CONTRIBUTORS
ADDED
@@ -0,0 +1,31 @@
The following is a list of people who have contributed ideas, code, bug
reports, or in general have helped logstash along its way.

Contributors:
* Aaron Mildenstein (untergeek)
* Bob Corsaro (dokipen)
* Colin Surprenant (colinsurprenant)
* Dmitry Koprov (dkoprov)
* Graham Bleach (bleach)
* Hao Chen (haoch)
* James Turnbull (jamtur01)
* John E. Vincent (lusis)
* Jordan Sissel (jordansissel)
* João Duarte (jsvd)
* Kurt Hurtado (kurtado)
* Miah Johnson (miah)
* Pere Urbón (purbon)
* Pete Fritchman (fetep)
* Pier-Hugues Pellerin (ph)
* Raymond Feng (raymondfeng)
* Richard Pijnenburg (electrical)
* Spenser Jones (SpenserJ)
* Suyog Rao (suyograo)
* Tal Levy (talevy)
* Tom Hodder (tolland)
* jimmyjones2

Note: If you've sent us patches, bug reports, or otherwise contributed to
Logstash, and you aren't on the list above and want to be, please let us know
and we'll make sure you're here. Contributions from folks like you are what make
open source awesome.
data/Gemfile
ADDED
data/LICENSE
ADDED
@@ -0,0 +1,13 @@
Copyright (c) 2012–2015 Elasticsearch <http://www.elastic.co>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
data/NOTICE.TXT
ADDED
data/README.md
ADDED
@@ -0,0 +1,98 @@
# Logstash Plugin

This is a plugin for [Logstash](https://github.com/elasticsearch/logstash).

It is fully free and fully open source. The license is Apache 2.0, meaning you are pretty much free to use it however you want in whatever way.

## Documentation

Logstash provides infrastructure to automatically generate documentation for this plugin. We use the asciidoc format to write documentation, so any comments in the source code are first converted into asciidoc and then into html. All plugin documentation is placed under one [central location](http://www.elasticsearch.org/guide/en/logstash/current/).

- For formatting code or config examples, you can use the asciidoc `[source,ruby]` directive
- For more asciidoc formatting tips, see the excellent reference here https://github.com/elasticsearch/docs#asciidoc-guide

## Need Help?

Need help? Try #logstash on freenode IRC or the https://discuss.elastic.co/c/logstash discussion forum.

## Developing

### 1. Plugin Development and Testing

#### Code
- To get started, you'll need JRuby with the Bundler gem installed.

- Create a new plugin or clone an existing one from the GitHub [logstash-plugins](https://github.com/logstash-plugins) organization. We also provide [example plugins](https://github.com/logstash-plugins?query=example).

- Install dependencies
```sh
bundle install
```

#### Test

- Update your dependencies

```sh
bundle install
```

- Run unit tests

```sh
bundle exec rspec
```

- Run integration tests

Dependencies: [Docker](http://docker.com)

Before the test suite is run, we will load and run an
Elasticsearch instance within a Docker container. This container
will be cleaned up when the suite has finished.

```sh
bundle exec rspec --tag integration
```

### 2. Running your unpublished Plugin in Logstash

#### 2.1 Run in a local Logstash clone

- Edit the Logstash `Gemfile` and add the local plugin path, for example:
```ruby
gem "logstash-filter-awesome", :path => "/your/local/logstash-filter-awesome"
```
- Install the plugin
```sh
bin/plugin install --no-verify
```
- Run Logstash with your plugin
```sh
bin/logstash -e 'filter {awesome {}}'
```
At this point any modifications to the plugin code will be applied to this local Logstash setup. After modifying the plugin, simply rerun Logstash.
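
For this plugin in particular, a quick smoke test might look like the sketch below; it assumes an Elasticsearch instance reachable on localhost and simply indexes whatever you type on stdin:

```sh
bin/logstash -e 'input { stdin {} } output { elasticsearch { protocol => "http" host => ["localhost"] } }'
```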

#### 2.2 Run in an installed Logstash

You can use the same method as in **2.1** to run your plugin in an installed Logstash by editing its `Gemfile` and pointing the `:path` to your local plugin development directory, or you can build the gem and install it using:

- Build your plugin gem
```sh
gem build logstash-filter-awesome.gemspec
```
- Install the plugin from the Logstash home
```sh
bin/plugin install /your/local/plugin/logstash-filter-awesome.gem
```
- Start Logstash and proceed to test the plugin

## Contributing

All contributions are welcome: ideas, patches, documentation, bug reports, complaints, and even something you drew up on a napkin.

Programming is not a required skill. Whatever you've seen about open source and maintainers or community members saying "send patches or die" - you will not see that here.

It is more important to the community that you are able to contribute.

For more information about contributing, see the [CONTRIBUTING](https://github.com/elasticsearch/logstash/blob/master/CONTRIBUTING.md) file.
data/Rakefile
ADDED
@@ -0,0 +1 @@
require "logstash/devutils/rake"
data/lib/logstash/outputs/elasticsearch.rb
ADDED
@@ -0,0 +1,784 @@
# encoding: utf-8
require "logstash/namespace"
require "logstash/environment"
require "logstash/outputs/base"
require "logstash/json"
require "concurrent"
require "stud/buffer"
require "socket" # for Socket.gethostname
require "thread" # for safe queueing
require "uri" # for escaping user input
require 'logstash-output-elasticsearch_jars.rb'

# This output lets you store logs in Elasticsearch and is the most recommended
# output for Logstash. If you plan on using the Kibana web interface, you'll
# need to use this output.
#
# *VERSION NOTE*: Your Elasticsearch cluster must be running Elasticsearch 1.0.0 or later.
#
# If you want to set other Elasticsearch options that are not exposed directly
# as configuration options, there are two methods:
#
# * Create an `elasticsearch.yml` file in the $PWD of the Logstash process
# * Pass in es.* java properties (`java -Des.node.foo=` or `ruby -J-Des.node.foo=`)
#
# With the default `protocol` setting ("node"), this plugin will join your
# Elasticsearch cluster as a client node, so it will show up in Elasticsearch's
# cluster status.
#
# You can learn more about Elasticsearch at <https://www.elastic.co/products/elasticsearch>
#
# ==== Operational Notes
#
# If using the default `protocol` setting ("node"), your firewalls might need
# to permit port 9300 in *both* directions (from Logstash to Elasticsearch, and
# Elasticsearch to Logstash)
#
# ==== Retry Policy
#
# By default all bulk requests to ES are synchronous. Not all events in the bulk requests
# always make it successfully. For example, there could be events which are not formatted
# correctly for the index they are targeting (type mismatch in mapping). To minimize loss of
# events, we have a specific retry policy in place. We retry all events which fail to reach
# Elasticsearch because of network related issues. We retry specific events which exhibit errors under a separate
# policy described below. Events of this nature are ones which experience ES error codes described as
# retryable errors.
#
# *Retryable Errors:*
#
# - 429, Too Many Requests (RFC6585)
# - 503, The server is currently unable to handle the request due to a temporary overloading or maintenance of the server.
#
# *Possibly retryable errors*
#
# You may run into a situation where ES rejects an event because a property does not match the type
# already defined in the mapping. By default, this error is NOT retryable, and will generate an error in
# the log. However, you may prefer to send it to ES anyway by renaming the type. This *will* affect your analytics
# if you depend on the _type field in any way. However, for some, it is preferable to have mislabelled events
# rather than not have them at all. See `rename_type_on_mismatch`.
#
# Here are the rules of what is retried when:
#
# - Block and retry all events in a bulk response that experience transient network exceptions until
#   a successful submission is received by Elasticsearch.
# - Retry the subset of sent events which resulted in ES errors of a retryable nature, which can be found
#   in RETRYABLE_CODES.
# - Events which returned retryable error codes will be pushed onto a separate queue for
#   retrying. Events in this queue will be retried a maximum of 3 times by default (configurable through :max_retries). The size of
#   this queue is capped by the value set in :retry_max_items.
# - Events from the retry queue are submitted again either when the queue reaches its max size or when
#   the max interval time is reached, which is set in :retry_max_interval.
# - Events which are not retryable or have reached their max retry count are logged to stderr.
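#
# As an illustration, the retry behaviour described above is driven by these
# settings in a Logstash config (example values, not recommendations):
#
#     output {
#       elasticsearch {
#         max_retries => 3          # attempts per event before it is dropped
#         retry_max_items => 5000   # capacity of the retry queue
#         retry_max_interval => 5   # seconds between forced retry flushes
#       }
#     }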
class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
  attr_reader :client

  include Stud::Buffer
  RETRYABLE_CODES = [409, 429, 503]
  SUCCESS_CODES = [200, 201]

  config_name "elasticsearch"

  # The index to write events to. This can be dynamic using the `%{foo}` syntax.
  # The default value will partition your indices by day so you can more easily
  # delete old data or only search specific date ranges.
  # Indexes may not contain uppercase characters.
  # For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}
  config :index, :validate => :string, :default => "logstash-%{+YYYY.MM.dd}"

  # The index type to write events to. Generally you should try to write only
  # similar events to the same 'type'. String expansion `%{foo}` works here.
  #
  # Deprecated in favor of `document_type` field.
  config :index_type, :validate => :string, :deprecated => "Please use the 'document_type' setting instead. It has the same effect, but is more appropriately named."

  # The document type to write events to. Generally you should try to write only
  # similar events to the same 'type'. String expansion `%{foo}` works here.
  # Unless you set 'document_type', the event 'type' will be used if it exists
  # otherwise the document type will be assigned the value of 'logs'
  config :document_type, :validate => :string

  # Starting in Logstash 1.3 (unless you set option `manage_template` to false)
  # a default mapping template for Elasticsearch will be applied, if you do not
  # already have one set to match the index pattern defined (default of
  # `logstash-%{+YYYY.MM.dd}`), minus any variables. For example, in this case
  # the template will be applied to all indices starting with `logstash-*`
  #
  # If you have dynamic templating (e.g. creating indices based on field names)
  # then you should set `manage_template` to false and use the REST API to upload
  # your templates manually.
  config :manage_template, :validate => :boolean, :default => true

  # This configuration option defines how the template is named inside Elasticsearch.
  # Note that if you have used the template management features and subsequently
  # change this, you will need to prune the old template manually, e.g.
  #
  # `curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
  #
  # where `OldTemplateName` is whatever the former setting was.
  config :template_name, :validate => :string, :default => "logstash"

  # You can set the path to your own template here, if you so desire.
  # If not set, the included template will be used.
  config :template, :validate => :path

  # Overwrite the current template with whatever is configured
  # in the `template` and `template_name` directives.
  config :template_overwrite, :validate => :boolean, :default => false

  # The document ID for the index. Useful for overwriting existing entries in
  # Elasticsearch with the same ID.
  config :document_id, :validate => :string

  # A routing override to be applied to all processed events.
  # This can be dynamic using the `%{foo}` syntax.
  config :routing, :validate => :string

  # The name of your cluster if you set it on the Elasticsearch side. Useful
  # for discovery when using `node` or `transport` protocols.
  # By default, it looks for a cluster named 'elasticsearch'.
  config :cluster, :validate => :string

  # For the `node` protocol, if you do not specify `host`, it will attempt to use
  # multicast discovery to connect to Elasticsearch. If http://www.elastic.co/guide/en/elasticsearch/guide/current/_important_configuration_changes.html#_prefer_unicast_over_multicast[multicast is disabled] in Elasticsearch,
  # you must include the hostname or IP address of the host(s) to use for Elasticsearch unicast discovery.
  # Remember the `node` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-transport.html#modules-transport[transport] address (eg. 9300, not 9200).
  # `"127.0.0.1"`
  # `["127.0.0.1:9300","127.0.0.2:9300"]`
  # When setting hosts for `node` protocol, it is important to confirm that at least one non-client
  # node is listed in the `host` list. Also keep in mind that the `host` parameter when used with
  # the `node` protocol is for *discovery purposes only* (not for load balancing). When multiple hosts
  # are specified, it will contact the first host to see if it can use it to discover the cluster. If not,
  # then it will contact the second host in the list and so forth. With the `node` protocol,
  # Logstash will join the Elasticsearch cluster as a node client (which has a copy of the cluster
  # state) and this node client is the one that will automatically handle the load balancing of requests
  # across data nodes in the cluster.
  # If you are looking for a high availability setup, our recommendation is to use the `transport` protocol (below),
  # set up multiple http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[client nodes] and list the client nodes in the `host` parameter.
  #
  # For the `transport` protocol, it will load balance requests across the hosts specified in the `host` parameter.
  # Remember the `transport` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-transport.html#modules-transport[transport] address (eg. 9300, not 9200).
  # `"127.0.0.1"`
  # `["127.0.0.1:9300","127.0.0.2:9300"]`
  # There is also a `sniffing` option (see below) that can be used with the transport protocol to instruct it to use the host to sniff for
  # "alive" nodes in the cluster and automatically use it as the hosts list (but will skip the dedicated master nodes).
  # If you do not use the sniffing option, it is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `host` list
  # to prevent Logstash from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes.
  #
  # For the `http` protocol, it will load balance requests across the hosts specified in the `host` parameter.
  # Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300).
  # `"127.0.0.1"`
  # `["127.0.0.1:9200","127.0.0.2:9200"]`
  # It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `host` list
  # to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes.

  config :host, :validate => :array
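
  # For illustration, a `transport` setup listing two client nodes might look
  # like this in a Logstash config (addresses are examples only):
  #
  #     output {
  #       elasticsearch {
  #         protocol => "transport"
  #         host => ["127.0.0.1:9300", "127.0.0.2:9300"]
  #       }
  #     }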

  # The port for Elasticsearch transport to use.
  #
  # If you do not set this, the following defaults are used:
  # * `protocol => http` - port 9200
  # * `protocol => transport` - port 9300-9305
  # * `protocol => node` - port 9300-9305
  config :port, :validate => :string

  # The name/address of the host to bind to for Elasticsearch clustering
  config :bind_host, :validate => :string

  # This is only valid for the 'node' protocol.
  #
  # The port for the node to listen on.
  config :bind_port, :validate => :number

  # Run the Elasticsearch server embedded in this process.
  # This option is useful if you want to run a single Logstash process that
  # handles log processing and indexing; it saves you from needing to run
  # a separate Elasticsearch process. An example use case is
  # proof-of-concept testing.
  # WARNING: This is not recommended for production use!
  config :embedded, :validate => :boolean, :default => false

  # If you are running the embedded Elasticsearch server, you can set the http
  # port it listens on here; it is not common to need this setting changed from
  # default.
  config :embedded_http_port, :validate => :string, :default => "9200-9300"

  # This setting no longer does anything. It exists to keep config validation
  # from failing. It will be removed in future versions.
  config :max_inflight_requests, :validate => :number, :default => 50, :deprecated => true

  # The node name Elasticsearch will use when joining a cluster.
  #
  # By default, this is generated internally by the ES client.
  config :node_name, :validate => :string

  # This plugin uses the bulk index api for improved indexing performance.
  # To make efficient bulk api calls, we will buffer a certain number of
  # events before flushing that out to Elasticsearch. This setting
  # controls how many events will be buffered before sending a batch
  # of events.
  config :flush_size, :validate => :number, :default => 5000

  # The amount of time since last flush before a flush is forced.
  #
  # This setting helps ensure slow event rates don't get stuck in Logstash.
  # For example, if your `flush_size` is 100, and you have received 10 events,
  # and it has been more than `idle_flush_time` seconds since the last flush,
  # Logstash will flush those 10 events automatically.
  #
  # This helps keep both fast and slow log streams moving along in
  # near-real-time.
  config :idle_flush_time, :validate => :number, :default => 1
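
  # Together, `flush_size` and `idle_flush_time` bound batching latency. A
  # sketch of a smaller-batch, lower-latency configuration (illustrative values):
  #
  #     output {
  #       elasticsearch {
  #         flush_size => 500
  #         idle_flush_time => 1
  #       }
  #     }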

  # Choose the protocol used to talk to Elasticsearch.
  #
  # The 'node' protocol (default) will connect to the cluster as a normal Elasticsearch
  # node (but will not store data). If you use the `node` protocol, you must permit
  # bidirectional communication on the port 9300 (or whichever port you have
  # configured).
  #
  # If you do not specify the `host` parameter, it will use multicast for http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html[Elasticsearch discovery]. While this may work in a test/dev environment where multicast is enabled in
  # Elasticsearch, we strongly recommend http://www.elastic.co/guide/en/elasticsearch/guide/current/_important_configuration_changes.html#_prefer_unicast_over_multicast[disabling multicast]
  # in Elasticsearch. To connect to an Elasticsearch cluster with multicast disabled,
  # you must include the `host` parameter (see relevant section above).
  #
  # The 'transport' protocol will connect to the host you specify and will
  # not show up as a 'node' in the Elasticsearch cluster. This is useful
  # in situations where you cannot permit connections outbound from the
  # Elasticsearch cluster to this Logstash server.
  #
  # The 'http' protocol will use the Elasticsearch REST/HTTP interface to talk
  # to elasticsearch.
  #
  # All protocols will use bulk requests when talking to Elasticsearch.
  #
  # The default `protocol` setting under java/jruby is "node". The default
  # `protocol` on non-java rubies is "http"
  config :protocol, :validate => [ "node", "transport", "http" ]

  # The Elasticsearch action to perform. Valid actions are: `index`, `delete`.
  #
  # Use of this setting *REQUIRES* you also configure the `document_id` setting
  # because `delete` actions all require a document id.
  #
  # What does each action do?
  #
  # - index: indexes a document (an event from Logstash).
  # - delete: deletes a document by id
  # - create: indexes a document, fails if a document by that id already exists in the index.
  # - update: updates a document by id
  # The following action is not supported by the HTTP protocol:
  # - create_unless_exists: creates a document, fails if no id is provided
  #
  # For more details on actions, check out the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
  config :action, :validate => :string, :default => "index"

  # Username and password (only valid when protocol is HTTP; this setting works with HTTP or HTTPS auth)
  config :user, :validate => :string
  config :password, :validate => :password

  # HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch
  # behind a proxy that remaps the root path of the Elasticsearch HTTP API. This option is ignored for non-HTTP transports.
  config :path, :validate => :string, :default => "/"

  # SSL Configurations (only valid when protocol is HTTP)
  #
  # Enable SSL
  config :ssl, :validate => :boolean, :default => false

  # Validate the server's certificate
  # Disabling this severely compromises security
  # For more information read https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
  config :ssl_certificate_verification, :validate => :boolean, :default => true

  # The .cer or .pem file to validate the server's certificate
  config :cacert, :validate => :path

  # The JKS truststore to validate the server's certificate
  # Use either `:truststore` or `:cacert`
  config :truststore, :validate => :path

  # Set the truststore password
  config :truststore_password, :validate => :password

  # The keystore used to present a certificate to the server
  # It can be either .jks or .p12
  config :keystore, :validate => :path

  # Set the keystore password
  config :keystore_password, :validate => :password

  # Enable cluster sniffing (transport only)
  # Asks host for the list of all cluster nodes and adds them to the hosts list
  config :sniffing, :validate => :boolean, :default => false

  # Set max retry for each event
  config :max_retries, :validate => :number, :default => 3

  # Set retry policy for events that failed to send
  config :retry_max_items, :validate => :number, :default => 5000

  # Set max interval between bulk retries
  config :retry_max_interval, :validate => :number, :default => 5

  # Set the address of a forward HTTP proxy. Must be used with the 'http' protocol.
  # Can be either a string, such as 'http://localhost:123', or a hash in the form
  # `{host: 'proxy.org', port: 80, scheme: 'http'}`.
  # Note, this is NOT a SOCKS proxy, but a plain HTTP proxy
  config :proxy
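
  # For example, the `proxy` option above could be set to either form
  # (hypothetical endpoints):
  #
  #     proxy => "http://localhost:3128"
  #     proxy => { "host" => "proxy.org", "port" => 80, "scheme" => "http" }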

  # Enable doc_as_upsert for update mode.
  # Creates a new document with source if `document_id` doesn't exist.
  config :doc_as_upsert, :validate => :boolean, :default => false

  # Set upsert content for update mode.
  # Creates a new document from this parameter as a JSON string if `document_id` doesn't exist.
  config :upsert, :validate => :string, :default => ""
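
  # A sketch of update-mode usage combining the options above (`%{id}` assumes
  # the event carries an `id` field):
  #
  #     output {
  #       elasticsearch {
  #         action => "update"
  #         document_id => "%{id}"
  #         doc_as_upsert => true
  #       }
  #     }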

  # Allow logstash to mangle the _type/mapping if ever there is a clash between
  # the mapping and the event that failed to be posted.
  config :rename_type_on_mismatch, :validate => :boolean, :default => false

  public
  def register
    @submit_mutex = Mutex.new
    # retry-specific variables
    @retry_flush_mutex = Mutex.new
    @retry_teardown_requested = Concurrent::AtomicBoolean.new(false)
    # needs flushing when interval
    @retry_queue_needs_flushing = ConditionVariable.new
    @retry_queue_not_full = ConditionVariable.new
    @retry_queue = Queue.new

    client_settings = {}

    if @protocol.nil?
      @protocol = LogStash::Environment.jruby? ? "node" : "http"
    end

    if @protocol == "http"
      if @action == "create_unless_exists"
        raise(LogStash::ConfigurationError, "action => 'create_unless_exists' is not supported under the HTTP protocol");
      end

      client_settings[:path] = "/#{@path}/".gsub(/\/+/, "/") # Normalize slashes
      @logger.debug? && @logger.debug("Normalizing http path", :path => @path, :normalized => client_settings[:path])
    end

    if ["node", "transport"].include?(@protocol)
      # Node or TransportClient; requires JRuby
      raise(LogStash::PluginLoadingError, "This configuration requires JRuby. If you are not using JRuby, you must set 'protocol' to 'http'. For example: output { elasticsearch { protocol => \"http\" } }") unless LogStash::Environment.jruby?

      client_settings["cluster.name"] = @cluster if @cluster
      client_settings["network.host"] = @bind_host if @bind_host
      client_settings["transport.tcp.port"] = @bind_port if @bind_port
      client_settings["client.transport.sniff"] = @sniffing

      if @node_name
        client_settings["node.name"] = @node_name
      else
        client_settings["node.name"] = "logstash-#{Socket.gethostname}-#{$$}-#{object_id}"
      end

      @@plugins.each do |plugin|
        name = plugin.name.split('-')[-1]
        client_settings.merge!(LogStash::Outputs::ElasticSearch.const_get(name.capitalize).create_client_config(self))
      end
    end

    require "logstash/outputs/elasticsearch/protocol"

    if @port.nil?
      @port = case @protocol
        when "http"; "9200"
        when "transport", "node"; "9300-9305"
      end
    end

    if @host.nil? && @protocol != "node" # node can use zen discovery
      @logger.info("No 'host' set in elasticsearch output. Defaulting to localhost")
      @host = ["localhost"]
    end

    client_settings.merge! setup_ssl()
    client_settings.merge! setup_proxy()

    common_options = {
      :protocol => @protocol,
      :client_settings => client_settings
    }

    common_options.merge! setup_basic_auth()

    # Update API setup
    update_options = {
      :upsert => @upsert,
      :doc_as_upsert => @doc_as_upsert
    }
    common_options.merge! update_options if @action == 'update'

    client_class = case @protocol
      when "transport"
        LogStash::Outputs::Elasticsearch::Protocols::TransportClient
      when "node"
        LogStash::Outputs::Elasticsearch::Protocols::NodeClient
      when /http/
        LogStash::Outputs::Elasticsearch::Protocols::HTTPClient
    end

    if @embedded
      raise(LogStash::ConfigurationError, "The 'embedded => true' setting is only valid for the elasticsearch output under JRuby. You are running #{RUBY_DESCRIPTION}") unless LogStash::Environment.jruby?
      @logger.warn("The 'embedded => true' setting is enabled. This is not recommended for production use!!!")
      # LogStash::Environment.load_elasticsearch_jars!

      # Default @host with embedded to localhost. This should help avoid
      # newbies tripping on ubuntu and other distros that have a default
      # firewall that blocks multicast.
      @host ||= ["localhost"]

      # Start Elasticsearch local.
      start_local_elasticsearch
    end

    @client = Array.new

    if @protocol == "node" || @host.nil? # if @protocol is "node" or @host is not set
      options = { :host => @host, :port => @port }.merge(common_options)
      @client = [client_class.new(options)]
    else # if @protocol in ["transport","http"]
      @client = @host.map do |host|
        (_host, _port) = host.split ":"
        options = { :host => _host, :port => _port || @port }.merge(common_options)
        @logger.info "Create client to elasticsearch server on #{_host}:#{_port}"
        client_class.new(options)
      end # @host.map
    end

    if @manage_template
      for client in @client
        begin
          @logger.info("Automatic template management enabled", :manage_template => @manage_template.to_s)
          client.template_install(@template_name, get_template, @template_overwrite)
          break
        rescue => e
          @logger.error("Failed to install template: #{e.message}")
        end
      end # for @client loop
    end # if @manage_templates

    @logger.info("New Elasticsearch output", :cluster => @cluster,
                 :host => @host, :port => @port, :embedded => @embedded,
                 :protocol => @protocol)

    @client_idx = 0
    @current_client = @client[@client_idx]

    buffer_initialize(
      :max_items => @flush_size,
      :max_interval => @idle_flush_time,
      :logger => @logger
    )

    @retry_timer_thread = Thread.new do
      loop do
        sleep(@retry_max_interval)
        @retry_flush_mutex.synchronize { @retry_queue_needs_flushing.signal }
      end
    end

    @retry_thread = Thread.new do
      while @retry_teardown_requested.false?
        @retry_flush_mutex.synchronize { @retry_queue_needs_flushing.wait(@retry_flush_mutex) }
        retry_flush
      end
    end
  end # def register

  public
  def get_template
    if @template.nil?
      @template = ::File.expand_path('elasticsearch/elasticsearch-template.json', ::File.dirname(__FILE__))
      if !File.exists?(@template)
        raise "You must specify 'template => ...' in your elasticsearch output (I looked for '#{@template}')"
      end
    end
    template_json = IO.read(@template).gsub(/\n/,'')
    template = LogStash::Json.load(template_json)
    @logger.info("Using mapping template", :template => template)
    return template
  end # def get_template

  public
  def receive(event)
    return unless output?(event)

    # block until we have not maxed out our
    # retry queue. This is applying back-pressure
    # to slow down the receive-rate
    @retry_flush_mutex.synchronize {
      @retry_queue_not_full.wait(@retry_flush_mutex) while @retry_queue.size > @retry_max_items
    }

    event['@metadata']['retry_count'] = 0

    # Set the 'type' value for the index.
    type = if @document_type
             event.sprintf(@document_type)
           elsif @index_type # deprecated
             event.sprintf(@index_type)
           else
             event["type"] || "logs"
           end

    params = {
      :_id => @document_id ? event.sprintf(@document_id) : nil,
      :_index => event.sprintf(@index),
      :_type => type,
      :_routing => @routing ? event.sprintf(@routing) : nil
    }

    params[:_upsert] = LogStash::Json.load(event.sprintf(@upsert)) if @action == 'update' && @upsert != ""

    buffer_receive([event.sprintf(@action), params, event])
  end # def receive

  public
  # synchronize the @current_client.bulk call to avoid concurrency/thread safety issues with the
  # client libraries which might not be thread safe. the submit method can be called from both the
  # Stud::Buffer flush thread and from our own retry thread.
  def submit(actions)
    es_actions = actions.map { |a, doc, event| [a, doc, event.to_hash] }
    @submit_mutex.lock
    begin
      bulk_response = @current_client.bulk(es_actions)
    ensure
      @submit_mutex.unlock
    end
    if bulk_response["errors"]
      actions_with_responses = actions.zip(bulk_response['statuses'], bulk_response['error_messages'])
      actions_to_retry = []
      actions_with_responses.each do |action, resp_code, error_message|
        if RETRYABLE_CODES.include?(resp_code)
          @logger.warn "retrying failed action with response code: #{resp_code}"
          actions_to_retry << action
        elsif @rename_type_on_mismatch and resp_code == 400 and mapping_mismatch? error_message
          @logger.warn "retrying mapping mismatch: #{resp_code}"
          action[2]["tags"] ||= []
          actions_to_retry << rename_type(action)
        elsif not SUCCESS_CODES.include?(resp_code)
          @logger.warn "failed action with response of #{resp_code}, dropping action: #{action}"
        end
      end
      retry_push(actions_to_retry) unless actions_to_retry.empty?
    end
  end

  public
  def mapping_mismatch?(error_message)
    error_message.include? "MapperParsingException"
  end

  def rename_type(action)
    action[1][:_type] = action[1][:_type] + Time.now.to_i.to_s
    action
  end

  # When there are exceptions raised upon submission, we raise an exception so that
  # Stud::Buffer will retry the flush
  def flush(actions, teardown = false)
    begin
      submit(actions)
    rescue => e
      @logger.error "Got error to send bulk of actions: #{e.message}"
      raise e
    ensure
      unless @protocol == "node"
        @logger.debug? and @logger.debug "Shifting current elasticsearch client"
        shift_client
      end
    end
  end # def flush

  public
  def teardown
    if @cacert # remove temporary jks store created from the cacert
      File.delete(@truststore)
    end

    @retry_teardown_requested.make_true
    # First, make sure retry_timer_thread is stopped
    # to ensure we do not signal a retry based on
    # the retry interval.
    Thread.kill(@retry_timer_thread)
    @retry_timer_thread.join
    # Signal flushing in the case that #retry_flush is in
    # the process of waiting for a signal.
    @retry_flush_mutex.synchronize { @retry_queue_needs_flushing.signal }
    # Now, #retry_flush is ensured to not be in a state of
    # waiting and can be safely joined into the main thread
    # for further final execution of an in-process remaining call.
    @retry_thread.join

    # execute any final actions along with a proceeding retry for any
    # final actions that did not succeed.
    buffer_flush(:final => true)
    retry_flush
  end

  protected
  def start_local_elasticsearch
    @logger.info("Starting embedded Elasticsearch local node.")
    builder = org.elasticsearch.node.NodeBuilder.nodeBuilder
    # Disable 'local only' - LOGSTASH-277
    #builder.local(true)
    builder.settings.put("cluster.name", @cluster) if @cluster
    builder.settings.put("node.name", @node_name) if @node_name
    builder.settings.put("network.host", @bind_host) if @bind_host
    builder.settings.put("http.port", @embedded_http_port)

    @embedded_elasticsearch = builder.node
    @embedded_elasticsearch.start
  end # def start_local_elasticsearch

  protected
  def shift_client
    @client_idx = (@client_idx + 1) % @client.length
    @current_client = @client[@client_idx]
    @logger.debug? and @logger.debug("Switched current elasticsearch client to ##{@client_idx} at #{@host[@client_idx]}")
  end

  private
  def setup_proxy
    return {} unless @proxy

    if @protocol != "http"
      raise(LogStash::ConfigurationError, "Proxy is not supported for '#{@protocol}'. Change the protocol to 'http' if you need HTTP proxy.")
    end

    # Symbolize keys
    proxy = if @proxy.is_a?(Hash)
              Hash[@proxy.map {|k,v| [k.to_sym, v]}]
            elsif @proxy.is_a?(String)
              @proxy
            else
              raise LogStash::ConfigurationError, "Expected 'proxy' to be a string or hash, not '#{@proxy}'!"
            end

    return {:proxy => proxy}
  end

  private
  def setup_ssl
    return {} unless @ssl
    if @protocol != "http"
      raise(LogStash::ConfigurationError, "SSL is not supported for '#{@protocol}'. Change the protocol to 'http' if you need SSL.")
    end
    @protocol = "https"
    if @cacert && @truststore
      raise(LogStash::ConfigurationError, "Use either \"cacert\" or \"truststore\" when configuring the CA certificate")
    end
    ssl_options = {}
    if @cacert
      @truststore, ssl_options[:truststore_password] = generate_jks @cacert
    elsif @truststore
      ssl_options[:truststore_password] = @truststore_password.value if @truststore_password
    end
    ssl_options[:truststore] = @truststore if @truststore
    if @keystore
      ssl_options[:keystore] = @keystore
      ssl_options[:keystore_password] = @keystore_password.value if @keystore_password
    end
    if @ssl_certificate_verification == false
      @logger.warn [
        "** WARNING ** Detected UNSAFE options in elasticsearch output configuration!",
        "** WARNING ** You have enabled encryption but DISABLED certificate verification.",
        "** WARNING ** To make sure your data is secure change :ssl_certificate_verification to true"
      ].join("\n")
      ssl_options[:verify] = false
    end
    { ssl: ssl_options }
  end

  private
  def setup_basic_auth
    return {} unless @user && @password

    if @protocol =~ /http/
      {
        :user => ::URI.escape(@user, "@:"),
        :password => ::URI.escape(@password.value, "@:")
      }
    else
      raise(LogStash::ConfigurationError, "User and password parameters are not supported for '#{@protocol}'. Change the protocol to 'http' if you need them.")
    end
  end

  private
  def generate_jks cert_path
    require 'securerandom'
    require 'tempfile'
    require 'java'
    import java.io.FileInputStream
    import java.io.FileOutputStream
    import java.security.KeyStore
    import java.security.cert.CertificateFactory

    jks = java.io.File.createTempFile("cert", ".jks")

    ks = KeyStore.getInstance "JKS"
    ks.load nil, nil
    cf = CertificateFactory.getInstance "X.509"
    cert = cf.generateCertificate FileInputStream.new(cert_path)
    ks.setCertificateEntry "cacert", cert
    pwd = SecureRandom.urlsafe_base64(9)
    ks.store FileOutputStream.new(jks), pwd.to_java.toCharArray
    [jks.path, pwd]
  end

  private
  # in charge of submitting any actions in @retry_queue that need to be
  # retried
  #
  # This method is not called concurrently. It is only called by @retry_thread
  # and once that thread is ended during the teardown process, a final call
  # to this method is done upon teardown in the main thread.
  def retry_flush()
    unless @retry_queue.empty?
      buffer = @retry_queue.size.times.map do
        next_action, next_doc, next_event = @retry_queue.pop
        next_event['@metadata']['retry_count'] += 1

        if next_event['@metadata']['retry_count'] > @max_retries
          @logger.error "too many attempts at sending event. dropping: #{next_event}"
          nil
        else
          [next_action, next_doc, next_event]
        end
      end.compact

      submit(buffer) unless buffer.empty?
    end

    @retry_flush_mutex.synchronize {
      @retry_queue_not_full.signal if @retry_queue.size < @retry_max_items
    }
  end

  private
  def retry_push(actions)
    Array(actions).each{|action| @retry_queue << action}
    @retry_flush_mutex.synchronize {
      @retry_queue_needs_flushing.signal if @retry_queue.size >= @retry_max_items
    }
  end

  @@plugins = Gem::Specification.find_all{|spec| spec.name =~ /logstash-output-elasticsearch-/ }

  @@plugins.each do |plugin|
    name = plugin.name.split('-')[-1]
    require "logstash/outputs/elasticsearch/#{name}"
  end

end # class LogStash::Outputs::Elasticsearch