logstash-output-elasticsearch 0.1.6 → 3.0.0
- checksums.yaml +5 -13
- data/CHANGELOG.md +117 -0
- data/CONTRIBUTORS +32 -0
- data/Gemfile +4 -4
- data/LICENSE +1 -1
- data/NOTICE.TXT +5 -0
- data/README.md +110 -0
- data/lib/logstash/outputs/elasticsearch.rb +97 -425
- data/lib/logstash/outputs/elasticsearch/buffer.rb +124 -0
- data/lib/logstash/outputs/elasticsearch/common.rb +205 -0
- data/lib/logstash/outputs/elasticsearch/common_configs.rb +164 -0
- data/lib/logstash/outputs/elasticsearch/elasticsearch-template.json +36 -24
- data/lib/logstash/outputs/elasticsearch/http_client.rb +236 -0
- data/lib/logstash/outputs/elasticsearch/http_client_builder.rb +106 -0
- data/lib/logstash/outputs/elasticsearch/template_manager.rb +35 -0
- data/logstash-output-elasticsearch.gemspec +17 -15
- data/spec/es_spec_helper.rb +77 -0
- data/spec/fixtures/scripts/scripted_update.groovy +2 -0
- data/spec/fixtures/scripts/scripted_update_nested.groovy +2 -0
- data/spec/fixtures/scripts/scripted_upsert.groovy +2 -0
- data/spec/integration/outputs/create_spec.rb +55 -0
- data/spec/integration/outputs/index_spec.rb +68 -0
- data/spec/integration/outputs/parent_spec.rb +73 -0
- data/spec/integration/outputs/pipeline_spec.rb +75 -0
- data/spec/integration/outputs/retry_spec.rb +163 -0
- data/spec/integration/outputs/routing_spec.rb +65 -0
- data/spec/integration/outputs/secure_spec.rb +108 -0
- data/spec/integration/outputs/templates_spec.rb +90 -0
- data/spec/integration/outputs/update_spec.rb +188 -0
- data/spec/unit/buffer_spec.rb +118 -0
- data/spec/unit/http_client_builder_spec.rb +27 -0
- data/spec/unit/outputs/elasticsearch/http_client_spec.rb +133 -0
- data/spec/unit/outputs/elasticsearch_proxy_spec.rb +58 -0
- data/spec/unit/outputs/elasticsearch_spec.rb +227 -0
- data/spec/unit/outputs/elasticsearch_ssl_spec.rb +55 -0
- metadata +137 -51
- data/.gitignore +0 -4
- data/Rakefile +0 -6
- data/lib/logstash/outputs/elasticsearch/protocol.rb +0 -253
- data/rakelib/publish.rake +0 -9
- data/rakelib/vendor.rake +0 -169
- data/spec/outputs/elasticsearch.rb +0 -518
checksums.yaml
CHANGED
@@ -1,15 +1,7 @@
 ---
-
-metadata.gz:
-
-data.tar.gz: !binary |-
-ODJkYzg1NWU5YWEzYmZmOWRiMDFkNjFlYmM0Y2Q2NmZiZGEwNjYyMQ==
+SHA1:
+metadata.gz: 51dba928b726d91c06b243e7cb8283d6acde3096
+data.tar.gz: a877ceda5efa85042dbd832bb86d13e9f1ce98d7
 SHA512:
-metadata.gz:
-
-YWY0MmEzNzQ2OWUwODc0ZWRkNmI0OWY3MDViOTU3ZjlhMGY5NzFjZDJhNGQz
-NjdlZWQ4YWM3Nzk1NTI3OWQ1MDEyZmY0MzBkODM0Nzc3YTgzNWM=
-data.tar.gz: !binary |-
-MjY2Mzc0ZDRiOGEyOGIzMDg2NWMzNGRiMDA2MWYxMDM4YzhiZmZiN2ViZDhk
-ZDI2ZGE2MzRhMTA1ZDMxZGM2NTQ3YzU1ZjE0NjYzMTU0NTk4OTJhMWFiNTMy
-MzQzYzc0OGM2NTNkMzJmNmIxY2I4YWY5ODZhYTgzOTAwODUxYWM=
+metadata.gz: 03367771f9c23e04d3bd88cd191022e90236361a45ede08dd904915ba2b1df890384cdf7e979553dd5defa5e1e705a6af4898732be4a235ed5bd315769ac8aaa
+data.tar.gz: 0694642bc5d96be78358761f60724d2b68adf05c2d5c05c670b929a8696751baaa691a851b488a8551ac404bdb31cf21bf0089ec863f98fb7f0487dc81bf194d

data/CHANGELOG.md
ADDED
@@ -0,0 +1,117 @@
+## 3.0.0
+ - Update the plugin to the version 2.0 of the plugin api, this change is required for Logstash 5.0 compatibility. See https://github.com/elastic/logstash/issues/5141
+## 2.7.0
+ - Add `pipeline` configuration option for setting an ingest pipeline to run upon indexing
+
+## 2.6.2
+ - Fix bug where update index actions would not work with events with 'data' field
+
+## 2.6.1
+ - Add 'retry_on_conflict' configuration option which should have been here from the beginning
+
+## 2.5.2
+ - Fix bug with update document with doc_as_upsert and scripting (#364, #359)
+ - Make error messages more verbose and easier to parse by humans
+ - Retryable failures are now logged at the info level instead of warning. (issue #372)
+
+## 2.5.1
+ - Fix bug where SSL would sometimes not be enabled
+
+## 2.5.0
+ - Host settings now are more robust to bad input
+ - Host settings can now take full URLs
+
+## 2.4.2
+ - Make flush_size actually cap the batch size in LS 2.2+
+
+## 2.4.1
+ - Used debug level instead of info when emitting flush log message
+ - Updated docs about template
+
+## 2.4.0
+ - Scripted update support courtesy of @Da-Wei
+
+## 2.3.2
+ - Fix bug where max_retry_interval was not respected for HTTP error codes
+
+## 2.3.1
+ - Bump manticore dependency to 0.5.2
+
+## 2.3.0
+ - Now retry too busy and service unavailable errors infinitely.
+ - Never retry conflict errors
+ - Fix broken delete verb that would fail due to sending body with verb
+
+## 2.2.0
+ - Serialize access to the connection pool in es-ruby client
+ - Add support for parent relationship
+
+## 2.1.5
+ - Sprintf style 'action' parameters no longer raise a LogStash::ConfigurationError
+
+## 2.1.4
+ - Improved the default template to disable fielddata on analyzed string fields. #309
+ - Depend on the logstash-core 2.0.0 released version, rather than RC1
+
+## 2.1.3
+ - Improved the default template to use doc_values wherever possible.
+ - Template contains example mappings for every numeric type. You must map your
+   own fields to make use of anything other than long and double.
+
+## 2.1.2
+ - Fixed dependencies (#280)
+ - Fixed an RSpec test (#281)
+
+## 2.1.1
+ - Made host config obsolete.
+
+## 2.1.0
+ - New setting: timeout. This lets you control the behavior of a slow/stuck
+   request to Elasticsearch that could be, for example, caused by network,
+   firewall, or load balancer issues.
+
+## 2.0.0
+ - Plugins were updated to follow the new shutdown semantics; this mainly allows Logstash to instruct input plugins to terminate gracefully,
+   instead of using Thread.raise on the plugins' threads. Ref: https://github.com/elastic/logstash/pull/3895
+ - Dependency on logstash-core updated to 2.0
+
+## 2.0.0-beta2
+ - Massive internal refactor of client handling
+ - Background HTTP sniffing support
+ - Reduced bulk request size to 500 from 5000 (better memory utilization)
+ - Removed 'host' config option. Now use 'hosts'
+
+## 2.0.0-beta
+ - Only support HTTP Protocol
+ - Removed support for node and transport protocols (now in logstash-output-elasticsearch_java)
+
+## 1.0.7
+ - Add update API support
+
+## 1.0.6
+ - Fix warning about Concurrent lib deprecation
+
+## 1.0.4
+ - Update to Elasticsearch 1.7
+
+## 1.0.3
+ - Add HTTP proxy support
+
+## 1.0.2
+ - Upgrade Manticore HTTP Client
+
+## 1.0.1
+ - Allow client certificates
+
+## 0.2.9
+ - Add 'path' parameter for ES HTTP hosts behind a proxy on a subpath
+
+## 0.2.8 (June 12, 2015)
+ - Add option to enable and disable SSL certificate verification during handshake (#160)
+ - Doc improvements for clarifying round robin behavior using hosts config
+
+## 0.2.7 (May 28, 2015)
+ - Bump es-ruby version to 1.0.10
+
+## 0.2.6 (May 28, 2015)
+ - Disable timeouts when using http protocol which would cause bulk requests to fail (#103)

data/CONTRIBUTORS
ADDED
@@ -0,0 +1,32 @@
+The following is a list of people who have contributed ideas, code, bug
+reports, or in general have helped logstash along its way.
+
+Contributors:
+* Aaron Mildenstein (untergeek)
+* Bob Corsaro (dokipen)
+* Colin Surprenant (colinsurprenant)
+* Dmitry Koprov (dkoprov)
+* Graham Bleach (bleach)
+* Hao Chen (haoch)
+* Ivan Babrou (bobrik)
+* James Turnbull (jamtur01)
+* John E. Vincent (lusis)
+* Jordan Sissel (jordansissel)
+* João Duarte (jsvd)
+* Kurt Hurtado (kurtado)
+* Miah Johnson (miah)
+* Pere Urbón (purbon)
+* Pete Fritchman (fetep)
+* Pier-Hugues Pellerin (ph)
+* Raymond Feng (raymondfeng)
+* Richard Pijnenburg (electrical)
+* Spenser Jones (SpenserJ)
+* Suyog Rao (suyograo)
+* Tal Levy (talevy)
+* Tom Hodder (tolland)
+* jimmyjones2
+
+Note: If you've sent us patches, bug reports, or otherwise contributed to
+Logstash, and you aren't on the list above and want to be, please let us know
+and we'll make sure you're here. Contributions from folks like you are what make
+open source awesome.

data/Gemfile
CHANGED
@@ -1,4 +1,4 @@
-source '
-
-gem
-
+source 'https://rubygems.org'
+
+# Specify your gem's dependencies in logstash-mass_effect.gemspec
+gemspec

data/LICENSE
CHANGED
data/NOTICE.TXT
ADDED
data/README.md
ADDED
@@ -0,0 +1,110 @@
+# Logstash Plugin
+
+[![Travis Build Status](https://travis-ci.org/logstash-plugins/logstash-output-elasticsearch.svg)](https://travis-ci.org/logstash-plugins/logstash-output-elasticsearch)
+
+This is a plugin for [Logstash](https://github.com/elastic/logstash).
+
+It is fully free and fully open source. The license is Apache 2.0, meaning you are pretty much free to use it however you want in whatever way.
+
+## Documentation
+
+Logstash provides infrastructure to automatically generate documentation for this plugin. We use the asciidoc format to write documentation so any comments in the source code will be first converted into asciidoc and then into html. All plugin documentation is placed under one [central location](http://www.elastic.co/guide/en/logstash/current/).
+
+- For formatting code or config examples, you can use the asciidoc `[source,ruby]` directive
+- For more asciidoc formatting tips, see the excellent reference here https://github.com/elastic/docs#asciidoc-guide
+
+## Need Help?
+
+Need help? Try #logstash on freenode IRC or the https://discuss.elastic.co/c/logstash discussion forum.
+
+## Developing
+
+### 1. Plugin Development and Testing
+
+#### Code
+- To get started, you'll need JRuby with the Bundler gem installed.
+
+- Create a new plugin or clone an existing one from the GitHub [logstash-plugins](https://github.com/logstash-plugins) organization. We also provide [example plugins](https://github.com/logstash-plugins?query=example).
+
+- Install dependencies
+```sh
+bundle install
+```
+
+#### Test
+
+- Update your dependencies
+
+```sh
+bundle install
+```
+
+- Run unit tests
+
+```sh
+bundle exec rspec
+```
+
+- Run integration tests
+
+Dependencies: [Docker](http://docker.com)
+
+Before the test suite is run, we will load and run an
+Elasticsearch instance within a docker container. This container
+will be cleaned up when the suite has finished.
+
+```sh
+bundle exec rspec --tag integration
+```
+
+### 2. Running your unpublished Plugin in Logstash
+
+#### 2.1 Run in a local Logstash clone
+
+- Edit Logstash `Gemfile` and add the local plugin path, for example:
+```ruby
+gem "logstash-filter-awesome", :path => "/your/local/logstash-filter-awesome"
+```
+- Install plugin
+```sh
+# Logstash 2.3 and higher
+bin/logstash-plugin install --no-verify
+
+# Prior to Logstash 2.3
+bin/plugin install --no-verify
+
+```
+- Run Logstash with your plugin
+```sh
+bin/logstash -e 'filter {awesome {}}'
+```
+At this point any modifications to the plugin code will be applied to this local Logstash setup. After modifying the plugin, simply rerun Logstash.
+
+#### 2.2 Run in an installed Logstash
+
+You can use the same **2.1** method to run your plugin in an installed Logstash by editing its `Gemfile` and pointing the `:path` to your local plugin development directory or you can build the gem and install it using:
+
+- Build your plugin gem
+```sh
+gem build logstash-filter-awesome.gemspec
+```
+- Install the plugin from the Logstash home
+```sh
+# Logstash 2.3 and higher
+bin/logstash-plugin install --no-verify
+
+# Prior to Logstash 2.3
+bin/plugin install --no-verify
+
+```
+- Start Logstash and proceed to test the plugin
+
+## Contributing
+
+All contributions are welcome: ideas, patches, documentation, bug reports, complaints, and even something you drew up on a napkin.
+
+Programming is not a required skill. Whatever you've seen about open source and maintainers or community members saying "send patches or die" - you will not see that here.
+
+It is more important to the community that you are able to contribute.
+
+For more information about contributing, see the [CONTRIBUTING](https://github.com/elastic/logstash/blob/master/CONTRIBUTING.md) file.

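Applied to this particular gem, the generic `logstash-filter-awesome` commands above translate roughly as follows. This is only an illustrative sketch: the built gem's file name is assumed from the 3.0.0 version in this diff's title, and your Logstash home directory may differ.

```sh
# Build the gem from this repository's gemspec
gem build logstash-output-elasticsearch.gemspec

# From the Logstash home, install the locally built gem (Logstash 2.3+ syntax)
bin/logstash-plugin install logstash-output-elasticsearch-3.0.0.gem
```
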
data/lib/logstash/outputs/elasticsearch.rb
CHANGED
@@ -3,467 +3,139 @@ require "logstash/namespace"
 require "logstash/environment"
 require "logstash/outputs/base"
 require "logstash/json"
+require "concurrent"
 require "stud/buffer"
 require "socket" # for Socket.gethostname
+require "thread" # for safe queueing
 require "uri" # for escaping user input
-require 'logstash-output-elasticsearch_jars.rb'
 
-# This
-#
-# need to use this output.
+# This plugin is the recommended method of storing logs in Elasticsearch.
+# If you plan on using the Kibana web interface, you'll want to use this output.
 #
-#
-#
+# This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+# We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+# yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+# to upgrade Logstash in lock-step. For those still wishing to use the node or transport protocols please see
+# the <<plugins-outputs-elasticsearch_java,elasticsearch_java output plugin>>.
 #
-#
-# as configuration options, there are two methods:
+# You can learn more about Elasticsearch at <https://www.elastic.co/products/elasticsearch>
 #
-#
-# * Pass in es.* java properties (`java -Des.node.foo=` or `ruby -J-Des.node.foo=`)
+# ==== Retry Policy
 #
-#
-# Elasticsearch
-#
+# The retry policy has changed significantly in the 2.2.0 release.
+# This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+# either partial or total failures.
 #
-#
+# The following errors are retried infinitely:
 #
-#
+# - Network errors (inability to connect)
+# - 429 (Too many requests) and
+# - 503 (Service unavailable) errors
 #
-#
-#
-#
+# NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+# It is more performant for Elasticsearch to retry these exceptions than this plugin.
+#
+# ==== DNS Caching
+#
+# This plugin uses the JVM to lookup DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+# a global setting for the JVM.
+#
+# As an example, to set your DNS TTL to 1 second you would set
+# the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+#
+# Keep in mind that a connection with keepalive enabled will
+# not reevaluate its DNS value while the keepalive is in effect.
 class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
-
-
-
-
-
-  # The index to write events to. This can be dynamic using the `%{foo}` syntax.
-  # The default value will partition your indices by day so you can more easily
-  # delete old data or only search specific date ranges.
-  # Indexes may not contain uppercase characters.
-  # For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}
-  config :index, :validate => :string, :default => "logstash-%{+YYYY.MM.dd}"
-
-  # The index type to write events to. Generally you should try to write only
-  # similar events to the same 'type'. String expansion `%{foo}` works here.
-  config :index_type, :validate => :string
-
-  # Starting in Logstash 1.3 (unless you set option `manage_template` to false)
-  # a default mapping template for Elasticsearch will be applied, if you do not
-  # already have one set to match the index pattern defined (default of
-  # `logstash-%{+YYYY.MM.dd}`), minus any variables. For example, in this case
-  # the template will be applied to all indices starting with `logstash-*`
-  #
-  # If you have dynamic templating (e.g. creating indices based on field names)
-  # then you should set `manage_template` to false and use the REST API to upload
-  # your templates manually.
-  config :manage_template, :validate => :boolean, :default => true
-
-  # This configuration option defines how the template is named inside Elasticsearch.
-  # Note that if you have used the template management features and subsequently
-  # change this, you will need to prune the old template manually, e.g.
-  #
-  # `curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
-  #
-  # where `OldTemplateName` is whatever the former setting was.
-  config :template_name, :validate => :string, :default => "logstash"
-
-  # You can set the path to your own template here, if you so desire.
-  # If not set, the included template will be used.
-  config :template, :validate => :path
-
-  # Overwrite the current template with whatever is configured
-  # in the `template` and `template_name` directives.
-  config :template_overwrite, :validate => :boolean, :default => false
-
-  # The document ID for the index. Useful for overwriting existing entries in
-  # Elasticsearch with the same ID.
-  config :document_id, :validate => :string, :default => nil
-
-  # The name of your cluster if you set it on the Elasticsearch side. Useful
-  # for discovery.
-  config :cluster, :validate => :string
-
-  # The hostname or IP address of the host to use for Elasticsearch unicast discovery
-  # This is only required if the normal multicast/cluster discovery stuff won't
-  # work in your environment.
-  #
-  # `"127.0.0.1"`
-  # `["127.0.0.1:9300","127.0.0.2:9300"]`
-  config :host, :validate => :array
-
-  # The port for Elasticsearch transport to use.
-  #
-  # If you do not set this, the following defaults are used:
-  # * `protocol => http` - port 9200
-  # * `protocol => transport` - port 9300-9305
-  # * `protocol => node` - port 9300-9305
-  config :port, :validate => :string
+  require "logstash/outputs/elasticsearch/http_client"
+  require "logstash/outputs/elasticsearch/http_client_builder"
+  require "logstash/outputs/elasticsearch/common_configs"
+  require "logstash/outputs/elasticsearch/common"
 
-  #
-
+  # Protocol agnostic (i.e. non-http, non-java specific) configs go here
+  include(LogStash::Outputs::ElasticSearch::CommonConfigs)
 
-  #
-
-  # The port for the node to listen on.
-  config :bind_port, :validate => :number
-
-  # Run the Elasticsearch server embedded in this process.
-  # This option is useful if you want to run a single Logstash process that
-  # handles log processing and indexing; it saves you from needing to run
-  # a separate Elasticsearch process.
-  config :embedded, :validate => :boolean, :default => false
-
-  # If you are running the embedded Elasticsearch server, you can set the http
-  # port it listens on here; it is not common to need this setting changed from
-  # default.
-  config :embedded_http_port, :validate => :string, :default => "9200-9300"
-
-  # This setting no longer does anything. It exists to keep config validation
-  # from failing. It will be removed in future versions.
-  config :max_inflight_requests, :validate => :number, :default => 50, :deprecated => true
-
-  # The node name Elasticsearch will use when joining a cluster.
-  #
-  # By default, this is generated internally by the ES client.
-  config :node_name, :validate => :string
+  # Protocol agnostic methods
+  include(LogStash::Outputs::ElasticSearch::Common)
 
-
-  # To make efficient bulk api calls, we will buffer a certain number of
-  # events before flushing that out to Elasticsearch. This setting
-  # controls how many events will be buffered before sending a batch
-  # of events.
-  config :flush_size, :validate => :number, :default => 5000
-
-  # The amount of time since last flush before a flush is forced.
-  #
-  # This setting helps ensure slow event rates don't get stuck in Logstash.
-  # For example, if your `flush_size` is 100, and you have received 10 events,
-  # and it has been more than `idle_flush_time` seconds since the last flush,
-  # Logstash will flush those 10 events automatically.
-  #
-  # This helps keep both fast and slow log streams moving along in
-  # near-real-time.
-  config :idle_flush_time, :validate => :number, :default => 1
-
-  # Choose the protocol used to talk to Elasticsearch.
-  #
-  # The 'node' protocol will connect to the cluster as a normal Elasticsearch
-  # node (but will not store data). This allows you to use things like
-  # multicast discovery. If you use the `node` protocol, you must permit
-  # bidirectional communication on the port 9300 (or whichever port you have
-  # configured).
-  #
-  # The 'transport' protocol will connect to the host you specify and will
-  # not show up as a 'node' in the Elasticsearch cluster. This is useful
-  # in situations where you cannot permit connections outbound from the
-  # Elasticsearch cluster to this Logstash server.
-  #
-  # The 'http' protocol will use the Elasticsearch REST/HTTP interface to talk
-  # to elasticsearch.
-  #
-  # All protocols will use bulk requests when talking to Elasticsearch.
-  #
-  # The default `protocol` setting under java/jruby is "node". The default
-  # `protocol` on non-java rubies is "http"
-  config :protocol, :validate => [ "node", "transport", "http" ]
+  config_name "elasticsearch"
 
-  # The Elasticsearch action to perform. Valid actions are:
-  #
-  # Use of this setting *REQUIRES* you also configure the `document_id` setting
-  # because `delete` actions all require a document id.
-  #
-  # What does each action do?
+  # The Elasticsearch action to perform. Valid actions are:
   #
   # - index: indexes a document (an event from Logstash).
-  # - delete: deletes a document by id
-  #
-  #
+  # - delete: deletes a document by id (An id is required for this action)
+  # - create: indexes a document, fails if a document by that id already exists in the index.
+  # - update: updates a document by id. Update has a special case where you can upsert -- update a
+  #   document if not already present. See the `upsert` option
+  # - A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  #   would use the foo field for the action
+  #
+  # For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
   config :action, :validate => :string, :default => "index"
 
-  # Username
+  # Username to authenticate to a secure Elasticsearch cluster
   config :user, :validate => :string
+  # Password to authenticate to a secure Elasticsearch cluster
   config :password, :validate => :password
 
-  #
-  #
-
-
+  # HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+  # the root path of the Elasticsearch HTTP API.
+  config :path, :validate => :string, :default => "/"
+
+  # Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+  # is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+  # If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'
+  config :ssl, :validate => :boolean
+
+  # Option to validate the server's certificate. Disabling this severely compromises security.
+  # For more information on disabling certificate verification please read
+  # https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+  config :ssl_certificate_verification, :validate => :boolean, :default => true
 
   # The .cer or .pem file to validate the server's certificate
   config :cacert, :validate => :path
 
-  # The JKS truststore to validate the server's certificate
+  # The JKS truststore to validate the server's certificate.
   # Use either `:truststore` or `:cacert`
   config :truststore, :validate => :path
 
   # Set the truststore password
   config :truststore_password, :validate => :password
 
-  #
-  #
-
-  # "logs-%{YYYY}" -> "logs-*"
-  def wildcard_substitute(name)
-    name.gsub(/%\{[^}]+\}/, "*")
-  end
-
-  public
-  def register
-    client_settings = {}
-
-    if @protocol.nil?
-      @protocol = LogStash::Environment.jruby? ? "node" : "http"
-    end
-
-    if ["node", "transport"].include?(@protocol)
-      # Node or TransportClient; requires JRuby
-      raise(LogStash::PluginLoadingError, "This configuration requires JRuby. If you are not using JRuby, you must set 'protocol' to 'http'. For example: output { elasticsearch { protocol => \"http\" } }") unless LogStash::Environment.jruby?
-
-      client_settings["cluster.name"] = @cluster if @cluster
-      client_settings["network.host"] = @bind_host if @bind_host
-      client_settings["transport.tcp.port"] = @bind_port if @bind_port
-
-      if @node_name
-        client_settings["node.name"] = @node_name
-      else
-        client_settings["node.name"] = "logstash-#{Socket.gethostname}-#{$$}-#{object_id}"
-      end
-
-      @@plugins.each do |plugin|
-        name = plugin.name.split('-')[-1]
-        client_settings.merge!(LogStash::Outputs::ElasticSearch.const_get(name.capitalize).create_client_config(self))
-      end
-    end
-
-    require "logstash/outputs/elasticsearch/protocol"
-
-    if @port.nil?
-      @port = case @protocol
-        when "http"; "9200"
-        when "transport", "node"; "9300-9305"
-      end
-    end
-
-    if @host.nil? && @protocol == "http"
-      @logger.info("No 'host' set in elasticsearch output. Defaulting to localhost")
-      @host = ["localhost"]
-    end
-
-    client_settings.merge! setup_ssl()
-
-    common_options = {
-      :protocol => @protocol,
-      :client_settings => client_settings
-    }
-
-    common_options.merge! setup_basic_auth()
-
-    client_class = case @protocol
-      when "transport"
-        LogStash::Outputs::Elasticsearch::Protocols::TransportClient
-      when "node"
-        LogStash::Outputs::Elasticsearch::Protocols::NodeClient
-      when /http/
-        LogStash::Outputs::Elasticsearch::Protocols::HTTPClient
-    end
-
-    if @embedded
-      raise(LogStash::ConfigurationError, "The 'embedded => true' setting is only valid for the elasticsearch output under JRuby. You are running #{RUBY_DESCRIPTION}") unless LogStash::Environment.jruby?
-      # LogStash::Environment.load_elasticsearch_jars!
-
-      # Default @host with embedded to localhost. This should help avoid
-      # newbies tripping on ubuntu and other distros that have a default
-      # firewall that blocks multicast.
-      @host ||= ["localhost"]
-
-      # Start Elasticsearch local.
-      start_local_elasticsearch
-    end
-
-    @client = Array.new
-
-    if protocol == "node" or @host.nil? # if @protocol is "node" or @host is not set
-      options = {
-        :host => @host,
-        :port => @port,
-      }.merge(common_options)
-      @client << client_class.new(options)
-    else # if @protocol in ["transport","http"]
-      @host.each do |host|
-        (_host,_port) = host.split ":"
-        options = {
-          :host => _host,
-          :port => _port || @port,
-        }.merge(common_options)
-        @logger.info "Create client to elasticsearch server on #{_host}:#{_port}"
-        @client << client_class.new(options)
-      end # @host.each
-    end
-
-    if @manage_template
-      for client in @client
-        begin
-          @logger.info("Automatic template management enabled", :manage_template => @manage_template.to_s)
-          client.template_install(@template_name, get_template, @template_overwrite)
-          break
-        rescue => e
-          @logger.error("Failed to install template: #{e.message}")
-        end
-      end # for @client loop
-    end # if @manage_templates
-
-    @logger.info("New Elasticsearch output", :cluster => @cluster,
-                 :host => @host, :port => @port, :embedded => @embedded,
-                 :protocol => @protocol)
+  # The keystore used to present a certificate to the server.
+  # It can be either .jks or .p12
+  config :keystore, :validate => :path
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    raise(LogStash::ConfigurationError, "Use either \"cacert\" or \"truststore\" when configuring the CA certificate") if @truststore
-    end
-    ssl_options = {}
-    if @cacert then
-      @truststore, ssl_options[:truststore_password] = generate_jks @cacert
-    elsif @truststore
-      ssl_options[:truststore_password] = @truststore_password.value if @truststore_password
-    end
-    ssl_options[:truststore] = @truststore
-    { ssl: ssl_options }
-  end
-
-  private
-  def setup_basic_auth
-    return {} unless @user && @password
-
-    if @protocol =~ /http/
-      {
-        :user => ::URI.escape(@user, "@:"),
-        :password => ::URI.escape(@password.value, "@:")
-      }
-    else
-      raise(LogStash::ConfigurationError, "User and password parameters are not supported for '#{@protocol}'. Change the protocol to 'http' if you need them.")
-    end
-  end
-
-  public
-  def get_template
-    if @template.nil?
-      @template = ::File.expand_path('elasticsearch/elasticsearch-template.json', ::File.dirname(__FILE__))
-      if !File.exists?(@template)
-        raise "You must specify 'template => ...' in your elasticsearch output (I looked for '#{@template}')"
-      end
-    end
-    template_json = IO.read(@template).gsub(/\n/,'')
-    template = LogStash::Json.load(template_json)
-    template['template'] = wildcard_substitute(@index)
-    @logger.info("Using mapping template", :template => template)
-    return template
-  end # def get_template
-
-  protected
-  def start_local_elasticsearch
-    @logger.info("Starting embedded Elasticsearch local node.")
-    builder = org.elasticsearch.node.NodeBuilder.nodeBuilder
-    # Disable 'local only' - LOGSTASH-277
-    #builder.local(true)
-    builder.settings.put("cluster.name", @cluster) if @cluster
-    builder.settings.put("node.name", @node_name) if @node_name
-    builder.settings.put("network.host", @bind_host) if @bind_host
-    builder.settings.put("http.port", @embedded_http_port)
-
-    @embedded_elasticsearch = builder.node
-    @embedded_elasticsearch.start
-  end # def start_local_elasticsearch
-
-  private
-  def generate_jks cert_path
-
-    require 'securerandom'
-    require 'tempfile'
-    require 'java'
-    import java.io.FileInputStream
-    import java.io.FileOutputStream
-    import java.security.KeyStore
-    import java.security.cert.CertificateFactory
-
-    jks = java.io.File.createTempFile("cert", ".jks")
-
-    ks = KeyStore.getInstance "JKS"
-    ks.load nil, nil
-    cf = CertificateFactory.getInstance "X.509"
-    cert = cf.generateCertificate FileInputStream.new(cert_path)
-    ks.setCertificateEntry "cacert", cert
-    pwd = SecureRandom.urlsafe_base64(9)
-    ks.store FileOutputStream.new(jks), pwd.to_java.toCharArray
-    [jks.path, pwd]
+  # Set the keystore password
+  config :keystore_password, :validate => :password
+
+  # This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+  # Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+  # this with master nodes, you probably want to disable HTTP on them by setting
+  # `http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+  # manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+  config :sniffing, :validate => :boolean, :default => false
+
+  # How long to wait, in seconds, between sniffing attempts
+  config :sniffing_delay, :validate => :number, :default => 5
+
+  # Set the address of a forward HTTP proxy.
+  # Can be either a string, such as `http://localhost:123` or a hash in the form
+  # of `{host: 'proxy.org' port: 80 scheme: 'http'}`.
+  # Note, this is NOT a SOCKS proxy, but a plain HTTP proxy
+  config :proxy
+
+  # Set the timeout for network operations and requests sent to Elasticsearch. If
+  # a timeout occurs, the request will be retried.
+  config :timeout, :validate => :number
+
+  def build_client
+    @client = ::LogStash::Outputs::ElasticSearch::HttpClientBuilder.build(@logger, @hosts, params)
   end
 
-
-
-
-
-    # Set the 'type' value for the index.
-    if @index_type
-      type = event.sprintf(@index_type)
-    else
-      type = event["type"] || "logs"
-    end
-
-    index = event.sprintf(@index)
-
-    document_id = @document_id ? event.sprintf(@document_id) : nil
-    buffer_receive([event.sprintf(@action), { :_id => document_id, :_index => index, :_type => type }, event.to_hash])
-  end # def receive
-
-  def flush(actions, teardown=false)
-    begin
-      @logger.debug? and @logger.debug "Sending bulk of actions to client[#{@client_idx}]: #{@host[@client_idx]}"
-      @current_client.bulk(actions)
-    rescue => e
-      @logger.error "Got error to send bulk of actions to elasticsearch server at #{@host[@client_idx]} : #{e.message}"
-      raise e
-    ensure
-      unless @protocol == "node"
-        @logger.debug? and @logger.debug "Shifting current elasticsearch client"
-        shift_client
-      end
-    end
-    # TODO(sissel): Handle errors. Since bulk requests could mostly succeed
-    # (aka partially fail), we need to figure out what documents need to be
-    # retried.
-    #
-    # In the worst case, a failing flush (exception) will incur a retry from Stud::Buffer.
-  end # def flush
-
-  def teardown
-    if @cacert # remove temporary jks store created from the cacert
-      File.delete(@truststore)
-    end
-    buffer_flush(:final => true)
+  def close
+    @stopping.make_true
+    @client.stop_sniffing!
+    @buffer.stop
   end
 
   @@plugins = Gem::Specification.find_all{|spec| spec.name =~ /logstash-output-elasticsearch-/ }
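The rewritten header documentation above describes an HTTP-only output driven by the `hosts` list, the SSL options, a sprintf-capable `action`, and the new retry policy. As a hedged illustration only (the host names, certificate path, credentials, and the `retry_on_conflict` value below are placeholders, not taken from the gem), a pipeline configuration exercising those options might look like:

```
output {
  elasticsearch {
    # 'hosts' replaces the removed 'host' option (CHANGELOG 2.0.0-beta2); full URLs are accepted as of 2.5.0.
    hosts => ["https://es1.example.org:9200", "https://es2.example.org:9200"]

    # Credentials for a secured cluster (see the user/password options in the diff).
    user => "logstash_writer"
    password => "changeme"

    # Enable TLS and validate the server certificate against a CA file ('ssl' and 'cacert' options).
    ssl => true
    cacert => "/etc/logstash/es-ca.pem"

    # sprintf-style action: pick the bulk action from the event's [action] field.
    action => "%{[action]}"

    # 409 conflicts are no longer retried by the plugin; a higher retry_on_conflict
    # (added in 2.6.1) lets Elasticsearch retry them instead.
    retry_on_conflict => 3
  }
}
```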
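The DNS caching note in the new header comments points at the JVM-wide `networkaddress.cache.ttl` property and the `LS_JAVA_OPTS` environment variable. A minimal sketch of applying it before starting Logstash (the pipeline file path is hypothetical):

```sh
# Cache resolved DNS entries for only 1 second so re-resolved Elasticsearch hosts are picked up quickly;
# note that connections held open by keepalive will not re-resolve while the keepalive is in effect.
export LS_JAVA_OPTS="-Dnetworkaddress.cache.ttl=1"
bin/logstash -f /etc/logstash/conf.d/elasticsearch-output.conf
```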