logstash-output-elasticsearch_java 2.1.3 → 2.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile +9 -1
- data/LICENSE +1 -1
- data/README.md +13 -4
- data/docs/index.asciidoc +491 -0
- data/logstash-output-elasticsearch_java.gemspec +3 -3
- metadata +4 -3
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA1:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: a03452475da09898bc5f42a0036cf64077578ea9
|
|
4
|
+
data.tar.gz: 118a6942cf9d76c086ad31ea4d7bd5e6804804a9
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: 2a321d5291119efb39c87033be5b2f05f1f33c3fc19f8cd793232882ad988e545f750dd7f9882da2b45a7f56434aa9645083c0ba1d37ada08e093788498ac359
|
|
7
|
+
data.tar.gz: 5d8b51294557e50a0ae5279e1966e304063d9b1087eab59f3904c9be4474226c78e4d88ab5e35ee5878d142ce239d3f23baee0f3f40a8116a8c5be30a244428f
|
data/Gemfile
CHANGED
|
@@ -1,3 +1,11 @@
|
|
|
1
1
|
source 'https://rubygems.org'
|
|
2
2
|
|
|
3
|
-
gemspec
|
|
3
|
+
gemspec
|
|
4
|
+
|
|
5
|
+
logstash_path = ENV["LOGSTASH_PATH"] || "../../logstash"
|
|
6
|
+
use_logstash_source = ENV["LOGSTASH_SOURCE"] && ENV["LOGSTASH_SOURCE"].to_s == "1"
|
|
7
|
+
|
|
8
|
+
if Dir.exist?(logstash_path) && use_logstash_source
|
|
9
|
+
gem 'logstash-core', :path => "#{logstash_path}/logstash-core"
|
|
10
|
+
gem 'logstash-core-plugin-api', :path => "#{logstash_path}/logstash-core-plugin-api"
|
|
11
|
+
end
|
data/LICENSE
CHANGED
data/README.md
CHANGED
|
@@ -1,9 +1,8 @@
|
|
|
1
1
|
# Logstash Plugin
|
|
2
2
|
|
|
3
|
-
[](http://build-eu-00.elastic.co/view/LS%20Plugins/view/LS%20Outputs/job/logstash-plugin-output-elasticsearch_java-unit/)
|
|
3
|
+
[](https://travis-ci.org/logstash-plugins/logstash-output-elasticsearch_java)
|
|
5
4
|
|
|
6
|
-
[](https://travis-ci.org/logstash-plugins/logstash-output-elasticsearch_java)
|
|
5
|
+
[](https://travis-ci.org/logstash-plugins/logstash-output-elasticsearch_java)
|
|
7
6
|
|
|
8
7
|
This is a plugin for [Logstash](https://github.com/elastic/logstash).
|
|
9
8
|
|
|
@@ -70,7 +69,12 @@ gem "logstash-filter-awesome", :path => "/your/local/logstash-filter-awesome"
|
|
|
70
69
|
```
|
|
71
70
|
- Install plugin
|
|
72
71
|
```sh
|
|
72
|
+
# Logstash 2.3 and higher
|
|
73
|
+
bin/logstash-plugin install --no-verify
|
|
74
|
+
|
|
75
|
+
# Prior to Logstash 2.3
|
|
73
76
|
bin/plugin install --no-verify
|
|
77
|
+
|
|
74
78
|
```
|
|
75
79
|
- Run Logstash with your plugin
|
|
76
80
|
```sh
|
|
@@ -88,7 +92,12 @@ gem build logstash-filter-awesome.gemspec
|
|
|
88
92
|
```
|
|
89
93
|
- Install the plugin from the Logstash home
|
|
90
94
|
```sh
|
|
91
|
-
|
|
95
|
+
# Logstash 2.3 and higher
|
|
96
|
+
bin/logstash-plugin install --no-verify
|
|
97
|
+
|
|
98
|
+
# Prior to Logstash 2.3
|
|
99
|
+
bin/plugin install --no-verify
|
|
100
|
+
|
|
92
101
|
```
|
|
93
102
|
- Start Logstash and proceed to test the plugin
|
|
94
103
|
|
data/docs/index.asciidoc
ADDED
|
@@ -0,0 +1,491 @@
|
|
|
1
|
+
:plugin: elasticsearch_java
|
|
2
|
+
:type: output
|
|
3
|
+
|
|
4
|
+
///////////////////////////////////////////
|
|
5
|
+
START - GENERATED VARIABLES, DO NOT EDIT!
|
|
6
|
+
///////////////////////////////////////////
|
|
7
|
+
:version: %VERSION%
|
|
8
|
+
:release_date: %RELEASE_DATE%
|
|
9
|
+
:changelog_url: %CHANGELOG_URL%
|
|
10
|
+
:include_path: ../../../../logstash/docs/include
|
|
11
|
+
///////////////////////////////////////////
|
|
12
|
+
END - GENERATED VARIABLES, DO NOT EDIT!
|
|
13
|
+
///////////////////////////////////////////
|
|
14
|
+
|
|
15
|
+
[id="plugins-{type}-{plugin}"]
|
|
16
|
+
|
|
17
|
+
=== Elasticsearch_java output plugin
|
|
18
|
+
|
|
19
|
+
include::{include_path}/plugin_header.asciidoc[]
|
|
20
|
+
|
|
21
|
+
==== Description
|
|
22
|
+
|
|
23
|
+
This output lets you store logs in Elasticsearch using the native 'node' and 'transport'
|
|
24
|
+
protocols. It is highly recommended to use the regular 'logstash-output-elasticsearch' output
|
|
25
|
+
which uses HTTP instead. This output is, in fact, sometimes slower, and never faster than that one.
|
|
26
|
+
Additionally, upgrading your Elasticsearch cluster may require you to simultaneously update this
|
|
27
|
+
plugin for any protocol level changes. The HTTP client may be easier to work with due to wider
|
|
28
|
+
familiarity with HTTP.
|
|
29
|
+
|
|
30
|
+
*VERSION NOTE*: Your Elasticsearch cluster must be running Elasticsearch 1.0.0 or later.
|
|
31
|
+
|
|
32
|
+
If you want to set other Elasticsearch options that are not exposed directly
|
|
33
|
+
as configuration options, there are two methods:
|
|
34
|
+
|
|
35
|
+
* Create an `elasticsearch.yml` file in the $PWD of the Logstash process
|
|
36
|
+
* Pass in es.* java properties (`java -Des.node.foo=` or `ruby -J-Des.node.foo=`)
|
|
37
|
+
|
|
38
|
+
With the default `protocol` setting ("node"), this plugin will join your
|
|
39
|
+
Elasticsearch cluster as a client node, so it will show up in Elasticsearch's
|
|
40
|
+
cluster status.
|
|
41
|
+
|
|
42
|
+
You can learn more about Elasticsearch at <https://www.elastic.co/products/elasticsearch>
|
|
43
|
+
|
|
44
|
+
==== Operational Notes
|
|
45
|
+
|
|
46
|
+
If using the default `protocol` setting ("node"), your firewalls might need
|
|
47
|
+
to permit port 9300 in *both* directions (from Logstash to Elasticsearch, and
|
|
48
|
+
Elasticsearch to Logstash)
|
|
49
|
+
|
|
50
|
+
==== Retry Policy
|
|
51
|
+
|
|
52
|
+
By default all bulk requests to ES are synchronous. Not all events in the bulk requests
|
|
53
|
+
always make it successfully. For example, there could be events which are not formatted
|
|
54
|
+
correctly for the index they are targeting (type mismatch in mapping). So that we minimize loss of
|
|
55
|
+
events, we have a specific retry policy in place. We retry all events which fail to be reached by
|
|
56
|
+
Elasticsearch for network related issues. We retry specific events which exhibit errors under a separate
|
|
57
|
+
policy described below. Events of this nature are ones which experience ES error codes described as
|
|
58
|
+
retryable errors.
|
|
59
|
+
|
|
60
|
+
*Retryable Errors:*
|
|
61
|
+
|
|
62
|
+
- 429, Too Many Requests (RFC6585)
|
|
63
|
+
- 503, The server is currently unable to handle the request due to a temporary overloading or maintenance of the server.
|
|
64
|
+
|
|
65
|
+
Here are the rules of what is retried when:
|
|
66
|
+
|
|
67
|
+
- Block and retry all events in bulk response that experiences transient network exceptions until
|
|
68
|
+
a successful submission is received by Elasticsearch.
|
|
69
|
+
- Retry subset of sent events which resulted in ES errors of a retryable nature which can be found
|
|
70
|
+
in RETRYABLE_CODES
|
|
71
|
+
- For events which returned retryable error codes, they will be pushed onto a separate queue for
|
|
72
|
+
retrying events. Events in this queue will be retried a maximum of 5 times by default (configurable through :max_retries). The size of
|
|
73
|
+
this queue is capped by the value set in :retry_max_items.
|
|
74
|
+
- Events from the retry queue are submitted again either when the queue reaches its max size or when
|
|
75
|
+
the max interval time is reached, which is set in :retry_max_interval.
|
|
76
|
+
- Events which are not retryable or have reached their max retry count are logged to stderr.
|
|
77
|
+
|
|
78
|
+
[id="plugins-{type}s-{plugin}-options"]
|
|
79
|
+
==== Elasticsearch_java Output Configuration Options
|
|
80
|
+
|
|
81
|
+
This plugin supports the following configuration options plus the <<plugins-{type}s-{plugin}-common-options>> described later.
|
|
82
|
+
|
|
83
|
+
[cols="<,<,<",options="header",]
|
|
84
|
+
|=======================================================================
|
|
85
|
+
|Setting |Input type|Required
|
|
86
|
+
| <<plugins-{type}s-{plugin}-action>> |<<string,string>>, one of `["index", "delete", "create", "update", "create_unless_exists"]`|No
|
|
87
|
+
| <<plugins-{type}s-{plugin}-cluster>> |<<string,string>>|No
|
|
88
|
+
| <<plugins-{type}s-{plugin}-doc_as_upsert>> |<<boolean,boolean>>|No
|
|
89
|
+
| <<plugins-{type}s-{plugin}-document_id>> |<<string,string>>|No
|
|
90
|
+
| <<plugins-{type}s-{plugin}-document_type>> |<<string,string>>|No
|
|
91
|
+
| <<plugins-{type}s-{plugin}-hosts>> |<<uri,uri>>|No
|
|
92
|
+
| <<plugins-{type}s-{plugin}-index>> |<<string,string>>|No
|
|
93
|
+
| <<plugins-{type}s-{plugin}-manage_template>> |<<boolean,boolean>>|No
|
|
94
|
+
| <<plugins-{type}s-{plugin}-network_host>> |<<string,string>>|Yes
|
|
95
|
+
| <<plugins-{type}s-{plugin}-node_name>> |<<string,string>>|No
|
|
96
|
+
| <<plugins-{type}s-{plugin}-parent>> |<<string,string>>|No
|
|
97
|
+
| <<plugins-{type}s-{plugin}-pipeline>> |<<string,string>>|No
|
|
98
|
+
| <<plugins-{type}s-{plugin}-protocol>> |<<string,string>>, one of `["node", "transport"]`|No
|
|
99
|
+
| <<plugins-{type}s-{plugin}-retry_initial_interval>> |<<number,number>>|No
|
|
100
|
+
| <<plugins-{type}s-{plugin}-retry_max_interval>> |<<number,number>>|No
|
|
101
|
+
| <<plugins-{type}s-{plugin}-retry_on_conflict>> |<<number,number>>|No
|
|
102
|
+
| <<plugins-{type}s-{plugin}-routing>> |<<string,string>>|No
|
|
103
|
+
| <<plugins-{type}s-{plugin}-script>> |<<string,string>>|No
|
|
104
|
+
| <<plugins-{type}s-{plugin}-script_lang>> |<<string,string>>|No
|
|
105
|
+
| <<plugins-{type}s-{plugin}-script_type>> |<<string,string>>, one of `["inline", "indexed", "file"]`|No
|
|
106
|
+
| <<plugins-{type}s-{plugin}-script_var_name>> |<<string,string>>|No
|
|
107
|
+
| <<plugins-{type}s-{plugin}-scripted_upsert>> |<<boolean,boolean>>|No
|
|
108
|
+
| <<plugins-{type}s-{plugin}-sniffing>> |<<boolean,boolean>>|No
|
|
109
|
+
| <<plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
|
|
110
|
+
| <<plugins-{type}s-{plugin}-template_name>> |<<string,string>>|No
|
|
111
|
+
| <<plugins-{type}s-{plugin}-template_overwrite>> |<<boolean,boolean>>|No
|
|
112
|
+
| <<plugins-{type}s-{plugin}-transport_tcp_port>> |<<number,number>>|No
|
|
113
|
+
| <<plugins-{type}s-{plugin}-upsert>> |<<string,string>>|No
|
|
114
|
+
| <<plugins-{type}s-{plugin}-version>> |<<string,string>>|No
|
|
115
|
+
| <<plugins-{type}s-{plugin}-version_type>> |<<string,string>>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
|
|
116
|
+
|=======================================================================
|
|
117
|
+
|
|
118
|
+
Also see <<plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
|
|
119
|
+
output plugins.
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
[id="plugins-{type}s-{plugin}-action"]
|
|
124
|
+
===== `action`
|
|
125
|
+
|
|
126
|
+
* Value can be any of: `index`, `delete`, `create`, `update`, `create_unless_exists`
|
|
127
|
+
* Default value is `"index"`
|
|
128
|
+
|
|
129
|
+
The Elasticsearch action to perform. Valid actions are:
|
|
130
|
+
|
|
131
|
+
- index: indexes a document (an event from Logstash).
|
|
132
|
+
- delete: deletes a document by id (An id is required for this action)
|
|
133
|
+
- create: indexes a document, fails if a document by that id already exists in the index.
|
|
134
|
+
- update: updates a document by id. Update has a special case where you can upsert -- update a
|
|
135
|
+
document if not already present. See the `upsert` option
|
|
136
|
+
- create_unless_exists: create the document unless it already exists, in which case do nothing.
|
|
137
|
+
|
|
138
|
+
For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
|
|
139
|
+
|
|
140
|
+
[id="plugins-{type}s-{plugin}-cluster"]
|
|
141
|
+
===== `cluster`
|
|
142
|
+
|
|
143
|
+
* Value type is <<string,string>>
|
|
144
|
+
* There is no default value for this setting.
|
|
145
|
+
|
|
146
|
+
The name of your cluster if you set it on the Elasticsearch side. Useful
|
|
147
|
+
for discovery when using `node` or `transport` protocols.
|
|
148
|
+
By default, it looks for a cluster named 'elasticsearch'.
|
|
149
|
+
Equivalent to the Elasticsearch option 'cluster.name'
|
|
150
|
+
|
|
151
|
+
[id="plugins-{type}s-{plugin}-doc_as_upsert"]
|
|
152
|
+
===== `doc_as_upsert`
|
|
153
|
+
|
|
154
|
+
* Value type is <<boolean,boolean>>
|
|
155
|
+
* Default value is `false`
|
|
156
|
+
|
|
157
|
+
Enable `doc_as_upsert` for update mode.
|
|
158
|
+
Create a new document with source if `document_id` doesn't exist in Elasticsearch
|
|
159
|
+
|
|
160
|
+
[id="plugins-{type}s-{plugin}-document_id"]
|
|
161
|
+
===== `document_id`
|
|
162
|
+
|
|
163
|
+
* Value type is <<string,string>>
|
|
164
|
+
* There is no default value for this setting.
|
|
165
|
+
|
|
166
|
+
The document ID for the index. Useful for overwriting existing entries in
|
|
167
|
+
Elasticsearch with the same ID.
|
|
168
|
+
|
|
169
|
+
[id="plugins-{type}s-{plugin}-document_type"]
|
|
170
|
+
===== `document_type`
|
|
171
|
+
|
|
172
|
+
* Value type is <<string,string>>
|
|
173
|
+
* There is no default value for this setting.
|
|
174
|
+
|
|
175
|
+
The document type to write events to. Generally you should try to write only
|
|
176
|
+
similar events to the same 'type'. String expansion `%{foo}` works here.
|
|
177
|
+
Unless you set 'document_type', the event 'type' will be used if it exists
|
|
178
|
+
otherwise the document type will be assigned the value of 'logs'
|
|
179
|
+
|
|
180
|
+
[id="plugins-{type}s-{plugin}-flush_size"]
|
|
181
|
+
===== `flush_size` (DEPRECATED)
|
|
182
|
+
|
|
183
|
+
* DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
|
|
184
|
+
* Value type is <<number,number>>
|
|
185
|
+
* There is no default value for this setting.
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
[id="plugins-{type}s-{plugin}-hosts"]
|
|
190
|
+
===== `hosts`
|
|
191
|
+
|
|
192
|
+
* Value type is <<uri,uri>>
|
|
193
|
+
* Default value is `[//127.0.0.1]`
|
|
194
|
+
|
|
195
|
+
Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
|
|
196
|
+
Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300).
|
|
197
|
+
`"127.0.0.1"`
|
|
198
|
+
`["127.0.0.1:9200","127.0.0.2:9200"]`
|
|
199
|
+
`["http://127.0.0.1"]`
|
|
200
|
+
`["https://127.0.0.1:9200"]`
|
|
201
|
+
`["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
|
|
202
|
+
It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
|
|
203
|
+
to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
|
|
204
|
+
|
|
205
|
+
Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
|
|
206
|
+
|
|
207
|
+
[id="plugins-{type}s-{plugin}-idle_flush_time"]
|
|
208
|
+
===== `idle_flush_time` (DEPRECATED)
|
|
209
|
+
|
|
210
|
+
* DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
|
|
211
|
+
* Value type is <<number,number>>
|
|
212
|
+
* Default value is `1`
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
[id="plugins-{type}s-{plugin}-index"]
|
|
217
|
+
===== `index`
|
|
218
|
+
|
|
219
|
+
* Value type is <<string,string>>
|
|
220
|
+
* Default value is `"logstash-%{+YYYY.MM.dd}"`
|
|
221
|
+
|
|
222
|
+
The index to write events to. This can be dynamic using the `%{foo}` syntax.
|
|
223
|
+
The default value will partition your indices by day so you can more easily
|
|
224
|
+
delete old data or only search specific date ranges.
|
|
225
|
+
Indexes may not contain uppercase characters.
|
|
226
|
+
For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}.
|
|
227
|
+
LS uses Joda to format the index pattern from event timestamp.
|
|
228
|
+
Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
|
|
229
|
+
|
|
230
|
+
[id="plugins-{type}s-{plugin}-manage_template"]
|
|
231
|
+
===== `manage_template`
|
|
232
|
+
|
|
233
|
+
* Value type is <<boolean,boolean>>
|
|
234
|
+
* Default value is `true`
|
|
235
|
+
|
|
236
|
+
From Logstash 1.3 onwards, a template is applied to Elasticsearch during
|
|
237
|
+
Logstash's startup if one with the name `template_name` does not already exist.
|
|
238
|
+
By default, the contents of this template is the default template for
|
|
239
|
+
`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
|
|
240
|
+
`logstash-*`. Should you require support for other index names, or would like
|
|
241
|
+
to change the mappings in the template in general, a custom template can be
|
|
242
|
+
specified by setting `template` to the path of a template file.
|
|
243
|
+
|
|
244
|
+
Setting `manage_template` to false disables this feature. If you require more
|
|
245
|
+
control over template creation, (e.g. creating indices dynamically based on
|
|
246
|
+
field names) you should set `manage_template` to false and use the REST
|
|
247
|
+
API to apply your templates manually.
|
|
248
|
+
|
|
249
|
+
[id="plugins-{type}s-{plugin}-max_inflight_requests"]
|
|
250
|
+
===== `max_inflight_requests` (DEPRECATED)
|
|
251
|
+
|
|
252
|
+
* DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
|
|
253
|
+
* Value type is <<number,number>>
|
|
254
|
+
* Default value is `50`
|
|
255
|
+
|
|
256
|
+
This setting no longer does anything. It exists to keep config validation
|
|
257
|
+
from failing. It will be removed in future versions.
|
|
258
|
+
|
|
259
|
+
[id="plugins-{type}s-{plugin}-network_host"]
|
|
260
|
+
===== `network_host`
|
|
261
|
+
|
|
262
|
+
* This is a required setting.
|
|
263
|
+
* Value type is <<string,string>>
|
|
264
|
+
* There is no default value for this setting.
|
|
265
|
+
|
|
266
|
+
The name/address of the host to bind to for Elasticsearch clustering. Equivalent to the Elasticsearch option 'network.host'
|
|
267
|
+
option.
|
|
268
|
+
This MUST be set for either protocol to work (node or transport)! The internal Elasticsearch node
|
|
269
|
+
will bind to this ip. This ip MUST be reachable by all nodes in the Elasticsearch cluster
|
|
270
|
+
|
|
271
|
+
[id="plugins-{type}s-{plugin}-node_name"]
|
|
272
|
+
===== `node_name`
|
|
273
|
+
|
|
274
|
+
* Value type is <<string,string>>
|
|
275
|
+
* There is no default value for this setting.
|
|
276
|
+
|
|
277
|
+
The node name Elasticsearch will use when joining a cluster.
|
|
278
|
+
|
|
279
|
+
By default, this is generated internally by the ES client.
|
|
280
|
+
|
|
281
|
+
[id="plugins-{type}s-{plugin}-parent"]
|
|
282
|
+
===== `parent`
|
|
283
|
+
|
|
284
|
+
* Value type is <<string,string>>
|
|
285
|
+
* Default value is `nil`
|
|
286
|
+
|
|
287
|
+
For child documents, ID of the associated parent.
|
|
288
|
+
This can be dynamic using the `%{foo}` syntax.
|
|
289
|
+
|
|
290
|
+
[id="plugins-{type}s-{plugin}-pipeline"]
|
|
291
|
+
===== `pipeline`
|
|
292
|
+
|
|
293
|
+
* Value type is <<string,string>>
|
|
294
|
+
* Default value is `nil`
|
|
295
|
+
|
|
296
|
+
Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
|
|
297
|
+
here like `pipeline => "%{INGEST_PIPELINE}"`
|
|
298
|
+
|
|
299
|
+
[id="plugins-{type}s-{plugin}-protocol"]
|
|
300
|
+
===== `protocol`
|
|
301
|
+
|
|
302
|
+
* Value can be any of: `node`, `transport`
|
|
303
|
+
* Default value is `"transport"`
|
|
304
|
+
|
|
305
|
+
Choose the protocol used to talk to Elasticsearch.
|
|
306
|
+
|
|
307
|
+
The 'node' protocol (default) will connect to the cluster as a normal Elasticsearch
|
|
308
|
+
node (but will not store data). If you use the `node` protocol, you must permit
|
|
309
|
+
bidirectional communication on the port 9300 (or whichever port you have
|
|
310
|
+
configured).
|
|
311
|
+
|
|
312
|
+
If you do not specify the `host` parameter, it will use multicast for http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html[Elasticsearch discovery]. While this may work in a test/dev environment where multicast is enabled in
|
|
313
|
+
Elasticsearch, we strongly recommend http://www.elastic.co/guide/en/elasticsearch/guide/current/important-configuration-changes.html#unicast[using unicast]
|
|
314
|
+
in Elasticsearch. To connect to an Elasticsearch cluster with unicast,
|
|
315
|
+
you must include the `host` parameter (see relevant section above).
|
|
316
|
+
|
|
317
|
+
The 'transport' protocol will connect to the host you specify and will
|
|
318
|
+
not show up as a 'node' in the Elasticsearch cluster. This is useful
|
|
319
|
+
in situations where you cannot permit connections outbound from the
|
|
320
|
+
Elasticsearch cluster to this Logstash server.
|
|
321
|
+
|
|
322
|
+
All protocols will use bulk requests when talking to Elasticsearch.
|
|
323
|
+
|
|
324
|
+
[id="plugins-{type}s-{plugin}-retry_initial_interval"]
|
|
325
|
+
===== `retry_initial_interval`
|
|
326
|
+
|
|
327
|
+
* Value type is <<number,number>>
|
|
328
|
+
* Default value is `2`
|
|
329
|
+
|
|
330
|
+
Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`
|
|
331
|
+
|
|
332
|
+
[id="plugins-{type}s-{plugin}-retry_max_interval"]
|
|
333
|
+
===== `retry_max_interval`
|
|
334
|
+
|
|
335
|
+
* Value type is <<number,number>>
|
|
336
|
+
* Default value is `64`
|
|
337
|
+
|
|
338
|
+
Set max interval in seconds between bulk retries.
|
|
339
|
+
|
|
340
|
+
[id="plugins-{type}s-{plugin}-retry_on_conflict"]
|
|
341
|
+
===== `retry_on_conflict`
|
|
342
|
+
|
|
343
|
+
* Value type is <<number,number>>
|
|
344
|
+
* Default value is `1`
|
|
345
|
+
|
|
346
|
+
The number of times Elasticsearch should internally retry an update/upserted document
|
|
347
|
+
See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
|
|
348
|
+
for more info
|
|
349
|
+
|
|
350
|
+
[id="plugins-{type}s-{plugin}-routing"]
|
|
351
|
+
===== `routing`
|
|
352
|
+
|
|
353
|
+
* Value type is <<string,string>>
|
|
354
|
+
* There is no default value for this setting.
|
|
355
|
+
|
|
356
|
+
A routing override to be applied to all processed events.
|
|
357
|
+
This can be dynamic using the `%{foo}` syntax.
|
|
358
|
+
|
|
359
|
+
[id="plugins-{type}s-{plugin}-script"]
|
|
360
|
+
===== `script`
|
|
361
|
+
|
|
362
|
+
* Value type is <<string,string>>
|
|
363
|
+
* Default value is `""`
|
|
364
|
+
|
|
365
|
+
Set script name for scripted update mode
|
|
366
|
+
|
|
367
|
+
[id="plugins-{type}s-{plugin}-script_lang"]
|
|
368
|
+
===== `script_lang`
|
|
369
|
+
|
|
370
|
+
* Value type is <<string,string>>
|
|
371
|
+
* Default value is `"painless"`
|
|
372
|
+
|
|
373
|
+
Set the language of the used script. If not set, this defaults to painless in ES 5.0
|
|
374
|
+
|
|
375
|
+
[id="plugins-{type}s-{plugin}-script_type"]
|
|
376
|
+
===== `script_type`
|
|
377
|
+
|
|
378
|
+
* Value can be any of: `inline`, `indexed`, `file`
|
|
379
|
+
* Default value is `["inline"]`
|
|
380
|
+
|
|
381
|
+
Define the type of script referenced by "script" variable
|
|
382
|
+
inline : "script" contains inline script
|
|
383
|
+
indexed : "script" contains the name of script directly indexed in elasticsearch
|
|
384
|
+
file : "script" contains the name of script stored in elasticseach's config directory
|
|
385
|
+
|
|
386
|
+
[id="plugins-{type}s-{plugin}-script_var_name"]
|
|
387
|
+
===== `script_var_name`
|
|
388
|
+
|
|
389
|
+
* Value type is <<string,string>>
|
|
390
|
+
* Default value is `"event"`
|
|
391
|
+
|
|
392
|
+
Set variable name passed to script (scripted update)
|
|
393
|
+
|
|
394
|
+
[id="plugins-{type}s-{plugin}-scripted_upsert"]
|
|
395
|
+
===== `scripted_upsert`
|
|
396
|
+
|
|
397
|
+
* Value type is <<boolean,boolean>>
|
|
398
|
+
* Default value is `false`
|
|
399
|
+
|
|
400
|
+
if enabled, script is in charge of creating non-existent document (scripted update)
|
|
401
|
+
|
|
402
|
+
[id="plugins-{type}s-{plugin}-sniffing"]
|
|
403
|
+
===== `sniffing`
|
|
404
|
+
|
|
405
|
+
* Value type is <<boolean,boolean>>
|
|
406
|
+
* Default value is `false`
|
|
407
|
+
|
|
408
|
+
Enable cluster sniffing (transport only).
|
|
409
|
+
Asks host for the list of all cluster nodes and adds them to the hosts list
|
|
410
|
+
Equivalent to the Elasticsearch option 'client.transport.sniff'
|
|
411
|
+
|
|
412
|
+
[id="plugins-{type}s-{plugin}-template"]
|
|
413
|
+
===== `template`
|
|
414
|
+
|
|
415
|
+
* Value type is <<path,path>>
|
|
416
|
+
* There is no default value for this setting.
|
|
417
|
+
|
|
418
|
+
You can set the path to your own template here, if you so desire.
|
|
419
|
+
If not set, the included template will be used.
|
|
420
|
+
|
|
421
|
+
[id="plugins-{type}s-{plugin}-template_name"]
|
|
422
|
+
===== `template_name`
|
|
423
|
+
|
|
424
|
+
* Value type is <<string,string>>
|
|
425
|
+
* Default value is `"logstash"`
|
|
426
|
+
|
|
427
|
+
This configuration option defines how the template is named inside Elasticsearch.
|
|
428
|
+
Note that if you have used the template management features and subsequently
|
|
429
|
+
change this, you will need to prune the old template manually, e.g.
|
|
430
|
+
|
|
431
|
+
`curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
|
|
432
|
+
|
|
433
|
+
where `OldTemplateName` is whatever the former setting was.
|
|
434
|
+
|
|
435
|
+
[id="plugins-{type}s-{plugin}-template_overwrite"]
|
|
436
|
+
===== `template_overwrite`
|
|
437
|
+
|
|
438
|
+
* Value type is <<boolean,boolean>>
|
|
439
|
+
* Default value is `false`
|
|
440
|
+
|
|
441
|
+
The template_overwrite option will always overwrite the indicated template
|
|
442
|
+
in Elasticsearch with either the one indicated by template or the included one.
|
|
443
|
+
This option is set to false by default. If you always want to stay up to date
|
|
444
|
+
with the template provided by Logstash, this option could be very useful to you.
|
|
445
|
+
Likewise, if you have your own template file managed by puppet, for example, and
|
|
446
|
+
you wanted to be able to update it regularly, this option could help there as well.
|
|
447
|
+
|
|
448
|
+
Please note that if you are using your own customized version of the Logstash
|
|
449
|
+
template (logstash), setting this to true will make Logstash overwrite
|
|
450
|
+
the "logstash" template (i.e. removing all customized settings)
|
|
451
|
+
|
|
452
|
+
[id="plugins-{type}s-{plugin}-transport_tcp_port"]
|
|
453
|
+
===== `transport_tcp_port`
|
|
454
|
+
|
|
455
|
+
* Value type is <<number,number>>
|
|
456
|
+
* There is no default value for this setting.
|
|
457
|
+
|
|
458
|
+
This sets the local port to bind to. Equivalent to the Elasticsearch option 'transport.tcp.port'
|
|
459
|
+
|
|
460
|
+
[id="plugins-{type}s-{plugin}-upsert"]
|
|
461
|
+
===== `upsert`
|
|
462
|
+
|
|
463
|
+
* Value type is <<string,string>>
|
|
464
|
+
* Default value is `""`
|
|
465
|
+
|
|
466
|
+
Set upsert content for update mode.
|
|
467
|
+
Create a new document with this parameter as json string if `document_id` doesn't exist
|
|
468
|
+
|
|
469
|
+
[id="plugins-{type}s-{plugin}-version"]
|
|
470
|
+
===== `version`
|
|
471
|
+
|
|
472
|
+
* Value type is <<string,string>>
|
|
473
|
+
* There is no default value for this setting.
|
|
474
|
+
|
|
475
|
+
The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
|
|
476
|
+
See https://www.elastic.co/blog/elasticsearch-versioning-support.
|
|
477
|
+
|
|
478
|
+
[id="plugins-{type}s-{plugin}-version_type"]
|
|
479
|
+
===== `version_type`
|
|
480
|
+
|
|
481
|
+
* Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
|
|
482
|
+
* There is no default value for this setting.
|
|
483
|
+
|
|
484
|
+
The version_type to use for indexing.
|
|
485
|
+
See https://www.elastic.co/blog/elasticsearch-versioning-support.
|
|
486
|
+
See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
|
|
487
|
+
|
|
488
|
+
|
|
489
|
+
|
|
490
|
+
[id="plugins-{type}s-{plugin}-common-options"]
|
|
491
|
+
include::{include_path}/{type}.asciidoc[]
|
|
@@ -1,16 +1,16 @@
|
|
|
1
1
|
Gem::Specification.new do |s|
|
|
2
2
|
s.name = 'logstash-output-elasticsearch_java'
|
|
3
|
-
s.version = '2.1.
|
|
3
|
+
s.version = '2.1.4'
|
|
4
4
|
s.licenses = ['apache-2.0']
|
|
5
5
|
s.summary = "Logstash Output to Elasticsearch using Java node/transport client"
|
|
6
|
-
s.description = "
|
|
6
|
+
s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
|
|
7
7
|
s.authors = ["Elastic"]
|
|
8
8
|
s.email = 'info@elastic.co'
|
|
9
9
|
s.homepage = "http://logstash.net/"
|
|
10
10
|
s.require_paths = ["lib"]
|
|
11
11
|
|
|
12
12
|
# Files
|
|
13
|
-
s.files = Dir[
|
|
13
|
+
s.files = Dir["lib/**/*","spec/**/*","*.gemspec","*.md","CONTRIBUTORS","Gemfile","LICENSE","NOTICE.TXT", "vendor/jar-dependencies/**/*.jar", "vendor/jar-dependencies/**/*.rb", "VERSION", "docs/**/*"]
|
|
14
14
|
|
|
15
15
|
# Tests
|
|
16
16
|
s.test_files = s.files.grep(%r{^(test|spec|features)/})
|
metadata
CHANGED
|
@@ -1,14 +1,14 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: logstash-output-elasticsearch_java
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 2.1.
|
|
4
|
+
version: 2.1.4
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- Elastic
|
|
8
8
|
autorequire:
|
|
9
9
|
bindir: bin
|
|
10
10
|
cert_chain: []
|
|
11
|
-
date:
|
|
11
|
+
date: 2017-06-23 00:00:00.000000000 Z
|
|
12
12
|
dependencies:
|
|
13
13
|
- !ruby/object:Gem::Dependency
|
|
14
14
|
requirement: !ruby/object:Gem::Requirement
|
|
@@ -162,7 +162,7 @@ dependencies:
|
|
|
162
162
|
- - ">="
|
|
163
163
|
- !ruby/object:Gem::Version
|
|
164
164
|
version: '0'
|
|
165
|
-
description:
|
|
165
|
+
description: This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program
|
|
166
166
|
email: info@elastic.co
|
|
167
167
|
executables: []
|
|
168
168
|
extensions: []
|
|
@@ -174,6 +174,7 @@ files:
|
|
|
174
174
|
- LICENSE
|
|
175
175
|
- NOTICE.TXT
|
|
176
176
|
- README.md
|
|
177
|
+
- docs/index.asciidoc
|
|
177
178
|
- lib/logstash-output-elasticsearch_java_jars.rb
|
|
178
179
|
- lib/logstash/outputs/elasticsearch_java.rb
|
|
179
180
|
- lib/logstash/outputs/elasticsearch_java/protocol.rb
|