logstash-output-elasticsearch 7.3.1-java → 7.3.2-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 84e98668e085b0b9f4514e8bf574f502f5e3084e
- data.tar.gz: ce5720c504d1c225cf4deade46096f0eba7cff76
+ metadata.gz: 42a74efb39e22b7169dc0ff278c364b1b6b8f282
+ data.tar.gz: 703b6a84b574bd8db99dd8dabb1fa16c77b34eb3
  SHA512:
- metadata.gz: 17fed98200315e4c6182a91cda4ff0762a6dc11af70048cb037ed87ea48e62a1835b35e0ce170800e76b9174c4a0382024c41e22ca632a97cffedc1378bc90e9
- data.tar.gz: 8433b4438c50c0d7ae0d52404f706917ae5ce581423a32f8abd32425340158798374e1c6ebd8c703fb36a9713a2131be07a67098a5e7f5bc75c5f11b8fa68c46
+ metadata.gz: f0bc0d68d637ee45d5b2a4243f03f72ce367dc34c81f4366b16c46ee9da3034d7d42799d90b518663ee894eb5bf59110a9530d8a35ab259bab047335ac3200f4
+ data.tar.gz: 5d6d9edc56c353d3ea1350a54ff1142d0af0242c587d54a6a26b646b9fd54bfef939f8925d346e0e25e0d78c0ab487df6419c899c6b7e2ed410e21233099c27e
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+ ## 7.3.2
+ - Fix error where a 429 would cause this output to crash
+ - Wait for all inflight requests to complete before stopping
+
  ## 7.3.1
  - Fix the backwards compatibility layer used for detecting DLQ capabilities in logstash core
 
data/Gemfile CHANGED
@@ -1,4 +1,10 @@
  source 'https://rubygems.org'
 
-
  gemspec
+
+ logstash_path = "../../logstash"
+
+ if Dir.exist?(logstash_path) && ENV["LOGSTASH_SOURCE"] == 1
+   gem 'logstash-core', :path => "#{logstash_path}/logstash-core"
+   gem 'logstash-core-plugin-api', :path => "#{logstash_path}/logstash-core-plugin-api"
+ end
data/docs/index.asciidoc ADDED
@@ -0,0 +1,679 @@
+ :plugin: elasticsearch
+ :type: output
+
+ ///////////////////////////////////////////
+ START - GENERATED VARIABLES, DO NOT EDIT!
+ ///////////////////////////////////////////
+ :version: %VERSION%
+ :release_date: %RELEASE_DATE%
+ :changelog_url: %CHANGELOG_URL%
+ :include_path: ../../../logstash/docs/include
+ ///////////////////////////////////////////
+ END - GENERATED VARIABLES, DO NOT EDIT!
+ ///////////////////////////////////////////
+
+ [id="plugins-{type}-{plugin}"]
+
+ === Elasticsearch
+
+ include::{include_path}/plugin_header.asciidoc[]
+
+ ==== Description
+
+ .Compatibility Note
+ [NOTE]
+ ================================================================================
+ Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting]
+ called `http.content_type.required`. If this option is set to `true`, and you
+ are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+ plugin to version 6.2.5 or higher.
+
+ ================================================================================
+
+ This plugin is the recommended method of storing logs in Elasticsearch.
+ If you plan on using the Kibana web interface, you'll want to use this output.
+
+ This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+ We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+ yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+ to upgrade Logstash in lock-step.
+
+ You can learn more about Elasticsearch at <https://www.elastic.co/products/elasticsearch>
+
+ ==== Template management for Elasticsearch 5.x
+ The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+ Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+ behavior.
+
+ ** Users installing ES 5.x and LS 5.x **
+ This change will not affect you and you will continue to use the ES defaults.
+
+ ** Users upgrading from LS 2.x to LS 5.x with ES 5.x **
+ LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+ `.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+ the new template is installed.
+
+ ==== Retry Policy
+
+ The retry policy has changed significantly in the 2.2.0 release.
+ This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+ either partial or total failures.
+
+ The following errors are retried infinitely:
+
+ - Network errors (inability to connect)
+ - 429 (Too many requests) and
+ - 503 (Service unavailable) errors
+
+ NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+ It is more performant for Elasticsearch to retry these exceptions than for this plugin to do so.
+
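
As a sketch of how the retry-related settings described in this file compose (all values below are illustrative), `retry_initial_interval` doubles on each bulk retry until it reaches `retry_max_interval`, while a higher `retry_on_conflict` hands 409 retries to Elasticsearch itself:

----
output {
  elasticsearch {
    hosts                  => ["127.0.0.1:9200"]
    retry_on_conflict      => 3    # let Elasticsearch retry version conflicts internally
    retry_initial_interval => 2    # seconds between bulk retries, doubled on each attempt
    retry_max_interval     => 64   # ceiling for the retry backoff
  }
}
----
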
+ ==== Batch Sizes
+ This plugin attempts to send batches of events as a single request. However, if
+ a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+ ==== DNS Caching
+
+ This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+ a global setting for the JVM.
+
+ As an example, to set your DNS TTL to 1 second you would set
+ the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+ Keep in mind that a connection with keepalive enabled will
+ not reevaluate its DNS value while the keepalive is in effect.
+
+ ==== HTTP Compression
+
+ This plugin supports request and response compression. Response compression is enabled by default and,
+ for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+ it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+ https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+ For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+ setting in their Logstash config file.
+
+
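
For illustration, a minimal sketch of enabling request compression on this output (the host value is a placeholder):

----
output {
  elasticsearch {
    hosts            => ["127.0.0.1:9200"]
    http_compression => true   # gzip request bodies sent to Elasticsearch
  }
}
----
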
+ [id="plugins-{type}s-{plugin}-options"]
+ ==== Elasticsearch Output Configuration Options
+
+ This plugin supports the following configuration options plus the <<plugins-{type}s-common-options>> described later.
+
+ [cols="<,<,<",options="header",]
+ |=======================================================================
+ |Setting |Input type|Required
+ | <<plugins-{type}s-{plugin}-action>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-bulk_path>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+ | <<plugins-{type}s-{plugin}-doc_as_upsert>> |<<boolean,boolean>>|No
+ | <<plugins-{type}s-{plugin}-document_id>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-document_type>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<<array,array>>|No
+ | <<plugins-{type}s-{plugin}-healthcheck_path>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-hosts>> |<<uri,uri>>|No
+ | <<plugins-{type}s-{plugin}-http_compression>> |<<boolean,boolean>>|No
+ | <<plugins-{type}s-{plugin}-index>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+ | <<plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
+ | <<plugins-{type}s-{plugin}-manage_template>> |<<boolean,boolean>>|No
+ | <<plugins-{type}s-{plugin}-parameters>> |<<hash,hash>>|No
+ | <<plugins-{type}s-{plugin}-parent>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+ | <<plugins-{type}s-{plugin}-path>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-pipeline>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-proxy>> |<<uri,uri>>|No
+ | <<plugins-{type}s-{plugin}-resurrect_delay>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-retry_initial_interval>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-retry_max_interval>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-retry_on_conflict>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-routing>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-script>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-script_lang>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-script_type>> |<<string,string>>, one of `["inline", "indexed", "file"]`|No
+ | <<plugins-{type}s-{plugin}-script_var_name>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-scripted_upsert>> |<<boolean,boolean>>|No
+ | <<plugins-{type}s-{plugin}-sniffing>> |<<boolean,boolean>>|No
+ | <<plugins-{type}s-{plugin}-sniffing_delay>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-sniffing_path>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+ | <<plugins-{type}s-{plugin}-ssl_certificate_verification>> |<<boolean,boolean>>|No
+ | <<plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+ | <<plugins-{type}s-{plugin}-template_name>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-template_overwrite>> |<<boolean,boolean>>|No
+ | <<plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+ | <<plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
+ | <<plugins-{type}s-{plugin}-upsert>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-user>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
+ | <<plugins-{type}s-{plugin}-version>> |<<string,string>>|No
+ | <<plugins-{type}s-{plugin}-version_type>> |<<string,string>>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+ |=======================================================================
+
+ Also see <<plugins-{type}s-common-options>> for a list of options supported by all
+ output plugins.
+
+ &nbsp;
+
+ [id="plugins-{type}s-{plugin}-action"]
+ ===== `action`
+
+ * Value type is <<string,string>>
+ * Default value is `"index"`
+
+ The Elasticsearch action to perform. Valid actions are:
+
+ - index: indexes a document (an event from Logstash).
+ - delete: deletes a document by id (an id is required for this action)
+ - create: indexes a document, fails if a document by that id already exists in the index.
+ - update: updates a document by id. Update has a special case where you can upsert -- update a
+ document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+ in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+ - A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+ would use the foo field for the action.
+
+ For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
+
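
As a sketch of the sprintf form described above (the metadata field name is hypothetical), the action can be chosen per event:

----
output {
  elasticsearch {
    hosts  => ["127.0.0.1:9200"]
    action => "%{[@metadata][es_action]}"   # e.g. "index", "create", or "update", taken from the event
  }
}
----
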
+ [id="plugins-{type}s-{plugin}-bulk_path"]
+ ===== `bulk_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+ HTTP path at which to perform the _bulk requests.
+ This defaults to a concatenation of the path parameter and "_bulk".
+
+ [id="plugins-{type}s-{plugin}-cacert"]
+ ===== `cacert`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+ The .cer or .pem file to validate the server's certificate.
+
+ [id="plugins-{type}s-{plugin}-doc_as_upsert"]
+ ===== `doc_as_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+ Enable `doc_as_upsert` for update mode.
+ Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+ [id="plugins-{type}s-{plugin}-document_id"]
+ ===== `document_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+ The document ID for the index. Useful for overwriting existing entries in
+ Elasticsearch with the same ID.
+
+ [id="plugins-{type}s-{plugin}-document_type"]
+ ===== `document_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+ The document type to write events to. Generally you should try to write only
+ similar events to the same 'type'. String expansion `%{foo}` works here.
+ Unless you set 'document_type', the event 'type' will be used if it exists;
+ otherwise the document type will be assigned the value of 'logs'.
+
+ [id="plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+ ===== `failure_type_logging_whitelist`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+ Set the Elasticsearch error types that you don't want to log.
+ A useful example is when you want to skip all 409 errors,
+ which are `document_already_exists_exception`.
+
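
For example, a sketch that suppresses logging of the duplicate-document errors mentioned above:

----
output {
  elasticsearch {
    hosts                          => ["127.0.0.1:9200"]
    failure_type_logging_whitelist => ["document_already_exists_exception"]
  }
}
----
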
+ [id="plugins-{type}s-{plugin}-flush_size"]
+ ===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+
+
+ [id="plugins-{type}s-{plugin}-healthcheck_path"]
+ ===== `healthcheck_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+ HTTP path at which a HEAD request is sent when a backend is marked down;
+ the request is sent in the background to see if it has come back again
+ before it is once again eligible to service requests.
+ If you have custom firewall rules you may need to change this.
+
+ [id="plugins-{type}s-{plugin}-hosts"]
+ ===== `hosts`
+
+ * Value type is <<uri,uri>>
+ * Default value is `[//127.0.0.1]`
+
+ Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+ Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+ It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+ to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+ Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
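
A sketch of an array of data-node hosts behind TLS, with the port spelled out (addresses are placeholders):

----
output {
  elasticsearch {
    hosts => ["https://es-data-1.example.com:9200", "https://es-data-2.example.com:9200"]
  }
}
----
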
+ [id="plugins-{type}s-{plugin}-http_compression"]
+ ===== `http_compression`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+ Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+ [id="plugins-{type}s-{plugin}-idle_flush_time"]
+ ===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+
+
+ [id="plugins-{type}s-{plugin}-index"]
+ ===== `index`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+ The index to write events to. This can be dynamic using the `%{foo}` syntax.
+ The default value will partition your indices by day so you can more easily
+ delete old data or only search specific date ranges.
+ Indexes may not contain uppercase characters.
+ For weekly indexes ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+ LS uses Joda to format the index pattern from the event timestamp.
+ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
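
For instance, a sketch of the weekly index pattern recommended above (the index prefix is illustrative):

----
output {
  elasticsearch {
    hosts => ["127.0.0.1:9200"]
    index => "logstash-%{+xxxx.ww}"   # ISO week-based year and week number
  }
}
----
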
+ [id="plugins-{type}s-{plugin}-keystore"]
+ ===== `keystore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+ The keystore used to present a certificate to the server.
+ It can be either a .jks or .p12 file.
+
+ [id="plugins-{type}s-{plugin}-keystore_password"]
+ ===== `keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+ Set the keystore password.
+
+ [id="plugins-{type}s-{plugin}-manage_template"]
+ ===== `manage_template`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+ From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+ Logstash's startup if one with the name `template_name` does not already exist.
+ By default, the content of this template is the default template for
+ `logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+ `logstash-*`. Should you require support for other index names, or would like
+ to change the mappings in the template in general, a custom template can be
+ specified by setting `template` to the path of a template file.
+
+ Setting `manage_template` to false disables this feature. If you require more
+ control over template creation (e.g. creating indices dynamically based on
+ field names), you should set `manage_template` to false and use the REST
+ API to apply your templates manually.
+
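
A sketch of pointing the output at a custom template while keeping template management enabled (the file path and template name are hypothetical):

----
output {
  elasticsearch {
    hosts              => ["127.0.0.1:9200"]
    template           => "/etc/logstash/templates/my_app.json"
    template_name      => "my_app"
    template_overwrite => true   # keep the installed template in sync with the file
  }
}
----
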
+ [id="plugins-{type}s-{plugin}-parameters"]
+ ===== `parameters`
+
+ * Value type is <<hash,hash>>
+ * There is no default value for this setting.
+
+ Pass a set of key-value pairs as the URL query string. This query string is added
+ to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+ URLs that already have query strings, the one specified here will be appended.
+
+ [id="plugins-{type}s-{plugin}-parent"]
+ ===== `parent`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+ For child documents, ID of the associated parent.
+ This can be dynamic using the `%{foo}` syntax.
+
+ [id="plugins-{type}s-{plugin}-password"]
+ ===== `password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+ Password to authenticate to a secure Elasticsearch cluster.
+
+ [id="plugins-{type}s-{plugin}-path"]
+ ===== `path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+ HTTP path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+ the root path of the Elasticsearch HTTP API.
+ Note that if you use paths as components of URLs in the 'hosts' field you may
+ not also set this field; that will raise an error at startup.
+
+ [id="plugins-{type}s-{plugin}-pipeline"]
+ ===== `pipeline`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+ Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+ here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+ [id="plugins-{type}s-{plugin}-pool_max"]
+ ===== `pool_max`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`
+
+ While the output tries to reuse connections efficiently we have a maximum.
+ This sets the maximum number of open connections the output will create.
+ Setting this too low may mean frequently closing / opening connections,
+ which is bad.
+
+ [id="plugins-{type}s-{plugin}-pool_max_per_route"]
+ ===== `pool_max_per_route`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+ While the output tries to reuse connections efficiently we have a maximum per endpoint.
+ This sets the maximum number of open connections per endpoint the output will create.
+ Setting this too low may mean frequently closing / opening connections,
+ which is bad.
+
+ [id="plugins-{type}s-{plugin}-proxy"]
+ ===== `proxy`
+
+ * Value type is <<uri,uri>>
+ * There is no default value for this setting.
+
+ Set the address of a forward HTTP proxy.
+ This used to accept hashes as arguments but now only accepts
+ arguments of the URI type to prevent leaking credentials.
+
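
A sketch of routing requests through a forward proxy given as a URI (the proxy address is a placeholder):

----
output {
  elasticsearch {
    hosts => ["127.0.0.1:9200"]
    proxy => "http://proxy.example.com:3128"
  }
}
----
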
+ [id="plugins-{type}s-{plugin}-resurrect_delay"]
+ ===== `resurrect_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+ How long, in seconds, to wait between resurrection attempts.
+ Resurrection is the process by which backend endpoints marked 'down' are checked
+ to see if they have come back to life.
+
+ [id="plugins-{type}s-{plugin}-retry_initial_interval"]
+ ===== `retry_initial_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `2`
+
+ Set the initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+ [id="plugins-{type}s-{plugin}-retry_max_interval"]
+ ===== `retry_max_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `64`
+
+ Set the max interval in seconds between bulk retries.
+
+ [id="plugins-{type}s-{plugin}-retry_on_conflict"]
+ ===== `retry_on_conflict`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+ The number of times Elasticsearch should internally retry an update/upserted document.
+ See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
+ documentation for more info.
+
+ [id="plugins-{type}s-{plugin}-routing"]
+ ===== `routing`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+ A routing override to be applied to all processed events.
+ This can be dynamic using the `%{foo}` syntax.
+
+ [id="plugins-{type}s-{plugin}-script"]
+ ===== `script`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+ Set the script name for scripted update mode.
+
+ [id="plugins-{type}s-{plugin}-script_lang"]
+ ===== `script_lang`
+
+ * Value type is <<string,string>>
+ * Default value is `"painless"`
+
+ Set the language of the used script. If not set, this defaults to `painless` in ES 5.0.
+
+ [id="plugins-{type}s-{plugin}-script_type"]
+ ===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+ Define the type of script referenced by the "script" variable:
+ inline : "script" contains an inline script
+ indexed : "script" contains the name of a script directly indexed in Elasticsearch
+ file : "script" contains the name of a script stored in Elasticsearch's config directory
+
+ [id="plugins-{type}s-{plugin}-script_var_name"]
+ ===== `script_var_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"event"`
+
+ Set the variable name passed to the script (scripted update).
+
+ [id="plugins-{type}s-{plugin}-scripted_upsert"]
+ ===== `scripted_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+ If enabled, the script is in charge of creating a non-existent document (scripted update).
+
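
Tying the update-related settings together, a sketch of a scripted update with upsert (the id field and script name are hypothetical):

----
output {
  elasticsearch {
    hosts           => ["127.0.0.1:9200"]
    action          => "update"
    document_id     => "%{[request_id]}"
    script          => "increment-counter"   # name of a script stored in Elasticsearch
    script_type     => "indexed"
    scripted_upsert => true                  # let the script create missing documents
  }
}
----
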
+ [id="plugins-{type}s-{plugin}-sniffing"]
+ ===== `sniffing`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+ This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+ Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+ this with master nodes, you probably want to disable HTTP on them by setting
+ `http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+ manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+ [id="plugins-{type}s-{plugin}-sniffing_delay"]
+ ===== `sniffing_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+ How long to wait, in seconds, between sniffing attempts.
+
+ [id="plugins-{type}s-{plugin}-sniffing_path"]
+ ===== `sniffing_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+ HTTP path to be used for the sniffing requests.
+ The default value is computed by concatenating the path value and "_nodes/http".
+ If sniffing_path is set it will be used as an absolute path.
+ Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+ [id="plugins-{type}s-{plugin}-ssl"]
+ ===== `ssl`
+
+ * Value type is <<boolean,boolean>>
+ * There is no default value for this setting.
+
+ Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+ is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
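
A sketch of a TLS-enabled configuration that validates the server certificate against a CA file (paths and credentials are placeholders):

----
output {
  elasticsearch {
    hosts    => ["https://es.example.com:9200"]
    ssl      => true
    cacert   => "/etc/logstash/certs/ca.pem"
    user     => "logstash_writer"
    password => "changeme"
  }
}
----
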
+ [id="plugins-{type}s-{plugin}-ssl_certificate_verification"]
+ ===== `ssl_certificate_verification`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+ Option to validate the server's certificate. Disabling this severely compromises security.
+ For more information on disabling certificate verification please read
+ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+ [id="plugins-{type}s-{plugin}-template"]
+ ===== `template`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+ You can set the path to your own template here, if you so desire.
+ If not set, the included template will be used.
+
+ [id="plugins-{type}s-{plugin}-template_name"]
+ ===== `template_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash"`
+
+ This configuration option defines how the template is named inside Elasticsearch.
+ Note that if you have used the template management features and subsequently
+ change this, you will need to prune the old template manually, e.g.
+
+ `curl -XDELETE http://localhost:9200/_template/OldTemplateName?pretty`
+
+ where `OldTemplateName` is whatever the former setting was.
+
+ [id="plugins-{type}s-{plugin}-template_overwrite"]
+ ===== `template_overwrite`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+ The template_overwrite option will always overwrite the indicated template
+ in Elasticsearch with either the one indicated by template or the included one.
+ This option is set to false by default. If you always want to stay up to date
+ with the template provided by Logstash, this option could be very useful to you.
+ Likewise, if you have your own template file managed by puppet, for example, and
+ you wanted to be able to update it regularly, this option could help there as well.
+
+ Please note that if you are using your own customized version of the Logstash
+ template (logstash), setting this to true will make Logstash overwrite
+ the "logstash" template (i.e. remove all customized settings).
+
+ [id="plugins-{type}s-{plugin}-timeout"]
+ ===== `timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `60`
+
+ Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+ a timeout occurs, the request will be retried.
+
+ [id="plugins-{type}s-{plugin}-truststore"]
+ ===== `truststore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+ The JKS truststore to validate the server's certificate.
+ Use either `:truststore` or `:cacert`.
+
+ [id="plugins-{type}s-{plugin}-truststore_password"]
+ ===== `truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+ Set the truststore password.
+
+ [id="plugins-{type}s-{plugin}-upsert"]
+ ===== `upsert`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+ Set upsert content for update mode.
+ Create a new document with this parameter as its JSON string if `document_id` doesn't exist.
+
+ [id="plugins-{type}s-{plugin}-user"]
+ ===== `user`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+ Username to authenticate to a secure Elasticsearch cluster.
+
+ [id="plugins-{type}s-{plugin}-validate_after_inactivity"]
+ ===== `validate_after_inactivity`
+
+ * Value type is <<number,number>>
+ * Default value is `10000`
+
+ How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
+ You may want to set this lower if you get connection errors regularly.
+ Quoting the Apache Commons docs (this client is based on Apache Commons):
+ 'Defines period of inactivity in milliseconds after which persistent connections must
+ be re-validated prior to being leased to the consumer. Non-positive value passed to
+ this method disables connection validation. This check helps detect connections that
+ have become stale (half-closed) while kept inactive in the pool.'
+ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+ [id="plugins-{type}s-{plugin}-version"]
+ ===== `version`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+ The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+ See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+ [id="plugins-{type}s-{plugin}-version_type"]
+ ===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+ The version_type to use for indexing.
+ See https://www.elastic.co/blog/elasticsearch-versioning-support.
+ See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+ include::{include_path}/{type}.asciidoc[]
data/lib/logstash/outputs/elasticsearch/common.rb CHANGED
@@ -4,6 +4,11 @@ module LogStash; module Outputs; class ElasticSearch;
  module Common
  attr_reader :client, :hosts
 
+ # These are codes for temporary recoverable conditions
+ # 429 just means that ES has too much traffic ATM
+ # 503 means it, or a proxy, is temporarily unavailable
+ RETRYABLE_CODES = [429, 503]
+
  DLQ_CODES = [400, 404]
  SUCCESS_CODES = [200, 201]
  CONFLICT_CODE = 409
@@ -252,16 +257,16 @@ module LogStash; module Outputs; class ElasticSearch;
 
  # We treat 429s as a special case because these really aren't errors, but
  # rather just ES telling us to back off a bit, which we do.
- # The other retryable codes are 502 and 503, which are true errors
+ # The other retryable code is 503, which is a true error
  # Even though we retry the user should be made aware of these
  if e.response_code == 429
- @logger.debug(message, log_hash)
+ logger.debug(message, log_hash)
  else
- @logger.error(message, log_hash)
+ logger.error(message, log_hash)
  end
 
  sleep_interval = sleep_for_interval(sleep_interval)
- retry unless @stopping.true?
+ retry
  else
  log_hash = {:code => e.response_code,
  :response_body => e.response_body}
data/logstash-output-elasticsearch.gemspec CHANGED
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'logstash-output-elasticsearch'
- s.version = '7.3.1'
+ s.version = '7.3.2'
  s.licenses = ['apache-2.0']
  s.summary = "Logstash Output to Elasticsearch"
  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
@@ -10,7 +10,7 @@ Gem::Specification.new do |s|
  s.require_paths = ["lib"]
 
  # Files
- s.files = Dir['lib/**/*','spec/**/*','vendor/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.TXT']
+ s.files = Dir["lib/**/*","spec/**/*","*.gemspec","*.md","CONTRIBUTORS","Gemfile","LICENSE","NOTICE.TXT", "vendor/jar-dependencies/**/*.jar", "vendor/jar-dependencies/**/*.rb", "VERSION", "docs/**/*"]
 
  # Tests
  s.test_files = s.files.grep(%r{^(test|spec|features)/})
@@ -202,8 +202,41 @@ describe "outputs/elasticsearch" do
 
  end
 
- end
+ context "429 errors" do
+   let(:event) { ::LogStash::Event.new("foo" => "bar") }
+   let(:error) do
+     ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError.new(
+       429, double("url").as_null_object, double("request body"), double("response body")
+     )
+   end
+   let(:logger) { double("logger").as_null_object }
+
+   before(:each) do
+     i = 0
+     bulk_param = [["index", anything, event.to_hash]]
+
+     allow(eso).to receive(:logger).and_return(logger)
 
+     # Fail the first time bulk is called, succeed the next time
+     allow(eso.client).to receive(:bulk).with(bulk_param) do
+       i += 1
+       if i == 1
+         raise error
+       end
+     end
+     eso.multi_receive([event])
+   end
+
+   it "should retry the 429 till it goes away" do
+     expect(eso.client).to have_received(:bulk).twice
+   end
+
+   it "should log a debug message" do
+     expect(eso.logger).to have_received(:debug).with(/Encountered a retryable error/i, anything)
+   end
+ end
+ end
+
  context "with timeout set" do
  let(:listener) { Flores::Random.tcp_listener }
  let(:port) { listener[2] }
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-output-elasticsearch
  version: !ruby/object:Gem::Version
- version: 7.3.1
+ version: 7.3.2
  platform: java
  authors:
  - Elastic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2017-04-27 00:00:00.000000000 Z
+ date: 2017-05-26 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
@@ -208,6 +208,7 @@ files:
  - LICENSE
  - NOTICE.TXT
  - README.md
+ - docs/index.asciidoc
  - lib/logstash/outputs/elasticsearch.rb
  - lib/logstash/outputs/elasticsearch/common.rb
  - lib/logstash/outputs/elasticsearch/common_configs.rb