logstash-output-elasticsearch 10.6.0-java → 10.8.0-java
- checksums.yaml +4 -4
- data/CHANGELOG.md +24 -1
- data/CONTRIBUTORS +1 -0
- data/docs/index.asciidoc +97 -78
- data/lib/logstash/outputs/elasticsearch.rb +300 -165
- data/lib/logstash/outputs/elasticsearch/http_client.rb +7 -2
- data/lib/logstash/outputs/elasticsearch/http_client/pool.rb +13 -28
- data/lib/logstash/outputs/elasticsearch/http_client_builder.rb +1 -0
- data/lib/logstash/outputs/elasticsearch/ilm.rb +9 -5
- data/lib/logstash/outputs/elasticsearch/license_checker.rb +47 -0
- data/lib/logstash/outputs/elasticsearch/template_manager.rb +8 -3
- data/lib/logstash/outputs/elasticsearch/templates/ecs-disabled/elasticsearch-8x.json +39 -33
- data/lib/logstash/plugin_mixins/elasticsearch/api_configs.rb +163 -0
- data/lib/logstash/{outputs → plugin_mixins}/elasticsearch/common.rb +40 -154
- data/lib/logstash/plugin_mixins/elasticsearch/noop_license_checker.rb +9 -0
- data/logstash-output-elasticsearch.gemspec +1 -1
- data/spec/es_spec_helper.rb +32 -12
- data/spec/fixtures/template-with-policy-es8x.json +50 -0
- data/spec/integration/outputs/ilm_spec.rb +34 -20
- data/spec/integration/outputs/metrics_spec.rb +1 -5
- data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +45 -5
- data/spec/unit/outputs/elasticsearch/http_client_spec.rb +22 -0
- data/spec/unit/outputs/elasticsearch/template_manager_spec.rb +31 -0
- data/spec/unit/outputs/elasticsearch_spec.rb +22 -0
- data/spec/unit/outputs/license_check_spec.rb +41 -0
- metadata +10 -4
- data/lib/logstash/outputs/elasticsearch/common_configs.rb +0 -167
data/lib/logstash/outputs/elasticsearch/http_client.rb
@@ -286,6 +286,7 @@ module LogStash; module Outputs; class ElasticSearch;
       adapter = build_adapter(options)
 
       pool_options = {
+        :license_checker => options[:license_checker],
         :sniffing => sniffing,
         :sniffer_delay => options[:sniffer_delay],
         :sniffing_path => options[:sniffing_path],
@@ -343,15 +344,19 @@ module LogStash; module Outputs; class ElasticSearch;
     end
 
     def template_exists?(name)
-      exists?("/_template/#{name}")
+      exists?("/#{template_endpoint}/#{name}")
     end
 
     def template_put(name, template)
-      path = "_template/#{name}"
+      path = "#{template_endpoint}/#{name}"
       logger.info("Installing elasticsearch template to #{path}")
       @pool.put(path, nil, LogStash::Json.dump(template))
     end
 
+    def template_endpoint
+      maximum_seen_major_version < 8 ? '_template' : '_index_template'
+    end
+
     # ILM methods
 
     # check whether rollover alias already exists
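The new template_endpoint helper is what routes template installation to the right API: the legacy _template endpoint for clusters reporting a major version below 8, and the composable _index_template endpoint from 8 onwards. A minimal standalone sketch of that switch (the es_major_version argument stands in for the client's maximum_seen_major_version):

    # Sketch only: mirrors the endpoint selection introduced above.
    def template_endpoint_for(es_major_version)
      es_major_version < 8 ? '_template' : '_index_template'
    end

    template_endpoint_for(7)  # => "_template"       (legacy template API)
    template_endpoint_for(8)  # => "_index_template" (composable template API)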
data/lib/logstash/outputs/elasticsearch/http_client/pool.rb
@@ -1,3 +1,5 @@
+require "logstash/plugin_mixins/elasticsearch/noop_license_checker"
+
 module LogStash; module Outputs; class ElasticSearch; class HttpClient;
   class Pool
     class NoConnectionAvailableError < Error; end
@@ -29,6 +31,7 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
     end
 
     attr_reader :logger, :adapter, :sniffing, :sniffer_delay, :resurrect_delay, :healthcheck_path, :sniffing_path, :bulk_path
+    attr_reader :license_checker # license_checker is used by the pool specs
 
     ROOT_URI_PATH = '/'.freeze
     LICENSE_PATH = '/_license'.freeze
@@ -66,12 +69,10 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
       # Holds metadata about all URLs
       @url_info = {}
       @stopping = false
-    end
 
-    def oss?
-      LogStash::Outputs::ElasticSearch.oss?
+      @license_checker = options.fetch(:license_checker) { LogStash::PluginMixins::ElasticSearch::NoopLicenseChecker::INSTANCE }
     end
-
+
     def start
       update_initial_urls
       start_resurrectionist
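The pool now receives its license policy by injection: options.fetch with a block falls back to the shared NoopLicenseChecker instance (which accepts every connection) whenever no :license_checker is supplied. A small sketch of that fallback pattern, with illustrative names rather than the plugin's own classes:

    # Sketch of the options.fetch fallback used in the pool initializer above.
    class NoopChecker                            # stand-in for NoopLicenseChecker::INSTANCE
      def appropriate_license?(pool, url)
        true                                     # never reject a connection
      end
    end

    def pick_checker(options)
      options.fetch(:license_checker) { NoopChecker.new }
    end

    pick_checker({}).class                       # => NoopChecker (default when nothing is injected)
    pick_checker(:license_checker => :custom)    # => :custom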
@@ -210,7 +211,6 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
       end
     end
 
-
     def sniff_2x_1x(nodes)
       nodes.map do |id,info|
         # TODO Make sure this works with shield. Does that listed
@@ -245,16 +245,15 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
       end
     end
 
+    # Retrieve ES node license information
+    # @param url [LogStash::Util::SafeURI] ES node URL
+    # @return [Hash] deserialized license document or empty Hash upon any error
     def get_license(url)
       response = perform_request_to_url(url, :get, LICENSE_PATH)
       LogStash::Json.load(response.body)
-    end
-
-    def valid_es_license?(url)
-      license = get_license(url)
-      license.fetch("license", {}).fetch("status", nil) == "active"
     rescue => e
-
+      logger.error("Unable to get license information", url: url.sanitized.to_s, error_type: e.class, error: e.message)
+      {}
     end
 
     def health_check_request(url)
@@ -282,19 +281,9 @@
           @logger.warn("Detected a node with a higher major version than previously observed. This could be the result of an elasticsearch cluster upgrade.", :previous_major => @maximum_seen_major_version, :new_major => major, :node_url => url.sanitized.to_s)
           set_new_major_version(major)
         end
-
-
-
-          # As this version is to be shipped with Logstash 7.x we won't mark the connection as unlicensed
-          #
-          # logger.error("Cannot connect to the Elasticsearch cluster configured in the Elasticsearch output. Logstash requires the default distribution of Elasticsearch. Please update to the default distribution of Elasticsearch for full access to all free features, or switch to the OSS distribution of Logstash.", :url => url.sanitized.to_s)
-          # meta[:state] = :unlicensed
-          #
-          # Instead we'll log a deprecation warning and mark it as alive:
-          #
-          log_license_deprecation_warn(url)
-          meta[:state] = :alive
-        end
+
+        alive = @license_checker.appropriate_license?(self, url)
+        meta[:state] = alive ? :alive : :dead
       end
     rescue HostUnreachableError, BadResponseCodeError => e
       logger.warn("Attempted to resurrect connection to dead ES instance, but got an error.", url: url.sanitized.to_s, error_type: e.class, error: e.message)
@@ -306,10 +295,6 @@
       @resurrectionist.join if @resurrectionist
     end
 
-    def log_license_deprecation_warn(url)
-      logger.warn("DEPRECATION WARNING: Connecting to an OSS distribution of Elasticsearch using the default distribution of Logstash will stop working in Logstash 8.0.0. Please upgrade to the default distribution of Elasticsearch, or use the OSS distribution of Logstash", :url => url.sanitized.to_s)
-    end
-
     def resurrectionist_alive?
       @resurrectionist ? @resurrectionist.alive? : nil
     end
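Taken together, these pool changes turn the license policy into an injected collaborator: during healthcheck the pool asks whatever object was passed as :license_checker and marks the connection :alive or :dead from its answer. A rough, self-contained sketch of that seam, where both classes are illustrative stand-ins rather than the plugin's real Pool:

    # Illustrative sketch of the pluggable license check seam described above.
    class StrictChecker
      # Any object answering appropriate_license?(pool, url) fits the checker protocol.
      def appropriate_license?(pool, url)
        pool.get_license(url).dig("license", "status") == "active"
      end
    end

    class TinyPool
      def initialize(license_checker:, licenses: {})
        @license_checker = license_checker
        @licenses = licenses                     # url => canned /_license document, for the sketch
      end

      def get_license(url)
        @licenses.fetch(url, {})                 # like the real pool, failure yields an empty Hash
      end

      def health_state(url)
        alive = @license_checker.appropriate_license?(self, url)
        alive ? :alive : :dead
      end
    end

    pool = TinyPool.new(license_checker: StrictChecker.new,
                        licenses: { "es1" => { "license" => { "status" => "active" } } })
    pool.health_state("es1")   # => :alive
    pool.health_state("es2")   # => :dead (no license document available)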
data/lib/logstash/outputs/elasticsearch/http_client_builder.rb
@@ -15,6 +15,7 @@ module LogStash; module Outputs; class ElasticSearch;
       client_settings[:proxy] = params["proxy"] if params["proxy"]
 
       common_options = {
+        :license_checker => params["license_checker"],
         :client_settings => client_settings,
         :metric => params["metric"],
         :resurrect_delay => params["resurrect_delay"]
data/lib/logstash/outputs/elasticsearch/ilm.rb
@@ -5,10 +5,10 @@ module LogStash; module Outputs; class ElasticSearch
 
     def setup_ilm
       return unless ilm_in_use?
-
-
-
-
+      logger.warn("Overwriting supplied index #{@index} with rollover alias #{@ilm_rollover_alias}") unless default_index?(@index)
+      @index = @ilm_rollover_alias
+      maybe_create_rollover_alias
+      maybe_create_ilm_policy
     end
 
     def default_rollover_alias?(rollover_alias)
@@ -75,6 +75,10 @@ module LogStash; module Outputs; class ElasticSearch
 
     private
 
+    def default_index?(index)
+      index == @default_index
+    end
+
     def ilm_policy_default?
       ilm_policy == LogStash::Outputs::ElasticSearch::DEFAULT_POLICY
     end
@@ -110,4 +114,4 @@ module LogStash; module Outputs; class ElasticSearch
       LogStash::Json.load(::IO.read(policy_path))
     end
   end
-
+end; end; end
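setup_ilm now overwrites the configured index with the rollover alias and only warns when the user had changed index away from the plugin default, which is what the new default_index? predicate checks. A standalone illustration of that guard; the default pattern shown is an assumption about the plugin's usual logstash-%{+yyyy.MM.dd} default, not something taken from this diff:

    # Illustrative sketch of the default_index? guard used by setup_ilm above.
    DEFAULT_INDEX = "logstash-%{+yyyy.MM.dd}"    # assumed default index pattern

    def default_index?(index)
      index == DEFAULT_INDEX
    end

    def warn_if_overwriting(index, rollover_alias)
      return if default_index?(index)
      warn "Overwriting supplied index #{index} with rollover alias #{rollover_alias}"
    end

    warn_if_overwriting("logstash-%{+yyyy.MM.dd}", "logstash")  # silent: index left at the default
    warn_if_overwriting("my-custom-index", "logstash")          # prints the overwrite warning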
data/lib/logstash/outputs/elasticsearch/license_checker.rb
@@ -0,0 +1,47 @@
+module LogStash; module Outputs; class ElasticSearch
+  class LicenseChecker
+
+    def initialize(logger)
+      @logger = logger
+    end
+
+    # Figure out if the provided license is appropriate or not
+    # The appropriate_license? methods is the method called from LogStash::Outputs::ElasticSearch::HttpClient::Pool#healthcheck!
+    # @param url [LogStash::Util::SafeURI] ES node URL
+    # @param license [Hash] ES node deserialized licence document
+    # @return [Boolean] true if provided license is deemed appropriate
+    def appropriate_license?(pool, url)
+      return true if oss?
+
+      license = pool.get_license(url)
+      if valid_es_license?(license)
+        true
+      else
+        # As this version is to be shipped with Logstash 7.x we won't mark the connection as unlicensed
+        #
+        # @logger.error("Cannot connect to the Elasticsearch cluster configured in the Elasticsearch output. Logstash requires the default distribution of Elasticsearch. Please update to the default distribution of Elasticsearch for full access to all free features, or switch to the OSS distribution of Logstash.", :url => url.sanitized.to_s)
+        # meta[:state] = :unlicensed
+        #
+        # Instead we'll log a deprecation warning and mark it as alive:
+        #
+        log_license_deprecation_warn(url)
+        true
+      end
+    end
+
+    # Note that oss? could be private but is used by the Pool specs
+    def oss?
+      LogStash::OSS
+    end
+
+    # Note that valid_es_license? could be private but is used by the Pool specs
+    def valid_es_license?(license)
+      license.fetch("license", {}).fetch("status", nil) == "active"
+    end
+
+    # Note that log_license_deprecation_warn could be private but is used by the Pool specs
+    def log_license_deprecation_warn(url)
+      @logger.warn("DEPRECATION WARNING: Connecting to an OSS distribution of Elasticsearch using the default distribution of Logstash will stop working in Logstash 8.0.0. Please upgrade to the default distribution of Elasticsearch, or use the OSS distribution of Logstash", :url => url.sanitized.to_s)
+    end
+  end
+end; end; end
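At the heart of the new class is a plain lookup into the /_license document fetched by the pool; everything else preserves the 7.x policy of warning about OSS clusters rather than rejecting them, so appropriate_license? still returns true after logging the deprecation warning. A minimal sketch of that status check, reimplemented here so it runs without logstash-core on the load path:

    # Sketch of the "license" -> "status" check performed by valid_es_license? above.
    def license_active?(license_document)
      license_document.fetch("license", {}).fetch("status", nil) == "active"
    end

    license_active?("license" => { "status" => "active", "type" => "basic" })  # => true
    license_active?("license" => { "status" => "expired" })                    # => false
    license_active?({})   # => false (the empty Hash get_license returns on error)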
data/lib/logstash/outputs/elasticsearch/template_manager.rb
@@ -34,12 +34,17 @@ module LogStash; module Outputs; class ElasticSearch
     def self.add_ilm_settings_to_template(plugin, template)
       # Overwrite any index patterns, and use the rollover alias. Use 'index_patterns' rather than 'template' for pattern
       # definition - remove any existing definition of 'template'
-      template.delete('template') if template.include?('template')
+      template.delete('template') if template.include?('template') if plugin.maximum_seen_major_version < 8
       template['index_patterns'] = "#{plugin.ilm_rollover_alias}-*"
-
+      settings = template_settings(plugin, template)
+      if settings && (settings['index.lifecycle.name'] || settings['index.lifecycle.rollover_alias'])
         plugin.logger.info("Overwriting index lifecycle name and rollover alias as ILM is enabled.")
       end
-
+      settings.update({ 'index.lifecycle.name' => plugin.ilm_policy, 'index.lifecycle.rollover_alias' => plugin.ilm_rollover_alias})
+    end
+
+    def self.template_settings(plugin, template)
+      plugin.maximum_seen_major_version < 8 ? template['settings']: template['template']['settings']
     end
 
     # Template name - if template_name set, use it
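The new template_settings helper accounts for the two template document shapes: legacy templates keep settings at the top level, while the composable index templates used for Elasticsearch 8 (see the elasticsearch-8x.json diff below) nest them under template. A small runnable sketch of that lookup, using trimmed-down hash literals:

    # Sketch of the settings lookup added above: legacy vs composable template shapes.
    def template_settings(template, es_major_version)
      es_major_version < 8 ? template['settings'] : template['template']['settings']
    end

    legacy     = { 'settings' => { 'index.refresh_interval' => '5s' } }
    composable = { 'template' => { 'settings' => { 'index.refresh_interval' => '5s' } } }

    template_settings(legacy, 7)      # => {"index.refresh_interval"=>"5s"}
    template_settings(composable, 8)  # => {"index.refresh_interval"=>"5s"}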
data/lib/logstash/outputs/elasticsearch/templates/ecs-disabled/elasticsearch-8x.json
@@ -1,44 +1,50 @@
 {
   "index_patterns" : "logstash-*",
   "version" : 80001,
-  "settings" : {
-    "index.refresh_interval" : "5s",
-    "number_of_shards": 1
-  },
-  "mappings" : {
-    "dynamic_templates" : [ {
-      "message_field" : {
-        "path_match" : "message",
-        "match_mapping_type" : "string",
-        "mapping" : {
-          "type" : "text",
-          "norms" : false
+  "template" : {
+    "settings" : {
+      "index.refresh_interval" : "5s",
+      "number_of_shards": 1
+    },
+    "mappings" : {
+      "dynamic_templates" : [ {
+        "message_field" : {
+          "path_match" : "message",
+          "match_mapping_type" : "string",
+          "mapping" : {
+            "type" : "text",
+            "norms" : false
+          }
         }
-      }
-    }, {
-      "string_fields" : {
-        "match" : "*",
-        "match_mapping_type" : "string",
-        "mapping" : {
-          "type" : "text", "norms" : false,
-          "fields" : {
-            "keyword" : { "type": "keyword", "ignore_above": 256 }
+      }, {
+        "string_fields" : {
+          "match" : "*",
+          "match_mapping_type" : "string",
+          "mapping" : {
+            "type" : "text", "norms" : false,
+            "fields" : {
+              "keyword" : { "type": "keyword", "ignore_above": 256 }
+            }
           }
         }
-      }
-    } ],
-    "properties" : {
-      "@timestamp": { "type": "date" },
-      "@version": { "type": "keyword" },
-      "geoip" : {
-        "dynamic": true,
-        "properties" : {
-          "ip": { "type": "ip" },
-          "location" : { "type" : "geo_point" },
-          "latitude" : { "type" : "half_float" },
-          "longitude" : { "type" : "half_float" }
+      } ],
+      "properties" : {
+        "@timestamp": { "type": "date" },
+        "@version": { "type": "keyword" },
+        "geoip" : {
+          "dynamic": true,
+          "properties" : {
+            "ip": { "type": "ip" },
+            "location" : { "type" : "geo_point" },
+            "latitude" : { "type" : "half_float" },
+            "longitude" : { "type" : "half_float" }
+          }
         }
       }
     }
+  },
+  "priority": 200,
+  "_meta" : {
+    "description": "index template for logstash-output-elasticsearch"
   }
 }
data/lib/logstash/plugin_mixins/elasticsearch/api_configs.rb
@@ -0,0 +1,163 @@
+module LogStash; module PluginMixins; module ElasticSearch
+  module APIConfigs
+
+    # This module defines common options that can be reused by alternate elasticsearch output plugins such as the elasticsearch_data_streams output.
+
+    DEFAULT_HOST = ::LogStash::Util::SafeURI.new("//127.0.0.1")
+
+    def self.included(mod)
+      # Username to authenticate to a secure Elasticsearch cluster
+      mod.config :user, :validate => :string
+      # Password to authenticate to a secure Elasticsearch cluster
+      mod.config :password, :validate => :password
+
+      # Authenticate using Elasticsearch API key.
+      # format is id:api_key (as returned by https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html[Create API key])
+      mod.config :api_key, :validate => :password
+
+      # Cloud authentication string ("<username>:<password>" format) is an alternative for the `user`/`password` configuration.
+      #
+      # For more details, check out the https://www.elastic.co/guide/en/logstash/current/connecting-to-cloud.html#_cloud_auth[cloud documentation]
+      mod.config :cloud_auth, :validate => :password
+
+      # The document ID for the index. Useful for overwriting existing entries in
+      # Elasticsearch with the same ID.
+      mod.config :document_id, :validate => :string
+
+      # HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+      # the root path for the Elasticsearch HTTP API lives.
+      # Note that if you use paths as components of URLs in the 'hosts' field you may
+      # not also set this field. That will raise an error at startup
+      mod.config :path, :validate => :string
+
+      # HTTP Path to perform the _bulk requests to
+      # this defaults to a concatenation of the path parameter and "_bulk"
+      mod.config :bulk_path, :validate => :string
+
+      # Pass a set of key value pairs as the URL query string. This query string is added
+      # to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+      # urls that already have query strings, the one specified here will be appended.
+      mod.config :parameters, :validate => :hash
+
+      # Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+      # is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+      # If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'
+      mod.config :ssl, :validate => :boolean
+
+      # Option to validate the server's certificate. Disabling this severely compromises security.
+      # For more information on disabling certificate verification please read
+      # https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+      mod.config :ssl_certificate_verification, :validate => :boolean, :default => true
+
+      # The .cer or .pem file to validate the server's certificate
+      mod.config :cacert, :validate => :path
+
+      # The JKS truststore to validate the server's certificate.
+      # Use either `:truststore` or `:cacert`
+      mod.config :truststore, :validate => :path
+
+      # Set the truststore password
+      mod.config :truststore_password, :validate => :password
+
+      # The keystore used to present a certificate to the server.
+      # It can be either .jks or .p12
+      mod.config :keystore, :validate => :path
+
+      # Set the keystore password
+      mod.config :keystore_password, :validate => :password
+
+      # This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+      # Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+      # this with master nodes, you probably want to disable HTTP on them by setting
+      # `http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+      # manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+      mod.config :sniffing, :validate => :boolean, :default => false
+
+      # How long to wait, in seconds, between sniffing attempts
+      mod.config :sniffing_delay, :validate => :number, :default => 5
+
+      # HTTP Path to be used for the sniffing requests
+      # the default value is computed by concatenating the path value and "_nodes/http"
+      # if sniffing_path is set it will be used as an absolute path
+      # do not use full URL here, only paths, e.g. "/sniff/_nodes/http"
+      mod.config :sniffing_path, :validate => :string
+
+      # Set the address of a forward HTTP proxy.
+      # This used to accept hashes as arguments but now only accepts
+      # arguments of the URI type to prevent leaking credentials.
+      mod.config :proxy, :validate => :uri # but empty string is allowed
+
+      # Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If
+      # a timeout occurs, the request will be retried.
+      mod.config :timeout, :validate => :number, :default => 60
+
+      # Set the Elasticsearch errors in the whitelist that you don't want to log.
+      # A useful example is when you want to skip all 409 errors
+      # which are `document_already_exists_exception`.
+      mod.config :failure_type_logging_whitelist, :validate => :array, :default => []
+
+      # While the output tries to reuse connections efficiently we have a maximum.
+      # This sets the maximum number of open connections the output will create.
+      # Setting this too low may mean frequently closing / opening connections
+      # which is bad.
+      mod.config :pool_max, :validate => :number, :default => 1000
+
+      # While the output tries to reuse connections efficiently we have a maximum per endpoint.
+      # This sets the maximum number of open connections per endpoint the output will create.
+      # Setting this too low may mean frequently closing / opening connections
+      # which is bad.
+      mod.config :pool_max_per_route, :validate => :number, :default => 100
+
+      # HTTP Path where a HEAD request is sent when a backend is marked down
+      # the request is sent in the background to see if it has come back again
+      # before it is once again eligible to service requests.
+      # If you have custom firewall rules you may need to change this
+      mod.config :healthcheck_path, :validate => :string
+
+      # How frequently, in seconds, to wait between resurrection attempts.
+      # Resurrection is the process by which backend endpoints marked 'down' are checked
+      # to see if they have come back to life
+      mod.config :resurrect_delay, :validate => :number, :default => 5
+
+      # How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
+      # You may want to set this lower, if you get connection errors regularly
+      # Quoting the Apache commons docs (this client is based Apache Commmons):
+      # 'Defines period of inactivity in milliseconds after which persistent connections must
+      # be re-validated prior to being leased to the consumer. Non-positive value passed to
+      # this method disables connection validation. This check helps detect connections that
+      # have become stale (half-closed) while kept inactive in the pool.'
+      # See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+      mod.config :validate_after_inactivity, :validate => :number, :default => 10000
+
+      # Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond
+      mod.config :http_compression, :validate => :boolean, :default => false
+
+      # Custom Headers to send on each request to elasticsearch nodes
+      mod.config :custom_headers, :validate => :hash, :default => {}
+
+      # Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+      # Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300).
+      # `"127.0.0.1"`
+      # `["127.0.0.1:9200","127.0.0.2:9200"]`
+      # `["http://127.0.0.1"]`
+      # `["https://127.0.0.1:9200"]`
+      # `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+      # It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+      # to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+      #
+      # Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+      mod.config :hosts, :validate => :uri, :default => [ DEFAULT_HOST ], :list => true
+
+      # Cloud ID, from the Elastic Cloud web console. If set `hosts` should not be used.
+      #
+      # For more details, check out the https://www.elastic.co/guide/en/logstash/current/connecting-to-cloud.html#_cloud_id[cloud documentation]
+      mod.config :cloud_id, :validate => :string
+
+      # Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`
+      mod.config :retry_initial_interval, :validate => :number, :default => 2
+
+      # Set max interval in seconds between bulk retries.
+      mod.config :retry_max_interval, :validate => :number, :default => 64
+    end
+  end
+end; end; end
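Because every option is declared from the included hook, any alternate output plugin can reuse the whole set by mixing the module in. A hedged sketch of that usage inside a Logstash plugin; the plugin class and name below are hypothetical, and logstash-core must be on the load path for the requires to resolve:

    # Hypothetical plugin reusing the shared Elasticsearch API options.
    # Including APIConfigs triggers its `included` hook, which declares
    # user/password/hosts/ssl/... as config options on this plugin class.
    require "logstash/outputs/base"
    require "logstash/plugin_mixins/elasticsearch/api_configs"

    class LogStash::Outputs::MyEsVariant < LogStash::Outputs::Base
      config_name "my_es_variant"                # hypothetical plugin name

      include LogStash::PluginMixins::ElasticSearch::APIConfigs

      def register
        logger.info("Configured hosts", :hosts => @hosts.map(&:to_s))  # declared options arrive as instance variables
      end

      def multi_receive(events)
        # ship events using the shared connection settings...
      end
    end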