logstash-output-elasticsearch 10.7.3-java → 10.8.0-java
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/lib/logstash/outputs/elasticsearch.rb +300 -165
- data/lib/logstash/outputs/elasticsearch/http_client.rb +1 -0
- data/lib/logstash/outputs/elasticsearch/http_client/pool.rb +13 -28
- data/lib/logstash/outputs/elasticsearch/http_client_builder.rb +1 -0
- data/lib/logstash/outputs/elasticsearch/ilm.rb +9 -5
- data/lib/logstash/outputs/elasticsearch/license_checker.rb +47 -0
- data/lib/logstash/plugin_mixins/elasticsearch/api_configs.rb +163 -0
- data/lib/logstash/{outputs → plugin_mixins}/elasticsearch/common.rb +40 -167
- data/lib/logstash/plugin_mixins/elasticsearch/noop_license_checker.rb +9 -0
- data/logstash-output-elasticsearch.gemspec +1 -1
- data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +45 -5
- data/spec/unit/outputs/license_check_spec.rb +41 -0
- metadata +8 -4
- data/lib/logstash/outputs/elasticsearch/common_configs.rb +0 -167
data/lib/logstash/outputs/elasticsearch/http_client.rb

```diff
@@ -286,6 +286,7 @@ module LogStash; module Outputs; class ElasticSearch;
       adapter = build_adapter(options)
 
       pool_options = {
+        :license_checker => options[:license_checker],
         :sniffing => sniffing,
         :sniffer_delay => options[:sniffer_delay],
         :sniffing_path => options[:sniffing_path],
```
data/lib/logstash/outputs/elasticsearch/http_client/pool.rb

```diff
@@ -1,3 +1,5 @@
+require "logstash/plugin_mixins/elasticsearch/noop_license_checker"
+
 module LogStash; module Outputs; class ElasticSearch; class HttpClient;
   class Pool
     class NoConnectionAvailableError < Error; end
```
```diff
@@ -29,6 +31,7 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
     end
 
     attr_reader :logger, :adapter, :sniffing, :sniffer_delay, :resurrect_delay, :healthcheck_path, :sniffing_path, :bulk_path
+    attr_reader :license_checker # license_checker is used by the pool specs
 
     ROOT_URI_PATH = '/'.freeze
     LICENSE_PATH = '/_license'.freeze
```
```diff
@@ -66,12 +69,10 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
       # Holds metadata about all URLs
       @url_info = {}
       @stopping = false
-    end
 
-    def oss?
-      LogStash::Outputs::ElasticSearch.oss?
+      @license_checker = options.fetch(:license_checker) { LogStash::PluginMixins::ElasticSearch::NoopLicenseChecker::INSTANCE }
     end
-
+
     def start
       update_initial_urls
       start_resurrectionist
```
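The pool now falls back to a do-nothing checker when none is injected. The new `noop_license_checker.rb` only appears in the file list above (9 lines) and is not shown in this diff, so the following is a hedged stand-in with the same contract (an `INSTANCE` constant plus an `appropriate_license?` that always passes), together with the `Hash#fetch`-with-block default used in `Pool#initialize`; it is not the gem's actual source.

```ruby
# Stand-in for the (not shown) NoopLicenseChecker: same contract, illustrative only.
module LogStash; module PluginMixins; module ElasticSearch
  class NoopLicenseChecker
    INSTANCE = new

    def appropriate_license?(pool, url)
      true # never veto a connection
    end
  end
end; end; end

# Hash#fetch with a block default, as used in Pool#initialize:
options = {} # caller supplied no :license_checker
checker = options.fetch(:license_checker) { LogStash::PluginMixins::ElasticSearch::NoopLicenseChecker::INSTANCE }
checker.appropriate_license?(nil, nil) # => true
```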
```diff
@@ -210,7 +211,6 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
       end
     end
 
-
     def sniff_2x_1x(nodes)
       nodes.map do |id,info|
         # TODO Make sure this works with shield. Does that listed
```
```diff
@@ -245,16 +245,15 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
       end
     end
 
+    # Retrieve ES node license information
+    # @param url [LogStash::Util::SafeURI] ES node URL
+    # @return [Hash] deserialized license document or empty Hash upon any error
     def get_license(url)
       response = perform_request_to_url(url, :get, LICENSE_PATH)
       LogStash::Json.load(response.body)
-    end
-
-    def valid_es_license?(url)
-      license = get_license(url)
-      license.fetch("license", {}).fetch("status", nil) == "active"
     rescue => e
-
+      logger.error("Unable to get license information", url: url.sanitized.to_s, error_type: e.class, error: e.message)
+      {}
     end
 
     def health_check_request(url)
```
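Returning an empty Hash on failure keeps the downstream status lookup nil-safe. A quick illustration of what a caller sees after a failed `/_license` request:

```ruby
license = {}  # get_license's rescue fallback
license.fetch("license", {}).fetch("status", nil)             # => nil
license.fetch("license", {}).fetch("status", nil) == "active" # => false
```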
```diff
@@ -282,19 +281,9 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
             @logger.warn("Detected a node with a higher major version than previously observed. This could be the result of an elasticsearch cluster upgrade.", :previous_major => @maximum_seen_major_version, :new_major => major, :node_url => url.sanitized.to_s)
             set_new_major_version(major)
           end
-          if oss? || valid_es_license?(url)
-            meta[:state] = :alive
-          else
-            # As this version is to be shipped with Logstash 7.x we won't mark the connection as unlicensed
-            #
-            # logger.error("Cannot connect to the Elasticsearch cluster configured in the Elasticsearch output. Logstash requires the default distribution of Elasticsearch. Please update to the default distribution of Elasticsearch for full access to all free features, or switch to the OSS distribution of Logstash.", :url => url.sanitized.to_s)
-            # meta[:state] = :unlicensed
-            #
-            # Instead we'll log a deprecation warning and mark it as alive:
-            #
-            log_license_deprecation_warn(url)
-            meta[:state] = :alive
-          end
+
+          alive = @license_checker.appropriate_license?(self, url)
+          meta[:state] = alive ? :alive : :dead
         end
       rescue HostUnreachableError, BadResponseCodeError => e
         logger.warn("Attempted to resurrect connection to dead ES instance, but got an error.", url: url.sanitized.to_s, error_type: e.class, error: e.message)
```
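Because the health check now only asks the injected object `appropriate_license?(self, url)`, any duck-typed checker can decide whether a node comes back as `:alive` or `:dead`. A minimal, self-contained sketch of that decision in plain Ruby; the strict checker and fake pool below are illustrative stand-ins, not part of the gem:

```ruby
# A strict checker that vetoes nodes whose license is not active.
class StrictChecker
  def appropriate_license?(pool, url)
    pool.get_license(url).dig("license", "status") == "active"
  end
end

fake_pool = Object.new
def fake_pool.get_license(_url)
  { "license" => { "status" => "expired" } }
end

meta  = {}
alive = StrictChecker.new.appropriate_license?(fake_pool, "http://es:9200")
meta[:state] = alive ? :alive : :dead
meta # => { :state => :dead }
```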
```diff
@@ -306,10 +295,6 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
       @resurrectionist.join if @resurrectionist
     end
 
-    def log_license_deprecation_warn(url)
-      logger.warn("DEPRECATION WARNING: Connecting to an OSS distribution of Elasticsearch using the default distribution of Logstash will stop working in Logstash 8.0.0. Please upgrade to the default distribution of Elasticsearch, or use the OSS distribution of Logstash", :url => url.sanitized.to_s)
-    end
-
     def resurrectionist_alive?
       @resurrectionist ? @resurrectionist.alive? : nil
     end
```
data/lib/logstash/outputs/elasticsearch/http_client_builder.rb

```diff
@@ -15,6 +15,7 @@ module LogStash; module Outputs; class ElasticSearch;
       client_settings[:proxy] = params["proxy"] if params["proxy"]
 
       common_options = {
+        :license_checker => params["license_checker"],
         :client_settings => client_settings,
         :metric => params["metric"],
         :resurrect_delay => params["resurrect_delay"]
```
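Taken together with the hunks above, the new key is simply threaded from the output's params down to the pool. A compact plain-Ruby trace of that hand-off (the hash keys match the hunks; the objects here are throwaway stand-ins, not the plugin's real classes):

```ruby
checker        = Object.new                                                   # whatever the output passes in
params         = { "license_checker" => checker }                             # set in build_client (Common mixin)
common_options = { :license_checker => params["license_checker"] }            # http_client_builder.rb
pool_options   = { :license_checker => common_options[:license_checker] }     # http_client.rb
pool_options.fetch(:license_checker) { :noop_fallback }                       # => checker; the block default only fires when the key is absent
```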
data/lib/logstash/outputs/elasticsearch/ilm.rb

```diff
@@ -5,10 +5,10 @@ module LogStash; module Outputs; class ElasticSearch
 
     def setup_ilm
       return unless ilm_in_use?
-
-
-
-
+      logger.warn("Overwriting supplied index #{@index} with rollover alias #{@ilm_rollover_alias}") unless default_index?(@index)
+      @index = @ilm_rollover_alias
+      maybe_create_rollover_alias
+      maybe_create_ilm_policy
     end
 
     def default_rollover_alias?(rollover_alias)
```
```diff
@@ -75,6 +75,10 @@ module LogStash; module Outputs; class ElasticSearch
 
     private
 
+    def default_index?(index)
+      index == @default_index
+    end
+
     def ilm_policy_default?
       ilm_policy == LogStash::Outputs::ElasticSearch::DEFAULT_POLICY
     end
```
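The new `default_index?` helper keeps the warning in `setup_ilm` from firing when the user left `index` at its default. A small standalone sketch of that check in plain Ruby; the sample values are illustrative, not the plugin's actual defaults:

```ruby
# Warn only when the user explicitly set an index that ILM will override.
def overwrite_warning(index, default_index, rollover_alias)
  return if index == default_index   # default_index?(index)
  warn "Overwriting supplied index #{index} with rollover alias #{rollover_alias}"
end

overwrite_warning("logstash-%{+yyyy.MM.dd}", "logstash-%{+yyyy.MM.dd}", "logstash") # silent
overwrite_warning("my-index", "logstash-%{+yyyy.MM.dd}", "logstash")                # warns
```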
```diff
@@ -110,4 +114,4 @@ module LogStash; module Outputs; class ElasticSearch
       LogStash::Json.load(::IO.read(policy_path))
     end
   end
-end end end
+end; end; end
```
data/lib/logstash/outputs/elasticsearch/license_checker.rb (new file)

```diff
@@ -0,0 +1,47 @@
+module LogStash; module Outputs; class ElasticSearch
+  class LicenseChecker
+
+    def initialize(logger)
+      @logger = logger
+    end
+
+    # Figure out if the provided license is appropriate or not
+    # The appropriate_license? methods is the method called from LogStash::Outputs::ElasticSearch::HttpClient::Pool#healthcheck!
+    # @param url [LogStash::Util::SafeURI] ES node URL
+    # @param license [Hash] ES node deserialized licence document
+    # @return [Boolean] true if provided license is deemed appropriate
+    def appropriate_license?(pool, url)
+      return true if oss?
+
+      license = pool.get_license(url)
+      if valid_es_license?(license)
+        true
+      else
+        # As this version is to be shipped with Logstash 7.x we won't mark the connection as unlicensed
+        #
+        # @logger.error("Cannot connect to the Elasticsearch cluster configured in the Elasticsearch output. Logstash requires the default distribution of Elasticsearch. Please update to the default distribution of Elasticsearch for full access to all free features, or switch to the OSS distribution of Logstash.", :url => url.sanitized.to_s)
+        # meta[:state] = :unlicensed
+        #
+        # Instead we'll log a deprecation warning and mark it as alive:
+        #
+        log_license_deprecation_warn(url)
+        true
+      end
+    end
+
+    # Note that oss? could be private but is used by the Pool specs
+    def oss?
+      LogStash::OSS
+    end
+
+    # Note that valid_es_license? could be private but is used by the Pool specs
+    def valid_es_license?(license)
+      license.fetch("license", {}).fetch("status", nil) == "active"
+    end
+
+    # Note that log_license_deprecation_warn could be private but is used by the Pool specs
+    def log_license_deprecation_warn(url)
+      @logger.warn("DEPRECATION WARNING: Connecting to an OSS distribution of Elasticsearch using the default distribution of Logstash will stop working in Logstash 8.0.0. Please upgrade to the default distribution of Elasticsearch, or use the OSS distribution of Logstash", :url => url.sanitized.to_s)
+    end
+  end
+end; end; end
```
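With the check extracted into its own class, an output can construct a checker and hand it to `build_client` (whose new signature appears in the `common.rb` hunks further down), so that it ends up as the pool's `@license_checker`. A hedged sketch of that call, assuming a full Logstash runtime where these classes are loadable; it is not the plugin's actual `register` code:

```ruby
# Inside an output plugin that mixes in the Common module (illustrative only):
checker = LogStash::Outputs::ElasticSearch::LicenseChecker.new(@logger)
@client = build_client(checker)   # forwarded down to the Pool as @license_checker
```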
data/lib/logstash/plugin_mixins/elasticsearch/api_configs.rb (new file)

```diff
@@ -0,0 +1,163 @@
+module LogStash; module PluginMixins; module ElasticSearch
+  module APIConfigs
+
+    # This module defines common options that can be reused by alternate elasticsearch output plugins such as the elasticsearch_data_streams output.
+
+    DEFAULT_HOST = ::LogStash::Util::SafeURI.new("//127.0.0.1")
+
+    def self.included(mod)
+      # Username to authenticate to a secure Elasticsearch cluster
+      mod.config :user, :validate => :string
+      # Password to authenticate to a secure Elasticsearch cluster
+      mod.config :password, :validate => :password
+
+      # Authenticate using Elasticsearch API key.
+      # format is id:api_key (as returned by https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html[Create API key])
+      mod.config :api_key, :validate => :password
+
+      # Cloud authentication string ("<username>:<password>" format) is an alternative for the `user`/`password` configuration.
+      #
+      # For more details, check out the https://www.elastic.co/guide/en/logstash/current/connecting-to-cloud.html#_cloud_auth[cloud documentation]
+      mod.config :cloud_auth, :validate => :password
+
+      # The document ID for the index. Useful for overwriting existing entries in
+      # Elasticsearch with the same ID.
+      mod.config :document_id, :validate => :string
+
+      # HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+      # the root path for the Elasticsearch HTTP API lives.
+      # Note that if you use paths as components of URLs in the 'hosts' field you may
+      # not also set this field. That will raise an error at startup
+      mod.config :path, :validate => :string
+
+      # HTTP Path to perform the _bulk requests to
+      # this defaults to a concatenation of the path parameter and "_bulk"
+      mod.config :bulk_path, :validate => :string
+
+      # Pass a set of key value pairs as the URL query string. This query string is added
+      # to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+      # urls that already have query strings, the one specified here will be appended.
+      mod.config :parameters, :validate => :hash
+
+      # Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+      # is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+      # If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'
+      mod.config :ssl, :validate => :boolean
+
+      # Option to validate the server's certificate. Disabling this severely compromises security.
+      # For more information on disabling certificate verification please read
+      # https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+      mod.config :ssl_certificate_verification, :validate => :boolean, :default => true
+
+      # The .cer or .pem file to validate the server's certificate
+      mod.config :cacert, :validate => :path
+
+      # The JKS truststore to validate the server's certificate.
+      # Use either `:truststore` or `:cacert`
+      mod.config :truststore, :validate => :path
+
+      # Set the truststore password
+      mod.config :truststore_password, :validate => :password
+
+      # The keystore used to present a certificate to the server.
+      # It can be either .jks or .p12
+      mod.config :keystore, :validate => :path
+
+      # Set the keystore password
+      mod.config :keystore_password, :validate => :password
+
+      # This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+      # Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+      # this with master nodes, you probably want to disable HTTP on them by setting
+      # `http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+      # manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+      mod.config :sniffing, :validate => :boolean, :default => false
+
+      # How long to wait, in seconds, between sniffing attempts
+      mod.config :sniffing_delay, :validate => :number, :default => 5
+
+      # HTTP Path to be used for the sniffing requests
+      # the default value is computed by concatenating the path value and "_nodes/http"
+      # if sniffing_path is set it will be used as an absolute path
+      # do not use full URL here, only paths, e.g. "/sniff/_nodes/http"
+      mod.config :sniffing_path, :validate => :string
+
+      # Set the address of a forward HTTP proxy.
+      # This used to accept hashes as arguments but now only accepts
+      # arguments of the URI type to prevent leaking credentials.
+      mod.config :proxy, :validate => :uri # but empty string is allowed
+
+      # Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If
+      # a timeout occurs, the request will be retried.
+      mod.config :timeout, :validate => :number, :default => 60
+
+      # Set the Elasticsearch errors in the whitelist that you don't want to log.
+      # A useful example is when you want to skip all 409 errors
+      # which are `document_already_exists_exception`.
+      mod.config :failure_type_logging_whitelist, :validate => :array, :default => []
+
+      # While the output tries to reuse connections efficiently we have a maximum.
+      # This sets the maximum number of open connections the output will create.
+      # Setting this too low may mean frequently closing / opening connections
+      # which is bad.
+      mod.config :pool_max, :validate => :number, :default => 1000
+
+      # While the output tries to reuse connections efficiently we have a maximum per endpoint.
+      # This sets the maximum number of open connections per endpoint the output will create.
+      # Setting this too low may mean frequently closing / opening connections
+      # which is bad.
+      mod.config :pool_max_per_route, :validate => :number, :default => 100
+
+      # HTTP Path where a HEAD request is sent when a backend is marked down
+      # the request is sent in the background to see if it has come back again
+      # before it is once again eligible to service requests.
+      # If you have custom firewall rules you may need to change this
+      mod.config :healthcheck_path, :validate => :string
+
+      # How frequently, in seconds, to wait between resurrection attempts.
+      # Resurrection is the process by which backend endpoints marked 'down' are checked
+      # to see if they have come back to life
+      mod.config :resurrect_delay, :validate => :number, :default => 5
+
+      # How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
+      # You may want to set this lower, if you get connection errors regularly
+      # Quoting the Apache commons docs (this client is based Apache Commmons):
+      # 'Defines period of inactivity in milliseconds after which persistent connections must
+      # be re-validated prior to being leased to the consumer. Non-positive value passed to
+      # this method disables connection validation. This check helps detect connections that
+      # have become stale (half-closed) while kept inactive in the pool.'
+      # See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+      mod.config :validate_after_inactivity, :validate => :number, :default => 10000
+
+      # Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond
+      mod.config :http_compression, :validate => :boolean, :default => false
+
+      # Custom Headers to send on each request to elasticsearch nodes
+      mod.config :custom_headers, :validate => :hash, :default => {}
+
+      # Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+      # Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300).
+      #     `"127.0.0.1"`
+      #     `["127.0.0.1:9200","127.0.0.2:9200"]`
+      #     `["http://127.0.0.1"]`
+      #     `["https://127.0.0.1:9200"]`
+      #     `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+      # It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+      # to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+      #
+      # Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+      mod.config :hosts, :validate => :uri, :default => [ DEFAULT_HOST ], :list => true
+
+      # Cloud ID, from the Elastic Cloud web console. If set `hosts` should not be used.
+      #
+      # For more details, check out the https://www.elastic.co/guide/en/logstash/current/connecting-to-cloud.html#_cloud_id[cloud documentation]
+      mod.config :cloud_id, :validate => :string
+
+      # Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`
+      mod.config :retry_initial_interval, :validate => :number, :default => 2
+
+      # Set max interval in seconds between bulk retries.
+      mod.config :retry_max_interval, :validate => :number, :default => 64
+    end
+  end
+end; end; end
```
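The mixin registers its settings on whatever class includes it via the `config` DSL, which is what lets an alternate output (the comment names `elasticsearch_data_streams`) reuse them. A hedged sketch of such an include, assuming the Logstash plugin API is on the load path; the class below is hypothetical:

```ruby
require "logstash/outputs/base"
require "logstash/plugin_mixins/elasticsearch/api_configs"

module LogStash; module Outputs
  class ElasticSearchDataStreams < LogStash::Outputs::Base
    config_name "elasticsearch_data_streams"

    # Pulls in :hosts, :user/:password, :api_key, SSL, sniffing, retry settings, etc.
    include LogStash::PluginMixins::ElasticSearch::APIConfigs
  end
end; end
```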
data/lib/logstash/{outputs → plugin_mixins}/elasticsearch/common.rb

```diff
@@ -1,7 +1,10 @@
 require "logstash/outputs/elasticsearch/template_manager"
 
-module LogStash; module Outputs; class ElasticSearch;
+module LogStash; module PluginMixins; module ElasticSearch
   module Common
+
+    # This module defines common methods that can be reused by alternate elasticsearch output plugins such as the elasticsearch_data_streams output.
+
     attr_reader :client, :hosts
 
     # These codes apply to documents, not at the request level
```
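Because the module now lives under `LogStash::PluginMixins::ElasticSearch`, anything mixing it in requires and includes it from the new path. A hedged sketch (the including class is hypothetical); note the old `CommonConfigs` companion is gone entirely, since `common_configs.rb` is deleted per the file list above:

```ruby
require "logstash/plugin_mixins/elasticsearch/common"

module LogStash; module Outputs
  class MyElasticsearchVariant < LogStash::Outputs::Base
    include LogStash::PluginMixins::ElasticSearch::Common
  end
end; end
```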
```diff
@@ -9,116 +12,26 @@ module LogStash; module Outputs; class ElasticSearch;
     DOC_SUCCESS_CODES = [200, 201]
     DOC_CONFLICT_CODE = 409
 
-    #
-    #
-    #
-    #
-
-
-
-
-
-    #
-
-
-
-
-
-
-
-
-
-    # Receive an array of events and immediately attempt to index them (no buffering)
-    def multi_receive(events)
-      until @template_installed.true?
-        sleep 1
+    # Perform some ES options validations and Build the HttpClient.
+    # Note that this methods may sets the @user, @password, @hosts and @client ivars as a side effect.
+    # @param license_checker [#appropriate_license?] An optional license checker that will be used by the Pool class.
+    # @return [HttpClient] the new http client
+    def build_client(license_checker = nil)
+      params["license_checker"] = license_checker
+
+      # the following 3 options validation & setup methods are called inside build_client
+      # because they must be executed prior to building the client and logstash
+      # monitoring and management rely on directly calling build_client
+      # see https://github.com/logstash-plugins/logstash-output-elasticsearch/pull/934#pullrequestreview-396203307
+      validate_authentication
+      fill_hosts_from_cloud_id
+      setup_hosts
+
+      params["metric"] = metric
+      if @proxy.eql?('')
+        @logger.warn "Supplied proxy setting (proxy => '') has no effect"
       end
-
-    end
-
-    def setup_after_successful_connection
-      @template_installer ||= Thread.new do
-        sleep_interval = @retry_initial_interval
-        until successful_connection? || @stopping.true?
-          @logger.debug("Waiting for connectivity to Elasticsearch cluster. Retrying in #{sleep_interval}s")
-          Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
-          sleep_interval = next_sleep_interval(sleep_interval)
-        end
-        if successful_connection?
-          discover_cluster_uuid
-          install_template
-          setup_ilm if ilm_in_use?
-        end
-      end
-    end
-
-    def stop_template_installer
-      @template_installer.join unless @template_installer.nil?
-    end
-
-    def successful_connection?
-      !!maximum_seen_major_version
-    end
-
-    ##
-    # WARNING: This method is overridden in a subclass in Logstash Core 7.7-7.8's monitoring,
-    #          where a `client` argument is both required and ignored. In later versions of
-    #          Logstash Core it is optional and ignored, but to make it optional here would
-    #          allow us to accidentally break compatibility with Logstashes where it was required.
-    # @param noop_required_client [nil]: required `nil` for legacy reasons.
-    # @return [Boolean]
-    def use_event_type?(noop_required_client)
-      maximum_seen_major_version < 8
-    end
-
-    # Convert the event into a 3-tuple of action, params, and event
-    def event_action_tuple(event)
-      action = event.sprintf(@action)
-
-      params = {
-        :_id => @document_id ? event.sprintf(@document_id) : nil,
-        :_index => event.sprintf(@index),
-        routing_field_name => @routing ? event.sprintf(@routing) : nil
-      }
-
-      params[:_type] = get_event_type(event) if use_event_type?(nil)
-
-      if @pipeline
-        value = event.sprintf(@pipeline)
-        # convention: empty string equates to not using a pipeline
-        # this is useful when using a field reference in the pipeline setting, e.g.
-        # elasticsearch {
-        #   pipeline => "%{[@metadata][pipeline]}"
-        # }
-        params[:pipeline] = value unless value.empty?
-      end
-
-      if @parent
-        if @join_field
-          join_value = event.get(@join_field)
-          parent_value = event.sprintf(@parent)
-          event.set(@join_field, { "name" => join_value, "parent" => parent_value })
-          params[routing_field_name] = event.sprintf(@parent)
-        else
-          params[:parent] = event.sprintf(@parent)
-        end
-      end
-
-      if action == 'update'
-        params[:_upsert] = LogStash::Json.load(event.sprintf(@upsert)) if @upsert != ""
-        params[:_script] = event.sprintf(@script) if @script != ""
-        params[retry_on_conflict_action_name] = @retry_on_conflict
-      end
-
-      if @version
-        params[:version] = event.sprintf(@version)
-      end
-
-      if @version_type
-        params[:version_type] = event.sprintf(@version_type)
-      end
-
-      [action, params, event]
+      @client ||= ::LogStash::Outputs::ElasticSearch::HttpClientBuilder.build(@logger, @hosts, params)
     end
 
     def validate_authentication
```
|
|
153
66
|
|
154
67
|
def hosts_default?(hosts)
|
155
68
|
# NOTE: would be nice if pipeline allowed us a clean way to detect a config default :
|
156
|
-
hosts.is_a?(Array) && hosts.size == 1 && hosts.first.equal?(
|
69
|
+
hosts.is_a?(Array) && hosts.size == 1 && hosts.first.equal?(LogStash::PluginMixins::ElasticSearch::APIConfigs::DEFAULT_HOST)
|
157
70
|
end
|
158
71
|
private :hosts_default?
|
159
72
|
|
@@ -206,17 +119,23 @@ module LogStash; module Outputs; class ElasticSearch;
|
|
206
119
|
client.maximum_seen_major_version
|
207
120
|
end
|
208
121
|
|
209
|
-
def
|
210
|
-
maximum_seen_major_version
|
211
|
-
end
|
212
|
-
|
213
|
-
def retry_on_conflict_action_name
|
214
|
-
maximum_seen_major_version >= 7 ? :retry_on_conflict : :_retry_on_conflict
|
122
|
+
def successful_connection?
|
123
|
+
!!maximum_seen_major_version
|
215
124
|
end
|
216
125
|
|
217
|
-
|
218
|
-
|
219
|
-
|
126
|
+
# launch a thread that waits for an initial successful connection to the ES cluster to call the given block
|
127
|
+
# @param block [Proc] the block to execute upon initial successful connection
|
128
|
+
# @return [Thread] the successful connection wait thread
|
129
|
+
def setup_after_successful_connection(&block)
|
130
|
+
Thread.new do
|
131
|
+
sleep_interval = @retry_initial_interval
|
132
|
+
until successful_connection? || @stopping.true?
|
133
|
+
@logger.debug("Waiting for connectivity to Elasticsearch cluster. Retrying in #{sleep_interval}s")
|
134
|
+
Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
|
135
|
+
sleep_interval = next_sleep_interval(sleep_interval)
|
136
|
+
end
|
137
|
+
block.call if successful_connection?
|
138
|
+
end
|
220
139
|
end
|
221
140
|
|
222
141
|
def discover_cluster_uuid
|
@@ -228,22 +147,6 @@ module LogStash; module Outputs; class ElasticSearch;
|
|
228
147
|
# @logger.error("Unable to retrieve elasticsearch cluster uuid", error => e.message)
|
229
148
|
end
|
230
149
|
|
231
|
-
def check_action_validity
|
232
|
-
raise LogStash::ConfigurationError, "No action specified!" unless @action
|
233
|
-
|
234
|
-
# If we're using string interpolation, we're good!
|
235
|
-
return if @action =~ /%{.+}/
|
236
|
-
return if valid_actions.include?(@action)
|
237
|
-
|
238
|
-
raise LogStash::ConfigurationError, "Action '#{@action}' is invalid! Pick one of #{valid_actions} or use a sprintf style statement"
|
239
|
-
end
|
240
|
-
|
241
|
-
# To be overidden by the -java version
|
242
|
-
VALID_HTTP_ACTIONS=["index", "delete", "create", "update"]
|
243
|
-
def valid_actions
|
244
|
-
VALID_HTTP_ACTIONS
|
245
|
-
end
|
246
|
-
|
247
150
|
def retrying_submit(actions)
|
248
151
|
# Initially we submit the full list of actions
|
249
152
|
submit_actions = actions
|
@@ -352,32 +255,6 @@ module LogStash; module Outputs; class ElasticSearch;
|
|
352
255
|
end
|
353
256
|
end
|
354
257
|
|
355
|
-
# Determine the correct value for the 'type' field for the given event
|
356
|
-
DEFAULT_EVENT_TYPE_ES6="doc".freeze
|
357
|
-
DEFAULT_EVENT_TYPE_ES7="_doc".freeze
|
358
|
-
def get_event_type(event)
|
359
|
-
# Set the 'type' value for the index.
|
360
|
-
type = if @document_type
|
361
|
-
event.sprintf(@document_type)
|
362
|
-
else
|
363
|
-
if maximum_seen_major_version < 6
|
364
|
-
event.get("type") || DEFAULT_EVENT_TYPE_ES6
|
365
|
-
elsif maximum_seen_major_version == 6
|
366
|
-
DEFAULT_EVENT_TYPE_ES6
|
367
|
-
elsif maximum_seen_major_version == 7
|
368
|
-
DEFAULT_EVENT_TYPE_ES7
|
369
|
-
else
|
370
|
-
nil
|
371
|
-
end
|
372
|
-
end
|
373
|
-
|
374
|
-
if !(type.is_a?(String) || type.is_a?(Numeric))
|
375
|
-
@logger.warn("Bad event type! Non-string/integer type value set!", :type_class => type.class, :type_value => type.to_s, :event => event)
|
376
|
-
end
|
377
|
-
|
378
|
-
type.to_s
|
379
|
-
end
|
380
|
-
|
381
258
|
# Rescue retryable errors during bulk submission
|
382
259
|
def safe_bulk(actions)
|
383
260
|
sleep_interval = @retry_initial_interval
|
@@ -448,10 +325,6 @@ module LogStash; module Outputs; class ElasticSearch;
|
|
448
325
|
end
|
449
326
|
end
|
450
327
|
|
451
|
-
def default_index?(index)
|
452
|
-
@index == @default_index
|
453
|
-
end
|
454
|
-
|
455
328
|
def dlq_enabled?
|
456
329
|
# TODO there should be a better way to query if DLQ is enabled
|
457
330
|
# See more in: https://github.com/elastic/logstash/issues/8064
|
@@ -459,4 +332,4 @@ module LogStash; module Outputs; class ElasticSearch;
|
|
459
332
|
!execution_context.dlq_writer.inner_writer.is_a?(::LogStash::Util::DummyDeadLetterQueueWriter)
|
460
333
|
end
|
461
334
|
end
|
462
|
-
end end end
|
335
|
+
end; end; end
|