logstash-output-elasticsearch-test 11.16.0-x86_64-linux
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +649 -0
- data/CONTRIBUTORS +34 -0
- data/Gemfile +16 -0
- data/LICENSE +202 -0
- data/NOTICE.TXT +5 -0
- data/README.md +106 -0
- data/docs/index.asciidoc +1369 -0
- data/lib/logstash/outputs/elasticsearch/data_stream_support.rb +282 -0
- data/lib/logstash/outputs/elasticsearch/default-ilm-policy.json +14 -0
- data/lib/logstash/outputs/elasticsearch/http_client/manticore_adapter.rb +155 -0
- data/lib/logstash/outputs/elasticsearch/http_client/pool.rb +534 -0
- data/lib/logstash/outputs/elasticsearch/http_client.rb +497 -0
- data/lib/logstash/outputs/elasticsearch/http_client_builder.rb +201 -0
- data/lib/logstash/outputs/elasticsearch/ilm.rb +92 -0
- data/lib/logstash/outputs/elasticsearch/license_checker.rb +52 -0
- data/lib/logstash/outputs/elasticsearch/template_manager.rb +131 -0
- data/lib/logstash/outputs/elasticsearch/templates/ecs-disabled/elasticsearch-6x.json +45 -0
- data/lib/logstash/outputs/elasticsearch/templates/ecs-disabled/elasticsearch-7x.json +44 -0
- data/lib/logstash/outputs/elasticsearch/templates/ecs-disabled/elasticsearch-8x.json +50 -0
- data/lib/logstash/outputs/elasticsearch.rb +699 -0
- data/lib/logstash/plugin_mixins/elasticsearch/api_configs.rb +237 -0
- data/lib/logstash/plugin_mixins/elasticsearch/common.rb +409 -0
- data/lib/logstash/plugin_mixins/elasticsearch/noop_license_checker.rb +9 -0
- data/logstash-output-elasticsearch.gemspec +40 -0
- data/spec/es_spec_helper.rb +225 -0
- data/spec/fixtures/_nodes/6x.json +81 -0
- data/spec/fixtures/_nodes/7x.json +92 -0
- data/spec/fixtures/htpasswd +2 -0
- data/spec/fixtures/license_check/active.json +16 -0
- data/spec/fixtures/license_check/inactive.json +5 -0
- data/spec/fixtures/nginx_reverse_proxy.conf +22 -0
- data/spec/fixtures/scripts/painless/scripted_update.painless +2 -0
- data/spec/fixtures/scripts/painless/scripted_update_nested.painless +1 -0
- data/spec/fixtures/scripts/painless/scripted_upsert.painless +1 -0
- data/spec/fixtures/template-with-policy-es6x.json +48 -0
- data/spec/fixtures/template-with-policy-es7x.json +45 -0
- data/spec/fixtures/template-with-policy-es8x.json +50 -0
- data/spec/fixtures/test_certs/ca.crt +29 -0
- data/spec/fixtures/test_certs/ca.der.sha256 +1 -0
- data/spec/fixtures/test_certs/ca.key +51 -0
- data/spec/fixtures/test_certs/renew.sh +13 -0
- data/spec/fixtures/test_certs/test.crt +30 -0
- data/spec/fixtures/test_certs/test.der.sha256 +1 -0
- data/spec/fixtures/test_certs/test.key +51 -0
- data/spec/fixtures/test_certs/test.p12 +0 -0
- data/spec/fixtures/test_certs/test_invalid.crt +36 -0
- data/spec/fixtures/test_certs/test_invalid.key +51 -0
- data/spec/fixtures/test_certs/test_invalid.p12 +0 -0
- data/spec/fixtures/test_certs/test_self_signed.crt +32 -0
- data/spec/fixtures/test_certs/test_self_signed.key +54 -0
- data/spec/fixtures/test_certs/test_self_signed.p12 +0 -0
- data/spec/integration/outputs/compressed_indexing_spec.rb +70 -0
- data/spec/integration/outputs/create_spec.rb +67 -0
- data/spec/integration/outputs/data_stream_spec.rb +68 -0
- data/spec/integration/outputs/delete_spec.rb +63 -0
- data/spec/integration/outputs/ilm_spec.rb +534 -0
- data/spec/integration/outputs/index_spec.rb +421 -0
- data/spec/integration/outputs/index_version_spec.rb +98 -0
- data/spec/integration/outputs/ingest_pipeline_spec.rb +75 -0
- data/spec/integration/outputs/metrics_spec.rb +66 -0
- data/spec/integration/outputs/no_es_on_startup_spec.rb +78 -0
- data/spec/integration/outputs/painless_update_spec.rb +99 -0
- data/spec/integration/outputs/parent_spec.rb +94 -0
- data/spec/integration/outputs/retry_spec.rb +182 -0
- data/spec/integration/outputs/routing_spec.rb +61 -0
- data/spec/integration/outputs/sniffer_spec.rb +94 -0
- data/spec/integration/outputs/templates_spec.rb +133 -0
- data/spec/integration/outputs/unsupported_actions_spec.rb +75 -0
- data/spec/integration/outputs/update_spec.rb +114 -0
- data/spec/spec_helper.rb +10 -0
- data/spec/support/elasticsearch/api/actions/delete_ilm_policy.rb +19 -0
- data/spec/support/elasticsearch/api/actions/get_alias.rb +18 -0
- data/spec/support/elasticsearch/api/actions/get_ilm_policy.rb +18 -0
- data/spec/support/elasticsearch/api/actions/put_alias.rb +24 -0
- data/spec/support/elasticsearch/api/actions/put_ilm_policy.rb +25 -0
- data/spec/unit/http_client_builder_spec.rb +185 -0
- data/spec/unit/outputs/elasticsearch/data_stream_support_spec.rb +612 -0
- data/spec/unit/outputs/elasticsearch/http_client/manticore_adapter_spec.rb +151 -0
- data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +501 -0
- data/spec/unit/outputs/elasticsearch/http_client_spec.rb +339 -0
- data/spec/unit/outputs/elasticsearch/template_manager_spec.rb +189 -0
- data/spec/unit/outputs/elasticsearch_proxy_spec.rb +103 -0
- data/spec/unit/outputs/elasticsearch_spec.rb +1573 -0
- data/spec/unit/outputs/elasticsearch_ssl_spec.rb +197 -0
- data/spec/unit/outputs/error_whitelist_spec.rb +56 -0
- data/spec/unit/outputs/license_check_spec.rb +57 -0
- metadata +423 -0
@@ -0,0 +1,699 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
require "logstash/namespace"
|
3
|
+
require "logstash/environment"
|
4
|
+
require "logstash/outputs/base"
|
5
|
+
require "logstash/json"
|
6
|
+
require "concurrent/atomic/atomic_boolean"
|
7
|
+
require "stud/interval"
|
8
|
+
require "socket" # for Socket.gethostname
|
9
|
+
require "thread" # for safe queueing
|
10
|
+
require "uri" # for escaping user input
|
11
|
+
require "forwardable"
|
12
|
+
require "set"
|
13
|
+
|
14
|
+
# .Compatibility Note
|
15
|
+
# [NOTE]
|
16
|
+
# ================================================================================
|
17
|
+
# Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting]
|
18
|
+
# called `http.content_type.required`. If this option is set to `true`, and you
|
19
|
+
# are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
|
20
|
+
# plugin to version 6.2.5 or higher.
|
21
|
+
#
|
22
|
+
# ================================================================================
|
23
|
+
#
|
24
|
+
# This plugin is the recommended method of storing logs in Elasticsearch.
|
25
|
+
# If you plan on using the Kibana web interface, you'll want to use this output.
|
26
|
+
#
|
27
|
+
# This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
|
28
|
+
# We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
|
29
|
+
# yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
|
30
|
+
# to upgrade Logstash in lock-step.
|
31
|
+
#
|
32
|
+
# You can learn more about Elasticsearch at <https://www.elastic.co/products/elasticsearch>
|
33
|
+
#
|
34
|
+
# ==== Template management for Elasticsearch 5.x
|
35
|
+
# Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
|
36
|
+
# Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
|
37
|
+
# behavior.
|
38
|
+
#
|
39
|
+
# ** Users installing ES 5.x and LS 5.x **
|
40
|
+
# This change will not affect you and you will continue to use the ES defaults.
|
41
|
+
#
|
42
|
+
# ** Users upgrading from LS 2.x to LS 5.x with ES 5.x **
|
43
|
+
# LS will not force upgrade the template, if `logstash` template already exists. This means you will still use
|
44
|
+
# `.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
|
45
|
+
# the new template is installed.
|
46
|
+
#
|
47
|
+
# ==== Retry Policy
|
48
|
+
#
|
49
|
+
# The retry policy has changed significantly in the 2.2.0 release.
|
50
|
+
# This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
|
51
|
+
# either partial or total failures.
|
52
|
+
#
|
53
|
+
# The following errors are retried infinitely:
|
54
|
+
#
|
55
|
+
# - Network errors (inability to connect)
|
56
|
+
# - 429 (Too many requests) and
|
57
|
+
# - 503 (Service unavailable) errors
|
58
|
+
#
|
59
|
+
# NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
|
60
|
+
# It is more performant for Elasticsearch to retry these exceptions than this plugin.
|
61
|
+
#
|
62
|
+
# ==== Batch Sizes ====
|
63
|
+
# This plugin attempts to send batches of events as a single request. However, if
|
64
|
+
# a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
|
65
|
+
#
|
66
|
+
# ==== DNS Caching
|
67
|
+
#
|
68
|
+
# This plugin uses the JVM to lookup DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
|
69
|
+
# a global setting for the JVM.
|
70
|
+
#
|
71
|
+
# As an example, to set your DNS TTL to 1 second you would set
|
72
|
+
# the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
|
73
|
+
#
|
74
|
+
# Keep in mind that a connection with keepalive enabled will
|
75
|
+
# not reevaluate its DNS value while the keepalive is in effect.
|
76
|
+
#
|
77
|
+
# ==== HTTP Compression
|
78
|
+
#
|
79
|
+
# This plugin supports request and response compression. Response compression is enabled by default and
|
80
|
+
# for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
|
81
|
+
# it to send back compressed response. For versions before 5.0, `http.compression` must be set to `true` in
|
82
|
+
# Elasticsearch[https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http] to take advantage of response compression when using this plugin
|
83
|
+
#
|
84
|
+
# For requests compression, regardless of the Elasticsearch version, users have to enable `http_compression`
|
85
|
+
# setting in their Logstash config file.
|
86
|
+
#
|
87
|
+
class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
|
88
|
+
declare_threadsafe!
|
89
|
+
|
90
|
+
require "logstash/outputs/elasticsearch/license_checker"
|
91
|
+
require "logstash/outputs/elasticsearch/http_client"
|
92
|
+
require "logstash/outputs/elasticsearch/http_client_builder"
|
93
|
+
require "logstash/plugin_mixins/elasticsearch/api_configs"
|
94
|
+
require "logstash/plugin_mixins/elasticsearch/common"
|
95
|
+
require "logstash/outputs/elasticsearch/ilm"
|
96
|
+
require "logstash/outputs/elasticsearch/data_stream_support"
|
97
|
+
require 'logstash/plugin_mixins/ecs_compatibility_support'
|
98
|
+
require 'logstash/plugin_mixins/deprecation_logger_support'
|
99
|
+
require 'logstash/plugin_mixins/normalize_config_support'
|
100
|
+
|
101
|
+
# Protocol agnostic methods
|
102
|
+
include(LogStash::PluginMixins::ElasticSearch::Common)
|
103
|
+
|
104
|
+
# Config normalization helpers
|
105
|
+
include(LogStash::PluginMixins::NormalizeConfigSupport)
|
106
|
+
|
107
|
+
# Methods for ILM support
|
108
|
+
include(LogStash::Outputs::ElasticSearch::Ilm)
|
109
|
+
|
110
|
+
# ecs_compatibility option, provided by Logstash core or the support adapter.
|
111
|
+
include(LogStash::PluginMixins::ECSCompatibilitySupport(:disabled, :v1, :v8))
|
112
|
+
|
113
|
+
# deprecation logger adapter for older Logstashes
|
114
|
+
include(LogStash::PluginMixins::DeprecationLoggerSupport)
|
115
|
+
|
116
|
+
# Generic/API config options that any document indexer output needs
|
117
|
+
include(LogStash::PluginMixins::ElasticSearch::APIConfigs)
|
118
|
+
|
119
|
+
# DS support
|
120
|
+
include(LogStash::Outputs::ElasticSearch::DataStreamSupport)
|
121
|
+
|
122
|
+
DEFAULT_POLICY = "logstash-policy"
|
123
|
+
|
124
|
+
config_name "elasticsearch"
|
125
|
+
|
126
|
+
# The Elasticsearch action to perform. Valid actions are:
|
127
|
+
#
|
128
|
+
# - index: indexes a document (an event from Logstash).
|
129
|
+
# - delete: deletes a document by id (An id is required for this action)
|
130
|
+
# - create: indexes a document, fails if a document by that id already exists in the index.
|
131
|
+
# - update: updates a document by id. Update has a special case where you can upsert -- update a
|
132
|
+
# document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
|
133
|
+
# in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
|
134
|
+
# - A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
|
135
|
+
# would use the foo field for the action
|
136
|
+
#
|
137
|
+
# For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
|
138
|
+
config :action, :validate => :string # :default => "index" unless data_stream
|
139
|
+
|
140
|
+
# The index to write events to. This can be dynamic using the `%{foo}` syntax.
|
141
|
+
# The default value will partition your indices by day so you can more easily
|
142
|
+
# delete old data or only search specific date ranges.
|
143
|
+
# Indexes may not contain uppercase characters.
|
144
|
+
# For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}.
|
145
|
+
# LS uses Joda to format the index pattern from event timestamp.
|
146
|
+
# Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
|
147
|
+
config :index, :validate => :string
|
148
|
+
|
149
|
+
config :document_type,
|
150
|
+
:validate => :string,
|
151
|
+
:deprecated => "Document types are being deprecated in Elasticsearch 6.0, and removed entirely in 7.0. You should avoid this feature"
|
152
|
+
|
153
|
+
# From Logstash 1.3 onwards, a template is applied to Elasticsearch during
|
154
|
+
# Logstash's startup if one with the name `template_name` does not already exist.
|
155
|
+
# By default, the contents of this template is the default template for
|
156
|
+
# `logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
|
157
|
+
# `logstash-*`. Should you require support for other index names, or would like
|
158
|
+
# to change the mappings in the template in general, a custom template can be
|
159
|
+
# specified by setting `template` to the path of a template file.
|
160
|
+
#
|
161
|
+
# Setting `manage_template` to false disables this feature. If you require more
|
162
|
+
# control over template creation, (e.g. creating indices dynamically based on
|
163
|
+
# field names) you should set `manage_template` to false and use the REST
|
164
|
+
# API to apply your templates manually.
|
165
|
+
#
|
166
|
+
# Default value is `true` unless data streams is enabled
|
167
|
+
config :manage_template, :validate => :boolean, :default => true
|
168
|
+
|
169
|
+
# This configuration option defines how the template is named inside Elasticsearch.
|
170
|
+
# Note that if you have used the template management features and subsequently
|
171
|
+
# change this, you will need to prune the old template manually, e.g.
|
172
|
+
#
|
173
|
+
# `curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
|
174
|
+
#
|
175
|
+
# where `OldTemplateName` is whatever the former setting was.
|
176
|
+
config :template_name, :validate => :string
|
177
|
+
|
178
|
+
# You can set the path to your own template here, if you so desire.
|
179
|
+
# If not set, the included template will be used.
|
180
|
+
config :template, :validate => :path
|
181
|
+
|
182
|
+
# The template_overwrite option will always overwrite the indicated template
|
183
|
+
# in Elasticsearch with either the one indicated by template or the included one.
|
184
|
+
# This option is set to false by default. If you always want to stay up to date
|
185
|
+
# with the template provided by Logstash, this option could be very useful to you.
|
186
|
+
# Likewise, if you have your own template file managed by puppet, for example, and
|
187
|
+
# you wanted to be able to update it regularly, this option could help there as well.
|
188
|
+
#
|
189
|
+
# Please note that if you are using your own customized version of the Logstash
|
190
|
+
# template (logstash), setting this to true will make Logstash to overwrite
|
191
|
+
# the "logstash" template (i.e. removing all customized settings)
|
192
|
+
config :template_overwrite, :validate => :boolean, :default => false
|
193
|
+
|
194
|
+
# Flag for enabling legacy template api for Elasticsearch 8
|
195
|
+
# Default auto will use index template api for Elasticsearch 8 and use legacy api for 7
|
196
|
+
# Set to legacy to use legacy template api
|
197
|
+
config :template_api, :validate => ['auto', 'legacy', 'composable'], :default => 'auto'
|
198
|
+
|
199
|
+
# The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
|
200
|
+
# See https://www.elastic.co/blog/elasticsearch-versioning-support.
|
201
|
+
config :version, :validate => :string
|
202
|
+
|
203
|
+
# The version_type to use for indexing.
|
204
|
+
# See https://www.elastic.co/blog/elasticsearch-versioning-support.
|
205
|
+
# See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
|
206
|
+
config :version_type, :validate => ["internal", 'external', "external_gt", "external_gte", "force"]
|
207
|
+
|
208
|
+
# A routing override to be applied to all processed events.
|
209
|
+
# This can be dynamic using the `%{foo}` syntax.
|
210
|
+
config :routing, :validate => :string
|
211
|
+
|
212
|
+
# For child documents, ID of the associated parent.
|
213
|
+
# This can be dynamic using the `%{foo}` syntax.
|
214
|
+
config :parent, :validate => :string, :default => nil
|
215
|
+
|
216
|
+
# For child documents, name of the join field
|
217
|
+
config :join_field, :validate => :string, :default => nil
|
218
|
+
|
219
|
+
# Set upsert content for update mode.
|
220
|
+
# Create a new document with this parameter as json string if `document_id` doesn't exist
|
221
|
+
config :upsert, :validate => :string, :default => ""
|
222
|
+
|
223
|
+
# Enable `doc_as_upsert` for update mode.
|
224
|
+
# Create a new document with source if `document_id` doesn't exist in Elasticsearch
|
225
|
+
config :doc_as_upsert, :validate => :boolean, :default => false
|
226
|
+
|
227
|
+
# Set script name for scripted update mode
|
228
|
+
config :script, :validate => :string, :default => ""
|
229
|
+
|
230
|
+
# Define the type of script referenced by "script" variable
|
231
|
+
# inline : "script" contains inline script
|
232
|
+
# indexed : "script" contains the name of script directly indexed in elasticsearch
|
233
|
+
# file    : "script" contains the name of script stored in elasticsearch's config directory
|
234
|
+
config :script_type, :validate => ["inline", 'indexed', "file"], :default => ["inline"]
|
235
|
+
|
236
|
+
# Set the language of the used script. If not set, this defaults to painless in ES 5.0
|
237
|
+
config :script_lang, :validate => :string, :default => "painless"
|
238
|
+
|
239
|
+
# Set variable name passed to script (scripted update)
|
240
|
+
config :script_var_name, :validate => :string, :default => "event"
|
241
|
+
|
242
|
+
# if enabled, script is in charge of creating non-existent document (scripted update)
|
243
|
+
config :scripted_upsert, :validate => :boolean, :default => false
|
244
|
+
|
245
|
+
# The number of times Elasticsearch should internally retry an update/upserted document
|
246
|
+
# See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
|
247
|
+
# for more info
|
248
|
+
config :retry_on_conflict, :validate => :number, :default => 1
|
249
|
+
|
250
|
+
# Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
|
251
|
+
# here like `pipeline => "%{INGEST_PIPELINE}"`
|
252
|
+
config :pipeline, :validate => :string, :default => nil
|
253
|
+
|
254
|
+
# -----
|
255
|
+
# ILM configurations (beta)
|
256
|
+
# -----
|
257
|
+
# Flag for enabling Index Lifecycle Management integration.
|
258
|
+
config :ilm_enabled, :validate => [true, false, 'true', 'false', 'auto'], :default => 'auto'
|
259
|
+
|
260
|
+
# Rollover alias used for indexing data. If rollover alias doesn't exist, Logstash will create it and map it to the relevant index
|
261
|
+
config :ilm_rollover_alias, :validate => :string
|
262
|
+
|
263
|
+
# appends “{now/d}-000001” by default for new index creation, subsequent rollover indices will increment based on this pattern i.e. “000002”
|
264
|
+
# {now/d} is date math, and will insert the appropriate value automatically.
|
265
|
+
config :ilm_pattern, :validate => :string, :default => '{now/d}-000001'
|
266
|
+
|
267
|
+
# ILM policy to use, if undefined the default policy will be used.
|
268
|
+
config :ilm_policy, :validate => :string, :default => DEFAULT_POLICY
|
269
|
+
|
270
|
+
attr_reader :client
|
271
|
+
attr_reader :default_index
|
272
|
+
attr_reader :default_ilm_rollover_alias
|
273
|
+
attr_reader :default_template_name
|
274
|
+
|
275
|
+
# Plugin constructor. `super` performs the standard config validation, then
# ECS-compatibility-dependent defaults and SSL-related settings are
# normalized before the pipeline later calls #register.
def initialize(*params)
  super
  setup_ecs_compatibility_related_defaults
  setup_ssl_params!
end
|
280
|
+
|
281
|
+
# Plugin lifecycle hook: validates configuration, builds the HTTP client and
# starts the background thread that completes setup (templates, ILM, data
# streams) once a connection to Elasticsearch is established.
# @raise [LogStash::ConfigurationError] when dlq_custom_codes is set but the DLQ is disabled
def register
  if !failure_type_logging_whitelist.empty?
    log_message = "'failure_type_logging_whitelist' is deprecated and in a future version of Elasticsearch " +
      "output plugin will be removed, please use 'silence_errors_in_log' instead."
    @deprecation_logger.deprecated log_message
    @logger.warn log_message
    # fold the deprecated list into its replacement (Array#| union, no duplicates)
    @silence_errors_in_log = silence_errors_in_log | failure_type_logging_whitelist
  end

  @after_successful_connection_done = Concurrent::AtomicBoolean.new(false)
  @stopping = Concurrent::AtomicBoolean.new(false)

  check_action_validity

  # FIX: removed leftover debug prefix "1111111-----" from the log message
  @logger.info("New Elasticsearch output", :class => self.class.name, :hosts => @hosts.map(&:sanitized).map(&:to_s))

  # the license_checking behaviour in the Pool class is externalized in the LogStash::ElasticSearchOutputLicenseChecker
  # class defined in license_check.rb. This license checking is specific to the elasticsearch output here and passed
  # to build_client down to the Pool class.
  @client = build_client(LicenseChecker.new(@logger))

  # Avoids race conditions in the @data_stream_config initialization (invoking check_data_stream_config! twice).
  # It's being concurrently invoked by this register method and by the finish_register on the @after_successful_connection_thread
  data_stream_enabled = data_stream_config?

  setup_template_manager_defaults(data_stream_enabled)
  # To support BWC, we check if DLQ exists in core (< 5.4). If it doesn't, we use nil to resort to previous behavior.
  @dlq_writer = dlq_enabled? ? execution_context.dlq_writer : nil

  @dlq_codes = DOC_DLQ_CODES.to_set

  if dlq_enabled?
    check_dlq_custom_codes
    @dlq_codes.merge(dlq_custom_codes)
  else
    raise LogStash::ConfigurationError, "DLQ feature (dlq_custom_codes) is configured while DLQ is not enabled" unless dlq_custom_codes.empty?
  end

  setup_mapper_and_target(data_stream_enabled)

  @bulk_request_metrics = metric.namespace(:bulk_requests)
  @document_level_metrics = metric.namespace(:documents)

  @after_successful_connection_thread = after_successful_connection do
    begin
      finish_register
      true # thread.value
    rescue => e
      # we do not want to halt the thread with an exception as that has consequences for LS
      e # thread.value
    ensure
      @after_successful_connection_done.make_true
    end
  end
end
|
337
|
+
|
338
|
+
# Chooses how events are turned into bulk-API action tuples and how the
# target index / data-stream name is resolved for each event.
# @param data_stream_enabled [Boolean] result of `data_stream_config?`
def setup_mapper_and_target(data_stream_enabled)
  if data_stream_enabled
    @event_mapper = -> (e) { data_stream_event_action_tuple(e) }
    @event_target = -> (e) { data_stream_name(e) }
    @index = "#{data_stream_type}-#{data_stream_dataset}-#{data_stream_namespace}".freeze # default name
  else
    @event_mapper = -> (e) { event_action_tuple(e) }
    # classic mode: the index name is sprintf-interpolated per event
    @event_target = -> (e) { e.sprintf(@index) }
  end
end
|
348
|
+
|
349
|
+
# @override post-register when ES connection established
# Runs the setup steps that need a live cluster: data-stream version
# assertion, cluster UUID discovery, template installation and ILM setup.
# Executed on @after_successful_connection_thread, not the pipeline thread.
def finish_register
  assert_es_version_supports_data_streams if data_stream_config?
  discover_cluster_uuid
  install_template
  setup_ilm if ilm_in_use?
  super
end
|
357
|
+
|
358
|
+
# @override to handle proxy => '' as if none was set
# An empty proxy string (possibly via an env-var reference resolving to '')
# is removed from params so the mixin does not try to use it; @proxy keeps
# the empty value so later code can tell it was explicitly blanked.
def config_init(params)
  proxy = params['proxy']
  if proxy.is_a?(String)
    # environment variables references aren't yet resolved
    proxy = deep_replace(proxy)
    if proxy.empty?
      params.delete('proxy')
      @proxy = ''
    else
      params['proxy'] = proxy # do not do resolving again
    end
  end
  super(params)
end
|
373
|
+
|
374
|
+
# Receive an array of events and immediately attempt to index them (no buffering)
# Blocks until the post-connection setup has completed (or shutdown is
# requested), then submits the mappable events and routes mapping failures
# to the DLQ / log.
def multi_receive(events)
  # @after_successful_connection_done is nilled out once setup finished cleanly
  wait_for_successful_connection if @after_successful_connection_done
  events_mapped = safe_interpolation_map_events(events)
  retrying_submit(events_mapped.successful_events)
  unless events_mapped.event_mapping_errors.empty?
    handle_event_mapping_errors(events_mapped.event_mapping_errors)
  end
end
|
383
|
+
|
384
|
+
# @param: Arrays of FailedEventMapping
private
# Routes events that failed action-tuple mapping to the DLQ when available,
# otherwise logs each failure; always bumps the non-retryable counter.
def handle_event_mapping_errors(event_mapping_errors)
  failure_count = event_mapping_errors.size

  # if DLQ is enabled, log the events to provide issue insights to users.
  @logger.warn("Events could not be indexed and routing to DLQ, count: #{failure_count}") if @dlq_writer

  event_mapping_errors.each do |failure|
    detailed_message = "#{failure.message}; event: `#{failure.event.to_hash_with_metadata}`"
    if @dlq_writer
      @dlq_writer.write(failure.event, detailed_message)
    else
      @logger.warn(detailed_message)
    end
  end

  @document_level_metrics.increment(:non_retryable_failures, failure_count)
end
|
398
|
+
|
399
|
+
# Result of mapping a batch: tuples that mapped cleanly plus per-event failures.
MapEventsResult = Struct.new(:successful_events, :event_mapping_errors)
# One event that failed interpolation/mapping, with the failure reason.
FailedEventMapping = Struct.new(:event, :message)
|
401
|
+
|
402
|
+
private
# Maps every event through @event_mapper; events whose mapping raises
# EventMappingError are collected separately so the rest of the batch can
# still be submitted.
# @return [MapEventsResult]
def safe_interpolation_map_events(events)
  mapped_tuples = []   # list of LogStash::Outputs::ElasticSearch::EventActionTuple
  failed_mappings = [] # list of FailedEventMapping
  events.each do |event|
    begin
      mapped_tuples << @event_mapper.call(event)
    rescue EventMappingError => mapping_error
      failed_mappings << FailedEventMapping.new(event, mapping_error.message)
    end
  end
  MapEventsResult.new(mapped_tuples, failed_mappings)
end
|
415
|
+
|
416
|
+
public
# Maps events to bulk action tuples, silently dropping any that fail
# interpolation/mapping.
# @return [Array] the successfully mapped tuples
def map_events(events)
  mapping_result = safe_interpolation_map_events(events)
  mapping_result.successful_events
end
|
420
|
+
|
421
|
+
# Blocks the pipeline worker until the post-connection setup thread signals
# completion (or a shutdown is requested, in which case the batch is aborted).
# On clean completion @after_successful_connection_done is nilled so the
# guard in #multi_receive skips this method on subsequent batches; on setup
# failure the error keeps being logged on every batch.
def wait_for_successful_connection
  after_successful_connection_done = @after_successful_connection_done
  return unless after_successful_connection_done
  stoppable_sleep 1 until (after_successful_connection_done.true? || pipeline_shutdown_requested?)

  if pipeline_shutdown_requested? && !after_successful_connection_done.true?
    logger.info "Aborting the batch due to shutdown request while waiting for connections to become live"
    abort_batch_if_available!
  end

  # Thread#value joins and returns the block's result (true or the rescued Exception)
  status = @after_successful_connection_thread && @after_successful_connection_thread.value
  if status.is_a?(Exception) # check if thread 'halted' with an error
    # keep logging that something isn't right (from every #multi_receive)
    @logger.error "Elasticsearch setup did not complete normally, please review previously logged errors",
                  message: status.message, exception: status.class
  else
    @after_successful_connection_done = nil # do not execute __method__ again if all went well
  end
end
private :wait_for_successful_connection
|
441
|
+
|
442
|
+
# Shuts the output down: flags in-flight retry loops to stop, waits for the
# post-connection setup thread to finish, then closes the HTTP client.
def close
  @stopping&.make_true
  stop_after_successful_connection_thread
  @client&.close
end
|
447
|
+
|
448
|
+
private
|
449
|
+
|
450
|
+
# Waits for the post-connection setup thread to finish, if one was started.
def stop_after_successful_connection_thread
  pending = @after_successful_connection_thread
  pending.join unless pending.nil?
end
|
453
|
+
|
454
|
+
# Convert the event into a 3-tuple of action, params and event hash
# @raise [UnsupportedActionError] when the (possibly sprintf'd) action is not a known bulk verb
# @return [EventActionTuple]
def event_action_tuple(event)
  params = common_event_params(event)
  params[:_type] = get_event_type(event) if use_event_type?(nil)

  if @parent
    if @join_field
      # parent/child join: rewrite the join field and route by the parent id
      join_value = event.get(@join_field)
      parent_value = event.sprintf(@parent)
      event.set(@join_field, { "name" => join_value, "parent" => parent_value })
      params[routing_field_name] = event.sprintf(@parent)
    else
      params[:parent] = event.sprintf(@parent)
    end
  end

  # default action is 'index'; @action may be a sprintf pattern per event
  action = event.sprintf(@action || 'index')
  raise UnsupportedActionError, action unless VALID_HTTP_ACTIONS.include?(action)

  if action == 'update'
    # empty-string defaults mean "not configured" for upsert/script
    params[:_upsert] = LogStash::Json.load(event.sprintf(@upsert)) if @upsert != ""
    params[:_script] = event.sprintf(@script) if @script != ""
    params[retry_on_conflict_action_name] = @retry_on_conflict
  end

  params[:version] = event.sprintf(@version) if @version
  params[:version_type] = event.sprintf(@version_type) if @version_type

  EventActionTuple.new(action, params, event)
end
|
484
|
+
|
485
|
+
# A [action, params, event_data] triple handed to the bulk API which also
# keeps a handle on the originating event (for DLQ routing / error reporting).
class EventActionTuple < Array # TODO: acting as an array for compatibility

  attr_reader :event

  # @param event_data [Hash, nil] pre-serialized payload; falls back to event.to_hash
  def initialize(action, params, event, event_data = nil)
    super(3)
    self[0] = action
    self[1] = params
    self[2] = event_data || event.to_hash
    @event = event
  end

end
|
498
|
+
|
499
|
+
# Base error for events that cannot be converted into a bulk action tuple;
# such events are routed to the DLQ (or logged) instead of being retried.
# The former explicit `initialize(msg = nil); super; end` was redundant
# boilerplate — it forwarded identically to the inherited initializer.
class EventMappingError < ArgumentError
end
|
504
|
+
|
505
|
+
# Raised when the sprintf'd index name still contains an unresolved
# %{...} placeholder (i.e. the referenced event field was missing).
class IndexInterpolationError < EventMappingError
  def initialize(bad_formatted_index)
    message = "Badly formatted index, after interpolation still contains placeholder: [#{bad_formatted_index}]"
    super(message)
  end
end
|
510
|
+
|
511
|
+
# Raised when the resolved action is not one of the supported bulk API verbs.
class UnsupportedActionError < EventMappingError
  def initialize(bad_action)
    reason = "Elasticsearch doesn't support [#{bad_action}] action"
    super(reason)
  end
end
|
516
|
+
|
517
|
+
# @return Hash (initial) parameters for given event
# @private shared event params factory between index and data_stream mode
# @raise [IndexInterpolationError] when the interpolated index still contains
#   a %{...} placeholder and dlq_on_failed_indexname_interpolation is enabled
def common_event_params(event)
  sprintf_index = @event_target.call(event)
  # a leftover %{...} means a field referenced by the index pattern was missing from the event
  raise IndexInterpolationError, sprintf_index if sprintf_index.match(/%{.*?}/) && dlq_on_failed_indexname_interpolation
  params = {
    :_id => @document_id ? event.sprintf(@document_id) : nil,
    :_index => sprintf_index,
    routing_field_name => @routing ? event.sprintf(@routing) : nil
  }

  target_pipeline = resolve_pipeline(event)
  # convention: empty string equates to not using a pipeline
  # this is useful when using a field reference in the pipeline setting, e.g.
  # elasticsearch {
  #   pipeline => "%{[@metadata][pipeline]}"
  # }
  params[:pipeline] = target_pipeline unless (target_pipeline.nil? || target_pipeline.empty?)

  params
end
|
538
|
+
|
539
|
+
# Determines the ingest pipeline template for an event: an explicit
# `pipeline` config wins, otherwise the event's
# [@metadata][target_ingest_pipeline] field (presumably set upstream —
# e.g. by an integration filter; confirm against callers).
# @return [String, nil] the sprintf-interpolated pipeline name, or nil
def resolve_pipeline(event)
  template = @pipeline
  template ||= event.get("[@metadata][target_ingest_pipeline]")&.to_s
  return nil unless template
  event.sprintf(template)
end
|
543
|
+
|
544
|
+
# Discover companion gems named logstash-output-elasticsearch-* (e.g. a
# "-java" variant) and load each one's entry file at class-load time.
# NOTE(review): @@plugins is a class variable, shared across the whole
# inheritance tree of this plugin class.
@@plugins = Gem::Specification.find_all{|spec| spec.name =~ /logstash-output-elasticsearch-/ }

@@plugins.each do |plugin|
  # "logstash-output-elasticsearch-foo" => require "logstash/outputs/elasticsearch/foo"
  name = plugin.name.split('-')[-1]
  require "logstash/outputs/elasticsearch/#{name}"
end
|
550
|
+
|
551
|
+
# Bulk-update metadata key for optimistic-concurrency retries:
# ES 7+ renamed `_retry_on_conflict` to `retry_on_conflict`.
def retry_on_conflict_action_name
  if maximum_seen_major_version >= 7
    :retry_on_conflict
  else
    :_retry_on_conflict
  end
end
|
554
|
+
|
555
|
+
# Metadata key used for routing in bulk-action params.
# NOTE(review): kept as a method rather than a literal at call sites,
# presumably so version-specific variants can override it — confirm.
def routing_field_name
  :routing
end
|
558
|
+
|
559
|
+
# Determine the correct value for the 'type' field for the given event
DEFAULT_EVENT_TYPE_ES6 = "doc".freeze
DEFAULT_EVENT_TYPE_ES7 = "_doc".freeze

# Resolves the document `type` for an event: an explicit `document_type`
# config wins (sprintf-interpolated); otherwise a per-major-version
# default — "doc" on ES 6, "_doc" on ES 7, "" on anything newer.
def get_event_type(event)
  return event.sprintf(@document_type).to_s if @document_type

  case maximum_seen_major_version
  when 6 then DEFAULT_EVENT_TYPE_ES6
  when 7 then DEFAULT_EVENT_TYPE_ES7
  else ''
  end
end
|
580
|
+
|
581
|
+
##
# WARNING: This method is overridden in a subclass in Logstash Core 7.7-7.8's monitoring,
#          where a `client` argument is both required and ignored. In later versions of
#          Logstash Core it is optional and ignored, but to make it optional here would
#          allow us to accidentally break compatibility with Logstashes where it was required.
# @param noop_required_client [nil]: required `nil` for legacy reasons.
# @return [Boolean]
def use_event_type?(noop_required_client)
  es_major = maximum_seen_major_version
  # always set type for ES 6 (and older);
  # for ES 7 only when the user explicitly configured document_type;
  # never for ES 8+
  return true if es_major < 7
  es_major == 7 && @document_type
end
|
593
|
+
|
594
|
+
# Installs the index template via TemplateManager. Failures are logged
# rather than raised, so the output keeps running without a template.
def install_template
  begin
    TemplateManager.install_template(self)
  rescue => e
    @logger.error("Failed to install template", message: e.message, exception: e.class, backtrace: e.backtrace)
  end
end
|
599
|
+
|
600
|
+
# Seeds the ECS-mode-dependent defaults (index pattern, ILM rollover
# alias, template name), then applies them wherever the user did not
# configure an explicit value.
def setup_ecs_compatibility_related_defaults
  @default_index, @default_ilm_rollover_alias, @default_template_name =
    case ecs_compatibility
    when :disabled
      ["logstash-%{+yyyy.MM.dd}", "logstash", 'logstash']
    when :v1, :v8
      ["ecs-logstash-%{+yyyy.MM.dd}", "ecs-logstash", 'ecs-logstash']
    else
      fail("unsupported ECS Compatibility `#{ecs_compatibility}`")
    end

  # user-provided settings win; defaults only fill the gaps
  @index ||= default_index
  @ilm_rollover_alias ||= default_ilm_rollover_alias
  @template_name ||= default_template_name
end
|
618
|
+
|
619
|
+
# Data streams come with built-in index templates, so plugin-side
# template management is disabled unless the user explicitly set
# `manage_template` themselves.
def setup_template_manager_defaults(data_stream_enabled)
  return unless original_params["manage_template"].nil? && data_stream_enabled
  logger.debug("Disabling template management since data streams are enabled")
  @manage_template = false
end
|
625
|
+
|
626
|
+
# Normalizes the legacy SSL options onto their canonical `ssl_*` names
# (deprecation handling is delegated to the normalize_config helper),
# then mirrors every resolved value back into `params` so downstream
# consumers see the canonical keys.
def setup_ssl_params!
  @ssl_enabled = normalize_config(:ssl_enabled) { |n| n.with_deprecated_alias(:ssl) }

  @ssl_certificate_authorities = normalize_config(:ssl_certificate_authorities) do |n|
    # legacy `cacert` took a single path; canonical form is a list
    n.with_deprecated_mapping(:cacert) { |cacert| [cacert] }
  end

  @ssl_keystore_path = normalize_config(:ssl_keystore_path) { |n| n.with_deprecated_alias(:keystore) }
  @ssl_keystore_password = normalize_config(:ssl_keystore_password) { |n| n.with_deprecated_alias(:keystore_password) }
  @ssl_truststore_path = normalize_config(:ssl_truststore_path) { |n| n.with_deprecated_alias(:truststore) }
  @ssl_truststore_password = normalize_config(:ssl_truststore_password) { |n| n.with_deprecated_alias(:truststore_password) }

  @ssl_verification_mode = normalize_config(:ssl_verification_mode) do |n|
    # legacy boolean maps onto the verification-mode enum
    n.with_deprecated_mapping(:ssl_certificate_verification) do |verify|
      verify == true ? "full" : "none"
    end
  end

  {
    'ssl_enabled' => @ssl_enabled,
    'ssl_certificate_authorities' => @ssl_certificate_authorities,
    'ssl_keystore_path' => @ssl_keystore_path,
    'ssl_keystore_password' => @ssl_keystore_password,
    'ssl_truststore_path' => @ssl_truststore_path,
    'ssl_truststore_password' => @ssl_truststore_password,
    'ssl_verification_mode' => @ssl_verification_mode
  }.each { |key, value| params[key] = value unless value.nil? }
end
|
671
|
+
|
672
|
+
# To be overridden by the -java version
VALID_HTTP_ACTIONS = %w(index delete create update)

# @return [Array<String>] the set of acceptable bulk actions
def valid_actions
  VALID_HTTP_ACTIONS
end
|
677
|
+
|
678
|
+
# Validates the `action` setting at register time. A sprintf-style
# template (e.g. "%{[@metadata][action]}") defers validation to
# per-event resolution; otherwise the action must be in valid_actions.
# @raise [LogStash::ConfigurationError] on empty or unrecognized action
def check_action_validity
  return if @action.nil? # not set
  raise LogStash::ConfigurationError, "No action specified!" if @action.empty?

  # If we're using string interpolation, we're good!
  return if @action =~ /%{.+}/ || valid_actions.include?(@action)

  raise LogStash::ConfigurationError, "Action '#{@action}' is invalid! Pick one of #{valid_actions} or use a sprintf style statement"
end
|
688
|
+
|
689
|
+
# Rejects `dlq_custom_codes` entries that would shadow codes the plugin
# already treats specially: standard DLQ codes, success codes, and the
# version-conflict code. Checks run in that order.
# @raise [LogStash::ConfigurationError] when any reserved code is listed
def check_dlq_custom_codes
  [
    [DOC_DLQ_CODES, "are already defined as standard DLQ error codes"],
    [DOC_SUCCESS_CODES, "are success codes which cannot be redefined in dlq_custom_codes"],
    [[DOC_CONFLICT_CODE], "are error codes already defined as conflict which cannot be redefined in dlq_custom_codes"]
  ].each do |reserved, reason|
    overlap = dlq_custom_codes & reserved
    raise LogStash::ConfigurationError, "#{overlap} #{reason}" unless overlap.empty?
  end
end
|
699
|
+
end
|