logstash-output-elasticsearch 10.7.0-java → 10.8.3-java
- checksums.yaml +4 -4
- data/CHANGELOG.md +22 -0
- data/CONTRIBUTORS +1 -0
- data/README.md +1 -1
- data/docs/index.asciidoc +226 -153
- data/lib/logstash/outputs/elasticsearch.rb +302 -165
- data/lib/logstash/outputs/elasticsearch/http_client.rb +7 -2
- data/lib/logstash/outputs/elasticsearch/http_client/pool.rb +13 -28
- data/lib/logstash/outputs/elasticsearch/http_client_builder.rb +1 -0
- data/lib/logstash/outputs/elasticsearch/ilm.rb +9 -5
- data/lib/logstash/outputs/elasticsearch/license_checker.rb +47 -0
- data/lib/logstash/outputs/elasticsearch/template_manager.rb +8 -3
- data/lib/logstash/outputs/elasticsearch/templates/ecs-disabled/elasticsearch-8x.json +39 -33
- data/lib/logstash/plugin_mixins/elasticsearch/api_configs.rb +163 -0
- data/lib/logstash/{outputs → plugin_mixins}/elasticsearch/common.rb +40 -167
- data/lib/logstash/plugin_mixins/elasticsearch/noop_license_checker.rb +9 -0
- data/logstash-output-elasticsearch.gemspec +1 -1
- data/spec/es_spec_helper.rb +32 -12
- data/spec/fixtures/template-with-policy-es8x.json +50 -0
- data/spec/integration/outputs/ilm_spec.rb +34 -20
- data/spec/integration/outputs/metrics_spec.rb +1 -5
- data/spec/integration/outputs/retry_spec.rb +14 -2
- data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +45 -5
- data/spec/unit/outputs/elasticsearch/http_client_spec.rb +22 -0
- data/spec/unit/outputs/elasticsearch/template_manager_spec.rb +31 -0
- data/spec/unit/outputs/elasticsearch_spec.rb +2 -2
- data/spec/unit/outputs/license_check_spec.rb +41 -0
- metadata +10 -4
- data/lib/logstash/outputs/elasticsearch/common_configs.rb +0 -167
The bulk of the change is in data/lib/logstash/outputs/elasticsearch.rb:

```diff
--- a/data/lib/logstash/outputs/elasticsearch.rb
+++ b/data/lib/logstash/outputs/elasticsearch.rb
@@ -86,19 +86,16 @@ require "forwardable"
 class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   declare_threadsafe!

+  require "logstash/outputs/elasticsearch/license_checker"
   require "logstash/outputs/elasticsearch/http_client"
   require "logstash/outputs/elasticsearch/http_client_builder"
-  require "logstash/outputs/elasticsearch/common_configs"
-  require "logstash/outputs/elasticsearch/common"
+  require "logstash/plugin_mixins/elasticsearch/api_configs"
+  require "logstash/plugin_mixins/elasticsearch/common"
   require "logstash/outputs/elasticsearch/ilm"
-
   require 'logstash/plugin_mixins/ecs_compatibility_support'

-  # Protocol agnostic (i.e. non-http, non-java specific) configs go here
-  include(LogStash::Outputs::ElasticSearch::CommonConfigs)
-
   # Protocol agnostic methods
-  include(LogStash::Outputs::ElasticSearch::Common)
+  include(LogStash::PluginMixins::ElasticSearch::Common)

   # Methods for ILM support
   include(LogStash::Outputs::ElasticSearch::Ilm)
```
```diff
@@ -106,6 +103,11 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # ecs_compatibility option, provided by Logstash core or the support adapter.
   include(LogStash::PluginMixins::ECSCompatibilitySupport)

+  # Generic/API config options that any document indexer output needs
+  include(LogStash::PluginMixins::ElasticSearch::APIConfigs)
+
+  DEFAULT_POLICY = "logstash-policy"
+
   config_name "elasticsearch"

   # The Elasticsearch action to perform. Valid actions are:
```
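The net effect of the two hunks above is that the output no longer carries its own `CommonConfigs`/`Common` modules; the shared connection options and bulk-submission plumbing now live under `LogStash::PluginMixins::ElasticSearch`. As a rough sketch of the intent (the plugin class and `config_name` below are hypothetical, invented only for illustration), any other document-indexing output could reuse the same pieces:

```ruby
require "logstash/outputs/base"
require "logstash/plugin_mixins/elasticsearch/api_configs"
require "logstash/plugin_mixins/elasticsearch/common"

# Hypothetical output reusing the extracted mixins instead of re-declaring
# the hosts/auth/SSL/pool options and the client/retry plumbing itself.
class LogStash::Outputs::MyDocIndexer < LogStash::Outputs::Base
  include LogStash::PluginMixins::ElasticSearch::APIConfigs # shared `config` options (hosts, auth, SSL, timeouts, ...)
  include LogStash::PluginMixins::ElasticSearch::Common     # build_client, retrying_submit, etc.

  config_name "my_doc_indexer"
end
```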
```diff
@@ -122,158 +124,160 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
   config :action, :validate => :string, :default => "index"

-  #
-
-  #
-
+  # The index to write events to. This can be dynamic using the `%{foo}` syntax.
+  # The default value will partition your indices by day so you can more easily
+  # delete old data or only search specific date ranges.
+  # Indexes may not contain uppercase characters.
+  # For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}.
+  # LS uses Joda to format the index pattern from event timestamp.
+  # Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+  config :index, :validate => :string
+
+  config :document_type,
+    :validate => :string,
+    :deprecated => "Document types are being deprecated in Elasticsearch 6.0, and removed entirely in 7.0. You should avoid this feature"
+
+  # From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+  # Logstash's startup if one with the name `template_name` does not already exist.
+  # By default, the contents of this template is the default template for
+  # `logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+  # `logstash-*`. Should you require support for other index names, or would like
+  # to change the mappings in the template in general, a custom template can be
+  # specified by setting `template` to the path of a template file.
+  #
+  # Setting `manage_template` to false disables this feature. If you require more
+  # control over template creation, (e.g. creating indices dynamically based on
+  # field names) you should set `manage_template` to false and use the REST
+  # API to apply your templates manually.
+  config :manage_template, :validate => :boolean, :default => true
+
+  # This configuration option defines how the template is named inside Elasticsearch.
+  # Note that if you have used the template management features and subsequently
+  # change this, you will need to prune the old template manually, e.g.
+  #
+  # `curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
+  #
+  # where `OldTemplateName` is whatever the former setting was.
+  config :template_name, :validate => :string
+
+  # You can set the path to your own template here, if you so desire.
+  # If not set, the included template will be used.
+  config :template, :validate => :path
+
+  # The template_overwrite option will always overwrite the indicated template
+  # in Elasticsearch with either the one indicated by template or the included one.
+  # This option is set to false by default. If you always want to stay up to date
+  # with the template provided by Logstash, this option could be very useful to you.
+  # Likewise, if you have your own template file managed by puppet, for example, and
+  # you wanted to be able to update it regularly, this option could help there as well.
+  #
+  # Please note that if you are using your own customized version of the Logstash
+  # template (logstash), setting this to true will make Logstash to overwrite
+  # the "logstash" template (i.e. removing all customized settings)
+  config :template_overwrite, :validate => :boolean, :default => false

-  #
-  #
-  config :
+  # The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+  # See https://www.elastic.co/blog/elasticsearch-versioning-support.
+  config :version, :validate => :string

-  #
-  #
-  #
-  config :
-
-  #
-  #
-
-
-
-
-
-
-
-
-
-  #
-  #
-  config :
-
-  # Enable
-  #
-
-
-
-
-
-  #
-
-
-  #
-  config :
-
-  #
-
-
-
-
-
-
-
-
-
-
-  #
-  config :
-
-  #
-  #
-
-
-  #
-
-
-  #
-  config :
-
-  #
-
-
-  #
-
-
-
-  #
-
-
-
-
-
-  config :timeout, :validate => :number, :default => 60
-
-  # Set the Elasticsearch errors in the whitelist that you don't want to log.
-  # A useful example is when you want to skip all 409 errors
-  # which are `document_already_exists_exception`.
-  config :failure_type_logging_whitelist, :validate => :array, :default => []
-
-  # While the output tries to reuse connections efficiently we have a maximum.
-  # This sets the maximum number of open connections the output will create.
-  # Setting this too low may mean frequently closing / opening connections
-  # which is bad.
-  config :pool_max, :validate => :number, :default => 1000
-
-  # While the output tries to reuse connections efficiently we have a maximum per endpoint.
-  # This sets the maximum number of open connections per endpoint the output will create.
-  # Setting this too low may mean frequently closing / opening connections
-  # which is bad.
-  config :pool_max_per_route, :validate => :number, :default => 100
-
-  # HTTP Path where a HEAD request is sent when a backend is marked down
-  # the request is sent in the background to see if it has come back again
-  # before it is once again eligible to service requests.
-  # If you have custom firewall rules you may need to change this
-  config :healthcheck_path, :validate => :string
-
-  # How frequently, in seconds, to wait between resurrection attempts.
-  # Resurrection is the process by which backend endpoints marked 'down' are checked
-  # to see if they have come back to life
-  config :resurrect_delay, :validate => :number, :default => 5
-
-  # How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
-  # You may want to set this lower, if you get connection errors regularly
-  # Quoting the Apache commons docs (this client is based Apache Commmons):
-  # 'Defines period of inactivity in milliseconds after which persistent connections must
-  # be re-validated prior to being leased to the consumer. Non-positive value passed to
-  # this method disables connection validation. This check helps detect connections that
-  # have become stale (half-closed) while kept inactive in the pool.'
-  # See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-  config :validate_after_inactivity, :validate => :number, :default => 10000
-
-  # Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond
-  config :http_compression, :validate => :boolean, :default => false
-
-  # Custom Headers to send on each request to elasticsearch nodes
-  config :custom_headers, :validate => :hash, :default => {}
+  # The version_type to use for indexing.
+  # See https://www.elastic.co/blog/elasticsearch-versioning-support.
+  # See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+  config :version_type, :validate => ["internal", 'external', "external_gt", "external_gte", "force"]
+
+  # A routing override to be applied to all processed events.
+  # This can be dynamic using the `%{foo}` syntax.
+  config :routing, :validate => :string
+
+  # For child documents, ID of the associated parent.
+  # This can be dynamic using the `%{foo}` syntax.
+  config :parent, :validate => :string, :default => nil
+
+  # For child documents, name of the join field
+  config :join_field, :validate => :string, :default => nil
+
+  # Set upsert content for update mode.s
+  # Create a new document with this parameter as json string if `document_id` doesn't exists
+  config :upsert, :validate => :string, :default => ""
+
+  # Enable `doc_as_upsert` for update mode.
+  # Create a new document with source if `document_id` doesn't exist in Elasticsearch
+  config :doc_as_upsert, :validate => :boolean, :default => false
+
+  # Set script name for scripted update mode
+  config :script, :validate => :string, :default => ""
+
+  # Define the type of script referenced by "script" variable
+  # inline : "script" contains inline script
+  # indexed : "script" contains the name of script directly indexed in elasticsearch
+  # file : "script" contains the name of script stored in elasticseach's config directory
+  config :script_type, :validate => ["inline", 'indexed', "file"], :default => ["inline"]
+
+  # Set the language of the used script. If not set, this defaults to painless in ES 5.0
+  config :script_lang, :validate => :string, :default => "painless"
+
+  # Set variable name passed to script (scripted update)
+  config :script_var_name, :validate => :string, :default => "event"
+
+  # if enabled, script is in charge of creating non-existent document (scripted update)
+  config :scripted_upsert, :validate => :boolean, :default => false
+
+  # The number of times Elasticsearch should internally retry an update/upserted document
+  # See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
+  # for more info
+  config :retry_on_conflict, :validate => :number, :default => 1
+
+  # Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+  # here like `pipeline => "%{INGEST_PIPELINE}"`
+  config :pipeline, :validate => :string, :default => nil
+
+  # -----
+  # ILM configurations (beta)
+  # -----
+  # Flag for enabling Index Lifecycle Management integration.
+  config :ilm_enabled, :validate => [true, false, 'true', 'false', 'auto'], :default => 'auto'
+
+  # Rollover alias used for indexing data. If rollover alias doesn't exist, Logstash will create it and map it to the relevant index
+  config :ilm_rollover_alias, :validate => :string
+
+  # appends “{now/d}-000001” by default for new index creation, subsequent rollover indices will increment based on this pattern i.e. “000002”
+  # {now/d} is date math, and will insert the appropriate value automatically.
+  config :ilm_pattern, :validate => :string, :default => '{now/d}-000001'
+
+  # ILM policy to use, if undefined the default policy will be used.
+  config :ilm_policy, :validate => :string, :default => DEFAULT_POLICY
+
+  attr_reader :default_index
+  attr_reader :default_ilm_rollover_alias
+  attr_reader :default_template_name

   def initialize(*params)
     super
     setup_ecs_compatibility_related_defaults
   end

-  def setup_ecs_compatibility_related_defaults
-    case ecs_compatibility
-    when :disabled
-      @default_index = "logstash-%{+yyyy.MM.dd}"
-      @default_ilm_rollover_alias = "logstash"
-      @default_template_name = 'logstash'
-    when :v1
-      @default_index = "ecs-logstash-%{+yyyy.MM.dd}"
-      @default_ilm_rollover_alias = "ecs-logstash"
-      @default_template_name = 'ecs-logstash'
-    else
-      fail("unsupported ECS Compatibility `#{ecs_compatibility}`")
-    end
+  def register
+    @template_installed = Concurrent::AtomicBoolean.new(false)
+    @stopping = Concurrent::AtomicBoolean.new(false)
+    # To support BWC, we check if DLQ exists in core (< 5.4). If it doesn't, we use nil to resort to previous behavior.
+    @dlq_writer = dlq_enabled? ? execution_context.dlq_writer : nil

-    @index ||= default_index
-    @ilm_rollover_alias ||= default_ilm_rollover_alias
-    @template_name ||= default_template_name
-  end
+    check_action_validity

-
-
-
+    # the license_checking behaviour in the Pool class is externalized in the LogStash::ElasticSearchOutputLicenseChecker
+    # class defined in license_check.rb. This license checking is specific to the elasticsearch output here and passed
+    # to build_client down to the Pool class.
+    build_client(LicenseChecker.new(@logger))
+
+    @template_installer = setup_after_successful_connection do
+      discover_cluster_uuid
+      install_template
+      setup_ilm if ilm_in_use?
+    end
+    @bulk_request_metrics = metric.namespace(:bulk_requests)
+    @document_level_metrics = metric.namespace(:documents)
+    @logger.info("New Elasticsearch output", :class => self.class.name, :hosts => @hosts.map(&:sanitized).map(&:to_s))
+  end

   # @override to handle proxy => '' as if none was set
   def config_init(params)
```
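All of the index, template, versioning, scripting and ILM options shown above now live in this file (they were previously pulled in from common_configs.rb). A spec-style sketch of passing them as plugin params; the values mirror the defaults documented above, and the index pattern comes from the ECS-compatibility defaults shown later in the diff:

```ruby
require "logstash/outputs/elasticsearch"

# Illustrative params hash; every key matches a `config` declaration above
# (plus "hosts", which is declared in the APIConfigs mixin).
params = {
  "hosts"              => ["localhost:9200"],
  "index"              => "logstash-%{+yyyy.MM.dd}",
  "manage_template"    => true,
  "template_overwrite" => false,
  "ilm_enabled"        => "auto",
  "ilm_pattern"        => "{now/d}-000001",
  "ilm_policy"         => "logstash-policy"   # == DEFAULT_POLICY
}
output = LogStash::Outputs::ElasticSearch.new(params)
```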
```diff
@@ -291,20 +295,12 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
     super(params)
   end

-
-
-
-
-    # see https://github.com/logstash-plugins/logstash-output-elasticsearch/pull/934#pullrequestreview-396203307
-    validate_authentication
-    fill_hosts_from_cloud_id
-    setup_hosts
-
-    params["metric"] = metric
-    if @proxy.eql?('')
-      @logger.warn "Supplied proxy setting (proxy => '') has no effect"
+  # Receive an array of events and immediately attempt to index them (no buffering)
+  def multi_receive(events)
+    until @template_installed.true?
+      sleep 1
     end
-
+    retrying_submit(events.map {|e| event_action_tuple(e)})
   end

   def close
```
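`multi_receive` above is the whole per-batch path: it blocks until the background template installer has completed, then maps each event through `event_action_tuple` (next hunk) and hands the tuples to `retrying_submit` from the Common mixin. Roughly, for a registered instance:

```ruby
# Assumes `output` is a registered LogStash::Outputs::ElasticSearch instance, e.g. from the sketch above.
events = [
  LogStash::Event.new("message" => "hello"),
  LogStash::Event.new("message" => "world")
]
output.multi_receive(events)
# internally: retrying_submit(events.map { |e| event_action_tuple(e) })
```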
```diff
@@ -313,8 +309,65 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
     @client.close if @client
   end

-
-
+  # not private because used by ILM specs
+  def stop_template_installer
+    @template_installer.join unless @template_installer.nil?
+  end
+
+  # not private for elasticsearch_spec.rb
+  # Convert the event into a 3-tuple of action, params, and event
+  def event_action_tuple(event)
+    action = event.sprintf(@action)
+
+    params = {
+      :_id => @document_id ? event.sprintf(@document_id) : nil,
+      :_index => event.sprintf(@index),
+      routing_field_name => @routing ? event.sprintf(@routing) : nil
+    }
+
+    params[:_type] = get_event_type(event) if use_event_type?(nil)
+
+    if @pipeline
+      value = event.sprintf(@pipeline)
+      # convention: empty string equates to not using a pipeline
+      # this is useful when using a field reference in the pipeline setting, e.g.
+      #   elasticsearch {
+      #     pipeline => "%{[@metadata][pipeline]}"
+      #   }
+      params[:pipeline] = value unless value.empty?
+    end
+
+    if @parent
+      if @join_field
+        join_value = event.get(@join_field)
+        parent_value = event.sprintf(@parent)
+        event.set(@join_field, { "name" => join_value, "parent" => parent_value })
+        params[routing_field_name] = event.sprintf(@parent)
+      else
+        params[:parent] = event.sprintf(@parent)
+      end
+    end
+
+    if action == 'update'
+      params[:_upsert] = LogStash::Json.load(event.sprintf(@upsert)) if @upsert != ""
+      params[:_script] = event.sprintf(@script) if @script != ""
+      params[retry_on_conflict_action_name] = @retry_on_conflict
+    end
+
+    if @version
+      params[:version] = event.sprintf(@version)
+    end
+
+    if @version_type
+      params[:version_type] = event.sprintf(@version_type)
+    end
+
+    [action, params, event]
+  end
+
+  # not private for elasticsearch_spec.rb
+  def retry_on_conflict_action_name
+    maximum_seen_major_version >= 7 ? :retry_on_conflict : :_retry_on_conflict
   end

   @@plugins = Gem::Specification.find_all{|spec| spec.name =~ /logstash-output-elasticsearch-/ }
```
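For orientation, a rough picture of what `event_action_tuple` yields; the values below are invented, and which keys appear depends on the detected Elasticsearch major version and the options set:

```ruby
action, params, event = output.event_action_tuple(LogStash::Event.new("message" => "hello"))
# action => "index" (the sprintf'd `action` setting)
# params => { :_id => nil, :_index => "logstash-2021.02.01", :routing => nil }
#   - :_type is only added when use_event_type? says so (ES <= 6, or ES 7 with document_type set)
#   - for update actions, :retry_on_conflict (ES >= 7) or :_retry_on_conflict (ES < 7) is added,
#     per retry_on_conflict_action_name above
```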
```diff
@@ -324,4 +377,88 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
     require "logstash/outputs/elasticsearch/#{name}"
   end

-
+  private
+
+  def routing_field_name
+    maximum_seen_major_version >= 6 ? :routing : :_routing
+  end
+
+  # Determine the correct value for the 'type' field for the given event
+  DEFAULT_EVENT_TYPE_ES6="doc".freeze
+  DEFAULT_EVENT_TYPE_ES7="_doc".freeze
+  def get_event_type(event)
+    # Set the 'type' value for the index.
+    type = if @document_type
+             event.sprintf(@document_type)
+           else
+             if maximum_seen_major_version < 6
+               event.get("type") || DEFAULT_EVENT_TYPE_ES6
+             elsif maximum_seen_major_version == 6
+               DEFAULT_EVENT_TYPE_ES6
+             elsif maximum_seen_major_version == 7
+               DEFAULT_EVENT_TYPE_ES7
+             else
+               nil
+             end
+           end
+
+    if !(type.is_a?(String) || type.is_a?(Numeric))
+      @logger.warn("Bad event type! Non-string/integer type value set!", :type_class => type.class, :type_value => type.to_s, :event => event)
+    end
+
+    type.to_s
+  end
+
+  ##
+  # WARNING: This method is overridden in a subclass in Logstash Core 7.7-7.8's monitoring,
+  # where a `client` argument is both required and ignored. In later versions of
+  # Logstash Core it is optional and ignored, but to make it optional here would
+  # allow us to accidentally break compatibility with Logstashes where it was required.
+  # @param noop_required_client [nil]: required `nil` for legacy reasons.
+  # @return [Boolean]
+  def use_event_type?(noop_required_client)
+    # always set type for ES <= 6
+    # for ES 7 only set it if the user defined it
+    (maximum_seen_major_version < 7) || (maximum_seen_major_version == 7 && @document_type)
+  end
+
+  def install_template
+    TemplateManager.install_template(self)
+    @template_installed.make_true
+  end
+
+  def setup_ecs_compatibility_related_defaults
+    case ecs_compatibility
+    when :disabled
+      @default_index = "logstash-%{+yyyy.MM.dd}"
+      @default_ilm_rollover_alias = "logstash"
+      @default_template_name = 'logstash'
+    when :v1
+      @default_index = "ecs-logstash-%{+yyyy.MM.dd}"
+      @default_ilm_rollover_alias = "ecs-logstash"
+      @default_template_name = 'ecs-logstash'
+    else
+      fail("unsupported ECS Compatibility `#{ecs_compatibility}`")
+    end
+
+    @index ||= default_index
+    @ilm_rollover_alias ||= default_ilm_rollover_alias
+    @template_name ||= default_template_name
+  end
+
+  # To be overidden by the -java version
+  VALID_HTTP_ACTIONS=["index", "delete", "create", "update"]
+  def valid_actions
+    VALID_HTTP_ACTIONS
+  end
+
+  def check_action_validity
+    raise LogStash::ConfigurationError, "No action specified!" unless @action
+
+    # If we're using string interpolation, we're good!
+    return if @action =~ /%{.+}/
+    return if valid_actions.include?(@action)
+
+    raise LogStash::ConfigurationError, "Action '#{@action}' is invalid! Pick one of #{valid_actions} or use a sprintf style statement"
+  end
+end
```
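The `_type` handling in `get_event_type`/`use_event_type?` above is version-dependent. A stand-alone paraphrase (not part of the plugin) that spells out the per-version rules:

```ruby
# Returns the _type the output would emit for a given ES major version, or nil when no _type is sent.
def resolved_event_type(es_major, document_type = nil, event_type_field = nil)
  return nil if es_major >= 8              # ES 8+: use_event_type? is false, no _type at all
  return document_type if document_type    # an explicit document_type always wins on ES <= 7
  case es_major
  when 0..5 then event_type_field || "doc" # honour the event's own "type" field, default "doc"
  when 6    then "doc"                     # DEFAULT_EVENT_TYPE_ES6
  when 7    then nil                       # without document_type, ES 7 gets no _type
  end
end

resolved_event_type(6)              # => "doc"
resolved_event_type(7, "mytype")    # => "mytype"
resolved_event_type(8, "mytype")    # => nil
```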