logstash-output-elasticsearch 10.8.1-java → 11.0.0-java

Sign up to get free protection for your applications and access to all of the features.
Files changed (33) [hide] [show]
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +24 -0
  3. data/README.md +1 -1
  4. data/docs/index.asciidoc +282 -114
  5. data/lib/logstash/outputs/elasticsearch.rb +125 -65
  6. data/lib/logstash/outputs/elasticsearch/data_stream_support.rb +233 -0
  7. data/lib/logstash/outputs/elasticsearch/http_client.rb +59 -21
  8. data/lib/logstash/outputs/elasticsearch/http_client/pool.rb +47 -34
  9. data/lib/logstash/outputs/elasticsearch/ilm.rb +11 -12
  10. data/lib/logstash/outputs/elasticsearch/license_checker.rb +19 -22
  11. data/lib/logstash/outputs/elasticsearch/template_manager.rb +3 -5
  12. data/lib/logstash/plugin_mixins/elasticsearch/api_configs.rb +157 -153
  13. data/lib/logstash/plugin_mixins/elasticsearch/common.rb +80 -60
  14. data/logstash-output-elasticsearch.gemspec +2 -2
  15. data/spec/es_spec_helper.rb +3 -6
  16. data/spec/integration/outputs/data_stream_spec.rb +61 -0
  17. data/spec/integration/outputs/ilm_spec.rb +22 -18
  18. data/spec/integration/outputs/ingest_pipeline_spec.rb +4 -2
  19. data/spec/integration/outputs/retry_spec.rb +14 -2
  20. data/spec/integration/outputs/sniffer_spec.rb +0 -1
  21. data/spec/spec_helper.rb +14 -0
  22. data/spec/unit/http_client_builder_spec.rb +9 -9
  23. data/spec/unit/outputs/elasticsearch/data_stream_support_spec.rb +542 -0
  24. data/spec/unit/outputs/elasticsearch/http_client/manticore_adapter_spec.rb +1 -0
  25. data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +27 -13
  26. data/spec/unit/outputs/elasticsearch/http_client_spec.rb +59 -41
  27. data/spec/unit/outputs/elasticsearch/template_manager_spec.rb +1 -3
  28. data/spec/unit/outputs/elasticsearch_proxy_spec.rb +4 -5
  29. data/spec/unit/outputs/elasticsearch_spec.rb +228 -38
  30. data/spec/unit/outputs/elasticsearch_ssl_spec.rb +1 -2
  31. data/spec/unit/outputs/error_whitelist_spec.rb +4 -3
  32. data/spec/unit/outputs/license_check_spec.rb +0 -16
  33. metadata +23 -16
@@ -1,6 +1,4 @@
1
1
  require "logstash/outputs/elasticsearch"
2
- require "cabin"
3
- require "base64"
4
2
  require 'logstash/outputs/elasticsearch/http_client/pool'
5
3
  require 'logstash/outputs/elasticsearch/http_client/manticore_adapter'
6
4
  require 'cgi'
@@ -80,12 +78,16 @@ module LogStash; module Outputs; class ElasticSearch;
80
78
 
81
79
  def template_install(name, template, force=false)
82
80
  if template_exists?(name) && !force
83
- @logger.debug("Found existing Elasticsearch template. Skipping template management", :name => name)
81
+ @logger.debug("Found existing Elasticsearch template, skipping template management", name: name)
84
82
  return
85
83
  end
86
84
  template_put(name, template)
87
85
  end
88
86
 
87
+ def last_es_version
88
+ @pool.last_es_version
89
+ end
90
+
89
91
  def maximum_seen_major_version
90
92
  @pool.maximum_seen_major_version
91
93
  end
@@ -109,27 +111,50 @@ module LogStash; module Outputs; class ElasticSearch;
109
111
  body_stream = StringIO.new
110
112
  if http_compression
111
113
  body_stream.set_encoding "BINARY"
112
- stream_writer = Zlib::GzipWriter.new(body_stream, Zlib::DEFAULT_COMPRESSION, Zlib::DEFAULT_STRATEGY)
113
- else
114
+ stream_writer = gzip_writer(body_stream)
115
+ else
114
116
  stream_writer = body_stream
115
117
  end
116
118
  bulk_responses = []
117
- bulk_actions.each do |action|
119
+ batch_actions = []
120
+ bulk_actions.each_with_index do |action, index|
118
121
  as_json = action.is_a?(Array) ?
119
122
  action.map {|line| LogStash::Json.dump(line)}.join("\n") :
120
123
  LogStash::Json.dump(action)
121
124
  as_json << "\n"
122
- if (body_stream.size + as_json.bytesize) > TARGET_BULK_BYTES
123
- bulk_responses << bulk_send(body_stream) unless body_stream.size == 0
125
+ if (stream_writer.pos + as_json.bytesize) > TARGET_BULK_BYTES && stream_writer.pos > 0
126
+ stream_writer.flush # ensure writer has sync'd buffers before reporting sizes
127
+ logger.debug("Sending partial bulk request for batch with one or more actions remaining.",
128
+ :action_count => batch_actions.size,
129
+ :payload_size => stream_writer.pos,
130
+ :content_length => body_stream.size,
131
+ :batch_offset => (index + 1 - batch_actions.size))
132
+ bulk_responses << bulk_send(body_stream, batch_actions)
133
+ body_stream.truncate(0) && body_stream.seek(0)
134
+ stream_writer = gzip_writer(body_stream) if http_compression
135
+ batch_actions.clear
124
136
  end
125
137
  stream_writer.write(as_json)
138
+ batch_actions << action
126
139
  end
127
140
  stream_writer.close if http_compression
128
- bulk_responses << bulk_send(body_stream) if body_stream.size > 0
141
+ logger.debug("Sending final bulk request for batch.",
142
+ :action_count => batch_actions.size,
143
+ :payload_size => stream_writer.pos,
144
+ :content_length => body_stream.size,
145
+ :batch_offset => (actions.size - batch_actions.size))
146
+ bulk_responses << bulk_send(body_stream, batch_actions) if body_stream.size > 0
129
147
  body_stream.close if !http_compression
130
148
  join_bulk_responses(bulk_responses)
131
149
  end
132
150
 
151
+ def gzip_writer(io)
152
+ fail(ArgumentError, "Cannot create gzip writer on IO with unread bytes") unless io.eof?
153
+ fail(ArgumentError, "Cannot create gzip writer on non-empty IO") unless io.pos == 0
154
+
155
+ Zlib::GzipWriter.new(io, Zlib::DEFAULT_COMPRESSION, Zlib::DEFAULT_STRATEGY)
156
+ end
157
+
133
158
  def join_bulk_responses(bulk_responses)
134
159
  {
135
160
  "errors" => bulk_responses.any? {|r| r["errors"] == true},
@@ -137,25 +162,37 @@ module LogStash; module Outputs; class ElasticSearch;
137
162
  }
138
163
  end
139
164
 
140
- def bulk_send(body_stream)
165
+ def bulk_send(body_stream, batch_actions)
141
166
  params = http_compression ? {:headers => {"Content-Encoding" => "gzip"}} : {}
142
- # Discard the URL
143
167
  response = @pool.post(@bulk_path, params, body_stream.string)
144
- if !body_stream.closed?
145
- body_stream.truncate(0)
146
- body_stream.seek(0)
147
- end
148
168
 
149
169
  @bulk_response_metrics.increment(response.code.to_s)
150
170
 
151
- if response.code != 200
171
+ case response.code
172
+ when 200 # OK
173
+ LogStash::Json.load(response.body)
174
+ when 413 # Payload Too Large
175
+ logger.warn("Bulk request rejected: `413 Payload Too Large`", :action_count => batch_actions.size, :content_length => body_stream.size)
176
+ emulate_batch_error_response(batch_actions, response.code, 'payload_too_large')
177
+ else
152
178
  url = ::LogStash::Util::SafeURI.new(response.final_url)
153
179
  raise ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError.new(
154
180
  response.code, url, body_stream.to_s, response.body
155
181
  )
156
182
  end
183
+ end
157
184
 
158
- LogStash::Json.load(response.body)
185
+ def emulate_batch_error_response(actions, http_code, reason)
186
+ {
187
+ "errors" => true,
188
+ "items" => actions.map do |action|
189
+ action = action.first if action.is_a?(Array)
190
+ request_action, request_parameters = action.first
191
+ {
192
+ request_action => {"status" => http_code, "error" => { "type" => reason }}
193
+ }
194
+ end
195
+ }
159
196
  end
160
197
 
161
198
  def get(path)
@@ -349,7 +386,7 @@ module LogStash; module Outputs; class ElasticSearch;
349
386
 
350
387
  def template_put(name, template)
351
388
  path = "#{template_endpoint}/#{name}"
352
- logger.info("Installing elasticsearch template to #{path}")
389
+ logger.info("Installing Elasticsearch template", name: name)
353
390
  @pool.put(path, nil, LogStash::Json.dump(template))
354
391
  end
355
392
 
@@ -366,13 +403,13 @@ module LogStash; module Outputs; class ElasticSearch;
366
403
 
367
404
  # Create a new rollover alias
368
405
  def rollover_alias_put(alias_name, alias_definition)
369
- logger.info("Creating rollover alias #{alias_name}")
370
406
  begin
371
407
  @pool.put(CGI::escape(alias_name), nil, LogStash::Json.dump(alias_definition))
408
+ logger.info("Created rollover alias", name: alias_name)
372
409
  # If the rollover alias already exists, ignore the error that comes back from Elasticsearch
373
410
  rescue ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError => e
374
411
  if e.response_code == 400
375
- logger.info("Rollover Alias #{alias_name} already exists. Skipping")
412
+ logger.info("Rollover alias already exists, skipping", name: alias_name)
376
413
  return
377
414
  end
378
415
  raise e
@@ -393,13 +430,14 @@ module LogStash; module Outputs; class ElasticSearch;
393
430
 
394
431
  def ilm_policy_put(name, policy)
395
432
  path = "_ilm/policy/#{name}"
396
- logger.info("Installing ILM policy #{policy} to #{path}")
433
+ logger.info("Installing ILM policy #{policy}", name: name)
397
434
  @pool.put(path, nil, LogStash::Json.dump(policy))
398
435
  end
399
436
 
400
437
 
401
438
  # Build a bulk item for an elasticsearch update action
402
439
  def update_action_builder(args, source)
440
+ args = args.clone()
403
441
  if args[:_script]
404
442
  # Use the event as a hash from your script with variable name defined
405
443
  # by script_var_name (default: "event")
@@ -1,3 +1,4 @@
1
+ require "concurrent/atomic/atomic_reference"
1
2
  require "logstash/plugin_mixins/elasticsearch/noop_license_checker"
2
3
 
3
4
  module LogStash; module Outputs; class ElasticSearch; class HttpClient;
@@ -71,6 +72,8 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
71
72
  @stopping = false
72
73
 
73
74
  @license_checker = options[:license_checker] || LogStash::PluginMixins::ElasticSearch::NoopLicenseChecker::INSTANCE
75
+
76
+ @last_es_version = Concurrent::AtomicReference.new
74
77
  end
75
78
 
76
79
  def start
@@ -118,12 +121,6 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
118
121
  @state_mutex.synchronize { @url_info }
119
122
  end
120
123
 
121
- def maximum_seen_major_version
122
- @state_mutex.synchronize do
123
- @maximum_seen_major_version
124
- end
125
- end
126
-
127
124
  def urls
128
125
  url_info.keys
129
126
  end
@@ -252,11 +249,12 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
252
249
  response = perform_request_to_url(url, :get, LICENSE_PATH)
253
250
  LogStash::Json.load(response.body)
254
251
  rescue => e
255
- logger.error("Unable to get license information", url: url.sanitized.to_s, error_type: e.class, error: e.message)
252
+ logger.error("Unable to get license information", url: url.sanitized.to_s, exception: e.class, message: e.message)
256
253
  {}
257
254
  end
258
255
 
259
256
  def health_check_request(url)
257
+ logger.debug("Running health check to see if an ES connection is working", url: url.sanitized.to_s, path: @healthcheck_path)
260
258
  perform_request_to_url(url, :head, @healthcheck_path)
261
259
  end
262
260
 
@@ -264,29 +262,20 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
264
262
  # Try to keep locking granularity low such that we don't affect IO...
265
263
  @state_mutex.synchronize { @url_info.select {|url,meta| meta[:state] != :alive } }.each do |url,meta|
266
264
  begin
267
- logger.debug("Running health check to see if an Elasticsearch connection is working",
268
- :healthcheck_url => url, :path => @healthcheck_path)
269
265
  health_check_request(url)
270
266
  # If no exception was raised it must have succeeded!
271
- logger.warn("Restored connection to ES instance", :url => url.sanitized.to_s)
267
+ logger.warn("Restored connection to ES instance", url: url.sanitized.to_s)
272
268
  # We reconnected to this node, check its ES version
273
269
  es_version = get_es_version(url)
274
270
  @state_mutex.synchronize do
275
271
  meta[:version] = es_version
276
- major = major_version(es_version)
277
- if !@maximum_seen_major_version
278
- @logger.info("ES Output version determined", :es_version => major)
279
- set_new_major_version(major)
280
- elsif major > @maximum_seen_major_version
281
- @logger.warn("Detected a node with a higher major version than previously observed. This could be the result of an elasticsearch cluster upgrade.", :previous_major => @maximum_seen_major_version, :new_major => major, :node_url => url.sanitized.to_s)
282
- set_new_major_version(major)
283
- end
272
+ set_last_es_version(es_version, url)
284
273
 
285
274
  alive = @license_checker.appropriate_license?(self, url)
286
275
  meta[:state] = alive ? :alive : :dead
287
276
  end
288
277
  rescue HostUnreachableError, BadResponseCodeError => e
289
- logger.warn("Attempted to resurrect connection to dead ES instance, but got an error.", url: url.sanitized.to_s, error_type: e.class, error: e.message)
278
+ logger.warn("Attempted to resurrect connection to dead ES instance, but got an error", url: url.sanitized.to_s, exception: e.class, message: e.message)
290
279
  end
291
280
  end
292
281
  end
@@ -355,9 +344,7 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
355
344
  end
356
345
 
357
346
  if state_changes[:removed].size > 0 || state_changes[:added].size > 0
358
- if logger.info?
359
- logger.info("Elasticsearch pool URLs updated", :changes => state_changes)
360
- end
347
+ logger.info? && logger.info("Elasticsearch pool URLs updated", :changes => state_changes)
361
348
  end
362
349
 
363
350
  # Run an inline healthcheck anytime URLs are updated
@@ -371,10 +358,6 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
371
358
  @state_mutex.synchronize { @url_info.size }
372
359
  end
373
360
 
374
- def es_versions
375
- @state_mutex.synchronize { @url_info.size }
376
- end
377
-
378
361
  def add_url(url)
379
362
  @url_info[url] ||= empty_url_meta
380
363
  end
@@ -459,22 +442,52 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
459
442
 
460
443
  def return_connection(url)
461
444
  @state_mutex.synchronize do
462
- if @url_info[url] # Guard against the condition where the connection has already been deleted
463
- @url_info[url][:in_use] -= 1
464
- end
445
+ info = @url_info[url]
446
+ info[:in_use] -= 1 if info # Guard against the condition where the connection has already been deleted
465
447
  end
466
448
  end
467
449
 
468
450
  def get_es_version(url)
469
451
  request = perform_request_to_url(url, :get, ROOT_URI_PATH)
470
- LogStash::Json.load(request.body)["version"]["number"]
452
+ LogStash::Json.load(request.body)["version"]["number"] # e.g. "7.10.0"
453
+ end
454
+
455
+ def last_es_version
456
+ @last_es_version.get
457
+ end
458
+
459
+ def maximum_seen_major_version
460
+ @state_mutex.synchronize { @maximum_seen_major_version }
461
+ end
462
+
463
+ private
464
+
465
+ # @private executing within @state_mutex
466
+ def set_last_es_version(version, url)
467
+ @last_es_version.set(version)
468
+
469
+ major = major_version(version)
470
+ if @maximum_seen_major_version.nil?
471
+ @logger.info("Elasticsearch version determined (#{version})", es_version: major)
472
+ set_maximum_seen_major_version(major)
473
+ elsif major > @maximum_seen_major_version
474
+ warn_on_higher_major_version(major, url)
475
+ @maximum_seen_major_version = major
476
+ end
471
477
  end
472
478
 
473
- def set_new_major_version(version)
474
- @maximum_seen_major_version = version
475
- if @maximum_seen_major_version >= 6
476
- @logger.warn("Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type", :es_version => @maximum_seen_major_version)
479
+ def set_maximum_seen_major_version(major)
480
+ if major >= 6
481
+ @logger.warn("Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type", es_version: major)
477
482
  end
483
+ @maximum_seen_major_version = major
478
484
  end
485
+
486
+ def warn_on_higher_major_version(major, url)
487
+ @logger.warn("Detected a node with a higher major version than previously observed, " +
488
+ "this could be the result of an Elasticsearch cluster upgrade",
489
+ previous_major: @maximum_seen_major_version, new_major: major, node_url: url.sanitized.to_s)
490
+ end
491
+
479
492
  end
480
493
  end; end; end; end;
@@ -4,21 +4,12 @@ module LogStash; module Outputs; class ElasticSearch
4
4
  ILM_POLICY_PATH = "default-ilm-policy.json"
5
5
 
6
6
  def setup_ilm
7
- return unless ilm_in_use?
8
7
  logger.warn("Overwriting supplied index #{@index} with rollover alias #{@ilm_rollover_alias}") unless default_index?(@index)
9
8
  @index = @ilm_rollover_alias
10
9
  maybe_create_rollover_alias
11
10
  maybe_create_ilm_policy
12
11
  end
13
12
 
14
- def default_rollover_alias?(rollover_alias)
15
- rollover_alias == default_ilm_rollover_alias
16
- end
17
-
18
- def ilm_alias_set?
19
- default_index?(@index) || !default_rollover_alias?(@ilm_rollover_alias)
20
- end
21
-
22
13
  def ilm_in_use?
23
14
  return @ilm_actually_enabled if defined?(@ilm_actually_enabled)
24
15
  @ilm_actually_enabled =
@@ -46,6 +37,12 @@ module LogStash; module Outputs; class ElasticSearch
46
37
  end
47
38
  end
48
39
 
40
+ private
41
+
42
+ def ilm_alias_set?
43
+ default_index?(@index) || !default_rollover_alias?(@ilm_rollover_alias)
44
+ end
45
+
49
46
  def ilm_on_by_default?
50
47
  maximum_seen_major_version >= 7
51
48
  end
@@ -73,19 +70,21 @@ module LogStash; module Outputs; class ElasticSearch
73
70
  end
74
71
  end
75
72
 
76
- private
77
-
78
73
  def default_index?(index)
79
74
  index == @default_index
80
75
  end
81
76
 
77
+ def default_rollover_alias?(rollover_alias)
78
+ rollover_alias == default_ilm_rollover_alias
79
+ end
80
+
82
81
  def ilm_policy_default?
83
82
  ilm_policy == LogStash::Outputs::ElasticSearch::DEFAULT_POLICY
84
83
  end
85
84
 
86
85
  def maybe_create_ilm_policy
87
86
  if ilm_policy_default?
88
- client.ilm_policy_put(ilm_policy, policy_payload) unless client.ilm_policy_exists?(ilm_policy)
87
+ client.ilm_policy_put(ilm_policy, policy_payload) unless client.ilm_policy_exists?(ilm_policy)
89
88
  else
90
89
  raise LogStash::ConfigurationError, "The specified ILM policy #{ilm_policy} does not exist on your Elasticsearch instance" unless client.ilm_policy_exists?(ilm_policy)
91
90
  end
@@ -7,41 +7,38 @@ module LogStash; module Outputs; class ElasticSearch
7
7
 
8
8
  # Figure out if the provided license is appropriate or not
9
9
  # The appropriate_license? methods is the method called from LogStash::Outputs::ElasticSearch::HttpClient::Pool#healthcheck!
10
+ # @param pool
10
11
  # @param url [LogStash::Util::SafeURI] ES node URL
11
- # @param license [Hash] ES node deserialized licence document
12
12
  # @return [Boolean] true if provided license is deemed appropriate
13
13
  def appropriate_license?(pool, url)
14
- return true if oss?
15
-
16
14
  license = pool.get_license(url)
17
- if valid_es_license?(license)
15
+ case license_status(license)
16
+ when 'active'
18
17
  true
19
- else
20
- # As this version is to be shipped with Logstash 7.x we won't mark the connection as unlicensed
21
- #
22
- # @logger.error("Cannot connect to the Elasticsearch cluster configured in the Elasticsearch output. Logstash requires the default distribution of Elasticsearch. Please update to the default distribution of Elasticsearch for full access to all free features, or switch to the OSS distribution of Logstash.", :url => url.sanitized.to_s)
23
- # meta[:state] = :unlicensed
24
- #
25
- # Instead we'll log a deprecation warning and mark it as alive:
26
- #
27
- log_license_deprecation_warn(url)
18
+ when nil
19
+ warn_no_license(url)
20
+ false
21
+ else # 'invalid', 'expired'
22
+ warn_invalid_license(url, license)
28
23
  true
29
24
  end
30
25
  end
31
26
 
32
- # Note that oss? could be private but is used by the Pool specs
33
- def oss?
34
- LogStash::OSS
27
+ def license_status(license)
28
+ license.fetch("license", {}).fetch("status", nil)
35
29
  end
36
30
 
37
- # Note that valid_es_license? could be private but is used by the Pool specs
38
- def valid_es_license?(license)
39
- license.fetch("license", {}).fetch("status", nil) == "active"
31
+ private
32
+
33
+ def warn_no_license(url)
34
+ @logger.error("Connecting to an OSS distribution of Elasticsearch is no longer supported, " +
35
+ "please upgrade to the default distribution of Elasticsearch", url: url.sanitized.to_s)
40
36
  end
41
37
 
42
- # Note that log_license_deprecation_warn could be private but is used by the Pool specs
43
- def log_license_deprecation_warn(url)
44
- @logger.warn("DEPRECATION WARNING: Connecting to an OSS distribution of Elasticsearch using the default distribution of Logstash will stop working in Logstash 8.0.0. Please upgrade to the default distribution of Elasticsearch, or use the OSS distribution of Logstash", :url => url.sanitized.to_s)
38
+ def warn_invalid_license(url, license)
39
+ @logger.warn("WARNING: Current Elasticsearch license is not active, " +
40
+ "please check Elasticsearch's licensing information", url: url.sanitized.to_s, license: license)
45
41
  end
42
+
46
43
  end
47
44
  end; end; end
@@ -13,10 +13,8 @@ module LogStash; module Outputs; class ElasticSearch
13
13
  end
14
14
 
15
15
  add_ilm_settings_to_template(plugin, template) if plugin.ilm_in_use?
16
- plugin.logger.info("Attempting to install template", :manage_template => template)
16
+ plugin.logger.debug("Attempting to install template", template: template)
17
17
  install(plugin.client, template_name(plugin), template, plugin.template_overwrite)
18
- rescue => e
19
- plugin.logger.error("Failed to install template.", :message => e.message, :class => e.class.name, :backtrace => e.backtrace)
20
18
  end
21
19
 
22
20
  private
@@ -38,7 +36,7 @@ module LogStash; module Outputs; class ElasticSearch
38
36
  template['index_patterns'] = "#{plugin.ilm_rollover_alias}-*"
39
37
  settings = template_settings(plugin, template)
40
38
  if settings && (settings['index.lifecycle.name'] || settings['index.lifecycle.rollover_alias'])
41
- plugin.logger.info("Overwriting index lifecycle name and rollover alias as ILM is enabled.")
39
+ plugin.logger.info("Overwriting index lifecycle name and rollover alias as ILM is enabled")
42
40
  end
43
41
  settings.update({ 'index.lifecycle.name' => plugin.ilm_policy, 'index.lifecycle.rollover_alias' => plugin.ilm_rollover_alias})
44
42
  end
@@ -61,7 +59,7 @@ module LogStash; module Outputs; class ElasticSearch
61
59
  end
62
60
 
63
61
  def self.read_template_file(template_path)
64
- raise ArgumentError, "Template file '#{template_path}' could not be found!" unless ::File.exists?(template_path)
62
+ raise ArgumentError, "Template file '#{template_path}' could not be found" unless ::File.exists?(template_path)
65
63
  template_data = ::IO.read(template_path)
66
64
  LogStash::Json.load(template_data)
67
65
  end