logstash-output-elasticsearch 10.8.2-java → 11.0.1-java

Sign up to get free protection for your applications and to get access to all the features.
Files changed (32):
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +25 -0
  3. data/docs/index.asciidoc +134 -23
  4. data/lib/logstash/outputs/elasticsearch.rb +137 -63
  5. data/lib/logstash/outputs/elasticsearch/data_stream_support.rb +233 -0
  6. data/lib/logstash/outputs/elasticsearch/http_client.rb +59 -21
  7. data/lib/logstash/outputs/elasticsearch/http_client/pool.rb +47 -34
  8. data/lib/logstash/outputs/elasticsearch/ilm.rb +11 -12
  9. data/lib/logstash/outputs/elasticsearch/license_checker.rb +19 -22
  10. data/lib/logstash/outputs/elasticsearch/template_manager.rb +3 -5
  11. data/lib/logstash/plugin_mixins/elasticsearch/api_configs.rb +157 -153
  12. data/lib/logstash/plugin_mixins/elasticsearch/common.rb +81 -60
  13. data/logstash-output-elasticsearch.gemspec +2 -2
  14. data/spec/es_spec_helper.rb +3 -6
  15. data/spec/integration/outputs/data_stream_spec.rb +61 -0
  16. data/spec/integration/outputs/ilm_spec.rb +22 -18
  17. data/spec/integration/outputs/ingest_pipeline_spec.rb +4 -2
  18. data/spec/integration/outputs/retry_spec.rb +14 -2
  19. data/spec/integration/outputs/sniffer_spec.rb +0 -1
  20. data/spec/spec_helper.rb +14 -0
  21. data/spec/unit/http_client_builder_spec.rb +9 -9
  22. data/spec/unit/outputs/elasticsearch/data_stream_support_spec.rb +542 -0
  23. data/spec/unit/outputs/elasticsearch/http_client/manticore_adapter_spec.rb +1 -0
  24. data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +27 -13
  25. data/spec/unit/outputs/elasticsearch/http_client_spec.rb +59 -41
  26. data/spec/unit/outputs/elasticsearch/template_manager_spec.rb +1 -3
  27. data/spec/unit/outputs/elasticsearch_proxy_spec.rb +4 -5
  28. data/spec/unit/outputs/elasticsearch_spec.rb +280 -47
  29. data/spec/unit/outputs/elasticsearch_ssl_spec.rb +1 -2
  30. data/spec/unit/outputs/error_whitelist_spec.rb +4 -3
  31. data/spec/unit/outputs/license_check_spec.rb +0 -16
  32. metadata +23 -16
@@ -5,7 +5,7 @@ module LogStash; module PluginMixins; module ElasticSearch
5
5
 
6
6
  # This module defines common methods that can be reused by alternate elasticsearch output plugins such as the elasticsearch_data_streams output.
7
7
 
8
- attr_reader :client, :hosts
8
+ attr_reader :hosts
9
9
 
10
10
  # These codes apply to documents, not at the request level
11
11
  DOC_DLQ_CODES = [400, 404]
@@ -31,7 +31,7 @@ module LogStash; module PluginMixins; module ElasticSearch
31
31
  if @proxy.eql?('')
32
32
  @logger.warn "Supplied proxy setting (proxy => '') has no effect"
33
33
  end
34
- @client ||= ::LogStash::Outputs::ElasticSearch::HttpClientBuilder.build(@logger, @hosts, params)
34
+ ::LogStash::Outputs::ElasticSearch::HttpClientBuilder.build(@logger, @hosts, params)
35
35
  end
36
36
 
37
37
  def validate_authentication
@@ -115,6 +115,15 @@ module LogStash; module PluginMixins; module ElasticSearch
115
115
  end
116
116
  private :parse_user_password_from_cloud_auth
117
117
 
118
+ # Plugin initialization extension point (after a successful ES connection).
119
+ def finish_register
120
+ end
121
+ protected :finish_register
122
+
123
+ def last_es_version
124
+ client.last_es_version
125
+ end
126
+
118
127
  def maximum_seen_major_version
119
128
  client.maximum_seen_major_version
120
129
  end
@@ -126,25 +135,24 @@ module LogStash; module PluginMixins; module ElasticSearch
126
135
  # launch a thread that waits for an initial successful connection to the ES cluster to call the given block
127
136
  # @param block [Proc] the block to execute upon initial successful connection
128
137
  # @return [Thread] the successful connection wait thread
129
- def setup_after_successful_connection(&block)
138
+ def after_successful_connection(&block)
130
139
  Thread.new do
131
140
  sleep_interval = @retry_initial_interval
132
141
  until successful_connection? || @stopping.true?
133
- @logger.debug("Waiting for connectivity to Elasticsearch cluster. Retrying in #{sleep_interval}s")
134
- Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
135
- sleep_interval = next_sleep_interval(sleep_interval)
142
+ @logger.debug("Waiting for connectivity to Elasticsearch cluster, retrying in #{sleep_interval}s")
143
+ sleep_interval = sleep_for_interval(sleep_interval)
136
144
  end
137
145
  block.call if successful_connection?
138
146
  end
139
147
  end
148
+ private :after_successful_connection
140
149
 
141
150
  def discover_cluster_uuid
142
151
  return unless defined?(plugin_metadata)
143
152
  cluster_info = client.get('/')
144
153
  plugin_metadata.set(:cluster_uuid, cluster_info['cluster_uuid'])
145
154
  rescue => e
146
- # TODO introducing this logging message breaks many tests that need refactoring
147
- # @logger.error("Unable to retrieve elasticsearch cluster uuid", error => e.message)
155
+ @logger.error("Unable to retrieve Elasticsearch cluster uuid", message: e.message, exception: e.class, backtrace: e.backtrace)
148
156
  end
149
157
 
150
158
  def retrying_submit(actions)
@@ -159,13 +167,11 @@ module LogStash; module PluginMixins; module ElasticSearch
159
167
  begin
160
168
  submit_actions = submit(submit_actions)
161
169
  if submit_actions && submit_actions.size > 0
162
- @logger.info("Retrying individual bulk actions that failed or were rejected by the previous bulk request.", :count => submit_actions.size)
170
+ @logger.info("Retrying individual bulk actions that failed or were rejected by the previous bulk request", count: submit_actions.size)
163
171
  end
164
172
  rescue => e
165
- @logger.error("Encountered an unexpected error submitting a bulk request! Will retry.",
166
- :error_message => e.message,
167
- :class => e.class.name,
168
- :backtrace => e.backtrace)
173
+ @logger.error("Encountered an unexpected error submitting a bulk request, will retry",
174
+ message: e.message, exception: e.class, backtrace: e.backtrace)
169
175
  end
170
176
 
171
177
  # Everything was a success!
@@ -173,21 +179,42 @@ module LogStash; module PluginMixins; module ElasticSearch
173
179
 
174
180
  # If we're retrying the action sleep for the recommended interval
175
181
  # Double the interval for the next time through to achieve exponential backoff
176
- Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
177
- sleep_interval = next_sleep_interval(sleep_interval)
182
+ sleep_interval = sleep_for_interval(sleep_interval)
178
183
  end
179
184
  end
180
185
 
181
186
  def sleep_for_interval(sleep_interval)
182
- Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
187
+ stoppable_sleep(sleep_interval)
183
188
  next_sleep_interval(sleep_interval)
184
189
  end
185
190
 
191
+ def stoppable_sleep(interval)
192
+ Stud.stoppable_sleep(interval) { @stopping.true? }
193
+ end
194
+
186
195
  def next_sleep_interval(current_interval)
187
196
  doubled = current_interval * 2
188
197
  doubled > @retry_max_interval ? @retry_max_interval : doubled
189
198
  end
190
199
 
200
+ def handle_dlq_status(message, action, status, response)
201
+ # To support bwc, we check if DLQ exists. otherwise we log and drop event (previous behavior)
202
+ if @dlq_writer
203
+ event, action = action.event, [action[0], action[1], action[2]]
204
+ # TODO: Change this to send a map with { :status => status, :action => action } in the future
205
+ @dlq_writer.write(event, "#{message} status: #{status}, action: #{action}, response: #{response}")
206
+ else
207
+ if dig_value(response, 'index', 'error', 'type') == 'invalid_index_name_exception'
208
+ level = :error
209
+ else
210
+ level = :warn
211
+ end
212
+ @logger.send level, message, status: status, action: action, response: response
213
+ end
214
+ end
215
+
216
+ private
217
+
191
218
  def submit(actions)
192
219
  bulk_response = safe_bulk(actions)
193
220
 
@@ -204,12 +231,20 @@ module LogStash; module PluginMixins; module ElasticSearch
204
231
  return
205
232
  end
206
233
 
234
+ responses = bulk_response["items"]
235
+ if responses.size != actions.size # can not map action -> response reliably
236
+ # an ES bug (on 7.10.2, 7.11.1) where a _bulk request to index X documents would return Y (> X) items
237
+ msg = "Sent #{actions.size} documents but Elasticsearch returned #{responses.size} responses"
238
+ @logger.warn(msg, actions: actions, responses: responses)
239
+ fail("#{msg} (likely a bug with _bulk endpoint)")
240
+ end
241
+
207
242
  actions_to_retry = []
208
- bulk_response["items"].each_with_index do |response,idx|
243
+ responses.each_with_index do |response,idx|
209
244
  action_type, action_props = response.first
210
245
 
211
246
  status = action_props["status"]
212
- failure = action_props["error"]
247
+ error = action_props["error"]
213
248
  action = actions[idx]
214
249
  action_params = action[1]
215
250
 
@@ -222,7 +257,7 @@ module LogStash; module PluginMixins; module ElasticSearch
222
257
  next
223
258
  elsif DOC_CONFLICT_CODE == status
224
259
  @document_level_metrics.increment(:non_retryable_failures)
225
- @logger.warn "Failed action.", status: status, action: action, response: response if !failure_type_logging_whitelist.include?(failure["type"])
260
+ @logger.warn "Failed action", status: status, action: action, response: response if log_failure_type?(error)
226
261
  next
227
262
  elsif DOC_DLQ_CODES.include?(status)
228
263
  handle_dlq_status("Could not index event to Elasticsearch.", action, status, response)
@@ -231,7 +266,7 @@ module LogStash; module PluginMixins; module ElasticSearch
231
266
  else
232
267
  # only log what the user whitelisted
233
268
  @document_level_metrics.increment(:retryable_failures)
234
- @logger.info "retrying failed action with response code: #{status} (#{failure})" if !failure_type_logging_whitelist.include?(failure["type"])
269
+ @logger.info "Retrying failed action", status: status, action: action, error: error if log_failure_type?(error)
235
270
  actions_to_retry << action
236
271
  end
237
272
  end
@@ -239,40 +274,25 @@ module LogStash; module PluginMixins; module ElasticSearch
239
274
  actions_to_retry
240
275
  end
241
276
 
242
- def handle_dlq_status(message, action, status, response)
243
- # To support bwc, we check if DLQ exists. otherwise we log and drop event (previous behavior)
244
- if @dlq_writer
245
- # TODO: Change this to send a map with { :status => status, :action => action } in the future
246
- @dlq_writer.write(action[2], "#{message} status: #{status}, action: #{action}, response: #{response}")
247
- else
248
- error_type = response.fetch('index', {}).fetch('error', {})['type']
249
- if 'invalid_index_name_exception' == error_type
250
- level = :error
251
- else
252
- level = :warn
253
- end
254
- @logger.send level, message, status: status, action: action, response: response
255
- end
277
+ def log_failure_type?(failure)
278
+ !failure_type_logging_whitelist.include?(failure["type"])
256
279
  end
257
280
 
258
281
  # Rescue retryable errors during bulk submission
282
+ # @param actions a [action, params, event.to_hash] tuple
283
+ # @return response [Hash] which contains 'errors' and processed 'items' entries
259
284
  def safe_bulk(actions)
260
285
  sleep_interval = @retry_initial_interval
261
286
  begin
262
- es_actions = actions.map {|action_type, params, event| [action_type, params, event.to_hash]}
263
- response = @client.bulk(es_actions)
264
- response
287
+ @client.bulk(actions) # returns { 'errors': ..., 'items': ... }
265
288
  rescue ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError => e
266
289
  # If we can't even connect to the server let's just print out the URL (:hosts is actually a URL)
267
290
  # and let the user sort it out from there
268
291
  @logger.error(
269
- "Attempted to send a bulk request to elasticsearch'"+
270
- " but Elasticsearch appears to be unreachable or down!",
271
- :error_message => e.message,
272
- :class => e.class.name,
273
- :will_retry_in_seconds => sleep_interval
292
+ "Attempted to send a bulk request but Elasticsearch appears to be unreachable or down",
293
+ message: e.message, exception: e.class, will_retry_in_seconds: sleep_interval
274
294
  )
275
- @logger.debug("Failed actions for last bad bulk request!", :actions => actions)
295
+ @logger.debug? && @logger.debug("Failed actions for last bad bulk request", :actions => actions)
276
296
 
277
297
  # We retry until there are no errors! Errors should all go to the retry queue
278
298
  sleep_interval = sleep_for_interval(sleep_interval)
@@ -280,20 +300,19 @@ module LogStash; module PluginMixins; module ElasticSearch
280
300
  retry unless @stopping.true?
281
301
  rescue ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::NoConnectionAvailableError => e
282
302
  @logger.error(
283
- "Attempted to send a bulk request to elasticsearch, but no there are no living connections in the connection pool. Perhaps Elasticsearch is unreachable or down?",
284
- :error_message => e.message,
285
- :class => e.class.name,
286
- :will_retry_in_seconds => sleep_interval
303
+ "Attempted to send a bulk request but there are no living connections in the pool " +
304
+ "(perhaps Elasticsearch is unreachable or down?)",
305
+ message: e.message, exception: e.class, will_retry_in_seconds: sleep_interval
287
306
  )
288
- Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
289
- sleep_interval = next_sleep_interval(sleep_interval)
307
+
308
+ sleep_interval = sleep_for_interval(sleep_interval)
290
309
  @bulk_request_metrics.increment(:failures)
291
310
  retry unless @stopping.true?
292
311
  rescue ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError => e
293
312
  @bulk_request_metrics.increment(:failures)
294
- log_hash = {:code => e.response_code, :url => e.url.sanitized.to_s}
313
+ log_hash = {:code => e.response_code, :url => e.url.sanitized.to_s, :content_length => e.request_body.bytesize}
295
314
  log_hash[:body] = e.response_body if @logger.debug? # Generally this is too verbose
296
- message = "Encountered a retryable error. Will Retry with exponential backoff "
315
+ message = "Encountered a retryable error (will retry with exponential backoff)"
297
316
 
298
317
  # We treat 429s as a special case because these really aren't errors, but
299
318
  # rather just ES telling us to back off a bit, which we do.
@@ -307,17 +326,12 @@ module LogStash; module PluginMixins; module ElasticSearch
307
326
 
308
327
  sleep_interval = sleep_for_interval(sleep_interval)
309
328
  retry
310
- rescue => e
311
- # Stuff that should never happen
312
- # For all other errors print out full connection issues
329
+ rescue => e # Stuff that should never happen - print out full connection issues
313
330
  @logger.error(
314
- "An unknown error occurred sending a bulk request to Elasticsearch. We will retry indefinitely",
315
- :error_message => e.message,
316
- :error_class => e.class.name,
317
- :backtrace => e.backtrace
331
+ "An unknown error occurred sending a bulk request to Elasticsearch (will retry indefinitely)",
332
+ message: e.message, exception: e.class, backtrace: e.backtrace
318
333
  )
319
-
320
- @logger.debug("Failed actions for last bad bulk request!", :actions => actions)
334
+ @logger.debug? && @logger.debug("Failed actions for last bad bulk request", :actions => actions)
321
335
 
322
336
  sleep_interval = sleep_for_interval(sleep_interval)
323
337
  @bulk_request_metrics.increment(:failures)
@@ -331,5 +345,12 @@ module LogStash; module PluginMixins; module ElasticSearch
331
345
  respond_to?(:execution_context) && execution_context.respond_to?(:dlq_writer) &&
332
346
  !execution_context.dlq_writer.inner_writer.is_a?(::LogStash::Util::DummyDeadLetterQueueWriter)
333
347
  end
348
+
349
+ def dig_value(val, first_key, *rest_keys)
350
+ fail(TypeError, "cannot dig value from #{val.class}") unless val.kind_of?(Hash)
351
+ val = val[first_key]
352
+ return val if rest_keys.empty? || val == nil
353
+ dig_value(val, *rest_keys)
354
+ end
334
355
  end
335
356
  end; end; end
@@ -1,6 +1,6 @@
1
1
  Gem::Specification.new do |s|
2
2
  s.name = 'logstash-output-elasticsearch'
3
- s.version = '10.8.2'
3
+ s.version = '11.0.1'
4
4
 
5
5
  s.licenses = ['apache-2.0']
6
6
  s.summary = "Stores logs in Elasticsearch"
@@ -23,13 +23,13 @@ Gem::Specification.new do |s|
23
23
 
24
24
  s.add_runtime_dependency "manticore", '>= 0.5.4', '< 1.0.0'
25
25
  s.add_runtime_dependency 'stud', ['>= 0.0.17', '~> 0.0']
26
- s.add_runtime_dependency 'cabin', ['~> 0.6']
27
26
  s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
28
27
  s.add_runtime_dependency 'logstash-mixin-ecs_compatibility_support', '~>1.0'
29
28
 
30
29
  s.add_development_dependency 'logstash-codec-plain'
31
30
  s.add_development_dependency 'logstash-devutils'
32
31
  s.add_development_dependency 'flores'
32
+ s.add_development_dependency 'cabin', ['~> 0.6']
33
33
  # Still used in some specs, we should remove this ASAP
34
34
  s.add_development_dependency 'elasticsearch'
35
35
  end
@@ -1,5 +1,5 @@
1
- require "logstash/devutils/rspec/spec_helper"
2
- require 'manticore'
1
+ require_relative './spec_helper'
2
+
3
3
  require 'elasticsearch'
4
4
  require_relative "support/elasticsearch/api/actions/delete_ilm_policy"
5
5
  require_relative "support/elasticsearch/api/actions/get_alias"
@@ -8,10 +8,7 @@ require_relative "support/elasticsearch/api/actions/get_ilm_policy"
8
8
  require_relative "support/elasticsearch/api/actions/put_ilm_policy"
9
9
 
10
10
  require 'json'
11
-
12
- unless defined?(LogStash::OSS)
13
- LogStash::OSS = ENV['DISTRIBUTION'] != "default"
14
- end
11
+ require 'cabin'
15
12
 
16
13
  module ESHelper
17
14
  def get_host_port
@@ -0,0 +1,61 @@
1
+ require_relative "../../../spec/es_spec_helper"
2
+ require "logstash/outputs/elasticsearch"
3
+
4
+ describe "data streams", :integration => true do
5
+
6
+ let(:ds_name) { "logs-#{ds_dataset}-default" }
7
+ let(:ds_dataset) { 'integration_test' }
8
+
9
+ let(:options) do
10
+ { "data_stream" => 'true', "data_stream_dataset" => ds_dataset, "hosts" => get_host_port() }
11
+ end
12
+
13
+ subject { LogStash::Outputs::ElasticSearch.new(options) }
14
+
15
+ before :each do
16
+ @es = get_client
17
+ @es.delete_by_query(index: ".ds-#{ds_name}-*", expand_wildcards: :all, body: { query: { match_all: {} } }) rescue nil
18
+
19
+ es_version = @es.info['version']['number']
20
+ if Gem::Version.create(es_version) < Gem::Version.create('7.9.0')
21
+ skip "ES version #{es_version} does not support data-streams"
22
+ end
23
+ end
24
+
25
+ it "creates a new document" do
26
+ subject.register
27
+ subject.multi_receive([LogStash::Event.new("message" => "MSG 111")])
28
+
29
+ @es.indices.refresh
30
+
31
+ Stud::try(3.times) do
32
+ r = @es.search(index: ds_name)
33
+
34
+ expect( r['hits']['total']['value'] ).to eq 1
35
+ doc = r['hits']['hits'].first
36
+ expect( doc['_source'] ).to include "message"=>"MSG 111"
37
+ expect( doc['_source'] ).to include "data_stream"=>{"dataset"=>ds_dataset, "type"=>"logs", "namespace"=>"default"}
38
+ end
39
+ end
40
+
41
+ context "with document_id" do
42
+
43
+ let(:document_id) { '1234567890' }
44
+ let(:options) { super().merge("document_id" => document_id) }
45
+
46
+ it "creates a new document" do
47
+ subject.register
48
+ subject.multi_receive([LogStash::Event.new("message" => "foo")])
49
+
50
+ @es.indices.refresh
51
+
52
+ Stud::try(3.times) do
53
+ r = @es.search(index: ds_name, body: { query: { match: { _id: document_id } } })
54
+ expect( r['hits']['total']['value'] ).to eq 1
55
+ doc = r['hits']['hits'].first
56
+ expect( doc['_source'] ).to include "message"=>"foo"
57
+ end
58
+ end
59
+
60
+ end
61
+ end
@@ -5,7 +5,7 @@ shared_examples_for 'an ILM enabled Logstash' do
5
5
  context 'with a policy with a maximum number of documents' do
6
6
  let (:policy) { small_max_doc_policy }
7
7
  let (:ilm_policy_name) { "logstash-policy-custom"}
8
- let (:settings) { super.merge("ilm_policy" => ilm_policy_name)}
8
+ let (:settings) { super().merge("ilm_policy" => ilm_policy_name)}
9
9
 
10
10
  it 'should rollover when the policy max docs is reached' do
11
11
  put_policy(@es, ilm_policy_name, policy)
@@ -54,7 +54,7 @@ shared_examples_for 'an ILM enabled Logstash' do
54
54
  context 'with a policy where the maximum number of documents is not reached' do
55
55
  let (:policy) { large_max_doc_policy }
56
56
  let (:ilm_policy_name) { "logstash-policy-custom-policy"}
57
- let (:settings) { super.merge("ilm_policy" => ilm_policy_name)}
57
+ let (:settings) { super().merge("ilm_policy" => ilm_policy_name)}
58
58
 
59
59
  it 'should ingest into a single index when max docs is not reached' do
60
60
  put_policy(@es,ilm_policy_name, policy)
@@ -119,7 +119,7 @@ shared_examples_for 'an ILM disabled Logstash' do
119
119
  context 'with an existing policy that will roll over' do
120
120
  let (:policy) { small_max_doc_policy }
121
121
  let (:ilm_policy_name) { "logstash-policy-3_docs"}
122
- let (:settings) { super.merge("ilm_policy" => ilm_policy_name)}
122
+ let (:settings) { super().merge("ilm_policy" => ilm_policy_name)}
123
123
 
124
124
  it 'should not roll over indices' do
125
125
  subject.register
@@ -155,7 +155,7 @@ shared_examples_for 'an ILM disabled Logstash' do
155
155
 
156
156
  context 'with a custom template name' do
157
157
  let (:template_name) { "logstash_custom_template_name" }
158
- let (:settings) { super.merge('template_name' => template_name)}
158
+ let (:settings) { super().merge('template_name' => template_name)}
159
159
 
160
160
  it 'should not write the ILM settings into the template' do
161
161
  subject.register
@@ -195,28 +195,32 @@ shared_examples_for 'an Elasticsearch instance that does not support index lifec
195
195
  subject { LogStash::Outputs::ElasticSearch.new(settings) }
196
196
 
197
197
  context 'when ilm is enabled in Logstash' do
198
- let (:settings) { super.merge!({ 'ilm_enabled' => true }) }
198
+ let (:settings) { super().merge!({ 'ilm_enabled' => true }) }
199
199
 
200
200
  it 'should raise a configuration error' do
201
+ # TODO should be refactored not to rely on plugin internals
202
+ finish_register = subject.method(:finish_register)
203
+ expect(subject).to receive(:finish_register)
201
204
  expect do
202
205
  begin
203
206
  subject.register
204
- sleep(1)
207
+ finish_register.call
208
+ sleep(1.5) # wait_for_successful_connection (for the thread to raise)
205
209
  ensure
206
- subject.stop_template_installer
210
+ subject.send :stop_after_successful_connection_thread
207
211
  end
208
212
  end.to raise_error(LogStash::ConfigurationError)
209
213
  end
210
214
  end
211
215
 
212
216
  context 'when ilm is disabled in Logstash' do
213
- let (:settings) { super.merge!({ 'ilm_enabled' => false }) }
217
+ let (:settings) { super().merge!({ 'ilm_enabled' => false }) }
214
218
 
215
219
  it_behaves_like 'an ILM disabled Logstash'
216
220
  end
217
221
 
218
222
  context 'when ilm is set to auto in Logstash' do
219
- let (:settings) { super.merge!({ 'ilm_enabled' => 'auto' }) }
223
+ let (:settings) { super().merge!({ 'ilm_enabled' => 'auto' }) }
220
224
 
221
225
  it_behaves_like 'an ILM disabled Logstash'
222
226
  end
@@ -286,7 +290,7 @@ if ESHelper.es_version_satisfies?(">= 6.6")
286
290
 
287
291
  context 'when using the default policy' do
288
292
  context 'with a custom pattern' do
289
- let (:settings) { super.merge("ilm_pattern" => "000001")}
293
+ let (:settings) { super().merge("ilm_pattern" => "000001")}
290
294
  it 'should create a rollover alias' do
291
295
  expect(@es.indices.exists_alias(name: "logstash")).to be_falsey
292
296
  subject.register
@@ -346,7 +350,7 @@ if ESHelper.es_version_satisfies?(">= 6.6")
346
350
 
347
351
  context 'when not using the default policy' do
348
352
  let (:ilm_policy_name) {"logstash-policy-small"}
349
- let (:settings) { super.merge("ilm_policy" => ilm_policy_name)}
353
+ let (:settings) { super().merge("ilm_policy" => ilm_policy_name)}
350
354
  let (:policy) { small_max_doc_policy }
351
355
 
352
356
  before do
@@ -363,7 +367,7 @@ if ESHelper.es_version_satisfies?(">= 6.6")
363
367
 
364
368
  context 'when using a time based policy' do
365
369
  let (:ilm_policy_name) {"logstash-policy-time"}
366
- let (:settings) { super.merge("ilm_policy" => ilm_policy_name)}
370
+ let (:settings) { super().merge("ilm_policy" => ilm_policy_name)}
367
371
  let (:policy) { max_age_policy("1d") }
368
372
 
369
373
  before do
@@ -409,7 +413,7 @@ if ESHelper.es_version_satisfies?(">= 6.6")
409
413
  let (:template) { "spec/fixtures/template-with-policy-es6x.json" }
410
414
  end
411
415
 
412
- let (:settings) { super.merge("template" => template,
416
+ let (:settings) { super().merge("template" => template,
413
417
  "index" => "overwrite-4")}
414
418
 
415
419
  it 'should not overwrite the index patterns' do
@@ -426,7 +430,7 @@ if ESHelper.es_version_satisfies?(">= 6.6")
426
430
  let (:ilm_rollover_alias) { "logstash_the_cat_in_the_hat" }
427
431
  let (:index) { ilm_rollover_alias }
428
432
  let(:expected_index) { index }
429
- let (:settings) { super.merge("ilm_policy" => ilm_policy_name,
433
+ let (:settings) { super().merge("ilm_policy" => ilm_policy_name,
430
434
  "template" => template,
431
435
  "ilm_rollover_alias" => ilm_rollover_alias)}
432
436
 
@@ -480,7 +484,7 @@ if ESHelper.es_version_satisfies?(">= 6.6")
480
484
 
481
485
  context 'with a different template_name' do
482
486
  let (:template_name) { "logstash_custom_template_name" }
483
- let (:settings) { super.merge('template_name' => template_name)}
487
+ let (:settings) { super().merge('template_name' => template_name)}
484
488
 
485
489
  it_behaves_like 'an ILM enabled Logstash'
486
490
 
@@ -514,7 +518,7 @@ if ESHelper.es_version_satisfies?(">= 6.6")
514
518
  end
515
519
 
516
520
  context 'when ilm_enabled is the default' do
517
- let (:settings) { super.tap{|x|x.delete('ilm_enabled')}}
521
+ let (:settings) { super().tap{|x|x.delete('ilm_enabled')}}
518
522
 
519
523
  if ESHelper.es_version_satisfies?(">=7.0")
520
524
  context 'when Elasticsearch is version 7 or above' do
@@ -530,13 +534,13 @@ if ESHelper.es_version_satisfies?(">= 6.6")
530
534
  end
531
535
 
532
536
  context 'with ilm disabled' do
533
- let (:settings) { super.merge('ilm_enabled' => false )}
537
+ let (:settings) { super().merge('ilm_enabled' => false )}
534
538
 
535
539
  it_behaves_like 'an ILM disabled Logstash'
536
540
  end
537
541
 
538
542
  context 'with ilm disabled using a string' do
539
- let (:settings) { super.merge('ilm_enabled' => 'false' )}
543
+ let (:settings) { super().merge('ilm_enabled' => 'false' )}
540
544
 
541
545
  it_behaves_like 'an ILM disabled Logstash'
542
546
  end