logstash-output-elasticsearch 9.0.3-java → 9.1.1-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: ae7148b3b08c44548f172bc06c88ff48b94809ef756aeef21d4597efb2d50716
-  data.tar.gz: a6c62363e0d1f953cfe9ee9ef8ceed192f7d54f0e3421f2c403f1c110a11a1ed
+  metadata.gz: 8c72ac7c8b2bd9eb64be1be449a402d64d9808c33d4ad8c723c567d23704a4ae
+  data.tar.gz: e327128618734f8783150fb9e8d347e9fd56b5911ffadec3d042a29fc9f7ffc8
 SHA512:
-  metadata.gz: 420c2f66334764c1693936ca462cb45e8b196dec449ae7521373ba44c5521e6f82b76f9d3ac02a950dc597149b671701f16e2226f0070a8196c8946a2bdc9f92
-  data.tar.gz: 0f30be7f309a6734f6f0e050b8ff725db3ce71a7ce0c617997f35b0ec013410c7ca56991c59978ec147da8d87cb598d214272e7325ce84145257341fdfd4568c
+  metadata.gz: fa5f19f2277c551466016c91212ed04fa18a3d80d81dea618618c859f2c274f4fd673243c2719450401b7e861f3a197e1a66c8731eb9d8f95d61e9cc68730959
+  data.tar.gz: 393208c3c8911549d05ed94ba39471c982fc997858e22d54b5493b4a2fc008e01173cafbc8ea9e602a984cd41ac0012b555ab24817f0dd0d5dafaf95b468ce5f
@@ -1,3 +1,11 @@
+## 9.1.1
+  - Docs: Set the default_codec doc attribute.
+
+## 9.1.0
+  - Set number_of_shards to 1 and document_type to '_doc' for es 7.x clusters #741 #747
+  - Fix usage of upsert and script when update action is interpolated #239
+  - Add metrics to track bulk level and document level responses #585
+
 ## 9.0.3
   - Ignore master-only nodes when using sniffing
 
@@ -1,5 +1,6 @@
 :plugin: elasticsearch
 :type: output
+:default_codec: plain
 
 ///////////////////////////////////////////
 START - GENERATED VARIABLES, DO NOT EDIT!
@@ -105,8 +106,8 @@ not reevaluate its DNS value while the keepalive is in effect.
 
 This plugin supports request and response compression. Response compression is enabled by default and
 for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
-it to send back compressed response. For versions before 5.0, `http.compression` must be set to `true` in
-Elasticsearch[https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http] to take advantage of response compression when using this plugin
+it to send back compressed response. For versions before 5.0, `http.compression` must be set to `true` https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[in
+Elasticsearch] to take advantage of response compression when using this plugin
 
 For requests compression, regardless of the Elasticsearch version, users have to enable `http_compression`
 setting in their Logstash config file.
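
For request compression, a minimal configuration sketch (the `http_compression` option is registered later in this diff with a default of `false`; the hosts value is a placeholder):

    output {
      elasticsearch {
        hosts => ["localhost:9200"]   # placeholder host
        http_compression => true      # defaults to false
      }
    }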
@@ -238,7 +239,7 @@ Elasticsearch with the same ID.
 * There is no default value for this setting.
 * This option is deprecated
 
-Note: This option is deprecated due to the https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html[removal of types in Logstash 6.0].
+Note: This option is deprecated due to the https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html[removal of types in Elasticsearch 6.0].
 It will be removed in the next major version of Logstash.
 This sets the document type to write events to. Generally you should try to write only
 similar events to the same 'type'. String expansion `%{foo}` works here.
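
While it remains available, the deprecated option accepts per-event string expansion as noted above; a sketch, with a made-up metadata field:

    output {
      elasticsearch {
        # "%{[@metadata][doc_type]}" is an illustrative field reference, not a plugin default
        document_type => "%{[@metadata][doc_type]}"
      }
    }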
@@ -476,6 +477,14 @@ This can be dynamic using the `%{foo}` syntax.
 
 Set script name for scripted update mode
 
+Example:
+[source,ruby]
+    output {
+      elasticsearch {
+        script => "ctx._source.message = params.event.get('message')"
+      }
+    }
+
 [id="plugins-{type}s-{plugin}-script_lang"]
 ===== `script_lang`
 
@@ -494,7 +503,7 @@ When using indexed (stored) scripts on Elasticsearch 6 and higher, you must set
 Define the type of script referenced by "script" variable
  inline : "script" contains inline script
  indexed : "script" contains the name of script directly indexed in elasticsearch
-file : "script" contains the name of script stored in elasticseach's config directory
+file : "script" contains the name of script stored in elasticsearch's config directory
 
 [id="plugins-{type}s-{plugin}-script_var_name"]
 ===== `script_var_name`
@@ -682,3 +691,5 @@ See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-in
 
 [id="plugins-{type}s-{plugin}-common-options"]
 include::{include_path}/{type}.asciidoc[]
+
+:default_codec!:
@@ -227,6 +227,7 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   config :http_compression, :validate => :boolean, :default => false
 
   def build_client
+    params["metric"] = metric
     @client ||= ::LogStash::Outputs::ElasticSearch::HttpClientBuilder.build(@logger, @hosts, params)
   end
 
@@ -10,9 +10,9 @@ module LogStash; module Outputs; class ElasticSearch;
   DOC_CONFLICT_CODE = 409
 
   # When you use external versioning, you are communicating that you want
-  # to ignore conflicts. More obviously, since an external version is a
+  # to ignore conflicts. More obviously, since an external version is a
   # constant part of the incoming document, we should not retry, as retrying
-  # will never succeed.
+  # will never succeed.
   VERSION_TYPES_PERMITTING_CONFLICT = ["external", "external_gt", "external_gte"]
 
   def register
@@ -25,6 +25,8 @@ module LogStash; module Outputs; class ElasticSearch;
 
     install_template
     check_action_validity
+    @bulk_request_metrics = metric.namespace(:bulk_requests)
+    @document_level_metrics = metric.namespace(:documents)
 
     @logger.info("New Elasticsearch output", :class => self.class.name, :hosts => @hosts.map(&:sanitized).map(&:to_s))
   end
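
The two namespaces registered above hold the counters incremented throughout the rest of this diff. A rough sketch of the layout (the `batch_size` variable is illustrative; the counter names are taken from the code below):

    # bulk_requests tracks whole bulk calls: :successes, :with_errors, :failures,
    # plus per-HTTP-code counts under its :responses sub-namespace.
    bulk_request_metrics = metric.namespace(:bulk_requests)
    bulk_request_metrics.increment(:successes)

    # documents tracks individual actions: :successes, :retryable_failures,
    # :non_retryable_failures.
    document_level_metrics = metric.namespace(:documents)
    document_level_metrics.increment(:successes, batch_size)

Once a pipeline is running, these counters surface alongside the plugin's other stats in Logstash's monitoring API.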
@@ -36,8 +38,45 @@ module LogStash; module Outputs; class ElasticSearch;
 
   # Convert the event into a 3-tuple of action, params, and event
   def event_action_tuple(event)
-    params = event_action_params(event)
+
     action = event.sprintf(@action)
+
+    params = {
+      :_id => @document_id ? event.sprintf(@document_id) : nil,
+      :_index => event.sprintf(@index),
+      :_type => get_event_type(event),
+      :_routing => @routing ? event.sprintf(@routing) : nil
+    }
+
+    if @pipeline
+      params[:pipeline] = event.sprintf(@pipeline)
+    end
+
+    if @parent
+      if @join_field
+        join_value = event.get(@join_field)
+        parent_value = event.sprintf(@parent)
+        event.set(@join_field, { "name" => join_value, "parent" => parent_value })
+        params[:_routing] = event.sprintf(@parent)
+      else
+        params[:parent] = event.sprintf(@parent)
+      end
+    end
+
+    if action == 'update'
+      params[:_upsert] = LogStash::Json.load(event.sprintf(@upsert)) if @upsert != ""
+      params[:_script] = event.sprintf(@script) if @script != ""
+      params[:_retry_on_conflict] = @retry_on_conflict
+    end
+
+    if @version
+      params[:version] = event.sprintf(@version)
+    end
+
+    if @version_type
+      params[:version_type] = event.sprintf(@version_type)
+    end
+
     [action, params, event]
   end
 
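Inlining the params construction here is what fixes #239: the update-only parameters are now keyed off the interpolated `action` rather than the raw `@action` template. A sketch of the observable difference (the event and option values are made up; the specs near the end of this diff assert the same behavior):

    # Given action => "%{myactionfield}" and retry_on_conflict configured:
    event = LogStash::Event.new("myactionfield" => "update", "message" => "blah")
    action, params, _ = plugin.event_action_tuple(event)
    action                       # => "update"
    params[:_retry_on_conflict]  # now populated; previously skipped because the
                                 # literal template "%{myactionfield}" != 'update'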
@@ -119,8 +158,16 @@ module LogStash; module Outputs; class ElasticSearch;
 
     # If the response is nil that means we were in a retry loop
     # and aborted since we're shutting down
+    return if bulk_response.nil?
+
     # If it did return and there are no errors we're good as well
-    return if bulk_response.nil? || !bulk_response["errors"]
+    if bulk_response["errors"]
+      @bulk_request_metrics.increment(:with_errors)
+    else
+      @bulk_request_metrics.increment(:successes)
+      @document_level_metrics.increment(:successes, actions.size)
+      return
+    end
 
     actions_to_retry = []
     bulk_response["items"].each_with_index do |response,idx|
@@ -136,8 +183,10 @@ module LogStash; module Outputs; class ElasticSearch;
       # - For a mapping error, we send to dead letter queue for a human to intervene at a later point.
       # - For everything else there's mastercard. Yep, and we retry indefinitely. This should fix #572 and other transient network issues
       if DOC_SUCCESS_CODES.include?(status)
+        @document_level_metrics.increment(:successes)
         next
       elsif DOC_CONFLICT_CODE == status
+        @document_level_metrics.increment(:non_retryable_failures)
         @logger.warn "Failed action.", status: status, action: action, response: response if !failure_type_logging_whitelist.include?(failure["type"])
         next
       elsif DOC_DLQ_CODES.include?(status)
@@ -149,9 +198,11 @@ module LogStash; module Outputs; class ElasticSearch;
       else
         @logger.warn "Could not index event to Elasticsearch.", status: status, action: action, response: response
       end
+      @document_level_metrics.increment(:non_retryable_failures)
       next
     else
       # only log what the user whitelisted
+      @document_level_metrics.increment(:retryable_failures)
       @logger.info "retrying failed action with response code: #{status} (#{failure})" if !failure_type_logging_whitelist.include?(failure["type"])
       actions_to_retry << action
     end
@@ -160,60 +211,20 @@ module LogStash; module Outputs; class ElasticSearch;
     actions_to_retry
   end
 
-  # get the action parameters for the given event
-  def event_action_params(event)
-    type = get_event_type(event)
-
-    params = {
-      :_id => @document_id ? event.sprintf(@document_id) : nil,
-      :_index => event.sprintf(@index),
-      :_type => type,
-      :_routing => @routing ? event.sprintf(@routing) : nil
-    }
-
-    if @pipeline
-      params[:pipeline] = event.sprintf(@pipeline)
-    end
-
-    if @parent
-      if @join_field
-        join_value = event.get(@join_field)
-        parent_value = event.sprintf(@parent)
-        event.set(@join_field, { "name" => join_value, "parent" => parent_value })
-        params[:_routing] = event.sprintf(@parent)
-      else
-        params[:parent] = event.sprintf(@parent)
-      end
-    end
-
-    if @action == 'update'
-      params[:_upsert] = LogStash::Json.load(event.sprintf(@upsert)) if @upsert != ""
-      params[:_script] = event.sprintf(@script) if @script != ""
-      params[:_retry_on_conflict] = @retry_on_conflict
-    end
-
-    if @version
-      params[:version] = event.sprintf(@version)
-    end
-
-    if @version_type
-      params[:version_type] = event.sprintf(@version_type)
-    end
-
-    params
-  end
-
   # Determine the correct value for the 'type' field for the given event
-  DEFAULT_EVENT_TYPE="doc".freeze
+  DEFAULT_EVENT_TYPE_ES6="doc".freeze
+  DEFAULT_EVENT_TYPE_ES7="_doc".freeze
   def get_event_type(event)
     # Set the 'type' value for the index.
     type = if @document_type
       event.sprintf(@document_type)
     else
       if client.maximum_seen_major_version < 6
-        event.get("type") || DEFAULT_EVENT_TYPE
+        event.get("type") || DEFAULT_EVENT_TYPE_ES6
+      elsif client.maximum_seen_major_version == 6
+        DEFAULT_EVENT_TYPE_ES6
       else
-        DEFAULT_EVENT_TYPE
+        DEFAULT_EVENT_TYPE_ES7
       end
     end
 
@@ -245,6 +256,7 @@ module LogStash; module Outputs; class ElasticSearch;
 
       # We retry until there are no errors! Errors should all go to the retry queue
       sleep_interval = sleep_for_interval(sleep_interval)
+      @bulk_request_metrics.increment(:failures)
      retry unless @stopping.true?
    rescue ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::NoConnectionAvailableError => e
      @logger.error(
@@ -255,8 +267,10 @@ module LogStash; module Outputs; class ElasticSearch;
       )
       Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
       sleep_interval = next_sleep_interval(sleep_interval)
+      @bulk_request_metrics.increment(:failures)
       retry unless @stopping.true?
     rescue ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError => e
+      @bulk_request_metrics.increment(:failures)
       log_hash = {:code => e.response_code, :url => e.url.sanitized.to_s}
       log_hash[:body] = e.response_body if @logger.debug? # Generally this is too verbose
       message = "Encountered a retryable error. Will Retry with exponential backoff "
@@ -286,6 +300,7 @@ module LogStash; module Outputs; class ElasticSearch;
       @logger.debug("Failed actions for last bad bulk request!", :actions => actions)
 
       sleep_interval = sleep_for_interval(sleep_interval)
+      @bulk_request_metrics.increment(:failures)
       retry unless @stopping.true?
     end
   end
@@ -2,7 +2,8 @@
   "template" : "logstash-*",
   "version" : 60001,
   "settings" : {
-    "index.refresh_interval" : "5s"
+    "index.refresh_interval" : "5s",
+    "number_of_shards": 1
   },
   "mappings" : {
     "_doc" : {
@@ -50,6 +50,9 @@ module LogStash; module Outputs; class ElasticSearch;
   # through a special http path, such as using mod_rewrite.
   def initialize(options={})
     @logger = options[:logger]
+    @metric = options[:metric]
+    @bulk_request_metrics = @metric.namespace(:bulk_requests)
+    @bulk_response_metrics = @bulk_request_metrics.namespace(:responses)
 
     # Again, in case we use DEFAULT_OPTIONS in the future, uncomment this.
     # @options = DEFAULT_OPTIONS.merge(options)
@@ -142,9 +145,12 @@ module LogStash; module Outputs; class ElasticSearch;
       body_stream.seek(0)
     end
 
+    @bulk_response_metrics.increment(response.code.to_s)
+
     if response.code != 200
+      url = ::LogStash::Util::SafeURI.new(response.final_url)
       raise ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError.new(
-        response.code, @bulk_path, body_stream.to_s, response.body
+        response.code, url, body_stream.to_s, response.body
       )
     end
 
@@ -282,7 +288,8 @@ module LogStash; module Outputs; class ElasticSearch;
       :sniffing_path => options[:sniffing_path],
       :healthcheck_path => options[:healthcheck_path],
       :resurrect_delay => options[:resurrect_delay],
-      :url_normalizer => self.method(:host_to_url)
+      :url_normalizer => self.method(:host_to_url),
+      :metric => options[:metric]
     }
     pool_options[:scheme] = self.scheme if self.scheme
 
@@ -45,6 +45,7 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
   def initialize(logger, adapter, initial_urls=[], options={})
     @logger = logger
     @adapter = adapter
+    @metric = options[:metric]
     @initial_urls = initial_urls
 
     raise ArgumentError, "No URL Normalizer specified!" unless options[:url_normalizer]
@@ -161,8 +162,8 @@ module LogStash; module Outputs; class ElasticSearch; class HttpClient;
   # Sniffs and returns the results. Does not update internal URLs!
   def check_sniff
     _, url_meta, resp = perform_request(:get, @sniffing_path)
+    @metric.increment(:sniff_requests)
     parsed = LogStash::Json.load(resp.body)
-
     nodes = parsed['nodes']
     if !nodes || nodes.empty?
       @logger.warn("Sniff returned no nodes! Will not update hosts.")
@@ -14,6 +14,7 @@ module LogStash; module Outputs; class ElasticSearch;
 
     common_options = {
       :client_settings => client_settings,
+      :metric => params["metric"],
       :resurrect_delay => params["resurrect_delay"]
     }
 
@@ -1,6 +1,6 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-output-elasticsearch'
-  s.version = '9.0.3'
+  s.version = '9.1.1'
   s.licenses = ['apache-2.0']
   s.summary = "Stores logs in Elasticsearch"
   s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
@@ -0,0 +1,70 @@
+require_relative "../../../spec/es_spec_helper"
+
+describe "metrics", :integration => true do
+  subject! do
+    require "logstash/outputs/elasticsearch"
+    settings = {
+      "manage_template" => false,
+      "hosts" => "#{get_host_port()}"
+    }
+    plugin = LogStash::Outputs::ElasticSearch.new(settings)
+  end
+
+  let(:metric) { subject.metric }
+  let(:bulk_request_metrics) { subject.instance_variable_get(:@bulk_request_metrics) }
+  let(:document_level_metrics) { subject.instance_variable_get(:@document_level_metrics) }
+
+  before :each do
+    require "elasticsearch"
+
+    # Clean ES of data before we start.
+    @es = get_client
+    @es.indices.delete_template(:name => "*")
+
+    # This can fail if there are no indexes, ignore failure.
+    @es.indices.delete(:index => "*") rescue nil
+    #@es.indices.refresh
+    subject.register
+  end
+
+  context "after a succesful bulk insert" do
+    let(:bulk) { [
+      LogStash::Event.new("message" => "sample message here"),
+      LogStash::Event.new("somemessage" => { "message" => "sample nested message here" }),
+      LogStash::Event.new("somevalue" => 100),
+      LogStash::Event.new("somevalue" => 10),
+      LogStash::Event.new("somevalue" => 1),
+      LogStash::Event.new("country" => "us"),
+      LogStash::Event.new("country" => "at"),
+      LogStash::Event.new("geoip" => { "location" => [ 0.0, 0.0 ] })
+    ]}
+
+    it "increases successful bulk request metric" do
+      expect(bulk_request_metrics).to receive(:increment).with(:successes).once
+      subject.multi_receive(bulk)
+    end
+
+    it "increases number of successful inserted documents" do
+      expect(document_level_metrics).to receive(:increment).with(:successes, bulk.size).once
+      subject.multi_receive(bulk)
+    end
+  end
+
+  context "after a bulk insert that generates errors" do
+    let(:bulk) { [
+      LogStash::Event.new("message" => "sample message here"),
+      LogStash::Event.new("message" => { "message" => "sample nested message here" }),
+    ]}
+    it "increases bulk request with error metric" do
+      expect(bulk_request_metrics).to receive(:increment).with(:with_errors).once
+      expect(bulk_request_metrics).to_not receive(:increment).with(:successes)
+      subject.multi_receive(bulk)
+    end
+
+    it "increases number of successful and non retryable documents" do
+      expect(document_level_metrics).to receive(:increment).with(:non_retryable_failures).once
+      expect(document_level_metrics).to receive(:increment).with(:successes).once
+      subject.multi_receive(bulk)
+    end
+  end
+end
@@ -7,7 +7,13 @@ describe "pool sniffer", :integration => true do
   let(:logger) { Cabin::Channel.get }
   let(:adapter) { LogStash::Outputs::ElasticSearch::HttpClient::ManticoreAdapter.new(logger) }
   let(:initial_urls) { [::LogStash::Util::SafeURI.new("http://#{get_host_port}")] }
-  let(:options) { {:resurrect_delay => 2, :url_normalizer => proc {|u| u}} } # Shorten the delay a bit to speed up tests
+  let(:options) do
+    {
+      :resurrect_delay => 2, # Shorten the delay a bit to speed up tests
+      :url_normalizer => proc {|u| u},
+      :metric => ::LogStash::Instrument::NullMetric.new(:dummy).namespace(:alsodummy)
+    }
+  end
 
   subject { LogStash::Outputs::ElasticSearch::HttpClient::Pool.new(logger, adapter, initial_urls, options) }
 
@@ -7,7 +7,8 @@ describe LogStash::Outputs::ElasticSearch::HttpClient do
   let(:base_options) do
     opts = {
       :hosts => [::LogStash::Util::SafeURI.new("127.0.0.1")],
-      :logger => Cabin::Channel.get
+      :logger => Cabin::Channel.get,
+      :metric => ::LogStash::Instrument::NamespacedNullMetric.new(:dummy_metric)
     }
 
     if !ssl.nil? # Shortcut to set this
@@ -39,14 +39,17 @@ describe LogStash::Outputs::ElasticSearch do
 
   let(:manticore_urls) { subject.client.pool.urls }
   let(:manticore_url) { manticore_urls.first }
-
-  describe "getting a document type" do
-    it "should default to 'doc'" do
-      expect(subject.send(:get_event_type, LogStash::Event.new)).to eql("doc")
-    end
 
+  describe "getting a document type" do
     context "if document_type isn't set" do
       let(:options) { super.merge("document_type" => nil)}
+      context "for 7.x elasticsearch clusters" do
+        let(:maximum_seen_major_version) { 7 }
+        it "should return '_doc'" do
+          expect(subject.send(:get_event_type, LogStash::Event.new("type" => "foo"))).to eql("_doc")
+        end
+      end
+
       context "for 6.x elasticsearch clusters" do
         let(:maximum_seen_major_version) { 6 }
         it "should return 'doc'" do
@@ -88,25 +91,25 @@ describe LogStash::Outputs::ElasticSearch do
       end
     end
   end
-
+
   describe "with auth" do
     let(:user) { "myuser" }
     let(:password) { ::LogStash::Util::Password.new("mypassword") }
-
+
     shared_examples "an authenticated config" do
       it "should set the URL auth correctly" do
         expect(manticore_url.user).to eq user
       end
     end
-
+
     context "as part of a URL" do
       let(:options) {
        super.merge("hosts" => ["http://#{user}:#{password.value}@localhost:9200"])
      }
-
+
      include_examples("an authenticated config")
    end
-
+
    context "as a hash option" do
      let(:options) {
        super.merge!(
@@ -114,7 +117,7 @@ describe LogStash::Outputs::ElasticSearch do
          "password" => password
        )
      }
-
+
      include_examples("an authenticated config")
    end
  end
@@ -218,7 +221,7 @@ describe LogStash::Outputs::ElasticSearch do
 
   context "429 errors" do
     let(:event) { ::LogStash::Event.new("foo" => "bar") }
-    let(:error) do
+    let(:error) do
       ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError.new(
         429, double("url").as_null_object, double("request body"), double("response body")
       )
@@ -252,7 +255,7 @@ describe LogStash::Outputs::ElasticSearch do
       end
     end
   end
-
+
   context "with timeout set" do
     let(:listener) { Flores::Random.tcp_listener }
     let(:port) { listener[2] }
@@ -290,6 +293,16 @@ describe LogStash::Outputs::ElasticSearch do
     end
   end
 
+  context "with a sprintf action equals to update" do
+    let(:options) { {"action" => "%{myactionfield}", "upsert" => '{"message": "some text"}' } }
+
+    let(:event) { LogStash::Event.new("myactionfield" => "update", "message" => "blah") }
+
+    it "should obtain specific action's params from event_action_tuple" do
+      expect(subject.event_action_tuple(event)[1]).to include(:_upsert)
+    end
+  end
+
   context "with an invalid action" do
     let(:options) { {"action" => "SOME Garbaaage"} }
     let(:do_register) { false } # this is what we want to test, so we disable the before(:each) call
@@ -301,8 +314,8 @@ describe LogStash::Outputs::ElasticSearch do
   end
 
   describe "SSL end to end" do
-    let(:manticore_double) do
-      double("manticoreX#{self.inspect}")
+    let(:manticore_double) do
+      double("manticoreX#{self.inspect}")
     end
 
     before(:each) do
@@ -311,18 +324,18 @@ describe LogStash::Outputs::ElasticSearch do
       allow(manticore_double).to receive(:head).with(any_args).and_return(response_double)
       allow(manticore_double).to receive(:get).with(any_args).and_return(response_double)
       allow(manticore_double).to receive(:close)
-
+
       allow(::Manticore::Client).to receive(:new).and_return(manticore_double)
       subject.register
     end
-
+
     shared_examples("an encrypted client connection") do
       it "should enable SSL in manticore" do
        expect(subject.client.pool.urls.map(&:scheme).uniq).to eql(['https'])
      end
    end
 
-
+
    context "With the 'ssl' option" do
      let(:options) { {"ssl" => true}}
 
@@ -337,24 +350,34 @@ describe LogStash::Outputs::ElasticSearch do
 
   describe "retry_on_conflict" do
     let(:num_retries) { 123 }
-    let(:event) { LogStash::Event.new("message" => "blah") }
+    let(:event) { LogStash::Event.new("myactionfield" => "update", "message" => "blah") }
     let(:options) { { 'retry_on_conflict' => num_retries } }
 
     context "with a regular index" do
       let(:options) { super.merge("action" => "index") }
 
-      it "should interpolate the requested action value when creating an event_action_tuple" do
+      it "should not set the retry_on_conflict parameter when creating an event_action_tuple" do
         action, params, event_data = subject.event_action_tuple(event)
         expect(params).not_to include({:_retry_on_conflict => num_retries})
       end
     end
 
     context "using a plain update" do
-      let(:options) { super.merge("action" => "update", "retry_on_conflict" => num_retries, "document_id" => 1) }
+      let(:options) { super.merge("action" => "update", "retry_on_conflict" => num_retries, "document_id" => 1) }
 
-      it "should interpolate the requested action value when creating an event_action_tuple" do
+      it "should set the retry_on_conflict parameter when creating an event_action_tuple" do
+        action, params, event_data = subject.event_action_tuple(event)
+        expect(params).to include({:_retry_on_conflict => num_retries})
+      end
+    end
+
+    context "with a sprintf action that resolves to update" do
+      let(:options) { super.merge("action" => "%{myactionfield}", "retry_on_conflict" => num_retries, "document_id" => 1) }
+
+      it "should set the retry_on_conflict parameter when creating an event_action_tuple" do
        action, params, event_data = subject.event_action_tuple(event)
        expect(params).to include({:_retry_on_conflict => num_retries})
+        expect(action).to eq("update")
      end
    end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-output-elasticsearch
 version: !ruby/object:Gem::Version
-  version: 9.0.3
+  version: 9.1.1
 platform: java
 authors:
 - Elastic
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-02-08 00:00:00.000000000 Z
+date: 2018-04-06 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
@@ -246,6 +246,7 @@ files:
 - spec/integration/outputs/index_spec.rb
 - spec/integration/outputs/index_version_spec.rb
 - spec/integration/outputs/ingest_pipeline_spec.rb
+- spec/integration/outputs/metrics_spec.rb
 - spec/integration/outputs/painless_update_spec.rb
 - spec/integration/outputs/parent_spec.rb
 - spec/integration/outputs/retry_spec.rb
@@ -285,7 +286,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.6.13
+rubygems_version: 2.6.11
 signing_key:
 specification_version: 4
 summary: Stores logs in Elasticsearch
@@ -312,6 +313,7 @@ test_files:
 - spec/integration/outputs/index_spec.rb
 - spec/integration/outputs/index_version_spec.rb
 - spec/integration/outputs/ingest_pipeline_spec.rb
+- spec/integration/outputs/metrics_spec.rb
 - spec/integration/outputs/painless_update_spec.rb
 - spec/integration/outputs/parent_spec.rb
 - spec/integration/outputs/retry_spec.rb