logstash-output-amazon_es 2.0.1-java → 6.4.0-java

Sign up to get free protection for your applications and to get access to all the features.
Files changed (35) hide show
  1. checksums.yaml +5 -5
  2. data/CONTRIBUTORS +12 -0
  3. data/Gemfile +8 -0
  4. data/LICENSE +10 -199
  5. data/README.md +34 -65
  6. data/lib/logstash/outputs/amazon_es.rb +218 -423
  7. data/lib/logstash/outputs/amazon_es/common.rb +347 -0
  8. data/lib/logstash/outputs/amazon_es/common_configs.rb +141 -0
  9. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es2x.json +95 -0
  10. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es5x.json +46 -0
  11. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es6x.json +45 -0
  12. data/lib/logstash/outputs/amazon_es/elasticsearch-template-es7x.json +46 -0
  13. data/lib/logstash/outputs/amazon_es/http_client.rb +359 -74
  14. data/lib/logstash/outputs/amazon_es/http_client/manticore_adapter.rb +169 -0
  15. data/lib/logstash/outputs/amazon_es/http_client/pool.rb +457 -0
  16. data/lib/logstash/outputs/amazon_es/http_client_builder.rb +164 -0
  17. data/lib/logstash/outputs/amazon_es/template_manager.rb +36 -0
  18. data/logstash-output-amazon_es.gemspec +13 -22
  19. data/spec/es_spec_helper.rb +37 -0
  20. data/spec/unit/http_client_builder_spec.rb +189 -0
  21. data/spec/unit/outputs/elasticsearch/http_client/manticore_adapter_spec.rb +105 -0
  22. data/spec/unit/outputs/elasticsearch/http_client/pool_spec.rb +198 -0
  23. data/spec/unit/outputs/elasticsearch/http_client_spec.rb +222 -0
  24. data/spec/unit/outputs/elasticsearch/template_manager_spec.rb +25 -0
  25. data/spec/unit/outputs/elasticsearch_spec.rb +615 -0
  26. data/spec/unit/outputs/error_whitelist_spec.rb +60 -0
  27. metadata +49 -110
  28. data/lib/logstash/outputs/amazon_es/aws_transport.rb +0 -109
  29. data/lib/logstash/outputs/amazon_es/aws_v4_signer.rb +0 -7
  30. data/lib/logstash/outputs/amazon_es/aws_v4_signer_impl.rb +0 -62
  31. data/lib/logstash/outputs/amazon_es/elasticsearch-template.json +0 -41
  32. data/spec/amazon_es_spec_helper.rb +0 -69
  33. data/spec/unit/outputs/amazon_es_spec.rb +0 -50
  34. data/spec/unit/outputs/elasticsearch/protocol_spec.rb +0 -36
  35. data/spec/unit/outputs/elasticsearch_proxy_spec.rb +0 -58
@@ -0,0 +1,347 @@
1
require "logstash/outputs/amazon_es/template_manager"

module LogStash; module Outputs; class ElasticSearch;
  # Shared behavior mixed into the amazon_es output plugin: registration,
  # bulk submission with exponential-backoff retries, per-document error
  # routing (drop / DLQ / retry), and template installation.
  module Common
    attr_reader :client, :hosts

    # These codes apply to documents, not at the request level
    DOC_DLQ_CODES = [400, 404]
    DOC_SUCCESS_CODES = [200, 201]
    DOC_CONFLICT_CODE = 409

    # When you use external versioning, you are communicating that you want
    # to ignore conflicts. More obviously, since an external version is a
    # constant part of the incoming document, we should not retry, as retrying
    # will never succeed.
    VERSION_TYPES_PERMITTING_CONFLICT = ["external", "external_gt", "external_gte"]

    # Plugin lifecycle hook: set up state flags, the (optional) DLQ writer,
    # the HTTP client, metrics namespaces, and kick off async template install.
    def register
      @template_installed = Concurrent::AtomicBoolean.new(false)
      @stopping = Concurrent::AtomicBoolean.new(false)
      # To support BWC, we check if DLQ exists in core (< 5.4). If it doesn't, we use nil to resort to previous behavior.
      @dlq_writer = dlq_enabled? ? execution_context.dlq_writer : nil

      setup_hosts # properly sets @hosts
      build_client
      check_action_validity
      @bulk_request_metrics = metric.namespace(:bulk_requests)
      @document_level_metrics = metric.namespace(:documents)
      install_template_after_successful_connection
      @logger.info("New Elasticsearch output", :class => self.class.name, :hosts => @hosts.map(&:sanitized).map(&:to_s))
    end

    # Receive an array of events and immediately attempt to index them (no buffering)
    # Blocks (polling once a second) until the template installer thread has run.
    def multi_receive(events)
      until @template_installed.true?
        sleep 1
      end
      retrying_submit(events.map {|e| event_action_tuple(e)})
    end

    # Spawn a background thread that waits for cluster connectivity (with
    # exponential backoff) and then installs the index template.
    def install_template_after_successful_connection
      @template_installer ||= Thread.new do
        sleep_interval = @retry_initial_interval
        until successful_connection? || @stopping.true?
          @logger.debug("Waiting for connectivity to Elasticsearch cluster. Retrying in #{sleep_interval}s")
          Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
          sleep_interval = next_sleep_interval(sleep_interval)
        end
        install_template if successful_connection?
      end
    end

    # Wait for the template-installer thread to finish (no-op if never started).
    def stop_template_installer
      @template_installer.join unless @template_installer.nil?
    end

    # We have connected successfully once we have sniffed a cluster major version.
    def successful_connection?
      !!maximum_seen_major_version
    end

    # Convert the event into a 3-tuple of action, params, and event
    def event_action_tuple(event)

      action = event.sprintf(@action)

      params = {
        :_id => @document_id ? event.sprintf(@document_id) : nil,
        :_index => event.sprintf(@index),
        :_type => get_event_type(event),
        :_routing => @routing ? event.sprintf(@routing) : nil
      }

      if @pipeline
        params[:pipeline] = event.sprintf(@pipeline)
      end

      if @parent
        if @join_field
          # Join-field style parent/child: embed the relation in the document
          # and route by the parent id so parent and child land on one shard.
          join_value = event.get(@join_field)
          parent_value = event.sprintf(@parent)
          event.set(@join_field, { "name" => join_value, "parent" => parent_value })
          params[:_routing] = event.sprintf(@parent)
        else
          params[:parent] = event.sprintf(@parent)
        end
      end

      if action == 'update'
        params[:_upsert] = LogStash::Json.load(event.sprintf(@upsert)) if @upsert != ""
        params[:_script] = event.sprintf(@script) if @script != ""
        params[:_retry_on_conflict] = @retry_on_conflict
      end

      if @version
        params[:version] = event.sprintf(@version)
      end

      if @version_type
        params[:version_type] = event.sprintf(@version_type)
      end

      [action, params, event]
    end

    # Normalize @hosts to a non-empty array, defaulting to localhost.
    def setup_hosts
      @hosts = Array(@hosts)
      if @hosts.empty?
        @logger.info("No 'host' set in amazon_es output. Defaulting to localhost")
        @hosts.replace(["localhost"])
      end
    end

    def maximum_seen_major_version
      client.maximum_seen_major_version
    end

    def install_template
      TemplateManager.install_template(self)
      @template_installed.make_true
    end

    # Validate @action at registration time; sprintf-style actions are
    # resolved per-event so they are always accepted here.
    def check_action_validity
      raise LogStash::ConfigurationError, "No action specified!" unless @action

      # If we're using string interpolation, we're good!
      return if @action =~ /%{.+}/
      return if valid_actions.include?(@action)

      raise LogStash::ConfigurationError, "Action '#{@action}' is invalid! Pick one of #{valid_actions} or use a sprintf style statement"
    end

    # To be overridden by the -java version
    VALID_HTTP_ACTIONS=["index", "delete", "create", "update"]
    def valid_actions
      VALID_HTTP_ACTIONS
    end

    # Submit actions, retrying any that failed retryably, with exponential
    # backoff between rounds, until all succeed or we are stopping.
    def retrying_submit(actions)
      # Initially we submit the full list of actions
      submit_actions = actions

      sleep_interval = @retry_initial_interval

      while submit_actions && submit_actions.length > 0

        # We retry with whatever didn't succeed
        begin
          submit_actions = submit(submit_actions)
          if submit_actions && submit_actions.size > 0
            @logger.info("Retrying individual bulk actions that failed or were rejected by the previous bulk request.", :count => submit_actions.size)
          end
        rescue => e
          @logger.error("Encountered an unexpected error submitting a bulk request! Will retry.",
                        :error_message => e.message,
                        :class => e.class.name,
                        :backtrace => e.backtrace)
        end

        # Everything was a success!
        break if !submit_actions || submit_actions.empty?

        # If we're retrying the action sleep for the recommended interval
        # Double the interval for the next time through to achieve exponential backoff
        Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
        sleep_interval = next_sleep_interval(sleep_interval)
      end
    end

    # Sleep (interruptible by shutdown) and return the next backoff interval.
    def sleep_for_interval(sleep_interval)
      Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
      next_sleep_interval(sleep_interval)
    end

    # Exponential backoff: double the interval, capped at @retry_max_interval.
    def next_sleep_interval(current_interval)
      doubled = current_interval * 2
      doubled > @retry_max_interval ? @retry_max_interval : doubled
    end

    # Send one bulk request and return the subset of actions to retry
    # (nil/empty when everything succeeded or we are shutting down).
    def submit(actions)
      bulk_response = safe_bulk(actions)

      # If the response is nil that means we were in a retry loop
      # and aborted since we're shutting down
      return if bulk_response.nil?

      # If it did return and there are no errors we're good as well
      if bulk_response["errors"]
        @bulk_request_metrics.increment(:with_errors)
      else
        @bulk_request_metrics.increment(:successes)
        @document_level_metrics.increment(:successes, actions.size)
        return
      end

      actions_to_retry = []
      bulk_response["items"].each_with_index do |response,idx|
        action_type, action_props = response.first

        status = action_props["status"]
        failure = action_props["error"]
        action = actions[idx]
        action_params = action[1]

        # Retry logic: If it is success, we move on. If it is a failure, we have 3 paths:
        # - For 409, we log and drop. there is nothing we can do
        # - For a mapping error, we send to dead letter queue for a human to intervene at a later point.
        # - For everything else there's mastercard. Yep, and we retry indefinitely. This should fix #572 and other transient network issues
        if DOC_SUCCESS_CODES.include?(status)
          @document_level_metrics.increment(:successes)
          next
        elsif DOC_CONFLICT_CODE == status
          @document_level_metrics.increment(:non_retryable_failures)
          @logger.warn "Failed action.", status: status, action: action, response: response if !failure_type_logging_whitelist.include?(failure["type"])
          next
        elsif DOC_DLQ_CODES.include?(status)
          handle_dlq_status("Could not index event to Elasticsearch.", action, status, response)
          @document_level_metrics.increment(:non_retryable_failures)
          next
        else
          # only log what the user whitelisted
          @document_level_metrics.increment(:retryable_failures)
          @logger.info "retrying failed action with response code: #{status} (#{failure})" if !failure_type_logging_whitelist.include?(failure["type"])
          actions_to_retry << action
        end
      end

      actions_to_retry
    end

    # Route a non-retryable document failure to the DLQ when available,
    # otherwise log it (error level for invalid index names, warn otherwise).
    def handle_dlq_status(message, action, status, response)
      # To support bwc, we check if DLQ exists. otherwise we log and drop event (previous behavior)
      if @dlq_writer
        # TODO: Change this to send a map with { :status => status, :action => action } in the future
        @dlq_writer.write(action[2], "#{message} status: #{status}, action: #{action}, response: #{response}")
      else
        error_type = response.fetch('index', {}).fetch('error', {})['type']
        if 'invalid_index_name_exception' == error_type
          level = :error
        else
          level = :warn
        end
        @logger.send level, message, status: status, action: action, response: response
      end
    end

    # Determine the correct value for the 'type' field for the given event
    DEFAULT_EVENT_TYPE_ES6="doc".freeze
    DEFAULT_EVENT_TYPE_ES7="_doc".freeze
    def get_event_type(event)
      # Set the 'type' value for the index.
      type = if @document_type
               event.sprintf(@document_type)
             else
               if client.maximum_seen_major_version < 6
                 event.get("type") || DEFAULT_EVENT_TYPE_ES6
               elsif client.maximum_seen_major_version == 6
                 DEFAULT_EVENT_TYPE_ES6
               else
                 DEFAULT_EVENT_TYPE_ES7
               end
             end

      if !(type.is_a?(String) || type.is_a?(Numeric))
        @logger.warn("Bad event type! Non-string/integer type value set!", :type_class => type.class, :type_value => type.to_s, :event => event)
      end

      type.to_s
    end

    # Rescue retryable errors during bulk submission
    def safe_bulk(actions)
      sleep_interval = @retry_initial_interval
      begin
        es_actions = actions.map {|action_type, params, event| [action_type, params, event.to_hash]}
        response = @client.bulk(es_actions)
        response
      rescue ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError => e
        # If we can't even connect to the server let's just print out the URL (:hosts is actually a URL)
        # and let the user sort it out from there
        @logger.error(
          "Attempted to send a bulk request to Elasticsearch," +
            " but Elasticsearch appears to be unreachable or down!",
          :error_message => e.message,
          :class => e.class.name,
          :will_retry_in_seconds => sleep_interval
        )
        @logger.debug("Failed actions for last bad bulk request!", :actions => actions)

        # We retry until there are no errors! Errors should all go to the retry queue
        sleep_interval = sleep_for_interval(sleep_interval)
        @bulk_request_metrics.increment(:failures)
        retry unless @stopping.true?
      rescue ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::NoConnectionAvailableError => e
        @logger.error(
          "Attempted to send a bulk request to elasticsearch, but there are no living connections in the connection pool. Perhaps Elasticsearch is unreachable or down?",
          :error_message => e.message,
          :class => e.class.name,
          :will_retry_in_seconds => sleep_interval
        )
        sleep_interval = sleep_for_interval(sleep_interval)
        @bulk_request_metrics.increment(:failures)
        retry unless @stopping.true?
      rescue ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError => e
        @bulk_request_metrics.increment(:failures)
        log_hash = {:code => e.response_code, :url => e.url.sanitized.to_s}
        log_hash[:body] = e.response_body if @logger.debug? # Generally this is too verbose
        message = "Encountered a retryable error. Will Retry with exponential backoff "

        # We treat 429s as a special case because these really aren't errors, but
        # rather just ES telling us to back off a bit, which we do.
        # The other retryable code is 503, which are true errors
        # Even though we retry the user should be made aware of these
        if e.response_code == 429
          logger.debug(message, log_hash)
        else
          logger.error(message, log_hash)
        end

        sleep_interval = sleep_for_interval(sleep_interval)
        retry
      rescue => e
        # Stuff that should never happen
        # For all other errors print out full connection issues
        @logger.error(
          "An unknown error occurred sending a bulk request to Elasticsearch. We will retry indefinitely",
          :error_message => e.message,
          :error_class => e.class.name,
          :backtrace => e.backtrace
        )

        @logger.debug("Failed actions for last bad bulk request!", :actions => actions)

        sleep_interval = sleep_for_interval(sleep_interval)
        @bulk_request_metrics.increment(:failures)
        retry unless @stopping.true?
      end
    end

    # True when the running Logstash core exposes a real (non-dummy) DLQ writer.
    def dlq_enabled?
      # TODO there should be a better way to query if DLQ is enabled
      # See more in: https://github.com/elastic/logstash/issues/8064
      respond_to?(:execution_context) && execution_context.respond_to?(:dlq_writer) &&
        !execution_context.dlq_writer.inner_writer.is_a?(::LogStash::Util::DummyDeadLetterQueueWriter)
    end
  end
end; end; end
@@ -0,0 +1,141 @@
1
require 'forwardable' # Needed for logstash core SafeURI. We need to patch this in core: https://github.com/elastic/logstash/pull/5978

module LogStash; module Outputs; class ElasticSearch
  # Declares the configuration options shared by the amazon_es output.
  # Mixed into the plugin class; all options are registered via the
  # Logstash `config` DSL when the module is included.
  module CommonConfigs
    def self.included(mod)
      # The index to write events to. This can be dynamic using the `%{foo}` syntax.
      # The default value will partition your indices by day so you can more easily
      # delete old data or only search specific date ranges.
      # Indexes may not contain uppercase characters.
      # For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}.
      # LS uses Joda to format the index pattern from event timestamp.
      # Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
      mod.config :index, :validate => :string, :default => "logstash-%{+YYYY.MM.dd}"

      mod.config :document_type,
        :validate => :string,
        :deprecated => "Document types are being deprecated in Elasticsearch 6.0, and removed entirely in 7.0. You should avoid this feature"

      # From Logstash 1.3 onwards, a template is applied to Elasticsearch during
      # Logstash's startup if one with the name `template_name` does not already exist.
      # By default, the contents of this template is the default template for
      # `logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
      # `logstash-*`. Should you require support for other index names, or would like
      # to change the mappings in the template in general, a custom template can be
      # specified by setting `template` to the path of a template file.
      #
      # Setting `manage_template` to false disables this feature. If you require more
      # control over template creation, (e.g. creating indices dynamically based on
      # field names) you should set `manage_template` to false and use the REST
      # API to apply your templates manually.
      mod.config :manage_template, :validate => :boolean, :default => true

      # This configuration option defines how the template is named inside Elasticsearch.
      # Note that if you have used the template management features and subsequently
      # change this, you will need to prune the old template manually, e.g.
      #
      # `curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
      #
      # where `OldTemplateName` is whatever the former setting was.
      mod.config :template_name, :validate => :string, :default => "logstash"

      # You can set the path to your own template here, if you so desire.
      # If not set, the included template will be used.
      mod.config :template, :validate => :path

      # The template_overwrite option will always overwrite the indicated template
      # in Elasticsearch with either the one indicated by template or the included one.
      # This option is set to false by default. If you always want to stay up to date
      # with the template provided by Logstash, this option could be very useful to you.
      # Likewise, if you have your own template file managed by puppet, for example, and
      # you wanted to be able to update it regularly, this option could help there as well.
      #
      # Please note that if you are using your own customized version of the Logstash
      # template (logstash), setting this to true will make Logstash overwrite
      # the "logstash" template (i.e. removing all customized settings)
      mod.config :template_overwrite, :validate => :boolean, :default => false

      # The document ID for the index. Useful for overwriting existing entries in
      # Elasticsearch with the same ID.
      mod.config :document_id, :validate => :string

      # The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
      # See https://www.elastic.co/blog/elasticsearch-versioning-support.
      mod.config :version, :validate => :string

      # The version_type to use for indexing.
      # See https://www.elastic.co/blog/elasticsearch-versioning-support.
      # See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
      mod.config :version_type, :validate => ["internal", 'external', "external_gt", "external_gte", "force"]

      # A routing override to be applied to all processed events.
      # This can be dynamic using the `%{foo}` syntax.
      mod.config :routing, :validate => :string

      # For child documents, ID of the associated parent.
      # This can be dynamic using the `%{foo}` syntax.
      mod.config :parent, :validate => :string, :default => nil

      # For child documents, name of the join field
      mod.config :join_field, :validate => :string, :default => nil

      # Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
      # Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300).
      # `"127.0.0.1"`
      # `["127.0.0.1:9200","127.0.0.2:9200"]`
      # `["http://127.0.0.1"]`
      # `["https://127.0.0.1:9200"]`
      # `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
      # It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
      # to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
      #
      # Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
      mod.config :hosts, :validate => :uri, :default => [::LogStash::Util::SafeURI.new("//127.0.0.1")], :list => true

      mod.config :flush_size, :validate => :number, :obsolete => "This setting is no longer available as we now try to restrict bulk requests to sane sizes. See the 'Batch Sizes' section of the docs. If you think you still need to restrict payloads based on the number, not size, of events, please open a ticket."

      mod.config :idle_flush_time, :validate => :number, :obsolete => "This settings is no longer valid. This was a no-op now as every pipeline batch is flushed synchronously obviating the need for this option."

      # Set upsert content for update mode.
      # Create a new document with this parameter as json string if `document_id` doesn't exist
      mod.config :upsert, :validate => :string, :default => ""

      # Enable `doc_as_upsert` for update mode.
      # Create a new document with source if `document_id` doesn't exist in Elasticsearch
      mod.config :doc_as_upsert, :validate => :boolean, :default => false

      # Set script name for scripted update mode
      mod.config :script, :validate => :string, :default => ""

      # Define the type of script referenced by "script" variable
      # inline : "script" contains inline script
      # indexed : "script" contains the name of script directly indexed in amazon_es
      # file : "script" contains the name of script stored in Elasticsearch's config directory
      # NOTE(review): the default is an Array (["inline"]) while the validator
      # expects one of the enum strings — presumably this should be "inline";
      # kept as-is to avoid a behavior change, but worth confirming upstream.
      mod.config :script_type, :validate => ["inline", 'indexed', "file"], :default => ["inline"]

      # Set the language of the used script. If not set, this defaults to painless in ES 5.0
      mod.config :script_lang, :validate => :string, :default => "painless"

      # Set variable name passed to script (scripted update)
      mod.config :script_var_name, :validate => :string, :default => "event"

      # if enabled, script is in charge of creating non-existent document (scripted update)
      mod.config :scripted_upsert, :validate => :boolean, :default => false

      # Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`
      mod.config :retry_initial_interval, :validate => :number, :default => 2

      # Set max interval in seconds between bulk retries.
      mod.config :retry_max_interval, :validate => :number, :default => 64

      # The number of times Elasticsearch should internally retry an update/upserted document
      # See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
      # for more info
      mod.config :retry_on_conflict, :validate => :number, :default => 1

      # Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
      # here like `pipeline => "%{INGEST_PIPELINE}"`
      mod.config :pipeline, :validate => :string, :default => nil
    end
  end
end end end