logstash-filter-elasticsearch 3.19.0 → 4.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +9 -11
- data/docs/index.asciidoc +23 -240
- data/lib/logstash/filters/elasticsearch/client.rb +2 -27
- data/lib/logstash/filters/elasticsearch.rb +124 -177
- data/logstash-filter-elasticsearch.gemspec +3 -6
- data/spec/filters/elasticsearch_spec.rb +272 -163
- data/spec/filters/elasticsearch_ssl_spec.rb +17 -0
- data/spec/filters/integration/elasticsearch_spec.rb +2 -9
- metadata +3 -59
- data/lib/logstash/filters/elasticsearch/dsl_executor.rb +0 -140
- data/lib/logstash/filters/elasticsearch/esql_executor.rb +0 -178
- data/spec/filters/elasticsearch_dsl_spec.rb +0 -372
- data/spec/filters/elasticsearch_esql_spec.rb +0 -211
- data/spec/filters/integration/elasticsearch_esql_spec.rb +0 -167
--- a/data/lib/logstash/filters/elasticsearch/dsl_executor.rb
+++ /dev/null
@@ -1,140 +0,0 @@
-# encoding: utf-8
-
-module LogStash
-  module Filters
-    class Elasticsearch
-      class DslExecutor
-        def initialize(plugin, logger)
-          @index = plugin.params["index"]
-          @query = plugin.params["query"]
-          @query_dsl = plugin.query_dsl
-          @fields = plugin.params["fields"]
-          @result_size = plugin.params["result_size"]
-          @docinfo_fields = plugin.params["docinfo_fields"]
-          @tag_on_failure = plugin.params["tag_on_failure"]
-          @enable_sort = plugin.params["enable_sort"]
-          @sort = plugin.params["sort"]
-          @aggregation_fields = plugin.params["aggregation_fields"]
-          @logger = logger
-          @event_decorator = plugin.method(:decorate)
-          @target_field = plugin.params["target"]
-          if @target_field
-            def self.apply_target(path); "[#{@target_field}][#{path}]"; end
-          else
-            def self.apply_target(path); path; end
-          end
-        end
-
-        def process(client, event)
-          matched = false
-          begin
-            params = { :index => event.sprintf(@index) }
-
-            if @query_dsl
-              query = LogStash::Json.load(event.sprintf(@query_dsl))
-              params[:body] = query
-            else
-              query = event.sprintf(@query)
-              params[:q] = query
-              params[:size] = @result_size
-              params[:sort] = @sort if @enable_sort
-            end
-
-            @logger.debug("Querying elasticsearch for lookup", :params => params)
-
-            results = client.search(params)
-            raise "Elasticsearch query error: #{results["_shards"]["failures"]}" if results["_shards"].include? "failures"
-
-            event.set("[@metadata][total_hits]", extract_total_from_hits(results['hits']))
-
-            result_hits = results["hits"]["hits"]
-            if !result_hits.nil? && !result_hits.empty?
-              matched = true
-              @fields.each do |old_key, new_key|
-                old_key_path = extract_path(old_key)
-                extracted_hit_values = result_hits.map do |doc|
-                  extract_value(doc["_source"], old_key_path)
-                end
-                value_to_set = extracted_hit_values.count > 1 ? extracted_hit_values : extracted_hit_values.first
-                set_to_event_target(event, new_key, value_to_set)
-              end
-              @docinfo_fields.each do |old_key, new_key|
-                old_key_path = extract_path(old_key)
-                extracted_docs_info = result_hits.map do |doc|
-                  extract_value(doc, old_key_path)
-                end
-                value_to_set = extracted_docs_info.count > 1 ? extracted_docs_info : extracted_docs_info.first
-                set_to_event_target(event, new_key, value_to_set)
-              end
-            end
-
-            result_aggregations = results["aggregations"]
-            if !result_aggregations.nil? && !result_aggregations.empty?
-              matched = true
-              @aggregation_fields.each do |agg_name, ls_field|
-                set_to_event_target(event, ls_field, result_aggregations[agg_name])
-              end
-            end
-
-          rescue => e
-            if @logger.trace?
-              @logger.warn("Failed to query elasticsearch for previous event", :index => @index, :query => @query, :event => event.to_hash, :error => e.message, :backtrace => e.backtrace)
-            elsif @logger.debug?
-              @logger.warn("Failed to query elasticsearch for previous event", :index => @index, :error => e.message, :backtrace => e.backtrace)
-            else
-              @logger.warn("Failed to query elasticsearch for previous event", :index => @index, :error => e.message)
-            end
-            @tag_on_failure.each { |tag| event.tag(tag) }
-          else
-            @event_decorator.call(event) if matched
-          end
-        end
-
-        private
-
-        # Given a "hits" object from an Elasticsearch response, return the total number of hits in
-        # the result set.
-        # @param hits [Hash{String=>Object}]
-        # @return [Integer]
-        def extract_total_from_hits(hits)
-          total = hits['total']
-
-          # Elasticsearch 7.x produces an object containing `value` and `relation` in order
-          # to enable unambiguous reporting when the total is only a lower bound; if we get
-          # an object back, return its `value`.
-          return total['value'] if total.kind_of?(Hash)
-          total
-        end
-
-        # get an array of path elements from a path reference
-        def extract_path(path_reference)
-          return [path_reference] unless path_reference.start_with?('[') && path_reference.end_with?(']')
-
-          path_reference[1...-1].split('][')
-        end
-
-        # given a Hash and an array of path fragments, returns the value at the path
-        # @param source [Hash{String=>Object}]
-        # @param path [Array{String}]
-        # @return [Object]
-        def extract_value(source, path)
-          path.reduce(source) do |memo, old_key_fragment|
-            break unless memo.include?(old_key_fragment)
-            memo[old_key_fragment]
-          end
-        end
-
-        # if @target is defined, creates a nested structure to inject result into target field
-        # if not defined, directly sets to the top-level event field
-        # @param event [LogStash::Event]
-        # @param new_key [String] name of the field to set
-        # @param value_to_set [Array] values to set
-        # @return [void]
-        def set_to_event_target(event, new_key, value_to_set)
-          key_to_set = self.apply_target(new_key)
-          event.set(key_to_set, value_to_set)
-        end
-      end
-    end
-  end
-end
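The removed DslExecutor's `extract_path`/`extract_value` pair is what let `fields` mappings pull nested values such as "[geoip][ip]" out of a hit's `_source`. A minimal standalone sketch of that behavior (plain Ruby, no Logstash runtime; the sample document mirrors the spec fixture shown further down):

# "[geoip][ip]" -> ["geoip", "ip"]; a bare name passes through as one fragment.
def extract_path(path_reference)
  return [path_reference] unless path_reference.start_with?('[') && path_reference.end_with?(']')
  path_reference[1...-1].split('][')
end

# Walk the fragments into the hash; `break` yields nil for a missing key.
def extract_value(source, path)
  path.reduce(source) do |memo, fragment|
    break unless memo.include?(fragment)
    memo[fragment]
  end
end

source = { "geoip" => { "ip" => "66.249.73.185" }, "response" => 404 }
p extract_value(source, extract_path("[geoip][ip]"))  # => "66.249.73.185"
p extract_value(source, extract_path("response"))     # => 404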
--- a/data/lib/logstash/filters/elasticsearch/esql_executor.rb
+++ /dev/null
@@ -1,178 +0,0 @@
-# encoding: utf-8
-
-module LogStash
-  module Filters
-    class Elasticsearch
-      class EsqlExecutor
-
-        ESQL_PARSERS_BY_TYPE = Hash.new(lambda { |x| x }).merge(
-          'date' => ->(value) { value && LogStash::Timestamp.new(value) },
-        )
-
-        def initialize(plugin, logger)
-          @logger = logger
-
-          @event_decorator = plugin.method(:decorate)
-          @query = plugin.params["query"]
-
-          query_params = plugin.query_params || {}
-          reference_valued_params, static_valued_params = query_params.partition { |_, v| v.kind_of?(String) && v.match?(/^\[.*\]$/) }
-          @referenced_params = reference_valued_params&.to_h
-          # keep static params as an array of hashes to attach to the ES|QL api param easily
-          @static_params = static_valued_params.map { |k, v| { k => v } }
-          @tag_on_failure = plugin.params["tag_on_failure"]
-          @logger.debug("ES|QL query executor initialized with ", query: @query, query_params: query_params)
-
-          # if the target is specified, all result entries will be copied to the target field
-          # otherwise, the first value of the result will be copied to the event
-          @target_field = plugin.params["target"]
-          @logger.warn("Only first query result will be copied to the event. Please specify `target` in plugin config to include all") if @target_field.nil?
-        end
-
-        def process(client, event)
-          resolved_params = @referenced_params&.any? ? resolve_parameters(event) : []
-          resolved_params.concat(@static_params) if @static_params&.any?
-          response = execute_query(client, resolved_params)
-          inform_warning(response)
-          process_response(event, response)
-          @event_decorator.call(event)
-        rescue => e
-          @logger.error("Failed to process ES|QL filter", exception: e)
-          @tag_on_failure.each { |tag| event.tag(tag) }
-        end
-
-        private
-
-        def resolve_parameters(event)
-          @referenced_params.map do |key, value|
-            begin
-              resolved_value = event.get(value)
-              @logger.debug("Resolved value for #{key}: #{resolved_value}, its class: #{resolved_value.class}")
-              { key => resolved_value }
-            rescue => e
-              # catches invalid field reference
-              raise "Failed to resolve parameter `#{key}` with `#{value}`. Error: #{e.message}"
-            end
-          end
-        end
-
-        def execute_query(client, params)
-          # debug logs may help to check what query shape the plugin is sending to ES
-          @logger.debug("Executing ES|QL query", query: @query, params: params)
-          client.esql_query({ body: { query: @query, params: params }, format: 'json', drop_null_columns: true })
-        end
-
-        def process_response(event, response)
-          columns = response['columns']&.freeze || []
-          values = response['values']&.freeze || []
-          if values.nil? || values.size == 0
-            @logger.debug("Empty ES|QL query result", columns: columns, values: values)
-            return
-          end
-
-          # this shouldn't happen but just in case to avoid crashes the plugin
-          if columns.nil? || columns.size == 0
-            @logger.error("No columns exist but received values", columns: columns, values: values)
-            return
-          end
-
-          event.set("[@metadata][total_values]", values.size)
-          @logger.debug("ES|QL query result values size ", size: values.size)
-
-          column_specs = columns.map { |column| ColumnSpec.new(column) }
-          sub_element_mark_map = mark_sub_elements(column_specs)
-          multi_fields = sub_element_mark_map.filter_map { |key, val| key.name if val == true }
-
-          @logger.debug("Multi-fields found in ES|QL result and they will not be available in the event. Please use `RENAME` command if you want to include them.", { :detected_multi_fields => multi_fields }) if multi_fields.any?
-
-          if @target_field
-            values_to_set = values.map do |row|
-              mapped_data = column_specs.each_with_index.with_object({}) do |(column, index), mapped_data|
-                # `unless value.nil?` is a part of `drop_null_columns` that if some of the columns' values are not `nil`, `nil` values appear,
-                # we should continuously filter them out to achieve full `drop_null_columns` on each individual row (ideal `LIMIT 1` result)
-                # we also exclude sub-elements of the base field
-                if row[index] && sub_element_mark_map[column] == false
-                  value_to_set = ESQL_PARSERS_BY_TYPE[column.type].call(row[index])
-                  mapped_data[column.name] = value_to_set
-                end
-              end
-              generate_nested_structure(mapped_data) unless mapped_data.empty?
-            end
-            event.set("[#{@target_field}]", values_to_set)
-          else
-            column_specs.zip(values.first).each do |(column, value) |
-              if value && sub_element_mark_map[column] == false
-                value_to_set = ESQL_PARSERS_BY_TYPE[column.type].call(value)
-                event.set(column.field_reference, value_to_set)
-              end
-            end
-          end
-        end
-
-        def inform_warning(response)
-          return unless (warning = response&.headers&.dig('warning'))
-          @logger.warn("ES|QL executor received warning", { message: warning })
-        end
-
-        # Transforms dotted keys to nested JSON shape
-        # @param dot_keyed_hash [Hash] whose keys are dotted (example 'a.b.c.d': 'val')
-        # @return [Hash] whose keys are nested with value mapped ({'a':{'b':{'c':{'d':'val'}}}})
-        def generate_nested_structure(dot_keyed_hash)
-          dot_keyed_hash.each_with_object({}) do |(key, value), result|
-            key_parts = key.to_s.split('.')
-            *path, leaf = key_parts
-            leaf_scope = path.inject(result) { |scope, part| scope[part] ||= {} }
-            leaf_scope[leaf] = value
-          end
-        end
-
-        # Determines whether each column in a collection is a nested sub-element (e.g "user.age")
-        # of another column in the same collection (e.g "user").
-        #
-        # @param columns [Array<ColumnSpec>] An array of objects with a `name` attribute representing field paths.
-        # @return [Hash<ColumnSpec, Boolean>] A hash mapping each column to `true` if it is a sub-element of another field, `false` otherwise.
-        # Time complexity: (O(NlogN+N*K)) where K is the number of conflict depth
-        # without (`prefix_set`) memoization, it would be O(N^2)
-        def mark_sub_elements(columns)
-          # Sort columns by name length (ascending)
-          sorted_columns = columns.sort_by { |c| c.name.length }
-          prefix_set = Set.new # memoization set
-
-          sorted_columns.each_with_object({}) do |column, memo|
-            # Split the column name into parts (e.g., "user.profile.age" → ["user", "profile", "age"])
-            parts = column.name.split('.')
-
-            # Generate all possible parent prefixes (e.g., "user", "user.profile")
-            # and check if any parent prefix exists in the set
-            parent_prefixes = (0...parts.size - 1).map { |i| parts[0..i].join('.') }
-            memo[column] = parent_prefixes.any? { |prefix| prefix_set.include?(prefix) }
-            prefix_set.add(column.name)
-          end
-        end
-      end
-
-      # Class representing a column specification in the ESQL response['columns']
-      # The class's main purpose is to provide a structure for the event key
-      # columns is an array with `name` and `type` pair (example: `{"name"=>"@timestamp", "type"=>"date"}`)
-      # @attr_reader :name [String] The name of the column
-      # @attr_reader :type [String] The type of the column
-      class ColumnSpec
-        attr_reader :name, :type
-
-        def initialize(spec)
-          @name = isolate(spec.fetch('name'))
-          @type = isolate(spec.fetch('type'))
-        end
-
-        def field_reference
-          @_field_reference ||= '[' + name.gsub('.', '][') + ']'
-        end
-
-        private
-        def isolate(value)
-          value.frozen? ? value : value.clone.freeze
-        end
-      end
-    end
-  end
-end
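Two of the removed EsqlExecutor helpers are easiest to follow by example: `generate_nested_structure` folds the dotted column names an ES|QL response carries into a nested hash (used per row when `target` is set), and `ColumnSpec#field_reference` turns the same dotted name into a Logstash field reference (used when `target` is unset). A small standalone sketch (plain Ruby; the sample column names are illustrative only):

# Dotted keys -> nested hash, e.g. 'a.b.c' => v becomes {"a"=>{"b"=>{"c"=>v}}}.
def generate_nested_structure(dot_keyed_hash)
  dot_keyed_hash.each_with_object({}) do |(key, value), result|
    *path, leaf = key.to_s.split('.')
    leaf_scope = path.inject(result) { |scope, part| scope[part] ||= {} }
    leaf_scope[leaf] = value
  end
end

p generate_nested_structure('user.name' => 'kibana', 'user.age' => 4, 'id' => 1)
# => {"user"=>{"name"=>"kibana", "age"=>4}, "id"=>1}

# Dotted column name -> Logstash field reference, as in ColumnSpec#field_reference.
def field_reference(name)
  '[' + name.gsub('.', '][') + ']'
end

p field_reference('user.age')  # => "[user][age]"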
--- a/data/spec/filters/elasticsearch_dsl_spec.rb
+++ /dev/null
@@ -1,372 +0,0 @@
-# encoding: utf-8
-require "logstash/devutils/rspec/spec_helper"
-require "logstash/filters/elasticsearch"
-
-describe LogStash::Filters::Elasticsearch::DslExecutor do
-  let(:client) { instance_double(LogStash::Filters::ElasticsearchClient) }
-  let(:logger) { double("logger") }
-  let(:plugin) { LogStash::Filters::Elasticsearch.new(plugin_config) }
-  let(:plugin_config) do
-    {
-      "index" => "test_index",
-      "query" => "test_query",
-      "fields" => { "field1" => "field1_mapped" },
-      "result_size" => 10,
-      "docinfo_fields" => { "_id" => "doc_id" },
-      "tag_on_failure" => ["_failure"],
-      "enable_sort" => true,
-      "sort" => "@timestamp:desc",
-      "aggregation_fields" => { "agg1" => "agg1_mapped" }
-    }
-  end
-  let(:dsl_executor) { described_class.new(plugin, logger) }
-  let(:event) { LogStash::Event.new({}) }
-
-  describe "#initialize" do
-    it "initializes instance variables correctly" do
-      expect(dsl_executor.instance_variable_get(:@index)).to eq("test_index")
-      expect(dsl_executor.instance_variable_get(:@query)).to eq("test_query")
-      expect(dsl_executor.instance_variable_get(:@query_dsl)).to eq(nil)
-      expect(dsl_executor.instance_variable_get(:@fields)).to eq({ "field1" => "field1_mapped" })
-      expect(dsl_executor.instance_variable_get(:@result_size)).to eq(10)
-      expect(dsl_executor.instance_variable_get(:@docinfo_fields)).to eq({ "_id" => "doc_id" })
-      expect(dsl_executor.instance_variable_get(:@tag_on_failure)).to eq(["_failure"])
-      expect(dsl_executor.instance_variable_get(:@enable_sort)).to eq(true)
-      expect(dsl_executor.instance_variable_get(:@sort)).to eq("@timestamp:desc")
-      expect(dsl_executor.instance_variable_get(:@aggregation_fields)).to eq({ "agg1" => "agg1_mapped" })
-      expect(dsl_executor.instance_variable_get(:@logger)).to eq(logger)
-      expect(dsl_executor.instance_variable_get(:@event_decorator)).not_to be_nil
-    end
-  end
-
-  describe "data fetch" do
-    let(:plugin_config) do
-      {
-        "hosts" => ["localhost:9200"],
-        "query" => "response: 404",
-        "fields" => { "response" => "code" },
-        "docinfo_fields" => { "_index" => "es_index" },
-        "aggregation_fields" => { "bytes_avg" => "bytes_avg_ls_field" }
-      }
-    end
-
-    let(:response) do
-      LogStash::Json.load(File.read(File.join(File.dirname(__FILE__), "fixtures", "request_x_1.json")))
-    end
-
-    let(:client) { double(:client) }
-
-    before(:each) do
-      allow(LogStash::Filters::ElasticsearchClient).to receive(:new).and_return(client)
-      if defined?(Elastic::Transport)
-        allow(client).to receive(:es_transport_client_type).and_return('elastic_transport')
-      else
-        allow(client).to receive(:es_transport_client_type).and_return('elasticsearch_transport')
-      end
-      allow(client).to receive(:search).and_return(response)
-      allow(plugin).to receive(:test_connection!)
-      allow(plugin).to receive(:setup_serverless)
-      plugin.register
-    end
-
-    after(:each) do
-      Thread.current[:filter_elasticsearch_client] = nil
-    end
-
-    it "should enhance the current event with new data" do
-      plugin.filter(event)
-      expect(event.get("code")).to eq(404)
-      expect(event.get("es_index")).to eq("logstash-2014.08.26")
-      expect(event.get("bytes_avg_ls_field")["value"]).to eq(294)
-    end
-
-    it "should receive all necessary params to perform the search" do
-      expect(client).to receive(:search).with({:q=>"response: 404", :size=>1, :index=>"", :sort=>"@timestamp:desc"})
-      plugin.filter(event)
-    end
-
-    context "when asking to hit specific index" do
-
-      let(:plugin_config) do
-        {
-          "index" => "foo*",
-          "hosts" => ["localhost:9200"],
-          "query" => "response: 404",
-          "fields" => { "response" => "code" }
-        }
-      end
-
-      it "should receive all necessary params to perform the search" do
-        expect(client).to receive(:search).with({:q=>"response: 404", :size=>1, :index=>"foo*", :sort=>"@timestamp:desc"})
-        plugin.filter(event)
-      end
-    end
-
-    context "when asking for more than one result" do
-
-      let(:plugin_config) do
-        {
-          "hosts" => ["localhost:9200"],
-          "query" => "response: 404",
-          "fields" => { "response" => "code" },
-          "result_size" => 10
-        }
-      end
-
-      let(:response) do
-        LogStash::Json.load(File.read(File.join(File.dirname(__FILE__), "fixtures", "request_x_10.json")))
-      end
-
-      it "should enhance the current event with new data" do
-        plugin.filter(event)
-        expect(event.get("code")).to eq([404]*10)
-      end
-    end
-
-    context 'when Elasticsearch 7.x gives us a totals object instead of an integer' do
-      let(:plugin_config) do
-        {
-          "hosts" => ["localhost:9200"],
-          "query" => "response: 404",
-          "fields" => { "response" => "code" },
-          "result_size" => 10
-        }
-      end
-
-      let(:response) do
-        LogStash::Json.load(File.read(File.join(File.dirname(__FILE__), "fixtures", "elasticsearch_7.x_hits_total_as_object.json")))
-      end
-
-      it "should enhance the current event with new data" do
-        plugin.filter(event)
-        expect(event.get("[@metadata][total_hits]")).to eq(13476)
-      end
-    end
-
-    context "if something wrong happen during connection" do
-
-      before(:each) do
-        allow(LogStash::Filters::ElasticsearchClient).to receive(:new).and_return(client)
-        allow(client).to receive(:search).and_raise("connection exception")
-        plugin.register
-      end
-
-      it "tag the event as something happened, but still deliver it" do
-        expect(plugin.logger).to receive(:warn)
-        plugin.filter(event)
-        expect(event.to_hash["tags"]).to include("_elasticsearch_lookup_failure")
-      end
-    end
-
-    # Tagging test for positive results
-    context "Tagging should occur if query returns results" do
-      let(:plugin_config) do
-        {
-          "index" => "foo*",
-          "hosts" => ["localhost:9200"],
-          "query" => "response: 404",
-          "add_tag" => ["tagged"]
-        }
-      end
-
-      let(:response) do
-        LogStash::Json.load(File.read(File.join(File.dirname(__FILE__), "fixtures", "request_x_10.json")))
-      end
-
-      it "should tag the current event if results returned" do
-        plugin.filter(event)
-        expect(event.to_hash["tags"]).to include("tagged")
-      end
-    end
-
-    context "an aggregation search with size 0 that matches" do
-      let(:plugin_config) do
-        {
-          "index" => "foo*",
-          "hosts" => ["localhost:9200"],
-          "query" => "response: 404",
-          "add_tag" => ["tagged"],
-          "result_size" => 0,
-          "aggregation_fields" => { "bytes_avg" => "bytes_avg_ls_field" }
-        }
-      end
-
-      let(:response) do
-        LogStash::Json.load(File.read(File.join(File.dirname(__FILE__), "fixtures", "request_size0_agg.json")))
-      end
-
-      it "should tag the current event" do
-        plugin.filter(event)
-        expect(event.get("tags")).to include("tagged")
-        expect(event.get("bytes_avg_ls_field")["value"]).to eq(294)
-      end
-    end
-
-    # Tagging test for negative results
-    context "Tagging should not occur if query has no results" do
-      let(:plugin_config) do
-        {
-          "index" => "foo*",
-          "hosts" => ["localhost:9200"],
-          "query" => "response: 404",
-          "add_tag" => ["tagged"]
-        }
-      end
-
-      let(:response) do
-        LogStash::Json.load(File.read(File.join(File.dirname(__FILE__), "fixtures", "request_error.json")))
-      end
-
-      it "should not tag the current event" do
-        plugin.filter(event)
-        expect(event.to_hash["tags"]).to_not include("tagged")
-      end
-    end
-    context "testing a simple query template" do
-      let(:plugin_config) do
-        {
-          "hosts" => ["localhost:9200"],
-          "query_template" => File.join(File.dirname(__FILE__), "fixtures", "query_template.json"),
-          "fields" => { "response" => "code" },
-          "result_size" => 1
-        }
-      end
-
-      let(:response) do
-        LogStash::Json.load(File.read(File.join(File.dirname(__FILE__), "fixtures", "request_x_1.json")))
-      end
-
-      it "should enhance the current event with new data" do
-        plugin.filter(event)
-        expect(event.get("code")).to eq(404)
-      end
-
-    end
-
-    context "testing a simple index substitution" do
-      let(:event) {
-        LogStash::Event.new(
-          {
-            "subst_field" => "subst_value"
-          }
-        )
-      }
-      let(:plugin_config) do
-        {
-          "index" => "foo_%{subst_field}*",
-          "hosts" => ["localhost:9200"],
-          "query" => "response: 404",
-          "fields" => { "response" => "code" }
-        }
-      end
-
-      it "should receive substituted index name" do
-        expect(client).to receive(:search).with({:q => "response: 404", :size => 1, :index => "foo_subst_value*", :sort => "@timestamp:desc"})
-        plugin.filter(event)
-      end
-    end
-
-    context "if query result errored but no exception is thrown" do
-      let(:response) do
-        LogStash::Json.load(File.read(File.join(File.dirname(__FILE__), "fixtures", "request_error.json")))
-      end
-
-      before(:each) do
-        allow(LogStash::Filters::ElasticsearchClient).to receive(:new).and_return(client)
-        allow(client).to receive(:search).and_return(response)
-        plugin.register
-      end
-
-      it "tag the event as something happened, but still deliver it" do
-        expect(plugin.logger).to receive(:warn)
-        plugin.filter(event)
-        expect(event.to_hash["tags"]).to include("_elasticsearch_lookup_failure")
-      end
-    end
-
-    context 'with client-level retries' do
-      let(:plugin_config) do
-        super().merge(
-          "retry_on_failure" => 3,
-          "retry_on_status" => [500]
-        )
-      end
-    end
-
-    context "with custom headers" do
-      let(:plugin_config) do
-        {
-          "query" => "*",
-          "custom_headers" => { "Custom-Header-1" => "Custom Value 1", "Custom-Header-2" => "Custom Value 2" }
-        }
-      end
-
-      let(:plugin) { LogStash::Filters::Elasticsearch.new(plugin_config) }
-      let(:client_double) { double("client") }
-      let(:transport_double) { double("transport", options: { transport_options: { headers: plugin_config["custom_headers"] } }) }
-
-      before do
-        allow(plugin).to receive(:get_client).and_return(client_double)
-        if defined?(Elastic::Transport)
-          allow(client_double).to receive(:es_transport_client_type).and_return('elastic_transport')
-        else
-          allow(client_double).to receive(:es_transport_client_type).and_return('elasticsearch_transport')
-        end
-        allow(client_double).to receive(:client).and_return(transport_double)
-      end
-
-      it "sets custom headers" do
-        plugin.register
-        client = plugin.send(:get_client).client
-        expect(client.options[:transport_options][:headers]).to match(hash_including(plugin_config["custom_headers"]))
-      end
-    end
-
-    context "if query is on nested field" do
-      let(:plugin_config) do
-        {
-          "hosts" => ["localhost:9200"],
-          "query" => "response: 404",
-          "fields" => [ ["[geoip][ip]", "ip_address"] ]
-        }
-      end
-
-      it "should enhance the current event with new data" do
-        plugin.filter(event)
-        expect(event.get("ip_address")).to eq("66.249.73.185")
-      end
-
-    end
-  end
-
-  describe "#set_to_event_target" do
-    it 'is ready to set to `target`' do
-      expect(dsl_executor.apply_target("path")).to eq("path")
-    end
-
-    context "when `@target` is nil, default behavior" do
-      it "sets the value directly to the top-level event field" do
-        dsl_executor.send(:set_to_event_target, event, "new_field", %w[value1 value2])
-        expect(event.get("new_field")).to eq(%w[value1 value2])
-      end
-    end
-
-    context "when @target is defined" do
-      let(:plugin_config) {
-        super().merge({ "target" => "nested" })
-      }
-
-      it "creates a nested structure under the target field" do
-        dsl_executor.send(:set_to_event_target, event, "new_field", %w[value1 value2])
-        expect(event.get("nested")).to eq({ "new_field" => %w[value1 value2] })
-      end
-
-      it "overwrites existing target field with new data" do
-        event.set("nested", { "existing_field" => "existing_value", "new_field" => "value0" })
-        dsl_executor.send(:set_to_event_target, event, "new_field", ["value1"])
-        expect(event.get("nested")).to eq({ "existing_field" => "existing_value", "new_field" => ["value1"] })
-      end
-    end
-  end
-
-end
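The "#set_to_event_target" examples at the end exercise the key-mapping rule from DslExecutor#apply_target: with a `target` configured, looked-up fields land under "[<target>][<field>]"; without one, they land at the top level. A plain-Ruby sketch of just that rule (here `target` stands in for the plugin's `target` option):

def apply_target(target, path)
  # Mirrors the two apply_target definitions in the removed DslExecutor.
  target ? "[#{target}][#{path}]" : path
end

p apply_target(nil, "new_field")       # => "new_field"
p apply_target("nested", "new_field")  # => "[nested][new_field]"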