logstash-filter-aggregate 2.5.1 → 2.5.2
- checksums.yaml +4 -4
- data/BUILD.md +81 -81
- data/CHANGELOG.md +71 -65
- data/CONTRIBUTORS +14 -14
- data/Gemfile +2 -2
- data/LICENSE +13 -13
- data/NOTICE.txt +4 -4
- data/README.md +327 -296
- data/lib/logstash/filters/aggregate.rb +685 -642
- data/logstash-filter-aggregate.gemspec +26 -26
- data/spec/filters/aggregate_spec.rb +315 -301
- data/spec/filters/aggregate_spec_helper.rb +63 -63
- metadata +2 -2
logstash-filter-aggregate.gemspec
@@ -1,26 +1,26 @@
-Gem::Specification.new do |s|
-  s.name = 'logstash-filter-aggregate'
-  s.version = '2.5.1'
-  s.licenses = ['Apache License (2.0)']
-  s.summary = 'The aim of this filter is to aggregate information available among several events (typically log lines) belonging to a same task, and finally push aggregated information into final task event.'
-  s.description = 'This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program'
-  s.authors = ['Elastic', 'Fabien Baligand']
-  s.email = 'info@elastic.co'
-  s.homepage = 'https://github.com/logstash-plugins/logstash-filter-aggregate'
-  s.require_paths = ['lib']
-
-  # Files
-  s.files = Dir['lib/**/*','spec/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.txt']
-
-  # Tests
-  s.test_files = s.files.grep(%r{^(test|spec|features)/})
-
-  # Special flag to let us know this is actually a logstash plugin
-  s.metadata = { 'logstash_plugin' => 'true', 'logstash_group' => 'filter' }
-
-  # Gem dependencies
-  s.add_runtime_dependency 'logstash-core-plugin-api', '>= 1.60', '<= 2.99'
-
-  # Gem test dependencies
-  s.add_development_dependency 'logstash-devutils'
-end
+Gem::Specification.new do |s|
+  s.name = 'logstash-filter-aggregate'
+  s.version = '2.5.2'
+  s.licenses = ['Apache License (2.0)']
+  s.summary = 'The aim of this filter is to aggregate information available among several events (typically log lines) belonging to a same task, and finally push aggregated information into final task event.'
+  s.description = 'This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program'
+  s.authors = ['Elastic', 'Fabien Baligand']
+  s.email = 'info@elastic.co'
+  s.homepage = 'https://github.com/logstash-plugins/logstash-filter-aggregate'
+  s.require_paths = ['lib']
+
+  # Files
+  s.files = Dir['lib/**/*','spec/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.txt']
+
+  # Tests
+  s.test_files = s.files.grep(%r{^(test|spec|features)/})
+
+  # Special flag to let us know this is actually a logstash plugin
+  s.metadata = { 'logstash_plugin' => 'true', 'logstash_group' => 'filter' }
+
+  # Gem dependencies
+  s.add_runtime_dependency 'logstash-core-plugin-api', '>= 1.60', '<= 2.99'
+
+  # Gem test dependencies
+  s.add_development_dependency 'logstash-devutils'
+end
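The only substantive change in the gemspec is the version bump from 2.5.1 to 2.5.2. For context on the summary above, the filter correlates several events that share a task id and folds them into a shared map. A minimal pipeline sketch, hedged: the event_type field and the routing conditionals are illustrative assumptions, not part of this diff, while the aggregate options and the taskid/duration/sql_duration names come from the specs below:

    filter {
      if [event_type] == "start" {
        aggregate {
          task_id    => "%{taskid}"
          map_action => "create"
          code       => "map['sql_duration'] = 0"   # initialize the shared map for this task
        }
      } else if [event_type] == "update" {
        aggregate {
          task_id    => "%{taskid}"
          map_action => "update"
          code       => "map['sql_duration'] += event.get('duration')"
        }
      } else if [event_type] == "end" {
        aggregate {
          task_id     => "%{taskid}"
          map_action  => "update"
          code        => "event.set('sql_duration', map['sql_duration'])"  # copy the aggregate onto the final event
          end_of_task => true   # drop the map once the task completes
        }
      }
    }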
spec/filters/aggregate_spec.rb
@@ -1,301 +1,315 @@
-# encoding: utf-8
-require "logstash/devutils/rspec/spec_helper"
-require "logstash/filters/aggregate"
-require_relative "aggregate_spec_helper"
-
-describe LogStash::Filters::Aggregate do
-
-  before(:each) do
-    reset_static_variables()
-    @start_filter = setup_filter({ "map_action" => "create", "code" => "map['sql_duration'] = 0" })
-    @update_filter = setup_filter({ "map_action" => "update", "code" => "map['sql_duration'] += event.get('duration')" })
-    @end_filter = setup_filter({"timeout_task_id_field" => "my_id", "push_map_as_event_on_timeout" => true, "map_action" => "update", "code" => "event.set('sql_duration', map['sql_duration'])", "end_of_task" => true, "timeout" => 5, "timeout_code" => "event.set('test', 'testValue')", "timeout_tags" => ["tag1", "tag2"] })
-  end
-
[… removed lines 15-301 of the 2.5.1 spec are truncated in the source rendering; the surviving fragments match the 2.5.2 side below …]
+# encoding: utf-8
+require "logstash/devutils/rspec/spec_helper"
+require "logstash/filters/aggregate"
+require_relative "aggregate_spec_helper"
+
+describe LogStash::Filters::Aggregate do
+
+  before(:each) do
+    reset_static_variables()
+    @start_filter = setup_filter({ "map_action" => "create", "code" => "map['sql_duration'] = 0" })
+    @update_filter = setup_filter({ "map_action" => "update", "code" => "map['sql_duration'] += event.get('duration')" })
+    @end_filter = setup_filter({"timeout_task_id_field" => "my_id", "push_map_as_event_on_timeout" => true, "map_action" => "update", "code" => "event.set('sql_duration', map['sql_duration'])", "end_of_task" => true, "timeout" => 5, "timeout_code" => "event.set('test', 'testValue')", "timeout_tags" => ["tag1", "tag2"] })
+  end
+
+  context "Validation" do
+    describe "and register a filter with a task_id without dynamic expression" do
+      it "raises a LogStash::ConfigurationError" do
+        expect {
+          setup_filter({ "code" => "", "task_id" => "static_value" })
+        }.to raise_error(LogStash::ConfigurationError)
+      end
+    end
+  end
+
+  context "Start event" do
+    describe "and receiving an event without task_id" do
+      it "does not record it" do
+        @start_filter.filter(event())
+        expect(aggregate_maps["%{taskid}"]).to be_empty
+      end
+    end
+    describe "and receiving an event with task_id" do
+      it "records it" do
+        event = start_event("taskid" => "id123")
+        @start_filter.filter(event)
+
+        expect(aggregate_maps["%{taskid}"].size).to eq(1)
+        expect(aggregate_maps["%{taskid}"]["id123"]).not_to be_nil
+        expect(aggregate_maps["%{taskid}"]["id123"].creation_timestamp).to be >= event.timestamp.time
+        expect(aggregate_maps["%{taskid}"]["id123"].map["sql_duration"]).to eq(0)
+      end
+    end
+
+    describe "and receiving two 'start events' for the same task_id" do
+      it "keeps the first one and does nothing with the second one" do
+
+        first_start_event = start_event("taskid" => "id124")
+        @start_filter.filter(first_start_event)
+
+        first_update_event = update_event("taskid" => "id124", "duration" => 2)
+        @update_filter.filter(first_update_event)
+
+        sleep(1)
+        second_start_event = start_event("taskid" => "id124")
+        @start_filter.filter(second_start_event)
+
+        expect(aggregate_maps["%{taskid}"].size).to eq(1)
+        expect(aggregate_maps["%{taskid}"]["id124"].creation_timestamp).to be < second_start_event.timestamp.time
+        expect(aggregate_maps["%{taskid}"]["id124"].map["sql_duration"]).to eq(first_update_event.get("duration"))
+      end
+    end
+  end
+
+  context "End event" do
+    describe "receiving an event without a previous 'start event'" do
+      describe "but without a previous 'start event'" do
+        it "does nothing with the event" do
+          end_event = end_event("taskid" => "id124")
+          @end_filter.filter(end_event)
+
+          expect(aggregate_maps["%{taskid}"]).to be_empty
+          expect(end_event.get("sql_duration")).to be_nil
+        end
+      end
+    end
+  end
+
+  context "Start/end events interaction" do
+    describe "receiving a 'start event'" do
+      before(:each) do
+        @task_id_value = "id_123"
+        @start_event = start_event({"taskid" => @task_id_value})
+        @start_filter.filter(@start_event)
+        expect(aggregate_maps["%{taskid}"].size).to eq(1)
+      end
+
+      describe "and receiving an end event" do
+        describe "and without an id" do
+          it "does nothing" do
+            end_event = end_event()
+            @end_filter.filter(end_event)
+            expect(aggregate_maps["%{taskid}"].size).to eq(1)
+            expect(end_event.get("sql_duration")).to be_nil
+          end
+        end
+
+        describe "and an id different from the one of the 'start event'" do
+          it "does nothing" do
+            different_id_value = @task_id_value + "_different"
+            @end_filter.filter(end_event("taskid" => different_id_value))
+
+            expect(aggregate_maps["%{taskid}"].size).to eq(1)
+            expect(aggregate_maps["%{taskid}"][@task_id_value]).not_to be_nil
+          end
+        end
+
+        describe "and the same id of the 'start event'" do
+          it "add 'sql_duration' field to the end event and deletes the aggregate map associated to taskid" do
+            expect(aggregate_maps["%{taskid}"].size).to eq(1)
+            expect(aggregate_maps["%{taskid}"][@task_id_value].map["sql_duration"]).to eq(0)
+
+            @update_filter.filter(update_event("taskid" => @task_id_value, "duration" => 2))
+            expect(aggregate_maps["%{taskid}"][@task_id_value].map["sql_duration"]).to eq(2)
+
+            end_event = end_event("taskid" => @task_id_value)
+            @end_filter.filter(end_event)
+
+            expect(aggregate_maps["%{taskid}"]).to be_empty
+            expect(end_event.get("sql_duration")).to eq(2)
+          end
+
+        end
+      end
+    end
+  end
+
+  context "Event with integer task id" do
+    it "works as well as with a string task id" do
+      start_event = start_event("taskid" => 124)
+      @start_filter.filter(start_event)
+      expect(aggregate_maps["%{taskid}"].size).to eq(1)
+    end
+  end
+
+  context "Event which causes an exception when code call" do
+    it "intercepts exception, logs the error and tags the event with '_aggregateexception'" do
+      @start_filter = setup_filter({ "code" => "fail 'Test'" })
+      start_event = start_event("taskid" => "id124")
+      @start_filter.filter(start_event)
+
+      expect(start_event.get("tags")).to eq(["_aggregateexception"])
+    end
+  end
+
+  context "flush call" do
+    before(:each) do
+      @end_filter.timeout = 1
+      expect(@end_filter.timeout).to eq(1)
+      @task_id_value = "id_123"
+      @start_event = start_event({"taskid" => @task_id_value})
+      @start_filter.filter(@start_event)
+      expect(aggregate_maps["%{taskid}"].size).to eq(1)
+    end
+
+    describe "no timeout defined in none filter" do
+      it "defines a default timeout on a default filter" do
+        reset_timeout_management()
+        expect(taskid_eviction_instance).to be_nil
+        @end_filter.flush()
+        expect(taskid_eviction_instance).to eq(@end_filter)
+        expect(@end_filter.timeout).to eq(LogStash::Filters::Aggregate::DEFAULT_TIMEOUT)
+      end
+    end
+
+    describe "timeout is defined on another filter" do
+      it "taskid eviction_instance is not updated" do
+        expect(taskid_eviction_instance).not_to be_nil
+        @start_filter.flush()
+        expect(taskid_eviction_instance).not_to eq(@start_filter)
+        expect(taskid_eviction_instance).to eq(@end_filter)
+      end
+    end
+
+    describe "no timeout defined on the filter" do
+      it "event is not removed" do
+        sleep(2)
+        @start_filter.flush()
+        expect(aggregate_maps["%{taskid}"].size).to eq(1)
+      end
+    end
+
+    describe "timeout defined on the filter" do
+      it "event is not removed if not expired" do
+        entries = @end_filter.flush()
+        expect(aggregate_maps["%{taskid}"].size).to eq(1)
+        expect(entries).to be_empty
+      end
+      it "removes event if expired and creates a new timeout event" do
+        sleep(2)
+        entries = @end_filter.flush()
+        expect(aggregate_maps["%{taskid}"]).to be_empty
+        expect(entries.size).to eq(1)
+        expect(entries[0].get("my_id")).to eq("id_123") # task id
+        expect(entries[0].get("sql_duration")).to eq(0) # Aggregation map
+        expect(entries[0].get("test")).to eq("testValue") # Timeout code
+        expect(entries[0].get("tags")).to eq(["tag1", "tag2"]) # Timeout tags
+      end
+    end
+
+    describe "timeout defined on another filter with another task_id pattern" do
+      it "does not remove event" do
+        another_filter = setup_filter({ "task_id" => "%{another_taskid}", "code" => "", "timeout" => 1 })
+        sleep(2)
+        entries = another_filter.flush()
+        expect(aggregate_maps["%{taskid}"].size).to eq(1)
+        expect(entries).to be_empty
+      end
+    end
+  end
+
+  context "aggregate_maps_path option is defined, " do
+    describe "close event append then register event append, " do
+      it "stores aggregate maps to configured file and then loads aggregate maps from file" do
+        store_file = "aggregate_maps"
+        expect(File.exist?(store_file)).to be false
+
+        one_filter = setup_filter({ "task_id" => "%{one_special_field}", "code" => ""})
+        store_filter = setup_filter({ "code" => "map['sql_duration'] = 0", "aggregate_maps_path" => store_file })
+        expect(aggregate_maps["%{one_special_field}"]).to be_empty
+        expect(aggregate_maps["%{taskid}"]).to be_empty
+
+        start_event = start_event("taskid" => 124)
+        filter = store_filter.filter(start_event)
+        expect(aggregate_maps["%{taskid}"].size).to eq(1)
+
+        @end_filter.close()
+        expect(aggregate_maps).not_to be_empty
+
+        store_filter.close()
+        expect(File.exist?(store_file)).to be true
+        expect(aggregate_maps).to be_empty
+
+        one_filter = setup_filter({ "task_id" => "%{one_special_field}", "code" => ""})
+        store_filter = setup_filter({ "code" => "map['sql_duration'] = 0", "aggregate_maps_path" => store_file })
+        expect(File.exist?(store_file)).to be false
+        expect(aggregate_maps["%{one_special_field}"]).to be_empty
+        expect(aggregate_maps["%{taskid}"].size).to eq(1)
+      end
+    end
+
+    describe "when aggregate_maps_path option is defined in 2 instances, " do
+      it "raises Logstash::ConfigurationError" do
+        expect {
+          setup_filter({ "code" => "", "aggregate_maps_path" => "aggregate_maps1" })
+          setup_filter({ "code" => "", "aggregate_maps_path" => "aggregate_maps2" })
+        }.to raise_error(LogStash::ConfigurationError)
+      end
+    end
+  end
+
+  context "Logstash reload occurs, " do
+    describe "close method is called, " do
+      it "reinitializes static variables" do
+        @end_filter.close()
+        expect(aggregate_maps).to be_empty
+        expect(taskid_eviction_instance).to be_nil
+        expect(static_close_instance).not_to be_nil
+        expect(aggregate_maps_path_set).to be false
+
+        @end_filter.register()
+        expect(static_close_instance).to be_nil
+      end
+    end
+  end
+
+  context "push_previous_map_as_event option is defined, " do
+    describe "when push_previous_map_as_event option is activated on another filter with same task_id pattern" do
+      it "should throw a LogStash::ConfigurationError" do
+        expect {
+          setup_filter({"code" => "map['taskid'] = event.get('taskid')", "push_previous_map_as_event" => true})
+        }.to raise_error(LogStash::ConfigurationError)
+      end
+    end
+
+    describe "when a new task id is detected, " do
+      it "should push previous map as new event" do
+        push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "map['ppm_id'] = event.get('ppm_id')", "push_previous_map_as_event" => true, "timeout" => 5, "timeout_task_id_field" => "timeout_task_id_field" })
+        push_filter.filter(event({"ppm_id" => "1"})) { |yield_event| fail "task 1 shouldn't have yield event" }
+        push_filter.filter(event({"ppm_id" => "2"})) { |yield_event| expect(yield_event.get("ppm_id")).to eq("1") ; expect(yield_event.get("timeout_task_id_field")).to eq("1") }
+        expect(aggregate_maps["%{ppm_id}"].size).to eq(1)
+      end
+    end
+
+    describe "when timeout happens, " do
+      it "flush method should return last map as new event" do
+        push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "map['ppm_id'] = event.get('ppm_id')", "push_previous_map_as_event" => true, "timeout" => 1, "timeout_code" => "event.set('test', 'testValue')" })
+        push_filter.filter(event({"ppm_id" => "1"}))
+        sleep(2)
+        events_to_flush = push_filter.flush()
+        expect(events_to_flush).not_to be_nil
+        expect(events_to_flush.size).to eq(1)
+        expect(events_to_flush[0].get("ppm_id")).to eq("1")
+        expect(events_to_flush[0].get('test')).to eq("testValue")
+        expect(aggregate_maps["%{ppm_id}"].size).to eq(0)
+      end
+    end
+
+    describe "when Logstash shutdown happens, " do
+      it "flush method should return last map as new event even if timeout has not occured" do
+        push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "", "push_previous_map_as_event" => true, "timeout" => 4 })
+        push_filter.filter(event({"ppm_id" => "1"}))
+        events_to_flush = push_filter.flush({:final=>false})
+        expect(events_to_flush).to be_empty
+        expect(aggregate_maps["%{ppm_id}"].size).to eq(1)
+        events_to_flush = push_filter.flush({:final=>true})
+        expect(events_to_flush).not_to be_nil
+        expect(events_to_flush.size).to eq(1)
+        expect(events_to_flush[0].get("tags")).to eq(["_aggregatefinalflush"])
+        expect(aggregate_maps["%{ppm_id}"].size).to eq(0)
+      end
+    end
+  end
+
+
+end
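The new specs exercise the timeout-driven options of the 2.5.x line. As a hedged usage sketch, a filter configured like the specs' @end_filter would, once its 5-second timeout expires for a task, emit a synthetic event carrying the task id in my_id, the aggregated sql_duration, whatever timeout_code sets, and the timeout_tags. Option values follow the spec setup; task_id is the default pattern the spec helper appears to supply rather than an explicit option in the spec itself:

    aggregate {
      task_id                      => "%{taskid}"
      code                         => "event.set('sql_duration', map['sql_duration'])"
      map_action                   => "update"
      end_of_task                  => true
      timeout                      => 5
      push_map_as_event_on_timeout => true     # emit a new event when a task expires
      timeout_task_id_field        => "my_id"  # store the expired task id on that event
      timeout_code                 => "event.set('test', 'testValue')"
      timeout_tags                 => ["tag1", "tag2"]
    }

The "removes event if expired and creates a new timeout event" spec asserts exactly this behavior, and the final-flush spec shows that on Logstash shutdown any remaining map is flushed as an event tagged `_aggregatefinalflush` even before its timeout elapses.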