logstash-filter-aggregate 2.5.1 → 2.5.2

logstash-filter-aggregate.gemspec
@@ -1,26 +1,26 @@
- Gem::Specification.new do |s|
-   s.name = 'logstash-filter-aggregate'
-   s.version = '2.5.1'
-   s.licenses = ['Apache License (2.0)']
-   s.summary = 'The aim of this filter is to aggregate information available among several events (typically log lines) belonging to a same task, and finally push aggregated information into final task event.'
-   s.description = 'This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program'
-   s.authors = ['Elastic', 'Fabien Baligand']
-   s.email = 'info@elastic.co'
-   s.homepage = 'https://github.com/logstash-plugins/logstash-filter-aggregate'
-   s.require_paths = ['lib']
-
-   # Files
-   s.files = Dir['lib/**/*','spec/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.txt']
-
-   # Tests
-   s.test_files = s.files.grep(%r{^(test|spec|features)/})
-
-   # Special flag to let us know this is actually a logstash plugin
-   s.metadata = { 'logstash_plugin' => 'true', 'logstash_group' => 'filter' }
-
-   # Gem dependencies
-   s.add_runtime_dependency 'logstash-core-plugin-api', '>= 1.60', '<= 2.99'
-
-   # Gem test dependencies
-   s.add_development_dependency 'logstash-devutils'
- end
+ Gem::Specification.new do |s|
+   s.name = 'logstash-filter-aggregate'
+   s.version = '2.5.2'
+   s.licenses = ['Apache License (2.0)']
+   s.summary = 'The aim of this filter is to aggregate information available among several events (typically log lines) belonging to a same task, and finally push aggregated information into final task event.'
+   s.description = 'This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program'
+   s.authors = ['Elastic', 'Fabien Baligand']
+   s.email = 'info@elastic.co'
+   s.homepage = 'https://github.com/logstash-plugins/logstash-filter-aggregate'
+   s.require_paths = ['lib']
+
+   # Files
+   s.files = Dir['lib/**/*','spec/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.txt']
+
+   # Tests
+   s.test_files = s.files.grep(%r{^(test|spec|features)/})
+
+   # Special flag to let us know this is actually a logstash plugin
+   s.metadata = { 'logstash_plugin' => 'true', 'logstash_group' => 'filter' }
+
+   # Gem dependencies
+   s.add_runtime_dependency 'logstash-core-plugin-api', '>= 1.60', '<= 2.99'
+
+   # Gem test dependencies
+   s.add_development_dependency 'logstash-devutils'
+ end
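
The gemspec change is limited to the version bump from 2.5.1 to 2.5.2; the functional changes live in the spec diff below, which adds a "Validation" context (a task_id without a dynamic %{...} expression now raises LogStash::ConfigurationError at register time) and extends the aggregate_maps_path test to register a second filter with a different task_id pattern. As a minimal sketch of the new validation, using the spec's own setup_filter helper from aggregate_spec_helper:

  # Rejected as of 2.5.2: task_id contains no %{...} field reference,
  # so every event would collapse into a single aggregate map.
  expect {
    setup_filter({ "code" => "", "task_id" => "static_value" })
  }.to raise_error(LogStash::ConfigurationError)

  # Still accepted: task_id is resolved per event.
  setup_filter({ "code" => "map['sql_duration'] = 0", "task_id" => "%{taskid}" })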
spec/filters/aggregate_spec.rb
@@ -1,301 +1,315 @@
- # encoding: utf-8
- require "logstash/devutils/rspec/spec_helper"
- require "logstash/filters/aggregate"
- require_relative "aggregate_spec_helper"
-
- describe LogStash::Filters::Aggregate do
-
-   before(:each) do
-     reset_static_variables()
-     @start_filter = setup_filter({ "map_action" => "create", "code" => "map['sql_duration'] = 0" })
-     @update_filter = setup_filter({ "map_action" => "update", "code" => "map['sql_duration'] += event.get('duration')" })
-     @end_filter = setup_filter({"timeout_task_id_field" => "my_id", "push_map_as_event_on_timeout" => true, "map_action" => "update", "code" => "event.set('sql_duration', map['sql_duration'])", "end_of_task" => true, "timeout" => 5, "timeout_code" => "event.set('test', 'testValue')", "timeout_tags" => ["tag1", "tag2"] })
-   end
-
-   context "Start event" do
-     describe "and receiving an event without task_id" do
-       it "does not record it" do
-         @start_filter.filter(event())
-         expect(aggregate_maps["%{taskid}"]).to be_empty
-       end
-     end
-     describe "and receiving an event with task_id" do
-       it "records it" do
-         event = start_event("taskid" => "id123")
-         @start_filter.filter(event)
-
-         expect(aggregate_maps["%{taskid}"].size).to eq(1)
-         expect(aggregate_maps["%{taskid}"]["id123"]).not_to be_nil
-         expect(aggregate_maps["%{taskid}"]["id123"].creation_timestamp).to be >= event.timestamp.time
-         expect(aggregate_maps["%{taskid}"]["id123"].map["sql_duration"]).to eq(0)
-       end
-     end
-
-     describe "and receiving two 'start events' for the same task_id" do
-       it "keeps the first one and does nothing with the second one" do
-
-         first_start_event = start_event("taskid" => "id124")
-         @start_filter.filter(first_start_event)
-
-         first_update_event = update_event("taskid" => "id124", "duration" => 2)
-         @update_filter.filter(first_update_event)
-
-         sleep(1)
-         second_start_event = start_event("taskid" => "id124")
-         @start_filter.filter(second_start_event)
-
-         expect(aggregate_maps["%{taskid}"].size).to eq(1)
-         expect(aggregate_maps["%{taskid}"]["id124"].creation_timestamp).to be < second_start_event.timestamp.time
-         expect(aggregate_maps["%{taskid}"]["id124"].map["sql_duration"]).to eq(first_update_event.get("duration"))
-       end
-     end
-   end
-
-   context "End event" do
-     describe "receiving an event without a previous 'start event'" do
-       describe "but without a previous 'start event'" do
-         it "does nothing with the event" do
-           end_event = end_event("taskid" => "id124")
-           @end_filter.filter(end_event)
-
-           expect(aggregate_maps["%{taskid}"]).to be_empty
-           expect(end_event.get("sql_duration")).to be_nil
-         end
-       end
-     end
-   end
-
-   context "Start/end events interaction" do
-     describe "receiving a 'start event'" do
-       before(:each) do
-         @task_id_value = "id_123"
-         @start_event = start_event({"taskid" => @task_id_value})
-         @start_filter.filter(@start_event)
-         expect(aggregate_maps["%{taskid}"].size).to eq(1)
-       end
-
-       describe "and receiving an end event" do
-         describe "and without an id" do
-           it "does nothing" do
-             end_event = end_event()
-             @end_filter.filter(end_event)
-             expect(aggregate_maps["%{taskid}"].size).to eq(1)
-             expect(end_event.get("sql_duration")).to be_nil
-           end
-         end
-
-         describe "and an id different from the one of the 'start event'" do
-           it "does nothing" do
-             different_id_value = @task_id_value + "_different"
-             @end_filter.filter(end_event("taskid" => different_id_value))
-
-             expect(aggregate_maps["%{taskid}"].size).to eq(1)
-             expect(aggregate_maps["%{taskid}"][@task_id_value]).not_to be_nil
-           end
-         end
-
-         describe "and the same id of the 'start event'" do
-           it "add 'sql_duration' field to the end event and deletes the aggregate map associated to taskid" do
-             expect(aggregate_maps["%{taskid}"].size).to eq(1)
-             expect(aggregate_maps["%{taskid}"][@task_id_value].map["sql_duration"]).to eq(0)
-
-             @update_filter.filter(update_event("taskid" => @task_id_value, "duration" => 2))
-             expect(aggregate_maps["%{taskid}"][@task_id_value].map["sql_duration"]).to eq(2)
-
-             end_event = end_event("taskid" => @task_id_value)
-             @end_filter.filter(end_event)
-
-             expect(aggregate_maps["%{taskid}"]).to be_empty
-             expect(end_event.get("sql_duration")).to eq(2)
-           end
-
-         end
-       end
-     end
-   end
-
-   context "Event with integer task id" do
-     it "works as well as with a string task id" do
-       start_event = start_event("taskid" => 124)
-       @start_filter.filter(start_event)
-       expect(aggregate_maps["%{taskid}"].size).to eq(1)
-     end
-   end
-
-   context "Event which causes an exception when code call" do
-     it "intercepts exception, logs the error and tags the event with '_aggregateexception'" do
-       @start_filter = setup_filter({ "code" => "fail 'Test'" })
-       start_event = start_event("taskid" => "id124")
-       @start_filter.filter(start_event)
-
-       expect(start_event.get("tags")).to eq(["_aggregateexception"])
-     end
-   end
-
-   context "flush call" do
-     before(:each) do
-       @end_filter.timeout = 1
-       expect(@end_filter.timeout).to eq(1)
-       @task_id_value = "id_123"
-       @start_event = start_event({"taskid" => @task_id_value})
-       @start_filter.filter(@start_event)
-       expect(aggregate_maps["%{taskid}"].size).to eq(1)
-     end
-
-     describe "no timeout defined in none filter" do
-       it "defines a default timeout on a default filter" do
-         reset_timeout_management()
-         expect(taskid_eviction_instance).to be_nil
-         @end_filter.flush()
-         expect(taskid_eviction_instance).to eq(@end_filter)
-         expect(@end_filter.timeout).to eq(LogStash::Filters::Aggregate::DEFAULT_TIMEOUT)
-       end
-     end
-
-     describe "timeout is defined on another filter" do
-       it "taskid eviction_instance is not updated" do
-         expect(taskid_eviction_instance).not_to be_nil
-         @start_filter.flush()
-         expect(taskid_eviction_instance).not_to eq(@start_filter)
-         expect(taskid_eviction_instance).to eq(@end_filter)
-       end
-     end
-
-     describe "no timeout defined on the filter" do
-       it "event is not removed" do
-         sleep(2)
-         @start_filter.flush()
-         expect(aggregate_maps["%{taskid}"].size).to eq(1)
-       end
-     end
-
-     describe "timeout defined on the filter" do
-       it "event is not removed if not expired" do
-         entries = @end_filter.flush()
-         expect(aggregate_maps["%{taskid}"].size).to eq(1)
-         expect(entries).to be_empty
-       end
-       it "removes event if expired and creates a new timeout event" do
-         sleep(2)
-         entries = @end_filter.flush()
-         expect(aggregate_maps["%{taskid}"]).to be_empty
-         expect(entries.size).to eq(1)
-         expect(entries[0].get("my_id")).to eq("id_123") # task id
-         expect(entries[0].get("sql_duration")).to eq(0) # Aggregation map
-         expect(entries[0].get("test")).to eq("testValue") # Timeout code
-         expect(entries[0].get("tags")).to eq(["tag1", "tag2"]) # Timeout tags
-       end
-     end
-
-     describe "timeout defined on another filter with another task_id pattern" do
-       it "does not remove event" do
-         another_filter = setup_filter({ "task_id" => "%{another_taskid}", "code" => "", "timeout" => 1 })
-         sleep(2)
-         entries = another_filter.flush()
-         expect(aggregate_maps["%{taskid}"].size).to eq(1)
-         expect(entries).to be_empty
-       end
-     end
-   end
-
-   context "aggregate_maps_path option is defined, " do
-     describe "close event append then register event append, " do
-       it "stores aggregate maps to configured file and then loads aggregate maps from file" do
-         store_file = "aggregate_maps"
-         expect(File.exist?(store_file)).to be false
-
-         store_filter = setup_filter({ "code" => "map['sql_duration'] = 0", "aggregate_maps_path" => store_file })
-         expect(aggregate_maps["%{taskid}"]).to be_empty
-
-         start_event = start_event("taskid" => 124)
-         filter = store_filter.filter(start_event)
-         expect(aggregate_maps["%{taskid}"].size).to eq(1)
-
-         @end_filter.close()
-         expect(aggregate_maps).not_to be_empty
-
-         store_filter.close()
-         expect(File.exist?(store_file)).to be true
-         expect(aggregate_maps).to be_empty
-
-         store_filter = setup_filter({ "code" => "map['sql_duration'] = 0", "aggregate_maps_path" => store_file })
-         expect(File.exist?(store_file)).to be false
-         expect(aggregate_maps["%{taskid}"].size).to eq(1)
-       end
-     end
-
-     describe "when aggregate_maps_path option is defined in 2 instances, " do
-       it "raises Logstash::ConfigurationError" do
-         expect {
-           setup_filter({ "code" => "", "aggregate_maps_path" => "aggregate_maps1" })
-           setup_filter({ "code" => "", "aggregate_maps_path" => "aggregate_maps2" })
-         }.to raise_error(LogStash::ConfigurationError)
-       end
-     end
-   end
-
-   context "Logstash reload occurs, " do
-     describe "close method is called, " do
-       it "reinitializes static variables" do
-         @end_filter.close()
-         expect(aggregate_maps).to be_empty
-         expect(taskid_eviction_instance).to be_nil
-         expect(static_close_instance).not_to be_nil
-         expect(aggregate_maps_path_set).to be false
-
-         @end_filter.register()
-         expect(static_close_instance).to be_nil
-       end
-     end
-   end
-
-   context "push_previous_map_as_event option is defined, " do
-     describe "when push_previous_map_as_event option is activated on another filter with same task_id pattern" do
-       it "should throw a LogStash::ConfigurationError" do
-         expect {
-           setup_filter({"code" => "map['taskid'] = event.get('taskid')", "push_previous_map_as_event" => true})
-         }.to raise_error(LogStash::ConfigurationError)
-       end
-     end
-
-     describe "when a new task id is detected, " do
-       it "should push previous map as new event" do
-         push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "map['ppm_id'] = event.get('ppm_id')", "push_previous_map_as_event" => true, "timeout" => 5, "timeout_task_id_field" => "timeout_task_id_field" })
-         push_filter.filter(event({"ppm_id" => "1"})) { |yield_event| fail "task 1 shouldn't have yield event" }
-         push_filter.filter(event({"ppm_id" => "2"})) { |yield_event| expect(yield_event.get("ppm_id")).to eq("1") ; expect(yield_event.get("timeout_task_id_field")).to eq("1") }
-         expect(aggregate_maps["%{ppm_id}"].size).to eq(1)
-       end
-     end
-
-     describe "when timeout happens, " do
-       it "flush method should return last map as new event" do
-         push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "map['ppm_id'] = event.get('ppm_id')", "push_previous_map_as_event" => true, "timeout" => 1, "timeout_code" => "event.set('test', 'testValue')" })
-         push_filter.filter(event({"ppm_id" => "1"}))
-         sleep(2)
-         events_to_flush = push_filter.flush()
-         expect(events_to_flush).not_to be_nil
-         expect(events_to_flush.size).to eq(1)
-         expect(events_to_flush[0].get("ppm_id")).to eq("1")
-         expect(events_to_flush[0].get('test')).to eq("testValue")
-         expect(aggregate_maps["%{ppm_id}"].size).to eq(0)
-       end
-     end
-
-     describe "when Logstash shutdown happens, " do
-       it "flush method should return last map as new event even if timeout has not occured" do
-         push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "", "push_previous_map_as_event" => true, "timeout" => 4 })
-         push_filter.filter(event({"ppm_id" => "1"}))
-         events_to_flush = push_filter.flush({:final=>false})
-         expect(events_to_flush).to be_empty
-         expect(aggregate_maps["%{ppm_id}"].size).to eq(1)
-         events_to_flush = push_filter.flush({:final=>true})
-         expect(events_to_flush).not_to be_nil
-         expect(events_to_flush.size).to eq(1)
-         expect(events_to_flush[0].get("tags")).to eq(["_aggregatefinalflush"])
-         expect(aggregate_maps["%{ppm_id}"].size).to eq(0)
-       end
-     end
-   end
-
-
- end
+ # encoding: utf-8
+ require "logstash/devutils/rspec/spec_helper"
+ require "logstash/filters/aggregate"
+ require_relative "aggregate_spec_helper"
+
+ describe LogStash::Filters::Aggregate do
+
+   before(:each) do
+     reset_static_variables()
+     @start_filter = setup_filter({ "map_action" => "create", "code" => "map['sql_duration'] = 0" })
+     @update_filter = setup_filter({ "map_action" => "update", "code" => "map['sql_duration'] += event.get('duration')" })
+     @end_filter = setup_filter({"timeout_task_id_field" => "my_id", "push_map_as_event_on_timeout" => true, "map_action" => "update", "code" => "event.set('sql_duration', map['sql_duration'])", "end_of_task" => true, "timeout" => 5, "timeout_code" => "event.set('test', 'testValue')", "timeout_tags" => ["tag1", "tag2"] })
+   end
+
+   context "Validation" do
+     describe "and register a filter with a task_id without dynamic expression" do
+       it "raises a LogStash::ConfigurationError" do
+         expect {
+           setup_filter({ "code" => "", "task_id" => "static_value" })
+         }.to raise_error(LogStash::ConfigurationError)
+       end
+     end
+   end
+
+   context "Start event" do
+     describe "and receiving an event without task_id" do
+       it "does not record it" do
+         @start_filter.filter(event())
+         expect(aggregate_maps["%{taskid}"]).to be_empty
+       end
+     end
+     describe "and receiving an event with task_id" do
+       it "records it" do
+         event = start_event("taskid" => "id123")
+         @start_filter.filter(event)
+
+         expect(aggregate_maps["%{taskid}"].size).to eq(1)
+         expect(aggregate_maps["%{taskid}"]["id123"]).not_to be_nil
+         expect(aggregate_maps["%{taskid}"]["id123"].creation_timestamp).to be >= event.timestamp.time
+         expect(aggregate_maps["%{taskid}"]["id123"].map["sql_duration"]).to eq(0)
+       end
+     end
+
+     describe "and receiving two 'start events' for the same task_id" do
+       it "keeps the first one and does nothing with the second one" do
+
+         first_start_event = start_event("taskid" => "id124")
+         @start_filter.filter(first_start_event)
+
+         first_update_event = update_event("taskid" => "id124", "duration" => 2)
+         @update_filter.filter(first_update_event)
+
+         sleep(1)
+         second_start_event = start_event("taskid" => "id124")
+         @start_filter.filter(second_start_event)
+
+         expect(aggregate_maps["%{taskid}"].size).to eq(1)
+         expect(aggregate_maps["%{taskid}"]["id124"].creation_timestamp).to be < second_start_event.timestamp.time
+         expect(aggregate_maps["%{taskid}"]["id124"].map["sql_duration"]).to eq(first_update_event.get("duration"))
+       end
+     end
+   end
+
+   context "End event" do
+     describe "receiving an event without a previous 'start event'" do
+       describe "but without a previous 'start event'" do
+         it "does nothing with the event" do
+           end_event = end_event("taskid" => "id124")
+           @end_filter.filter(end_event)
+
+           expect(aggregate_maps["%{taskid}"]).to be_empty
+           expect(end_event.get("sql_duration")).to be_nil
+         end
+       end
+     end
+   end
+
+   context "Start/end events interaction" do
+     describe "receiving a 'start event'" do
+       before(:each) do
+         @task_id_value = "id_123"
+         @start_event = start_event({"taskid" => @task_id_value})
+         @start_filter.filter(@start_event)
+         expect(aggregate_maps["%{taskid}"].size).to eq(1)
+       end
+
+       describe "and receiving an end event" do
+         describe "and without an id" do
+           it "does nothing" do
+             end_event = end_event()
+             @end_filter.filter(end_event)
+             expect(aggregate_maps["%{taskid}"].size).to eq(1)
+             expect(end_event.get("sql_duration")).to be_nil
+           end
+         end
+
+         describe "and an id different from the one of the 'start event'" do
+           it "does nothing" do
+             different_id_value = @task_id_value + "_different"
+             @end_filter.filter(end_event("taskid" => different_id_value))
+
+             expect(aggregate_maps["%{taskid}"].size).to eq(1)
+             expect(aggregate_maps["%{taskid}"][@task_id_value]).not_to be_nil
+           end
+         end
+
+         describe "and the same id of the 'start event'" do
+           it "add 'sql_duration' field to the end event and deletes the aggregate map associated to taskid" do
+             expect(aggregate_maps["%{taskid}"].size).to eq(1)
+             expect(aggregate_maps["%{taskid}"][@task_id_value].map["sql_duration"]).to eq(0)
+
+             @update_filter.filter(update_event("taskid" => @task_id_value, "duration" => 2))
+             expect(aggregate_maps["%{taskid}"][@task_id_value].map["sql_duration"]).to eq(2)
+
+             end_event = end_event("taskid" => @task_id_value)
+             @end_filter.filter(end_event)
+
+             expect(aggregate_maps["%{taskid}"]).to be_empty
+             expect(end_event.get("sql_duration")).to eq(2)
+           end
+
+         end
+       end
+     end
+   end
+
+   context "Event with integer task id" do
+     it "works as well as with a string task id" do
+       start_event = start_event("taskid" => 124)
+       @start_filter.filter(start_event)
+       expect(aggregate_maps["%{taskid}"].size).to eq(1)
+     end
+   end
+
+   context "Event which causes an exception when code call" do
+     it "intercepts exception, logs the error and tags the event with '_aggregateexception'" do
+       @start_filter = setup_filter({ "code" => "fail 'Test'" })
+       start_event = start_event("taskid" => "id124")
+       @start_filter.filter(start_event)
+
+       expect(start_event.get("tags")).to eq(["_aggregateexception"])
+     end
+   end
+
+   context "flush call" do
+     before(:each) do
+       @end_filter.timeout = 1
+       expect(@end_filter.timeout).to eq(1)
+       @task_id_value = "id_123"
+       @start_event = start_event({"taskid" => @task_id_value})
+       @start_filter.filter(@start_event)
+       expect(aggregate_maps["%{taskid}"].size).to eq(1)
+     end
+
+     describe "no timeout defined in none filter" do
+       it "defines a default timeout on a default filter" do
+         reset_timeout_management()
+         expect(taskid_eviction_instance).to be_nil
+         @end_filter.flush()
+         expect(taskid_eviction_instance).to eq(@end_filter)
+         expect(@end_filter.timeout).to eq(LogStash::Filters::Aggregate::DEFAULT_TIMEOUT)
+       end
+     end
+
+     describe "timeout is defined on another filter" do
+       it "taskid eviction_instance is not updated" do
+         expect(taskid_eviction_instance).not_to be_nil
+         @start_filter.flush()
+         expect(taskid_eviction_instance).not_to eq(@start_filter)
+         expect(taskid_eviction_instance).to eq(@end_filter)
+       end
+     end
+
+     describe "no timeout defined on the filter" do
+       it "event is not removed" do
+         sleep(2)
+         @start_filter.flush()
+         expect(aggregate_maps["%{taskid}"].size).to eq(1)
+       end
+     end
+
+     describe "timeout defined on the filter" do
+       it "event is not removed if not expired" do
+         entries = @end_filter.flush()
+         expect(aggregate_maps["%{taskid}"].size).to eq(1)
+         expect(entries).to be_empty
+       end
+       it "removes event if expired and creates a new timeout event" do
+         sleep(2)
+         entries = @end_filter.flush()
+         expect(aggregate_maps["%{taskid}"]).to be_empty
+         expect(entries.size).to eq(1)
+         expect(entries[0].get("my_id")).to eq("id_123") # task id
+         expect(entries[0].get("sql_duration")).to eq(0) # Aggregation map
+         expect(entries[0].get("test")).to eq("testValue") # Timeout code
+         expect(entries[0].get("tags")).to eq(["tag1", "tag2"]) # Timeout tags
+       end
+     end
+
+     describe "timeout defined on another filter with another task_id pattern" do
+       it "does not remove event" do
+         another_filter = setup_filter({ "task_id" => "%{another_taskid}", "code" => "", "timeout" => 1 })
+         sleep(2)
+         entries = another_filter.flush()
+         expect(aggregate_maps["%{taskid}"].size).to eq(1)
+         expect(entries).to be_empty
+       end
+     end
+   end
+
+   context "aggregate_maps_path option is defined, " do
+     describe "close event append then register event append, " do
+       it "stores aggregate maps to configured file and then loads aggregate maps from file" do
+         store_file = "aggregate_maps"
+         expect(File.exist?(store_file)).to be false
+
+         one_filter = setup_filter({ "task_id" => "%{one_special_field}", "code" => ""})
+         store_filter = setup_filter({ "code" => "map['sql_duration'] = 0", "aggregate_maps_path" => store_file })
+         expect(aggregate_maps["%{one_special_field}"]).to be_empty
+         expect(aggregate_maps["%{taskid}"]).to be_empty
+
+         start_event = start_event("taskid" => 124)
+         filter = store_filter.filter(start_event)
+         expect(aggregate_maps["%{taskid}"].size).to eq(1)
+
+         @end_filter.close()
+         expect(aggregate_maps).not_to be_empty
+
+         store_filter.close()
+         expect(File.exist?(store_file)).to be true
+         expect(aggregate_maps).to be_empty
+
+         one_filter = setup_filter({ "task_id" => "%{one_special_field}", "code" => ""})
+         store_filter = setup_filter({ "code" => "map['sql_duration'] = 0", "aggregate_maps_path" => store_file })
+         expect(File.exist?(store_file)).to be false
+         expect(aggregate_maps["%{one_special_field}"]).to be_empty
+         expect(aggregate_maps["%{taskid}"].size).to eq(1)
+       end
+     end
+
+     describe "when aggregate_maps_path option is defined in 2 instances, " do
+       it "raises Logstash::ConfigurationError" do
+         expect {
+           setup_filter({ "code" => "", "aggregate_maps_path" => "aggregate_maps1" })
+           setup_filter({ "code" => "", "aggregate_maps_path" => "aggregate_maps2" })
+         }.to raise_error(LogStash::ConfigurationError)
+       end
+     end
+   end
+
+   context "Logstash reload occurs, " do
+     describe "close method is called, " do
+       it "reinitializes static variables" do
+         @end_filter.close()
+         expect(aggregate_maps).to be_empty
+         expect(taskid_eviction_instance).to be_nil
+         expect(static_close_instance).not_to be_nil
+         expect(aggregate_maps_path_set).to be false
+
+         @end_filter.register()
+         expect(static_close_instance).to be_nil
+       end
+     end
+   end
+
+   context "push_previous_map_as_event option is defined, " do
+     describe "when push_previous_map_as_event option is activated on another filter with same task_id pattern" do
+       it "should throw a LogStash::ConfigurationError" do
+         expect {
+           setup_filter({"code" => "map['taskid'] = event.get('taskid')", "push_previous_map_as_event" => true})
+         }.to raise_error(LogStash::ConfigurationError)
+       end
+     end
+
+     describe "when a new task id is detected, " do
+       it "should push previous map as new event" do
+         push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "map['ppm_id'] = event.get('ppm_id')", "push_previous_map_as_event" => true, "timeout" => 5, "timeout_task_id_field" => "timeout_task_id_field" })
+         push_filter.filter(event({"ppm_id" => "1"})) { |yield_event| fail "task 1 shouldn't have yield event" }
+         push_filter.filter(event({"ppm_id" => "2"})) { |yield_event| expect(yield_event.get("ppm_id")).to eq("1") ; expect(yield_event.get("timeout_task_id_field")).to eq("1") }
+         expect(aggregate_maps["%{ppm_id}"].size).to eq(1)
+       end
+     end
+
+     describe "when timeout happens, " do
+       it "flush method should return last map as new event" do
+         push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "map['ppm_id'] = event.get('ppm_id')", "push_previous_map_as_event" => true, "timeout" => 1, "timeout_code" => "event.set('test', 'testValue')" })
+         push_filter.filter(event({"ppm_id" => "1"}))
+         sleep(2)
+         events_to_flush = push_filter.flush()
+         expect(events_to_flush).not_to be_nil
+         expect(events_to_flush.size).to eq(1)
+         expect(events_to_flush[0].get("ppm_id")).to eq("1")
+         expect(events_to_flush[0].get('test')).to eq("testValue")
+         expect(aggregate_maps["%{ppm_id}"].size).to eq(0)
+       end
+     end
+
+     describe "when Logstash shutdown happens, " do
+       it "flush method should return last map as new event even if timeout has not occured" do
+         push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "", "push_previous_map_as_event" => true, "timeout" => 4 })
+         push_filter.filter(event({"ppm_id" => "1"}))
+         events_to_flush = push_filter.flush({:final=>false})
+         expect(events_to_flush).to be_empty
+         expect(aggregate_maps["%{ppm_id}"].size).to eq(1)
+         events_to_flush = push_filter.flush({:final=>true})
+         expect(events_to_flush).not_to be_nil
+         expect(events_to_flush.size).to eq(1)
+         expect(events_to_flush[0].get("tags")).to eq(["_aggregatefinalflush"])
+         expect(aggregate_maps["%{ppm_id}"].size).to eq(0)
+       end
+     end
+   end
+
+
+ end
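
For reference, the start/update/end pattern that most of these specs exercise can be sketched as follows (a sketch built from the helper methods setup_filter, start_event, update_event, and end_event in aggregate_spec_helper; the option values mirror the before(:each) block above):

  # Create the map on the start event, accumulate on update events,
  # then copy the aggregate into the end event and discard the map.
  start_filter  = setup_filter({ "map_action" => "create", "code" => "map['sql_duration'] = 0" })
  update_filter = setup_filter({ "map_action" => "update", "code" => "map['sql_duration'] += event.get('duration')" })
  end_filter    = setup_filter({ "map_action" => "update", "end_of_task" => true,
                                 "code" => "event.set('sql_duration', map['sql_duration'])" })

  start_filter.filter(start_event("taskid" => "id123"))
  update_filter.filter(update_event("taskid" => "id123", "duration" => 2))

  final_event = end_event("taskid" => "id123")
  end_filter.filter(final_event)
  final_event.get("sql_duration") # => 2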