logstash-filter-aggregate 2.5.1 → 2.5.2

@@ -1,643 +1,686 @@
1
- # encoding: utf-8
2
-
3
- require "logstash/filters/base"
4
- require "logstash/namespace"
5
- require "thread"
6
- require "logstash/util/decorators"
7
-
8
- #
9
- # The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
10
- # and finally push aggregated information into final task event.
11
- #
12
- # You should be very careful to set logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly
13
- # otherwise events may be processed out of sequence and unexpected results will occur.
14
- #
15
- # ==== Example #1
16
- #
17
- # * with these given logs :
18
- # [source,ruby]
19
- # ----------------------------------
20
- # INFO - 12345 - TASK_START - start
21
- # INFO - 12345 - SQL - sqlQuery1 - 12
22
- # INFO - 12345 - SQL - sqlQuery2 - 34
23
- # INFO - 12345 - TASK_END - end
24
- # ----------------------------------
25
- #
26
- # * you can aggregate "sql duration" for the whole task with this configuration :
27
- # [source,ruby]
28
- # ----------------------------------
29
- # filter {
30
- # grok {
31
- # match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
32
- # }
33
- #
34
- # if [logger] == "TASK_START" {
35
- # aggregate {
36
- # task_id => "%{taskid}"
37
- # code => "map['sql_duration'] = 0"
38
- # map_action => "create"
39
- # }
40
- # }
41
- #
42
- # if [logger] == "SQL" {
43
- # aggregate {
44
- # task_id => "%{taskid}"
45
- # code => "map['sql_duration'] += event.get('duration')"
46
- # map_action => "update"
47
- # }
48
- # }
49
- #
50
- # if [logger] == "TASK_END" {
51
- # aggregate {
52
- # task_id => "%{taskid}"
53
- # code => "event.set('sql_duration', map['sql_duration'])"
54
- # map_action => "update"
55
- # end_of_task => true
56
- # timeout => 120
57
- # }
58
- # }
59
- # }
60
- # ----------------------------------
61
- #
62
- # * the final event then looks like :
63
- # [source,ruby]
64
- # ----------------------------------
65
- # {
66
- # "message" => "INFO - 12345 - TASK_END - end message",
67
- # "sql_duration" => 46
68
- # }
69
- # ----------------------------------
70
- #
71
- # the field `sql_duration` is added and contains the sum of all sql query durations.
72
- #
73
- # ==== Example #2 : no start event
74
- #
75
- # * If you have the same logs as in example #1, but without a start log :
76
- # [source,ruby]
77
- # ----------------------------------
78
- # INFO - 12345 - SQL - sqlQuery1 - 12
79
- # INFO - 12345 - SQL - sqlQuery2 - 34
80
- # INFO - 12345 - TASK_END - end
81
- # ----------------------------------
82
- #
83
- # * you can also aggregate "sql duration" with a slightly different configuration :
84
- # [source,ruby]
85
- # ----------------------------------
86
- # filter {
87
- # grok {
88
- # match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
89
- # }
90
- #
91
- # if [logger] == "SQL" {
92
- # aggregate {
93
- # task_id => "%{taskid}"
94
- # code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
95
- # }
96
- # }
97
- #
98
- # if [logger] == "TASK_END" {
99
- # aggregate {
100
- # task_id => "%{taskid}"
101
- # code => "event.set('sql_duration', map['sql_duration'])"
102
- # end_of_task => true
103
- # timeout => 120
104
- # }
105
- # }
106
- # }
107
- # ----------------------------------
108
- #
109
- # * the final event is exactly the same as in example #1
110
- # * the key point is the "||=" ruby operator. It initializes the 'sql_duration' map entry to 0 only if this map entry is not already initialized
111
- #
112
- #
113
- # ==== Example #3 : no end event
114
- #
115
- # Third use case: You have no specific end event.
116
- #
117
- # A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events, however once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
118
- #
119
- # In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when a timeout occurs.
120
- # In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
121
- # We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
122
- #
123
- # * Given these logs:
124
- #
125
- # [source,ruby]
126
- # ----------------------------------
127
- # INFO - 12345 - Clicked One
128
- # INFO - 12345 - Clicked Two
129
- # INFO - 12345 - Clicked Three
130
- # ----------------------------------
131
- #
132
- # * You can aggregate the amount of clicks the user did like this:
133
- #
134
- # [source,ruby]
135
- # ----------------------------------
136
- # filter {
137
- # grok {
138
- # match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
139
- # }
140
- #
141
- # aggregate {
142
- # task_id => "%{user_id}"
143
- # code => "map['clicks'] ||= 0; map['clicks'] += 1;"
144
- # push_map_as_event_on_timeout => true
145
- # timeout_task_id_field => "user_id"
146
- # timeout => 600 # 10 minutes timeout
147
- # timeout_tags => ['_aggregatetimeout']
148
- # timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
149
- # }
150
- # }
151
- # ----------------------------------
152
- #
153
- # * After ten minutes, this will yield an event like:
154
- #
155
- # [source,json]
156
- # ----------------------------------
157
- # {
158
- # "user_id": "12345",
159
- # "clicks": 3,
160
- # "several_clicks": true,
161
- # "tags": [
162
- # "_aggregatetimeout"
163
- # ]
164
- # }
165
- # ----------------------------------
166
- #
167
- # ==== Example #4 : no end event and tasks come one after the other
168
- #
169
- # Fourth use case : like example #3, you have no specific end event, but also, tasks come one after the other.
170
- # That is to say : tasks are not interlaced. All task1 events come, then all task2 events come, ...
171
- # In that case, you don't want to wait task timeout to flush aggregation map.
172
- # * A typical case is aggregating results from jdbc input plugin.
173
- # * Given that you have this SQL query : `SELECT country_name, town_name FROM town`
174
- # * Using the jdbc input plugin, you get these 3 events :
175
- # [source,json]
176
- # ----------------------------------
177
- # { "country_name": "France", "town_name": "Paris" }
178
- # { "country_name": "France", "town_name": "Marseille" }
179
- # { "country_name": "USA", "town_name": "New-York" }
180
- # ----------------------------------
181
- # * And you would like to push these 2 aggregated events into elasticsearch :
182
- # [source,json]
183
- # ----------------------------------
184
- # { "country_name": "France", "town_name": [ "Paris", "Marseille" ] }
185
- # { "country_name": "USA", "town_name": [ "New-York" ] }
186
- # ----------------------------------
187
- # * You can do that using `push_previous_map_as_event` aggregate plugin option :
188
- # [source,ruby]
189
- # ----------------------------------
190
- # filter {
191
- # aggregate {
192
- # task_id => "%{country_name}"
193
- # code => "
194
- # map['town_name'] ||= []
195
- # event.to_hash.each do |key,value|
196
- # map[key] = value unless map.has_key?(key)
197
- # map[key] << value if map[key].is_a?(Array) and !value.is_a?(Array)
198
- # end
199
- # "
200
- # push_previous_map_as_event => true
201
- # timeout => 5
202
- # timeout_tags => ['aggregated']
203
- # }
204
- #
205
- # if "aggregated" not in [tags] {
206
- # drop {}
207
- # }
208
- # }
209
- # ----------------------------------
210
- # * The key point is that each time aggregate plugin detects a new `country_name`, it pushes previous aggregate map as a new logstash event (with 'aggregated' tag), and then creates a new empty map for the next country
211
- # * When 5s timeout comes, the last aggregate map is pushed as a new event
212
- # * Finally, initial events (which are not aggregated) are dropped because useless
213
- #
214
- #
215
- # ==== How it works
216
- # * the filter needs a "task_id" to correlate events (log lines) of a same task
217
- # * at the task beginning, the filter creates a map, attached to task_id
218
- # * for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to map)
219
- # * in the final event, you can execute a last code (for instance, add map data to final event)
220
- # * after the final event, the map attached to task is deleted
221
- # * in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps
222
- # * if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
223
- # * all timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are : timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
224
- # * if `code` execution raises an exception, the error is logged and event is tagged '_aggregateexception'
225
- #
226
- #
227
- # ==== Use Cases
228
- # * extract some cool metrics from task logs and push them into task final log event (like in example #1 and #2)
229
- # * extract error information in any task log line, and push it in final task event (to get a final event with all error information if any)
230
- # * extract all back-end calls as a list, and push this list in final task event (to get a task profile)
231
- # * extract all http headers logged in several lines to push this list in final task event (complete http request info)
232
- # * for every back-end call, collect call details available on several lines, analyse it and finally tag final back-end call log line (error, timeout, business-warning, ...)
233
- # * Finally, task id can be any correlation id matching your need : it can be a session id, a file path, ...
234
- #
235
- #
236
- class LogStash::Filters::Aggregate < LogStash::Filters::Base
237
-
238
- config_name "aggregate"
239
-
240
- # The expression defining task ID to correlate logs.
241
- #
242
- # This value must uniquely identify the task in the system.
243
- #
244
- # Example value : "%{application}%{my_task_id}"
245
- config :task_id, :validate => :string, :required => true
246
-
247
- # The code to execute to update map, using current event.
248
- #
249
- # Or on the contrary, the code to execute to update event, using current map.
250
- #
251
- # You will have a 'map' variable and an 'event' variable available (that is the event itself).
252
- #
253
- # Example value : `"map['sql_duration'] += event.get('duration')"`
254
- config :code, :validate => :string, :required => true
255
-
256
- # Tell the filter what to do with aggregate map.
257
- #
258
- # `create`: create the map, and execute the code only if map wasn't created before
259
- #
260
- # `update`: doesn't create the map, and execute the code only if map was created before
261
- #
262
- # `create_or_update`: create the map if it wasn't created before, execute the code in all cases
263
- config :map_action, :validate => :string, :default => "create_or_update"
264
-
265
- # Tell the filter that task is ended, and therefore, to delete aggregate map after code execution.
266
- config :end_of_task, :validate => :boolean, :default => false
267
-
268
- # The path to file where aggregate maps are stored when logstash stops
269
- # and are loaded from when logstash starts.
270
- #
271
- # If not defined, aggregate maps will not be stored at logstash stop and will be lost.
272
- # Must be defined in only one aggregate filter (as aggregate maps are global).
273
- #
274
- # Example value : `"/path/to/.aggregate_maps"`
275
- config :aggregate_maps_path, :validate => :string, :required => false
276
-
277
- # The number of seconds after which a task "end event" can be considered lost.
278
- #
279
- # When timeout occurs for a task, the task "map" is evicted.
280
- #
281
- # Timeout can be defined for each "task_id" pattern.
282
- #
283
- # If no timeout is defined, default timeout will be applied : 1800 seconds.
284
- config :timeout, :validate => :number, :required => false
285
-
286
- # The code to execute to complete timeout generated event, when 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true.
287
- # The code block will have access to the newly generated timeout event that is pre-populated with the aggregation map.
288
- #
289
- # If 'timeout_task_id_field' is set, the event is also populated with the task_id value
290
- #
291
- # Example value: `"event.set('state', 'timeout')"`
292
- config :timeout_code, :validate => :string, :required => false
293
-
294
- # When this option is enabled, each time a task timeout is detected, it pushes task aggregation map as a new logstash event.
295
- # This makes it possible to detect and process task timeouts in logstash, but also to manage tasks that have no explicit end event.
296
- config :push_map_as_event_on_timeout, :validate => :boolean, :required => false, :default => false
297
-
298
- # When this option is enabled, each time aggregate plugin detects a new task id, it pushes previous aggregate map as a new logstash event,
299
- # and then creates a new empty map for the next task.
300
- #
301
- # WARNING: this option works fine only if tasks come one after the other. It means : all task1 events, then all task2 events, etc...
302
- config :push_previous_map_as_event, :validate => :boolean, :required => false, :default => false
303
-
304
- # This option indicates the timeout generated event's field for the "task_id" value.
305
- # The task id will then be set into the timeout event. This can help correlate which tasks have been timed out.
306
- #
307
- # This field has no default value and will not be set on the event if not configured.
308
- #
309
- # Example:
310
- #
311
- # If the task_id is "12345" and this field is set to "my_id", the generated timeout event will contain `'my_id'` key with `'12345'` value.
312
- config :timeout_task_id_field, :validate => :string, :required => false
313
-
314
- # Defines tags to add when a timeout event is generated and yielded
315
- config :timeout_tags, :validate => :array, :required => false, :default => []
316
-
317
-
318
- # STATIC VARIABLES
319
-
320
-
321
- # Default timeout (in seconds) when not defined in plugin configuration
322
- DEFAULT_TIMEOUT = 1800
323
-
324
- # This is the state of the filter.
325
- # For each entry, key is "task_id" and value is a map freely updatable by 'code' config
326
- @@aggregate_maps = {}
327
-
328
- # Mutex used to synchronize access to 'aggregate_maps'
329
- @@mutex = Mutex.new
330
-
331
- # Default timeout for task_id patterns where timeout is not defined in logstash filter configuration
332
- @@default_timeout = nil
333
-
334
- # For each "task_id" pattern, defines which Aggregate instance will process flush() call, processing expired Aggregate elements (older than timeout)
335
- # For each entry, key is "task_id pattern" and value is "aggregate instance"
336
- @@flush_instance_map = {}
337
-
338
- # last time where timeout management in flush() method was launched, per "task_id" pattern
339
- @@last_flush_timestamp_map = {}
340
-
341
- # flag indicating if aggregate_maps_path option has been already set on one aggregate instance
342
- @@aggregate_maps_path_set = false
343
-
344
- # defines which Aggregate instance will close Aggregate static variables
345
- @@static_close_instance = nil
346
-
347
-
348
- # Initialize plugin
349
- public
350
- def register
351
-
352
- @logger.debug("Aggregate register call", :code => @code)
353
-
354
- # process lambda expression to call in each filter call
355
- eval("@codeblock = lambda { |event, map| #{@code} }", binding, "(aggregate filter code)")
356
-
357
- # process lambda expression to call in the timeout case or previous event case
358
- if @timeout_code
359
- eval("@timeout_codeblock = lambda { |event| #{@timeout_code} }", binding, "(aggregate filter timeout code)")
360
- end
361
-
362
- @@mutex.synchronize do
363
-
364
- # timeout management : define eviction_instance for current task_id pattern
365
- if has_timeout_options?
366
- if @@flush_instance_map.has_key?(@task_id)
367
- # all timeout options have to be defined in only one aggregate filter per task_id pattern
368
- raise LogStash::ConfigurationError, "Aggregate plugin: For task_id pattern #{@task_id}, there are more than one filter which defines timeout options. All timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are : #{display_timeout_options}"
369
- end
370
- @@flush_instance_map[@task_id] = self
371
- @logger.debug("Aggregate timeout for '#{@task_id}' pattern: #{@timeout} seconds")
372
- end
373
-
374
- # timeout management : define default_timeout
375
- if !@timeout.nil? && (@@default_timeout.nil? || @timeout < @@default_timeout)
376
- @@default_timeout = @timeout
377
- @logger.debug("Aggregate default timeout: #{@timeout} seconds")
378
- end
379
-
380
- # reinit static_close_instance (if necessary)
381
- if !@@aggregate_maps_path_set && !@@static_close_instance.nil?
382
- @@static_close_instance = nil
383
- end
384
-
385
- # check if aggregate_maps_path option has already been set on another instance else set @@aggregate_maps_path_set
386
- if !@aggregate_maps_path.nil?
387
- if @@aggregate_maps_path_set
388
- @@aggregate_maps_path_set = false
389
- raise LogStash::ConfigurationError, "Aggregate plugin: Option 'aggregate_maps_path' must be set on only one aggregate filter"
390
- else
391
- @@aggregate_maps_path_set = true
392
- @@static_close_instance = self
393
- end
394
- end
395
-
396
- # load aggregate maps from file (if option defined)
397
- if !@aggregate_maps_path.nil? && File.exist?(@aggregate_maps_path)
398
- File.open(@aggregate_maps_path, "r") { |from_file| @@aggregate_maps = Marshal.load(from_file) }
399
- File.delete(@aggregate_maps_path)
400
- @logger.info("Aggregate maps loaded from : #{@aggregate_maps_path}")
401
- end
402
-
403
- # init aggregate_maps
404
- @@aggregate_maps[@task_id] ||= {}
405
- end
406
- end
407
-
408
- # Called when logstash stops
409
- public
410
- def close
411
-
412
- @logger.debug("Aggregate close call", :code => @code)
413
-
414
- # define static close instance if none is already defined
415
- @@static_close_instance = self if @@static_close_instance.nil?
416
-
417
- if @@static_close_instance == self
418
- # store aggregate maps to file (if option defined)
419
- @@mutex.synchronize do
420
- @@aggregate_maps.delete_if { |key, value| value.empty? }
421
- if !@aggregate_maps_path.nil? && !@@aggregate_maps.empty?
422
- File.open(@aggregate_maps_path, "w"){ |to_file| Marshal.dump(@@aggregate_maps, to_file) }
423
- @logger.info("Aggregate maps stored to : #{@aggregate_maps_path}")
424
- end
425
- @@aggregate_maps.clear()
426
- end
427
-
428
- # reinit static variables for logstash reload
429
- @@default_timeout = nil
430
- @@flush_instance_map = {}
431
- @@last_flush_timestamp_map = {}
432
- @@aggregate_maps_path_set = false
433
- end
434
-
435
- end
436
-
437
- # This method is invoked each time an event matches the filter
438
- public
439
- def filter(event)
440
-
441
- # define task id
442
- task_id = event.sprintf(@task_id)
443
- return if task_id.nil? || task_id == @task_id
444
-
445
- noError = false
446
- event_to_yield = nil
447
-
448
- # protect aggregate_maps against concurrent access, using a mutex
449
- @@mutex.synchronize do
450
-
451
- # retrieve the current aggregate map
452
- aggregate_maps_element = @@aggregate_maps[@task_id][task_id]
453
-
454
-
455
- # create aggregate map, if it doesn't exist
456
- if aggregate_maps_element.nil?
457
- return if @map_action == "update"
458
- # create new event from previous map, if @push_previous_map_as_event is enabled
459
- if @push_previous_map_as_event && !@@aggregate_maps[@task_id].empty?
460
- event_to_yield = extract_previous_map_as_event()
461
- end
462
- aggregate_maps_element = LogStash::Filters::Aggregate::Element.new(Time.now);
463
- @@aggregate_maps[@task_id][task_id] = aggregate_maps_element
464
- else
465
- return if @map_action == "create"
466
- end
467
- map = aggregate_maps_element.map
468
-
469
- # execute the code to read/update map and event
470
- begin
471
- @codeblock.call(event, map)
472
- @logger.debug("Aggregate successful filter code execution", :code => @code)
473
- noError = true
474
- rescue => exception
475
- @logger.error("Aggregate exception occurred",
476
- :error => exception,
477
- :code => @code,
478
- :map => map,
479
- :event_data => event.to_hash_with_metadata)
480
- event.tag("_aggregateexception")
481
- end
482
-
483
- # delete the map if task is ended
484
- @@aggregate_maps[@task_id].delete(task_id) if @end_of_task
485
-
486
- end
487
-
488
- # match the filter, only if no error occurred
489
- filter_matched(event) if noError
490
-
491
- # yield previous map as new event if set
492
- yield event_to_yield unless event_to_yield.nil?
493
-
494
- end
495
-
496
- # Create a new event from the aggregation_map and the corresponding task_id
497
- # This will create the event and
498
- # if @timeout_task_id_field is set, it will set the task_id on the timeout event
499
- # if @timeout_code is set, it will execute the timeout code on the created timeout event
500
- # returns the newly created event
501
- def create_timeout_event(aggregation_map, task_id)
502
-
503
- @logger.debug("Aggregate create_timeout_event call with task_id '#{task_id}'")
504
-
505
- event_to_yield = LogStash::Event.new(aggregation_map)
506
-
507
- if @timeout_task_id_field
508
- event_to_yield.set(@timeout_task_id_field, task_id)
509
- end
510
-
511
- LogStash::Util::Decorators.add_tags(@timeout_tags, event_to_yield, "filters/#{self.class.name}")
512
-
513
- # Call code block if available
514
- if @timeout_code
515
- begin
516
- @timeout_codeblock.call(event_to_yield)
517
- rescue => exception
518
- @logger.error("Aggregate exception occurred",
519
- :error => exception,
520
- :timeout_code => @timeout_code,
521
- :timeout_event_data => event_to_yield.to_hash_with_metadata)
522
- event_to_yield.tag("_aggregateexception")
523
- end
524
- end
525
-
526
- return event_to_yield
527
- end
528
-
529
- # Extract the previous map in aggregate maps, and return it as a new Logstash event
530
- def extract_previous_map_as_event
531
- previous_entry = @@aggregate_maps[@task_id].shift()
532
- previous_task_id = previous_entry[0]
533
- previous_map = previous_entry[1].map
534
- return create_timeout_event(previous_map, previous_task_id)
535
- end
536
-
537
- # Necessary to indicate logstash to periodically call 'flush' method
538
- def periodic_flush
539
- true
540
- end
541
-
542
- # This method is invoked by LogStash every 5 seconds.
543
- def flush(options = {})
544
-
545
- @logger.debug("Aggregate flush call with #{options}")
546
-
547
- # Protection against no timeout defined by logstash conf : define a default eviction instance with timeout = DEFAULT_TIMEOUT seconds
548
- if @@default_timeout.nil?
549
- @@default_timeout = DEFAULT_TIMEOUT
550
- end
551
- if !@@flush_instance_map.has_key?(@task_id)
552
- @@flush_instance_map[@task_id] = self
553
- @timeout = @@default_timeout
554
- elsif @@flush_instance_map[@task_id].timeout.nil?
555
- @@flush_instance_map[@task_id].timeout = @@default_timeout
556
- end
557
-
558
- # Launch timeout management only every interval of (@timeout / 2) seconds or at Logstash shutdown
559
- if @@flush_instance_map[@task_id] == self && (!@@last_flush_timestamp_map.has_key?(@task_id) || Time.now > @@last_flush_timestamp_map[@task_id] + @timeout / 2 || options[:final])
560
- events_to_flush = remove_expired_maps()
561
-
562
- # at Logstash shutdown, if push_previous_map_as_event is enabled, it's important to force flush (particularly for jdbc input plugin)
563
- if options[:final] && @push_previous_map_as_event && !@@aggregate_maps[@task_id].empty?
564
- events_to_flush << extract_previous_map_as_event()
565
- end
566
-
567
- # tag flushed events, indicating "final flush" special event
568
- if options[:final]
569
- events_to_flush.each { |event_to_flush| event_to_flush.tag("_aggregatefinalflush") }
570
- end
571
-
572
- # update last flush timestamp
573
- @@last_flush_timestamp_map[@task_id] = Time.now
574
-
575
- # return events to flush into Logstash pipeline
576
- return events_to_flush
577
- else
578
- return []
579
- end
580
-
581
- end
582
-
583
-
584
- # Remove the expired Aggregate maps from @@aggregate_maps if they are older than timeout.
585
- # If @push_previous_map_as_event option is set, or @push_map_as_event_on_timeout is set, expired maps are returned as new events to be flushed to Logstash pipeline.
586
- def remove_expired_maps()
587
- events_to_flush = []
588
- min_timestamp = Time.now - @timeout
589
-
590
- @@mutex.synchronize do
591
-
592
- @logger.debug("Aggregate remove_expired_maps call with '#{@task_id}' pattern and #{@@aggregate_maps[@task_id].length} maps")
593
-
594
- @@aggregate_maps[@task_id].delete_if do |key, element|
595
- if element.creation_timestamp < min_timestamp
596
- if @push_previous_map_as_event || @push_map_as_event_on_timeout
597
- events_to_flush << create_timeout_event(element.map, key)
598
- end
599
- next true
600
- end
601
- next false
602
- end
603
- end
604
-
605
- return events_to_flush
606
- end
607
-
608
- # return if this filter instance has any timeout option enabled in logstash configuration
609
- def has_timeout_options?()
610
- return (
611
- timeout ||
612
- timeout_code ||
613
- push_map_as_event_on_timeout ||
614
- push_previous_map_as_event ||
615
- timeout_task_id_field ||
616
- !timeout_tags.empty?
617
- )
618
- end
619
-
620
- # display all possible timeout options
621
- def display_timeout_options()
622
- return [
623
- "timeout",
624
- "timeout_code",
625
- "push_map_as_event_on_timeout",
626
- "push_previous_map_as_event",
627
- "timeout_task_id_field",
628
- "timeout_tags"
629
- ].join(", ")
630
- end
631
-
632
- end # class LogStash::Filters::Aggregate
633
-
634
- # Element of "aggregate_maps"
635
- class LogStash::Filters::Aggregate::Element
636
-
637
- attr_accessor :creation_timestamp, :map
638
-
639
- def initialize(creation_timestamp)
640
- @creation_timestamp = creation_timestamp
641
- @map = {}
642
- end
1
+ # encoding: utf-8
2
+
3
+ require "logstash/filters/base"
4
+ require "logstash/namespace"
5
+ require "thread"
6
+ require "logstash/util/decorators"
7
+
8
+ #
9
+ # The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
10
+ # and finally push aggregated information into final task event.
11
+ #
12
+ # You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly;
13
+ # otherwise events may be processed out of sequence and unexpected results will occur.
14
+ #
15
+ # ==== Example #1
16
+ #
17
+ # * with these given logs :
18
+ # [source,ruby]
19
+ # ----------------------------------
20
+ # INFO - 12345 - TASK_START - start
21
+ # INFO - 12345 - SQL - sqlQuery1 - 12
22
+ # INFO - 12345 - SQL - sqlQuery2 - 34
23
+ # INFO - 12345 - TASK_END - end
24
+ # ----------------------------------
25
+ #
26
+ # * you can aggregate "sql duration" for the whole task with this configuration :
27
+ # [source,ruby]
28
+ # ----------------------------------
29
+ # filter {
30
+ # grok {
31
+ # match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
32
+ # }
33
+ #
34
+ # if [logger] == "TASK_START" {
35
+ # aggregate {
36
+ # task_id => "%{taskid}"
37
+ # code => "map['sql_duration'] = 0"
38
+ # map_action => "create"
39
+ # }
40
+ # }
41
+ #
42
+ # if [logger] == "SQL" {
43
+ # aggregate {
44
+ # task_id => "%{taskid}"
45
+ # code => "map['sql_duration'] += event.get('duration')"
46
+ # map_action => "update"
47
+ # }
48
+ # }
49
+ #
50
+ # if [logger] == "TASK_END" {
51
+ # aggregate {
52
+ # task_id => "%{taskid}"
53
+ # code => "event.set('sql_duration', map['sql_duration'])"
54
+ # map_action => "update"
55
+ # end_of_task => true
56
+ # timeout => 120
57
+ # }
58
+ # }
59
+ # }
60
+ # ----------------------------------
61
+ #
62
+ # * the final event then looks like :
63
+ # [source,ruby]
64
+ # ----------------------------------
65
+ # {
66
+ # "message" => "INFO - 12345 - TASK_END - end message",
67
+ # "sql_duration" => 46
68
+ # }
69
+ # ----------------------------------
70
+ #
71
+ # the field `sql_duration` is added and contains the sum of all sql query durations.
72
+ #
73
+ # ==== Example #2 : no start event
74
+ #
75
+ # * If you have the same logs as in example #1, but without a start log :
76
+ # [source,ruby]
77
+ # ----------------------------------
78
+ # INFO - 12345 - SQL - sqlQuery1 - 12
79
+ # INFO - 12345 - SQL - sqlQuery2 - 34
80
+ # INFO - 12345 - TASK_END - end
81
+ # ----------------------------------
82
+ #
83
+ # * you can also aggregate "sql duration" with a slightly different configuration :
84
+ # [source,ruby]
85
+ # ----------------------------------
86
+ # filter {
87
+ # grok {
88
+ # match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
89
+ # }
90
+ #
91
+ # if [logger] == "SQL" {
92
+ # aggregate {
93
+ # task_id => "%{taskid}"
94
+ # code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
95
+ # }
96
+ # }
97
+ #
98
+ # if [logger] == "TASK_END" {
99
+ # aggregate {
100
+ # task_id => "%{taskid}"
101
+ # code => "event.set('sql_duration', map['sql_duration'])"
102
+ # end_of_task => true
103
+ # timeout => 120
104
+ # }
105
+ # }
106
+ # }
107
+ # ----------------------------------
108
+ #
109
+ # * the final event is exactly the same as in example #1
110
+ # * the key point is the "||=" ruby operator. It initializes the 'sql_duration' map entry to 0 only if this map entry is not already set
111
+ #
112
+ #
113
+ # ==== Example #3 : no end event
114
+ #
115
+ # Third use case: You have no specific end event.
116
+ #
117
+ # A typical case is aggregating or tracking user behaviour. We can track a user by their ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
118
+ #
119
+ # In this case, we can set the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when a timeout occurs.
120
+ # In addition, we can define 'timeout_code' to execute code on the populated timeout event.
121
+ # We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
122
+ #
123
+ # * Given these logs:
124
+ #
125
+ # [source,ruby]
126
+ # ----------------------------------
127
+ # INFO - 12345 - Clicked One
128
+ # INFO - 12345 - Clicked Two
129
+ # INFO - 12345 - Clicked Three
130
+ # ----------------------------------
131
+ #
132
+ # * You can aggregate the number of clicks the user made like this:
133
+ #
134
+ # [source,ruby]
135
+ # ----------------------------------
136
+ # filter {
137
+ # grok {
138
+ # match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
139
+ # }
140
+ #
141
+ # aggregate {
142
+ # task_id => "%{user_id}"
143
+ # code => "map['clicks'] ||= 0; map['clicks'] += 1;"
144
+ # push_map_as_event_on_timeout => true
145
+ # timeout_task_id_field => "user_id"
146
+ # timeout => 600 # 10 minutes timeout
147
+ # timeout_tags => ['_aggregatetimeout']
148
+ # timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
149
+ # }
150
+ # }
151
+ # ----------------------------------
152
+ #
153
+ # * After ten minutes, this will yield an event like:
154
+ #
155
+ # [source,json]
156
+ # ----------------------------------
157
+ # {
158
+ # "user_id": "12345",
159
+ # "clicks": 3,
160
+ # "several_clicks": true,
161
+ # "tags": [
162
+ # "_aggregatetimeout"
163
+ # ]
164
+ # }
165
+ # ----------------------------------
166
+ #
167
+ # ==== Example #4 : no end event and tasks come one after the other
168
+ #
169
+ # Fourth use case : like example #3, you have no specific end event, but in addition, tasks come one after the other.
170
+ # That is to say : tasks are not interleaved. All task1 events come, then all task2 events come, etc.
171
+ # In that case, you don't want to wait for the task timeout to flush the aggregation map.
172
+ # * A typical case is aggregating results from jdbc input plugin.
173
+ # * Given that you have this SQL query : `SELECT country_name, town_name FROM town`
174
+ # * Using the jdbc input plugin, you get these 3 events :
175
+ # [source,json]
176
+ # ----------------------------------
177
+ # { "country_name": "France", "town_name": "Paris" }
178
+ # { "country_name": "France", "town_name": "Marseille" }
179
+ # { "country_name": "USA", "town_name": "New-York" }
180
+ # ----------------------------------
181
+ # * And you would like to push these 2 aggregated events into elasticsearch :
182
+ # [source,json]
183
+ # ----------------------------------
184
+ # { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
185
+ # { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
186
+ # ----------------------------------
187
+ # * You can do that using `push_previous_map_as_event` aggregate plugin option :
188
+ # [source,ruby]
189
+ # ----------------------------------
190
+ # filter {
191
+ # aggregate {
192
+ # task_id => "%{country_name}"
193
+ # code => "
194
+ # map['country_name'] = event.get('country_name')
195
+ # map['towns'] ||= []
196
+ # map['towns'] << {'town_name' => event.get('town_name')}
197
+ # event.cancel()
198
+ # "
199
+ # push_previous_map_as_event => true
200
+ # timeout => 3
201
+ # }
202
+ # }
203
+ # ----------------------------------
204
+ # * The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
205
+ # * When the 3s timeout comes, the last aggregate map is pushed as a new event
206
+ # * Finally, initial events (which are not aggregated) are dropped because they are useless (thanks to `event.cancel()`)
207
+ #
208
+ #
209
+ # ==== How it works
210
+ # * the filter needs a "task_id" to correlate events (log lines) of the same task
211
+ # * at the task beginning, the filter creates a map, attached to task_id
212
+ # * for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to map)
213
+ # * in the final event, you can execute one last piece of code (for instance, add map data to final event)
214
+ # * after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`)
215
+ # * an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even with the same task_id value, they won't share the same aggregate map.
216
+ # * in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps (a minimal sketch is shown after this list)
217
+ # * if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
218
+ # * all timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are : timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
219
+ # * if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception'
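+ #
+ # As a minimal sketch of these rules (field and map entry names are illustrative placeholders), a single aggregate filter that counts events per task and defines the timeout evicting expired maps:
+ # [source,ruby]
+ # filter {
+ #   aggregate {
+ #     task_id => "%{taskid}"
+ #     code => "map['events_count'] ||= 0 ; map['events_count'] += 1"
+ #     timeout => 120
+ #   }
+ # }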
220
+ #
221
+ #
222
+ # ==== Use Cases
223
+ # * extract some cool metrics from task logs and push them into task final log event (like in example #1 and #2)
224
+ # * extract error information in any task log line, and push it in final task event (to get a final event with all error information if any)
225
+ # * extract all back-end calls as a list, and push this list in final task event (to get a task profile)
226
+ # * extract all http headers logged in several lines to push this list in final task event (complete http request info)
227
+ # * for every back-end call, collect call details available on several lines, analyse it and finally tag final back-end call log line (error, timeout, business-warning, ...)
228
+ # * Finally, task id can be any correlation id matching your need : it can be a session id, a file path, ...
229
+ #
230
+ #
231
+ class LogStash::Filters::Aggregate < LogStash::Filters::Base
232
+
233
+
234
+ # ############## #
235
+ # CONFIG OPTIONS #
236
+ # ############## #
237
+
238
+
239
+ config_name "aggregate"
240
+
241
+ # The expression defining task ID to correlate logs.
242
+ #
243
+ # This value must uniquely identify the task.
244
+ #
245
+ # Example:
246
+ # [source,ruby]
247
+ # filter {
248
+ # aggregate {
249
+ # task_id => "%{type}%{my_task_id}"
250
+ # }
251
+ # }
252
+ config :task_id, :validate => :string, :required => true
253
+
254
+ # The code to execute to update map, using current event.
255
+ #
256
+ # Or on the contrary, the code to execute to update event, using current map.
257
+ #
258
+ # You will have a 'map' variable and an 'event' variable available (that is the event itself).
259
+ #
260
+ # Example:
261
+ # [source,ruby]
262
+ # filter {
263
+ # aggregate {
264
+ # code => "map['sql_duration'] += event.get('duration')"
265
+ # }
266
+ # }
267
+ config :code, :validate => :string, :required => true
268
+
269
+ # Tell the filter what to do with aggregate map.
270
+ #
271
+ # `"create"`: create the map, and execute the code only if map wasn't created before
272
+ #
273
+ # `"update"`: doesn't create the map, and execute the code only if map was created before
274
+ #
275
+ # `"create_or_update"`: create the map if it wasn't created before, execute the code in all cases
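+ #
+ # Example (an illustrative sketch based on example #1, where only the start event may create the map):
+ # [source,ruby]
+ # filter {
+ #   aggregate {
+ #     task_id => "%{taskid}"
+ #     code => "map['sql_duration'] = 0"
+ #     map_action => "create"
+ #   }
+ # }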
276
+ config :map_action, :validate => :string, :default => "create_or_update"
277
+
278
+ # Tell the filter that task is ended, and therefore, to delete aggregate map after code execution.
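+ #
+ # Example:
+ # [source,ruby]
+ # filter {
+ #   aggregate {
+ #     end_of_task => true
+ #   }
+ # }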
279
+ config :end_of_task, :validate => :boolean, :default => false
280
+
281
+ # The path to file where aggregate maps are stored when Logstash stops
282
+ # and are loaded from when Logstash starts.
283
+ #
284
+ # If not defined, aggregate maps will not be stored at Logstash stop and will be lost.
285
+ # Must be defined in only one aggregate filter (as aggregate maps are global).
286
+ #
287
+ # Example:
288
+ # [source,ruby]
289
+ # filter {
290
+ # aggregate {
291
+ # aggregate_maps_path => "/path/to/.aggregate_maps"
292
+ # }
293
+ # }
294
+ config :aggregate_maps_path, :validate => :string, :required => false
295
+
296
+ # The number of seconds after which a task "end event" can be considered lost.
297
+ #
298
+ # When timeout occurs for a task, the task "map" is evicted.
299
+ #
300
+ # Timeout can be defined for each "task_id" pattern.
301
+ #
302
+ # If no timeout is defined, default timeout will be applied : 1800 seconds.
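+ #
+ # Example (illustrative value, to adjust to your longest expected task duration):
+ # [source,ruby]
+ # filter {
+ #   aggregate {
+ #     timeout => 600 # 10 minutes timeout
+ #   }
+ # }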
303
+ config :timeout, :validate => :number, :required => false
304
+
305
+ # The code to execute to complete the generated timeout event, when `'push_map_as_event_on_timeout'` or `'push_previous_map_as_event'` is set to true.
306
+ # The code block will have access to the newly generated timeout event that is pre-populated with the aggregation map.
307
+ #
308
+ # If `'timeout_task_id_field'` is set, the event is also populated with the task_id value
309
+ #
310
+ # Example:
311
+ # [source,ruby]
312
+ # filter {
313
+ # aggregate {
314
+ # timeout_code => "event.set('state', 'timeout')"
315
+ # }
316
+ # }
317
+ config :timeout_code, :validate => :string, :required => false
318
+
319
+ # When this option is enabled, each time a task timeout is detected, it pushes the task aggregation map as a new Logstash event.
320
+ # This makes it possible to detect and process task timeouts in Logstash, and also to manage tasks that have no explicit end event.
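+ #
+ # Example (a minimal sketch ; see example #3 above for a complete configuration):
+ # [source,ruby]
+ # filter {
+ #   aggregate {
+ #     push_map_as_event_on_timeout => true
+ #     timeout => 600
+ #   }
+ # }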
321
+ config :push_map_as_event_on_timeout, :validate => :boolean, :required => false, :default => false
322
+
323
+ # When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
324
+ # and then creates a new empty map for the next task.
325
+ #
326
+ # WARNING: this option works fine only if tasks come one after the other. It means : all task1 events, then all task2 events, etc...
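+ #
+ # Example (a minimal sketch ; see example #4 above for a complete configuration):
+ # [source,ruby]
+ # filter {
+ #   aggregate {
+ #     push_previous_map_as_event => true
+ #     timeout => 3
+ #   }
+ # }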
327
+ config :push_previous_map_as_event, :validate => :boolean, :required => false, :default => false
328
+
329
+ # This option indicates the generated timeout event's field in which to store the "task_id" value.
330
+ # The task id will then be set into the timeout event. This can help correlate which tasks have been timed out.
331
+ #
332
+ # For example, with option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
333
+ #
334
+ # By default, if this option is not set, the task id value won't be set into the generated timeout event.
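+ #
+ # Example (using the `"my_id"` field name from above):
+ # [source,ruby]
+ # filter {
+ #   aggregate {
+ #     timeout_task_id_field => "my_id"
+ #   }
+ # }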
335
+ config :timeout_task_id_field, :validate => :string, :required => false
336
+
337
+ # Defines tags to add when a timeout event is generated and yielded
338
+ #
339
+ # Example:
340
+ # [source,ruby]
341
+ # filter {
342
+ # aggregate {
343
+ # timeout_tags => ["aggregate_timeout"]
344
+ # }
345
+ # }
346
+ config :timeout_tags, :validate => :array, :required => false, :default => []
347
+
348
+
349
+ # ################ #
350
+ # STATIC VARIABLES #
351
+ # ################ #
352
+
353
+
354
+ # Default timeout (in seconds) when not defined in plugin configuration
355
+ DEFAULT_TIMEOUT = 1800
356
+
357
+ # This is the state of the filter.
358
+ # For each entry, key is "task_id" and value is a map freely updatable by 'code' config
359
+ @@aggregate_maps = {}
360
+
361
+ # Mutex used to synchronize access to 'aggregate_maps'
362
+ @@mutex = Mutex.new
363
+
364
+ # Default timeout for task_id patterns where timeout is not defined in Logstash filter configuration
365
+ @@default_timeout = nil
366
+
367
+ # For each "task_id" pattern, defines which Aggregate instance will process flush() call, processing expired Aggregate elements (older than timeout)
368
+ # For each entry, key is "task_id pattern" and value is "aggregate instance"
369
+ @@flush_instance_map = {}
370
+
371
+ # last time where timeout management in flush() method was launched, per "task_id" pattern
372
+ @@last_flush_timestamp_map = {}
373
+
374
+ # flag indicating if aggregate_maps_path option has been already set on one aggregate instance
375
+ @@aggregate_maps_path_set = false
376
+
377
+ # defines which Aggregate instance will close Aggregate static variables
378
+ @@static_close_instance = nil
379
+
380
+
381
+ # ####### #
382
+ # METHODS #
383
+ # ####### #
384
+
385
+
386
+ # Initialize plugin
387
+ public
388
+ def register
389
+
390
+ @logger.debug("Aggregate register call", :code => @code)
391
+
392
+ # validate task_id option
393
+ if !@task_id.match(/%\{.+\}/)
394
+ raise LogStash::ConfigurationError, "Aggregate plugin: task_id pattern '#{@task_id}' must contain a dynamic expression like '%{field}'"
395
+ end
396
+
397
+ # process lambda expression to call in each filter call
398
+ eval("@codeblock = lambda { |event, map| #{@code} }", binding, "(aggregate filter code)")
399
+
400
+ # process lambda expression to call in the timeout case or previous event case
401
+ if @timeout_code
402
+ eval("@timeout_codeblock = lambda { |event| #{@timeout_code} }", binding, "(aggregate filter timeout code)")
403
+ end
404
+
405
+ @@mutex.synchronize do
406
+
407
+ # timeout management : define eviction_instance for current task_id pattern
408
+ if has_timeout_options?
409
+ if @@flush_instance_map.has_key?(@task_id)
410
+ # all timeout options have to be defined in only one aggregate filter per task_id pattern
411
+ raise LogStash::ConfigurationError, "Aggregate plugin: For task_id pattern '#{@task_id}', there is more than one filter that defines timeout options. All timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are : #{display_timeout_options}"
412
+ end
413
+ @@flush_instance_map[@task_id] = self
414
+ @logger.debug("Aggregate timeout for '#{@task_id}' pattern: #{@timeout} seconds")
415
+ end
416
+
417
+ # timeout management : define default_timeout
418
+ if !@timeout.nil? && (@@default_timeout.nil? || @timeout < @@default_timeout)
419
+ @@default_timeout = @timeout
420
+ @logger.debug("Aggregate default timeout: #{@timeout} seconds")
421
+ end
422
+
423
+ # reinit static_close_instance (if necessary)
424
+ if !@@aggregate_maps_path_set && !@@static_close_instance.nil?
425
+ @@static_close_instance = nil
426
+ end
427
+
428
+ # check if aggregate_maps_path option has already been set on another instance else set @@aggregate_maps_path_set
429
+ if !@aggregate_maps_path.nil?
430
+ if @@aggregate_maps_path_set
431
+ @@aggregate_maps_path_set = false
432
+ raise LogStash::ConfigurationError, "Aggregate plugin: Option 'aggregate_maps_path' must be set on only one aggregate filter"
433
+ else
434
+ @@aggregate_maps_path_set = true
435
+ @@static_close_instance = self
436
+ end
437
+ end
438
+
439
+ # load aggregate maps from file (if option defined)
440
+ if !@aggregate_maps_path.nil? && File.exist?(@aggregate_maps_path)
441
+ File.open(@aggregate_maps_path, "r") { |from_file| @@aggregate_maps.merge!(Marshal.load(from_file)) }
442
+ File.delete(@aggregate_maps_path)
443
+ @logger.info("Aggregate maps loaded from : #{@aggregate_maps_path}")
444
+ end
445
+
446
+ # init aggregate_maps
447
+ @@aggregate_maps[@task_id] ||= {}
448
+ end
449
+ end
450
+
451
+ # Called when Logstash stops
452
+ public
453
+ def close
454
+
455
+ @logger.debug("Aggregate close call", :code => @code)
456
+
457
+ # define static close instance if none is already defined
458
+ @@static_close_instance = self if @@static_close_instance.nil?
459
+
460
+ if @@static_close_instance == self
461
+ # store aggregate maps to file (if option defined)
462
+ @@mutex.synchronize do
463
+ @@aggregate_maps.delete_if { |key, value| value.empty? }
464
+ if !@aggregate_maps_path.nil? && !@@aggregate_maps.empty?
465
+ File.open(@aggregate_maps_path, "w"){ |to_file| Marshal.dump(@@aggregate_maps, to_file) }
466
+ @logger.info("Aggregate maps stored to : #{@aggregate_maps_path}")
467
+ end
468
+ @@aggregate_maps.clear()
469
+ end
470
+
471
+ # reinit static variables for Logstash reload
472
+ @@default_timeout = nil
473
+ @@flush_instance_map = {}
474
+ @@last_flush_timestamp_map = {}
475
+ @@aggregate_maps_path_set = false
476
+ end
477
+
478
+ end
479
+
480
+ # This method is invoked each time an event matches the filter
481
+ public
482
+ def filter(event)
483
+
484
+ # define task id
485
+ task_id = event.sprintf(@task_id)
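+ # (if the event has no field to resolve the pattern, event.sprintf returns the pattern unchanged : such events are ignored)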
486
+ return if task_id.nil? || task_id == @task_id
487
+
488
+ noError = false
489
+ event_to_yield = nil
490
+
491
+ # protect aggregate_maps against concurrent access, using a mutex
492
+ @@mutex.synchronize do
493
+
494
+ # retrieve the current aggregate map
495
+ aggregate_maps_element = @@aggregate_maps[@task_id][task_id]
496
+
497
+
498
+ # create aggregate map, if it doesn't exist
499
+ if aggregate_maps_element.nil?
500
+ return if @map_action == "update"
501
+ # create new event from previous map, if @push_previous_map_as_event is enabled
502
+ if @push_previous_map_as_event && !@@aggregate_maps[@task_id].empty?
503
+ event_to_yield = extract_previous_map_as_event()
504
+ end
505
+ aggregate_maps_element = LogStash::Filters::Aggregate::Element.new(Time.now)
506
+ @@aggregate_maps[@task_id][task_id] = aggregate_maps_element
507
+ else
508
+ return if @map_action == "create"
509
+ end
510
+ map = aggregate_maps_element.map
511
+
512
+ # execute the code to read/update map and event
513
+ begin
514
+ @codeblock.call(event, map)
515
+ @logger.debug("Aggregate successful filter code execution", :code => @code)
516
+ noError = true
517
+ rescue => exception
518
+ @logger.error("Aggregate exception occurred",
519
+ :error => exception,
520
+ :code => @code,
521
+ :map => map,
522
+ :event_data => event.to_hash_with_metadata)
523
+ event.tag("_aggregateexception")
524
+ end
525
+
526
+ # delete the map if task is ended
527
+ @@aggregate_maps[@task_id].delete(task_id) if @end_of_task
528
+
529
+ end
530
+
531
+ # match the filter, only if no error occurred
532
+ filter_matched(event) if noError
533
+
534
+ # yield previous map as new event if set
535
+ yield event_to_yield unless event_to_yield.nil?
536
+
537
+ end
538
+
539
+ # Create a new event from the aggregation_map and the corresponding task_id
540
+ # This will create the event and
541
+ # if @timeout_task_id_field is set, it will set the task_id on the timeout event
542
+ # if @timeout_code is set, it will execute the timeout code on the created timeout event
543
+ # returns the newly created event
544
+ def create_timeout_event(aggregation_map, task_id)
545
+
546
+ @logger.debug("Aggregate create_timeout_event call with task_id '#{task_id}'")
547
+
548
+ event_to_yield = LogStash::Event.new(aggregation_map)
549
+
550
+ if @timeout_task_id_field
551
+ event_to_yield.set(@timeout_task_id_field, task_id)
552
+ end
553
+
554
+ LogStash::Util::Decorators.add_tags(@timeout_tags, event_to_yield, "filters/#{self.class.name}")
555
+
556
+ # Call code block if available
557
+ if @timeout_code
558
+ begin
559
+ @timeout_codeblock.call(event_to_yield)
560
+ rescue => exception
561
+ @logger.error("Aggregate exception occurred",
562
+ :error => exception,
563
+ :timeout_code => @timeout_code,
564
+ :timeout_event_data => event_to_yield.to_hash_with_metadata)
565
+ event_to_yield.tag("_aggregateexception")
566
+ end
567
+ end
568
+
569
+ return event_to_yield
570
+ end
571
+
572
+ # Extract the previous map in aggregate maps, and return it as a new Logstash event
573
+ def extract_previous_map_as_event
574
+ previous_entry = @@aggregate_maps[@task_id].shift()
575
+ previous_task_id = previous_entry[0]
576
+ previous_map = previous_entry[1].map
577
+ return create_timeout_event(previous_map, previous_task_id)
578
+ end
579
+
580
+ # Necessary to indicate Logstash to periodically call 'flush' method
581
+ def periodic_flush
582
+ true
583
+ end
584
+
585
+ # This method is invoked by Logstash every 5 seconds.
586
+ def flush(options = {})
587
+
588
+ @logger.debug("Aggregate flush call with #{options}")
589
+
590
+ # Protection against no timeout defined by Logstash conf : define a default eviction instance with timeout = DEFAULT_TIMEOUT seconds
591
+ if @@default_timeout.nil?
592
+ @@default_timeout = DEFAULT_TIMEOUT
593
+ end
594
+ if !@@flush_instance_map.has_key?(@task_id)
595
+ @@flush_instance_map[@task_id] = self
596
+ @timeout = @@default_timeout
597
+ elsif @@flush_instance_map[@task_id].timeout.nil?
598
+ @@flush_instance_map[@task_id].timeout = @@default_timeout
599
+ end
600
+
601
+ # Launch timeout management only every interval of (@timeout / 2) seconds or at Logstash shutdown
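+ # (with a check every @timeout / 2 seconds, an expired map is evicted at most 1.5 * timeout after its creation)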
602
+ if @@flush_instance_map[@task_id] == self && (!@@last_flush_timestamp_map.has_key?(@task_id) || Time.now > @@last_flush_timestamp_map[@task_id] + @timeout / 2 || options[:final])
603
+ events_to_flush = remove_expired_maps()
604
+
605
+ # at Logstash shutdown, if push_previous_map_as_event is enabled, it's important to force flush (particularly for jdbc input plugin)
606
+ if options[:final] && @push_previous_map_as_event && !@@aggregate_maps[@task_id].empty?
607
+ events_to_flush << extract_previous_map_as_event()
608
+ end
609
+
610
+ # tag flushed events, indicating "final flush" special event
611
+ if options[:final]
612
+ events_to_flush.each { |event_to_flush| event_to_flush.tag("_aggregatefinalflush") }
613
+ end
614
+
615
+ # update last flush timestamp
616
+ @@last_flush_timestamp_map[@task_id] = Time.now
617
+
618
+ # return events to flush into Logstash pipeline
619
+ return events_to_flush
620
+ else
621
+ return []
622
+ end
623
+
624
+ end
625
+
626
+
627
+ # Remove the expired Aggregate maps from @@aggregate_maps if they are older than timeout.
628
+ # If @push_previous_map_as_event option is set, or @push_map_as_event_on_timeout is set, expired maps are returned as new events to be flushed to Logstash pipeline.
629
+ def remove_expired_maps()
630
+ events_to_flush = []
631
+ min_timestamp = Time.now - @timeout
632
+
633
+ @@mutex.synchronize do
634
+
635
+ @logger.debug("Aggregate remove_expired_maps call with '#{@task_id}' pattern and #{@@aggregate_maps[@task_id].length} maps")
636
+
637
+ @@aggregate_maps[@task_id].delete_if do |key, element|
638
+ if element.creation_timestamp < min_timestamp
639
+ if @push_previous_map_as_event || @push_map_as_event_on_timeout
640
+ events_to_flush << create_timeout_event(element.map, key)
641
+ end
642
+ next true
643
+ end
644
+ next false
645
+ end
646
+ end
647
+
648
+ return events_to_flush
649
+ end
650
+
651
+ # return if this filter instance has any timeout option enabled in Logstash configuration
652
+ def has_timeout_options?()
653
+ return (
654
+ timeout ||
655
+ timeout_code ||
656
+ push_map_as_event_on_timeout ||
657
+ push_previous_map_as_event ||
658
+ timeout_task_id_field ||
659
+ !timeout_tags.empty?
660
+ )
661
+ end
662
+
663
+ # display all possible timeout options
664
+ def display_timeout_options()
665
+ return [
666
+ "timeout",
667
+ "timeout_code",
668
+ "push_map_as_event_on_timeout",
669
+ "push_previous_map_as_event",
670
+ "timeout_task_id_field",
671
+ "timeout_tags"
672
+ ].join(", ")
673
+ end
674
+
675
+ end # class LogStash::Filters::Aggregate
676
+
677
+ # Element of "aggregate_maps"
678
+ class LogStash::Filters::Aggregate::Element
679
+
680
+ attr_accessor :creation_timestamp, :map
681
+
682
+ def initialize(creation_timestamp)
683
+ @creation_timestamp = creation_timestamp
684
+ @map = {}
685
+ end
643
686
  end