logstash-filter-aggregate 2.6.3 → 2.6.4

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: aa6b23a1b94834464efa84ac45aee913ec266a5e
- data.tar.gz: 0410fb7378db5e96b6334e6348a42a092810527d
+ metadata.gz: dae791a411f617516bd80f0481456ce0b06e2197
+ data.tar.gz: b7a082089b57954c853d4966f6f5c7d7f82509a8
  SHA512:
- metadata.gz: 8c7606253c30c0b13f26dbfc938180cb02b318a735ff3c0286eef0e7b0b782219a3abf3f200be20739ba038d84264fbcd94a33ab3024d557f220c5a34802b745
- data.tar.gz: 7b05448e759a64ca53f6fe1b0eb37c908009cd094ac8bafcc897da289e735cf1bbe3e1a4c81b6459ebcbf8b9114194b77e20856f432c776af1f40026c38e5ded
+ metadata.gz: f5c3902f05ae72c511c48f7fb6df713f1a904c0528bcdca11b2bed0f8af00afd30ad054688f34d5ff8f436b20aebaeb2ae2ce27b0572e63797a604296bc7b047
+ data.tar.gz: a82642ab7984e484c7b4744dc4c6f12f46abe86c6b39ae3eb914d2c822d15d5f6ba0563d2f9965707080fbca9baf822a3010eac2c69b23d65c681e130c6f90a5
CHANGELOG.md CHANGED
@@ -1,11 +1,15 @@
+ ## 2.6.4
+ - bugfix: fix a NPE issue at Logstash 6.0 shutdown
+ - docs: remove all redundant documentation in aggregate.rb (now only present in docs/index.asciidoc)
+
  ## 2.6.3
- - Fix some documentation issues
+ - docs: fix some documentation issues

  ## 2.6.2
- - Docs: Remove incorrectly coded, redundant links
+ - docs: remove incorrectly coded, redundant links

  ## 2.6.1
- - Docs: Bump patch level for doc build
+ - docs: bump patch level for doc build

  ## 2.6.0
  - new feature: 'inactivity_timeout'. Events for a given `task_id` will be aggregated for as long as they keep arriving within the defined `inactivity_timeout` option - the inactivity timeout is reset each time a new event happens. On the contrary, `timeout` is never reset and happens after `timeout` seconds since aggregation map creation.
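
For reference, a minimal sketch of how `inactivity_timeout` combines with `timeout` (a hypothetical click-tracking configuration; the `user_id` and `clicks` names are taken from the plugin's own examples):

[source,ruby]
----------------------------------
filter {
  aggregate {
    task_id => "%{user_id}"
    code => "map['clicks'] ||= 0; map['clicks'] += 1"
    push_map_as_event_on_timeout => true
    timeout => 3600            # hard limit: map is flushed 1 hour after creation, never reset
    inactivity_timeout => 300  # sliding window: map is flushed 5 minutes after the last event
  }
}
----------------------------------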
lib/logstash/filters/aggregate.rb CHANGED
@@ -5,289 +5,7 @@ require "logstash/namespace"
  require "thread"
  require "logstash/util/decorators"

- #
- # The aim of this filter is to aggregate information available among several events (typically log lines) belonging to a same task,
- # and finally push aggregated information into final task event.
- #
- # You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly
- # otherwise events may be processed out of sequence and unexpected results will occur.
- #
- # ==== Example #1
- #
- # * with these given logs :
- # [source,ruby]
- # ----------------------------------
- # INFO - 12345 - TASK_START - start
- # INFO - 12345 - SQL - sqlQuery1 - 12
- # INFO - 12345 - SQL - sqlQuery2 - 34
- # INFO - 12345 - TASK_END - end
- # ----------------------------------
- #
- # * you can aggregate "sql duration" for the whole task with this configuration :
- # [source,ruby]
- # ----------------------------------
- # filter {
- #   grok {
- #     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
- #   }
- #
- #   if [logger] == "TASK_START" {
- #     aggregate {
- #       task_id => "%{taskid}"
- #       code => "map['sql_duration'] = 0"
- #       map_action => "create"
- #     }
- #   }
- #
- #   if [logger] == "SQL" {
- #     aggregate {
- #       task_id => "%{taskid}"
- #       code => "map['sql_duration'] += event.get('duration')"
- #       map_action => "update"
- #     }
- #   }
- #
- #   if [logger] == "TASK_END" {
- #     aggregate {
- #       task_id => "%{taskid}"
- #       code => "event.set('sql_duration', map['sql_duration'])"
- #       map_action => "update"
- #       end_of_task => true
- #       timeout => 120
- #     }
- #   }
- # }
- # ----------------------------------
- #
- # * the final event then looks like :
- # [source,ruby]
- # ----------------------------------
- # {
- #   "message" => "INFO - 12345 - TASK_END - end message",
- #   "sql_duration" => 46
- # }
- # ----------------------------------
- #
- # the field `sql_duration` is added and contains the sum of all sql queries durations.
- #
- # ==== Example #2 : no start event
- #
- # * If you have the same logs than example #1, but without a start log :
- # [source,ruby]
- # ----------------------------------
- # INFO - 12345 - SQL - sqlQuery1 - 12
- # INFO - 12345 - SQL - sqlQuery2 - 34
- # INFO - 12345 - TASK_END - end
- # ----------------------------------
- #
- # * you can also aggregate "sql duration" with a slightly different configuration :
- # [source,ruby]
- # ----------------------------------
- # filter {
- #   grok {
- #     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
- #   }
- #
- #   if [logger] == "SQL" {
- #     aggregate {
- #       task_id => "%{taskid}"
- #       code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
- #     }
- #   }
- #
- #   if [logger] == "TASK_END" {
- #     aggregate {
- #       task_id => "%{taskid}"
- #       code => "event.set('sql_duration', map['sql_duration'])"
- #       end_of_task => true
- #       timeout => 120
- #     }
- #   }
- # }
- # ----------------------------------
- #
- # * the final event is exactly the same than example #1
- # * the key point is the "||=" ruby operator. It allows to initialize 'sql_duration' map entry to 0 only if this map entry is not already initialized
- #
- #
- # ==== Example #3 : no end event
- #
- # Third use case: You have no specific end event.
- #
- # A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events, however once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
- #
- # In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when a timeout occurs.
- # In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
- # We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
- #
- # * Given these logs:
- #
- # [source,ruby]
- # ----------------------------------
- # INFO - 12345 - Clicked One
- # INFO - 12345 - Clicked Two
- # INFO - 12345 - Clicked Three
- # ----------------------------------
- #
- # * You can aggregate the amount of clicks the user did like this:
- #
- # [source,ruby]
- # ----------------------------------
- # filter {
- #   grok {
- #     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
- #   }
- #
- #   aggregate {
- #     task_id => "%{user_id}"
- #     code => "map['clicks'] ||= 0; map['clicks'] += 1;"
- #     push_map_as_event_on_timeout => true
- #     timeout_task_id_field => "user_id"
- #     timeout => 600 # 10 minutes timeout
- #     timeout_tags => ['_aggregatetimeout']
- #     timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
- #   }
- # }
- # ----------------------------------
- #
- # * After ten minutes, this will yield an event like:
- #
- # [source,json]
- # ----------------------------------
- # {
- #   "user_id": "12345",
- #   "clicks": 3,
- #   "several_clicks": true,
- #   "tags": [
- #     "_aggregatetimeout"
- #   ]
- # }
- # ----------------------------------
- #
- # ==== Example #4 : no end event and tasks come one after the other
- #
- # Fourth use case : like example #3, you have no specific end event, but also, tasks come one after the other.
- # That is to say : tasks are not interlaced. All task1 events come, then all task2 events come, ...
- # In that case, you don't want to wait task timeout to flush aggregation map.
- # * A typical case is aggregating results from jdbc input plugin.
- # * Given that you have this SQL query : `SELECT country_name, town_name FROM town`
- # * Using jdbc input plugin, you get these 3 events from :
- # [source,json]
- # ----------------------------------
- # { "country_name": "France", "town_name": "Paris" }
- # { "country_name": "France", "town_name": "Marseille" }
- # { "country_name": "USA", "town_name": "New-York" }
- # ----------------------------------
- # * And you would like these 2 result events to push them into elasticsearch :
- # [source,json]
- # ----------------------------------
- # { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
- # { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
- # ----------------------------------
- # * You can do that using `push_previous_map_as_event` aggregate plugin option :
- # [source,ruby]
- # ----------------------------------
- # filter {
- #   aggregate {
- #     task_id => "%{country_name}"
- #     code => "
- #       map['country_name'] = event.get('country_name')
- #       map['towns'] ||= []
- #       map['towns'] << {'town_name' => event.get('town_name')}
- #       event.cancel()
- #     "
- #     push_previous_map_as_event => true
- #     timeout => 3
- #   }
- # }
- # ----------------------------------
- # * The key point is that each time aggregate plugin detects a new `country_name`, it pushes previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
- # * When 5s timeout comes, the last aggregate map is pushed as a new event
- # * Finally, initial events (which are not aggregated) are dropped because useless (thanks to `event.cancel()`)
- #
- #
- # ==== Example #5 : no end event and push events as soon as possible
- #
- # Fifth use case: like example #3, there is no end event. Events keep comming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction without waiting for the `timeout`. This allows to have the aggregated events pushed closer to real time.
- #
- # A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events, however once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction. The user ineraction will be considered as ended when no events for the specified user (task_id) arrive after the specified inactivity_timeout`.
- #
- # If the user continues interacting for longer than `timeout` seconds (since first event), the aggregation map will still be deleted and pushed as a new event when timeout occurs.
- #
- # The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds instead of waiting for the end of `timeout` seconds since first event.
- #
- # In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when inactivity timeout occurs.
- # In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
- # We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
- #
- # * Given these logs:
- #
- # [source,ruby]
- # ----------------------------------
- # INFO - 12345 - Clicked One
- # INFO - 12345 - Clicked Two
- # INFO - 12345 - Clicked Three
- # ----------------------------------
- #
- # * You can aggregate the amount of clicks the user did like this:
- #
- # [source,ruby]
- # ----------------------------------
- # filter {
- #   grok {
- #     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
- #   }
- #
- #   aggregate {
- #     task_id => "%{user_id}"
- #     code => "map['clicks'] ||= 0; map['clicks'] += 1;"
- #     push_map_as_event_on_timeout => true
- #     timeout_task_id_field => "user_id"
- #     timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep comming
- #     inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event
- #     timeout_tags => ['_aggregatetimeout']
- #     timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
- #   }
- # }
- # ----------------------------------
- #
- # * After five minutes of inactivity or one hour since first event, this will yield an event like:
- #
- # [source,json]
- # ----------------------------------
- # {
- #   "user_id": "12345",
- #   "clicks": 3,
- #   "several_clicks": true,
- #   "tags": [
- #     "_aggregatetimeout"
- #   ]
- # }
- # ----------------------------------
- #
- #
- # ==== How it works
- # * the filter needs a "task_id" to correlate events (log lines) of a same task
- # * at the task beggining, filter creates a map, attached to task_id
- # * for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to map)
- # * in the final event, you can execute a last code (for instance, add map data to final event)
- # * after the final event, the map attached to task is deleted (thanks to `end_of_task => true`)
- # * an aggregate map is tied to one task_id value which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even if you have same task_id value, they won't share the same aggregate map.
- # * in one filter configuration, it is recommanded to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps
- # * if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
- # * all timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are : timeout, inactivity_timeout,timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
- # * if `code` execution raises an exception, the error is logged and event is tagged '_aggregateexception'
- #
- #
- # ==== Use Cases
- # * extract some cool metrics from task logs and push them into task final log event (like in example #1 and #2)
- # * extract error information in any task log line, and push it in final task event (to get a final event with all error information if any)
- # * extract all back-end calls as a list, and push this list in final task event (to get a task profile)
- # * extract all http headers logged in several lines to push this list in final task event (complete http request info)
- # * for every back-end call, collect call details available on several lines, analyse it and finally tag final back-end call log line (error, timeout, business-warning, ...)
- # * Finally, task id can be any correlation id matching your need : it can be a session id, a file path, ...
- #
- #
+
  class LogStash::Filters::Aggregate < LogStash::Filters::Base


@@ -298,126 +16,28 @@ class LogStash::Filters::Aggregate < LogStash::Filters::Base

  config_name "aggregate"

- # The expression defining task ID to correlate logs.
- #
- # This value must uniquely identify the task.
- #
- # Example:
- # [source,ruby]
- # filter {
- #   aggregate {
- #     task_id => "%{type}%{my_task_id}"
- #   }
- # }
  config :task_id, :validate => :string, :required => true

- # The code to execute to update map, using current event.
- #
- # Or on the contrary, the code to execute to update event, using current map.
- #
- # You will have a 'map' variable and an 'event' variable available (that is the event itself).
- #
- # Example:
- # [source,ruby]
- # filter {
- #   aggregate {
- #     code => "map['sql_duration'] += event.get('duration')"
- #   }
- # }
  config :code, :validate => :string, :required => true

- # Tell the filter what to do with aggregate map.
- #
- # `"create"`: create the map, and execute the code only if map wasn't created before
- #
- # `"update"`: doesn't create the map, and execute the code only if map was created before
- #
- # `"create_or_update"`: create the map if it wasn't created before, execute the code in all cases
  config :map_action, :validate => :string, :default => "create_or_update"

- # Tell the filter that task is ended, and therefore, to delete aggregate map after code execution.
  config :end_of_task, :validate => :boolean, :default => false

- # The path to file where aggregate maps are stored when Logstash stops
- # and are loaded from when Logstash starts.
- #
- # If not defined, aggregate maps will not be stored at Logstash stop and will be lost.
- # Must be defined in only one aggregate filter (as aggregate maps are global).
- #
- # Example:
- # [source,ruby]
- # filter {
- #   aggregate {
- #     aggregate_maps_path => "/path/to/.aggregate_maps"
- #   }
- # }
  config :aggregate_maps_path, :validate => :string, :required => false

- # The amount of seconds (since the first event) after which a task is considered as expired.
- #
- # When timeout occurs for a task, its aggregate map is evicted.
- #
- # If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
- #
- # Timeout can be defined for each "task_id" pattern.
- #
- # If no timeout is defined, default timeout will be applied : 1800 seconds.
  config :timeout, :validate => :number, :required => false

- # The amount of seconds (since the last event) after which a task is considered as expired.
- #
- # When timeout occurs for a task, its aggregate map is evicted.
- #
- # If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
- #
- # `inactivity_timeout` can be defined for each "task_id" pattern.
- #
- # `inactivity_timeout` must be lower than `timeout`.
- #
- # If no `inactivity_timeout` is defined, no inactivity timeout will be applied (only timeout will be applied).
  config :inactivity_timeout, :validate => :number, :required => false

- # The code to execute to complete timeout generated event, when 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true.
- # The code block will have access to the newly generated timeout event that is pre-populated with the aggregation map.
- #
- # If `'timeout_task_id_field'` is set, the event is also populated with the task_id value
- #
- # Example:
- # [source,ruby]
- # filter {
- #   aggregate {
- #     timeout_code => "event.set('state', 'timeout')"
- #   }
- # }
  config :timeout_code, :validate => :string, :required => false

- # When this option is enabled, each time a task timeout is detected, it pushes task aggregation map as a new Logstash event.
- # This enables to detect and process task timeouts in Logstash, but also to manage tasks that have no explicit end event.
  config :push_map_as_event_on_timeout, :validate => :boolean, :required => false, :default => false

- # When this option is enabled, each time aggregate plugin detects a new task id, it pushes previous aggregate map as a new Logstash event,
- # and then creates a new empty map for the next task.
- #
- # WARNING: this option works fine only if tasks come one after the other. It means : all task1 events, then all task2 events, etc...
  config :push_previous_map_as_event, :validate => :boolean, :required => false, :default => false

- # This option indicates the timeout generated event's field for the "task_id" value.
- # The task id will then be set into the timeout event. This can help correlate which tasks have been timed out.
- #
- # For example, with option `timeout_task_id_field => "my_id"` ,when timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
- #
- # By default, if this option is not set, task id value won't be set into timeout generated event.
  config :timeout_task_id_field, :validate => :string, :required => false

- # Defines tags to add when a timeout event is generated and yield
- #
- # Example:
- # [source,ruby]
- # filter {
- #   aggregate {
- #     timeout_tags => ["aggregate_timeout']
- #   }
- # }
  config :timeout_tags, :validate => :array, :required => false, :default => []


@@ -684,7 +304,7 @@ class LogStash::Filters::Aggregate < LogStash::Filters::Base
  end

  # Launch timeout management only every interval of (@inactivity_timeout / 2) seconds or at Logstash shutdown
- if @@flush_instance_map[@task_id] == self && (!@@last_flush_timestamp_map.has_key?(@task_id) || Time.now > @@last_flush_timestamp_map[@task_id] + @inactivity_timeout / 2 || options[:final])
+ if @@flush_instance_map[@task_id] == self && !@@aggregate_maps[@task_id].nil? && (!@@last_flush_timestamp_map.has_key?(@task_id) || Time.now > @@last_flush_timestamp_map[@task_id] + @inactivity_timeout / 2 || options[:final])
  events_to_flush = remove_expired_maps()

  # at Logstash shutdown, if push_previous_map_as_event is enabled, it's important to force flush (particularly for jdbc input plugin)
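
The added `!@@aggregate_maps[@task_id].nil?` guard is the NPE fix noted in the changelog: it makes the flush a no-op when the shared map store for this `task_id` pattern no longer exists (e.g. it was already released during Logstash 6.0 shutdown), instead of letting the later map operations run against nil. A minimal sketch of the failure mode, with simplified names (illustrative only, not the plugin's actual internals):

[source,ruby]
----------------------------------
# The class-level store is keyed by task_id pattern; during shutdown its
# entry may already be gone when a final flush runs.
aggregate_maps  = {}            # shared store, already cleared at shutdown
task_id_pattern = "%{taskid}"

if !aggregate_maps[task_id_pattern].nil?
  # Safe: per-task maps are only touched while the store entry exists.
  aggregate_maps[task_id_pattern].each_value { |map| puts "flushing #{map}" }
else
  # Without the guard, calling each_value on nil raises NoMethodError
  # (surfacing as the NPE reported against Logstash 6.0).
  puts "skipping flush: no map store for #{task_id_pattern}"
end
----------------------------------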
logstash-filter-aggregate.gemspec CHANGED
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
    s.name = 'logstash-filter-aggregate'
-   s.version = '2.6.3'
+   s.version = '2.6.4'
    s.licenses = ['Apache License (2.0)']
    s.summary = 'The aim of this filter is to aggregate information available among several events (typically log lines) belonging to a same task, and finally push aggregated information into final task event.'
    s.description = 'This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program'
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: logstash-filter-aggregate
  version: !ruby/object:Gem::Version
-   version: 2.6.3
+   version: 2.6.4
  platform: ruby
  authors:
  - Elastic
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2017-08-15 00:00:00.000000000 Z
+ date: 2017-10-10 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    requirement: !ruby/object:Gem::Requirement