logstash-filter-aggregate 2.4.0 → 2.5.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 86bf41e2f4183eb1bca1053dcdd54bba16ce715d
-  data.tar.gz: edd15d9f860d6becda2d0faabf246de87cb404bd
+  metadata.gz: b8769a07ab85fdc52359b1e08838f8f308f96e61
+  data.tar.gz: 0e2c0b0c078b789c87c57678683ac2ce72919fa5
 SHA512:
-  metadata.gz: adf8addee7fcfd1efeddf980a85933b9ccb35e89f1d3e1e784fb302902a0308ec31209decb5c3d3da83151860d212e2d7c79f41349bc5e2cc4a16acdc2352d5c
-  data.tar.gz: 320a2b40266aa4cc506ef01ec6e61e6b0f92985f2ccd4914a621b725fc9ace12f50e8d3314c36afedff97c5e0bb886a2460c2bc87549ebf41ff92389754b6842
+  metadata.gz: 7baa1843fac215316d44e8d029aa840b9fe295f7bb953e2bb51873665da473d70f54551b23960491256a542687732d20e482a3efd98fbd8124012c4cea5c3e84
+  data.tar.gz: b073f65ebad9629bdda9893f7e08fe630c8f48643bb4575001ed2430e46fbf6f13a218b09e2df9585788d519a5acf2fcb5b36c0e7e67ed8c45245c61dbdab523
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+## 2.5.0
+- new feature: add compatibility with Logstash 5
+- breaking: need Logstash 2.4 or later
+
 ## 2.4.0
 - new feature: You can now define timeout options per task_id pattern (#42)
 timeout options are : `timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags`
@@ -27,6 +31,7 @@
 
 ## 2.0.5
 - internal,deps: Depend on logstash-core-plugin-api instead of logstash-core, removing the need to mass update plugins on major releases of logstash
+- breaking: need Logstash 2.3 or later
 
 ## 2.0.4
 - internal,deps: New dependency requirements for logstash-core for the 5.0 release
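The 2.5.0 entry is the whole story of this release: every code change below is the migration from hash-style event access to the getter/setter Event API that Logstash 2.4 introduced and Logstash 5 made mandatory. A minimal sketch of the two styles (illustrative only, assuming a Logstash >= 2.4 environment):

```ruby
require "logstash/event"

event = LogStash::Event.new("duration" => 3)

# Old hash-style access, removed by the Logstash 5 Event API:
#   event['duration']
#   event['sql_duration'] = 3

# New API, used throughout this diff:
duration = event.get("duration")     # read a field
event.set("sql_duration", duration)  # write a field
event.get("sql_duration")            # => 3
```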
data/NOTICE.txt ADDED
@@ -0,0 +1,5 @@
+Elasticsearch
+Copyright 2012-2015 Elasticsearch
+
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
data/README.md CHANGED
@@ -35,7 +35,7 @@ otherwise events may be processed out of sequence and unexpected results will oc
   if [logger] == "SQL" {
     aggregate {
       task_id => "%{taskid}"
-      code => "map['sql_duration'] += event['duration']"
+      code => "map['sql_duration'] += event.get('duration')"
       map_action => "update"
     }
   }
@@ -43,7 +43,7 @@ otherwise events may be processed out of sequence and unexpected results will oc
   if [logger] == "TASK_END" {
     aggregate {
       task_id => "%{taskid}"
-      code => "event['sql_duration'] = map['sql_duration']"
+      code => "event.set('sql_duration', map['sql_duration'])"
       map_action => "update"
       end_of_task => true
       timeout => 120
@@ -81,14 +81,14 @@ the field `sql_duration` is added and contains the sum of all sql queries durati
   if [logger] == "SQL" {
     aggregate {
       task_id => "%{taskid}"
-      code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event['duration']"
+      code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
     }
   }
 
   if [logger] == "TASK_END" {
     aggregate {
       task_id => "%{taskid}"
-      code => "event['sql_duration'] = map['sql_duration']"
+      code => "event.set('sql_duration', map['sql_duration'])"
       end_of_task => true
       timeout => 120
     }
@@ -133,7 +133,7 @@ We can also add 'timeout_task_id_field' so we can correlate the task_id, which i
       timeout_task_id_field => "user_id"
       timeout => 600 # 10 minutes timeout
       timeout_tags => ['_aggregatetimeout']
-      timeout_code => "event['several_clicks'] = (event['clicks'] > 1)"
+      timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
     }
   }
 ```
@@ -228,7 +228,7 @@ The code to execute to update map, using current event.
 Or on the contrary, the code to execute to update event, using current map.
 You will have a 'map' variable and an 'event' variable available (that is the event itself).
 This option is required.
-Example value : `"map['sql_duration'] += event['duration']"`
+Example value : `"map['sql_duration'] += event.get('duration')"`
 
 - **map_action:**
 Tell the filter what to do with aggregate map (default : "create_or_update").
@@ -257,7 +257,7 @@ If no timeout is defined, default timeout will be applied : 1800 seconds.
 The code to execute to complete timeout generated event, when 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true.
 The code block will have access to the newly generated timeout event that is pre-populated with the aggregation map.
 If 'timeout_task_id_field' is set, the event is also populated with the task_id value
-Example value: `"event['state'] = 'timeout'"`
+Example value: `"event.set('state', 'timeout')"`
 
 - **push_map_as_event_on_timeout**
 When this option is enabled, each time a task timeout is detected, it pushes task aggregation map as a new logstash event.
@@ -275,8 +275,7 @@ This option indicates the timeout generated event's field for the "task_id" valu
 The task id will then be set into the timeout event. This can help correlate which tasks have been timed out.
 This field has no default value and will not be set on the event if not configured.
 Example:
-If the task_id is "12345" and this field is set to "my_id", the generated event will have:
-`event[ "my_id" ] = "12345"`
+If the task_id is "12345" and this field is set to "my_id", the generated timeout event will contain `'my_id'` key with `'12345'` value.
 
 - **timeout_tags**
 Defines tags to add when a timeout event is generated and yield.
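To make the `timeout_task_id_field` behavior concrete: on timeout the filter builds the new event from the aggregation map, then sets the task id on it. A rough sketch of the resulting event, using the values from the README example above (assumes a Logstash 5 environment):

```ruby
require "logstash/event"

# The task's aggregation map at timeout, as built by the 'code' option:
aggregation_map = { "sql_duration" => 42 }

timeout_event = LogStash::Event.new(aggregation_map)
timeout_event.set("my_id", "12345") # timeout_task_id_field => "my_id", task_id was "12345"

timeout_event.get("my_id")          # => "12345"
timeout_event.get("sql_duration")   # => 42
```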
data/lib/logstash/filters/aggregate.rb CHANGED
@@ -42,7 +42,7 @@ require "logstash/util/decorators"
 #     if [logger] == "SQL" {
 #       aggregate {
 #         task_id => "%{taskid}"
-#         code => "map['sql_duration'] += event['duration']"
+#         code => "map['sql_duration'] += event.get('duration')"
 #         map_action => "update"
 #       }
 #     }
@@ -50,7 +50,7 @@ require "logstash/util/decorators"
 #     if [logger] == "TASK_END" {
 #       aggregate {
 #         task_id => "%{taskid}"
-#         code => "event['sql_duration'] = map['sql_duration']"
+#         code => "event.set('sql_duration', map['sql_duration'])"
 #         map_action => "update"
 #         end_of_task => true
 #         timeout => 120
@@ -91,14 +91,14 @@ require "logstash/util/decorators"
 #     if [logger] == "SQL" {
 #       aggregate {
 #         task_id => "%{taskid}"
-#         code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event['duration']"
+#         code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
 #       }
 #     }
 #
 #     if [logger] == "TASK_END" {
 #       aggregate {
 #         task_id => "%{taskid}"
-#         code => "event['sql_duration'] = map['sql_duration']"
+#         code => "event.set('sql_duration', map['sql_duration'])"
 #         end_of_task => true
 #         timeout => 120
 #       }
@@ -145,7 +145,7 @@ require "logstash/util/decorators"
 #         timeout_task_id_field => "user_id"
 #         timeout => 600 # 10 minutes timeout
 #         timeout_tags => ['_aggregatetimeout']
-#         timeout_code => "event['several_clicks'] = (event['clicks'] > 1)"
+#         timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
 #       }
 #     }
 # ----------------------------------
@@ -250,7 +250,7 @@ class LogStash::Filters::Aggregate < LogStash::Filters::Base
   #
   # You will have a 'map' variable and an 'event' variable available (that is the event itself).
   #
-  # Example value : `"map['sql_duration'] += event['duration']"`
+  # Example value : `"map['sql_duration'] += event.get('duration')"`
   config :code, :validate => :string, :required => true
 
   # Tell the filter what to do with aggregate map.
@@ -288,7 +288,7 @@ class LogStash::Filters::Aggregate < LogStash::Filters::Base
   #
   # If 'timeout_task_id_field' is set, the event is also populated with the task_id value
   #
-  # Example value: `"event['state'] = 'timeout'"`
+  # Example value: `"event.set('state', 'timeout')"`
   config :timeout_code, :validate => :string, :required => false
 
   # When this option is enabled, each time a task timeout is detected, it pushes task aggregation map as a new logstash event.
@@ -308,9 +308,7 @@ class LogStash::Filters::Aggregate < LogStash::Filters::Base
   #
   # Example:
   #
-  # If the task_id is "12345" and this field is set to "my_id", the generated event will have:
-  # event[ "my_id" ] = "12345"
-  #
+  # If the task_id is "12345" and this field is set to "my_id", the generated timeout event will contain `'my_id'` key with `'12345'` value.
   config :timeout_task_id_field, :validate => :string, :required => false
 
   # Defines tags to add when a timeout event is generated and yield
@@ -362,13 +360,13 @@ class LogStash::Filters::Aggregate < LogStash::Filters::Base
         raise LogStash::ConfigurationError, "Aggregate plugin: For task_id pattern #{@task_id}, there are more than one filter which defines timeout options. All timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are : #{display_timeout_options}"
       end
       @@eviction_instance_map[@task_id] = self
-      @logger.info("Aggregate plugin: timeout for '#{@task_id}' pattern: #{@timeout} seconds")
+      @logger.debug("Aggregate timeout for '#{@task_id}' pattern: #{@timeout} seconds")
     end
 
     # timeout management : define default_timeout
     if !@timeout.nil? && (@@default_timeout.nil? || @timeout < @@default_timeout)
       @@default_timeout = @timeout
-      @logger.info("Aggregate plugin: default timeout: #{@timeout} seconds")
+      @logger.debug("Aggregate default timeout: #{@timeout} seconds")
     end
 
     # check if aggregate_maps_path option has already been set on another instance
@@ -385,7 +383,7 @@ class LogStash::Filters::Aggregate < LogStash::Filters::Base
     if !@aggregate_maps_path.nil? && File.exist?(@aggregate_maps_path)
       File.open(@aggregate_maps_path, "r") { |from_file| @@aggregate_maps = Marshal.load(from_file) }
       File.delete(@aggregate_maps_path)
-      @logger.info("Aggregate plugin: load aggregate maps from : #{@aggregate_maps_path}")
+      @logger.info("Aggregate maps loaded from : #{@aggregate_maps_path}")
     end
 
     # init aggregate_maps
@@ -402,7 +400,7 @@ class LogStash::Filters::Aggregate < LogStash::Filters::Base
     @@aggregate_maps.delete_if { |key, value| value.empty? }
     if !@aggregate_maps_path.nil? && !@@aggregate_maps.empty?
       File.open(@aggregate_maps_path, "w"){ |to_file| Marshal.dump(@@aggregate_maps, to_file) }
-      @logger.info("Aggregate plugin: store aggregate maps to : #{@aggregate_maps_path}")
+      @logger.info("Aggregate maps stored to : #{@aggregate_maps_path}")
     end
     @@aggregate_maps.clear()
   end
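The two hunks above are the `aggregate_maps_path` persistence feature: in-flight maps are serialized with Ruby's built-in `Marshal` at shutdown and reloaded (then the file is deleted) at startup. A self-contained sketch of that round trip, with an illustrative path and a plain-hash stand-in for the map values:

```ruby
require "tmpdir"

# In-flight aggregation maps, keyed by task_id pattern, then by task id:
maps = { "%{taskid}" => { "id123" => { "sql_duration" => 42 } } }

path = File.join(Dir.tmpdir, "aggregate_maps_example")   # illustrative path
File.open(path, "w") { |f| Marshal.dump(maps, f) }       # at pipeline shutdown
restored = File.open(path, "r") { |f| Marshal.load(f) }  # at pipeline startup
File.delete(path)                                        # plugin deletes the file once loaded

restored == maps  # => true
```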
@@ -452,7 +450,11 @@ class LogStash::Filters::Aggregate < LogStash::Filters::Base
       @codeblock.call(event, map)
       noError = true
     rescue => exception
-      @logger.error("Aggregate exception occurred. Error: #{exception} ; Code: #{@code} ; Map: #{map} ; EventData: #{event.instance_variable_get('@data')}")
+      @logger.error("Aggregate exception occurred",
+        :error => exception,
+        :code => @code,
+        :map => map,
+        :event_data => event.to_hash_with_metadata)
       event.tag("_aggregateexception")
     end
 
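Two things change in this rescue block. The message becomes structured (constant text plus named fields, which the Logstash logger can render and filter far better than one interpolated string), and the event dump stops reaching into internals: `instance_variable_get('@data')` depended on the old Ruby event implementation, while `to_hash_with_metadata` is the public API equivalent. A quick sketch of the latter (assumes a Logstash 5 environment):

```ruby
require "logstash/event"

event = LogStash::Event.new("duration" => 3)
event.set("[@metadata][source]", "sql-log")  # @metadata lives outside normal fields

event.to_hash                # fields only, without the @metadata subtree
event.to_hash_with_metadata  # fields plus @metadata, as now logged on errors
```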
@@ -478,17 +480,20 @@ class LogStash::Filters::Aggregate < LogStash::Filters::Base
     event_to_yield = LogStash::Event.new(aggregation_map)
 
     if @timeout_task_id_field
-      event_to_yield[@timeout_task_id_field] = task_id
+      event_to_yield.set(@timeout_task_id_field, task_id)
     end
 
-    LogStash::Util::Decorators.add_tags(@timeout_tags,event_to_yield,"filters/#{self.class.name}")
+    LogStash::Util::Decorators.add_tags(@timeout_tags, event_to_yield, "filters/#{self.class.name}")
 
     # Call code block if available
     if @timeout_code
       begin
         @timeout_codeblock.call(event_to_yield)
       rescue => exception
-        @logger.error("Aggregate exception occurred. Error: #{exception} ; TimeoutCode: #{@timeout_code} ; TimeoutEventData: #{event_to_yield.instance_variable_get('@data')}")
+        @logger.error("Aggregate exception occurred",
+          :error => exception,
+          :timeout_code => @timeout_code,
+          :timeout_event_data => event_to_yield.to_hash_with_metadata)
         event_to_yield.tag("_aggregateexception")
       end
     end
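Note the asymmetry between the two rescue blocks: `@codeblock` is called with the event and the map, while `@timeout_codeblock` receives only the generated event, which is already pre-populated from the map. One plausible way such blocks are built from the config strings (an assumption for illustration; the diff only shows the call sites):

```ruby
# Config strings as users write them:
code         = "map['sql_duration'] += event.get('duration')"
timeout_code = "event.set('state', 'timeout')"

# Compiled once into callables (assumed construction, simplified):
codeblock         = eval("lambda { |event, map| #{code} }")    # codeblock.call(event, map)
timeout_codeblock = eval("lambda { |event| #{timeout_code} }") # timeout_codeblock.call(event_to_yield)
```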
data/logstash-filter-aggregate.gemspec CHANGED
@@ -1,24 +1,26 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-filter-aggregate'
-  s.version = '2.4.0'
+  s.version = '2.5.0'
   s.licenses = ['Apache License (2.0)']
-  s.summary = "The aim of this filter is to aggregate information available among several events (typically log lines) belonging to a same task, and finally push aggregated information into final task event."
-  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
-  s.authors = ["Elastic", "Fabien Baligand"]
+  s.summary = 'The aim of this filter is to aggregate information available among several events (typically log lines) belonging to a same task, and finally push aggregated information into final task event.'
+  s.description = 'This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program'
+  s.authors = ['Elastic', 'Fabien Baligand']
   s.email = 'info@elastic.co'
-  s.homepage = "https://github.com/logstash-plugins/logstash-filter-aggregate"
-  s.require_paths = ["lib"]
+  s.homepage = 'https://github.com/logstash-plugins/logstash-filter-aggregate'
+  s.require_paths = ['lib']
 
   # Files
-  s.files = Dir['lib/**/*','spec/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE']
+  s.files = Dir['lib/**/*','spec/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.txt']
 
   # Tests
   s.test_files = s.files.grep(%r{^(test|spec|features)/})
 
   # Special flag to let us know this is actually a logstash plugin
-  s.metadata = { "logstash_plugin" => "true", "logstash_group" => "filter" }
+  s.metadata = { 'logstash_plugin' => 'true', 'logstash_group' => 'filter' }
 
   # Gem dependencies
-  s.add_runtime_dependency "logstash-core-plugin-api", "~> 1.0"
-  s.add_development_dependency 'logstash-devutils', '~> 0'
+  s.add_runtime_dependency 'logstash-core-plugin-api', '>= 1.60', '<= 2.99'
+
+  # Gem test dependencies
+  s.add_development_dependency 'logstash-devutils'
 end
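The dependency change is the mechanical half of the Logstash 5 story: `~> 1.0` pinned the plugin to the 1.x plugin API, while the new range spans both the 1.6x API (Logstash 2.4) and the 2.x API (Logstash 5), which is exactly why the changelog raises the floor to Logstash 2.4. The range semantics can be verified with RubyGems directly:

```ruby
require "rubygems"

req = Gem::Requirement.new(">= 1.60", "<= 2.99")

req.satisfied_by?(Gem::Version.new("1.60"))  # => true  (Logstash 2.4 era API)
req.satisfied_by?(Gem::Version.new("2.99"))  # => true  (Logstash 5 era API)
req.satisfied_by?(Gem::Version.new("1.0"))   # => false (allowed by the old "~> 1.0")
```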
data/spec/filters/aggregate_spec.rb CHANGED
@@ -9,8 +9,8 @@ describe LogStash::Filters::Aggregate do
     reset_timeout_management()
     aggregate_maps.clear()
     @start_filter = setup_filter({ "map_action" => "create", "code" => "map['sql_duration'] = 0" })
-    @update_filter = setup_filter({ "map_action" => "update", "code" => "map['sql_duration'] += event['duration']" })
-    @end_filter = setup_filter({"timeout_task_id_field" => "my_id", "push_map_as_event_on_timeout" => true, "map_action" => "update", "code" => "event.to_hash.merge!(map)", "end_of_task" => true, "timeout" => 5, "timeout_code" => "event['test'] = 'testValue'", "timeout_tags" => ["tag1", "tag2"] })
+    @update_filter = setup_filter({ "map_action" => "update", "code" => "map['sql_duration'] += event.get('duration')" })
+    @end_filter = setup_filter({"timeout_task_id_field" => "my_id", "push_map_as_event_on_timeout" => true, "map_action" => "update", "code" => "event.set('sql_duration', map['sql_duration'])", "end_of_task" => true, "timeout" => 5, "timeout_code" => "event.set('test', 'testValue')", "timeout_tags" => ["tag1", "tag2"] })
   end
 
   context "Start event" do
@@ -27,7 +27,7 @@ describe LogStash::Filters::Aggregate do
 
       expect(aggregate_maps["%{taskid}"].size).to eq(1)
       expect(aggregate_maps["%{taskid}"]["id123"]).not_to be_nil
-      expect(aggregate_maps["%{taskid}"]["id123"].creation_timestamp).to be >= event["@timestamp"]
+      expect(aggregate_maps["%{taskid}"]["id123"].creation_timestamp).to be >= event.timestamp.time
       expect(aggregate_maps["%{taskid}"]["id123"].map["sql_duration"]).to eq(0)
     end
   end
@@ -46,8 +46,8 @@ describe LogStash::Filters::Aggregate do
         @start_filter.filter(second_start_event)
 
         expect(aggregate_maps["%{taskid}"].size).to eq(1)
-        expect(aggregate_maps["%{taskid}"]["id124"].creation_timestamp).to be < second_start_event["@timestamp"]
-        expect(aggregate_maps["%{taskid}"]["id124"].map["sql_duration"]).to eq(first_update_event["duration"])
+        expect(aggregate_maps["%{taskid}"]["id124"].creation_timestamp).to be < second_start_event.timestamp.time
+        expect(aggregate_maps["%{taskid}"]["id124"].map["sql_duration"]).to eq(first_update_event.get("duration"))
       end
     end
   end
@@ -60,7 +60,7 @@ describe LogStash::Filters::Aggregate do
       @end_filter.filter(end_event)
 
       expect(aggregate_maps["%{taskid}"]).to be_empty
-      expect(end_event["sql_duration"]).to be_nil
+      expect(end_event.get("sql_duration")).to be_nil
     end
   end
 end
@@ -81,7 +81,7 @@ describe LogStash::Filters::Aggregate do
       end_event = end_event()
       @end_filter.filter(end_event)
       expect(aggregate_maps["%{taskid}"].size).to eq(1)
-      expect(end_event["sql_duration"]).to be_nil
+      expect(end_event.get("sql_duration")).to be_nil
     end
   end
 
@@ -98,14 +98,16 @@ describe LogStash::Filters::Aggregate do
   describe "and the same id of the 'start event'" do
     it "add 'sql_duration' field to the end event and deletes the aggregate map associated to taskid" do
       expect(aggregate_maps["%{taskid}"].size).to eq(1)
+      expect(aggregate_maps["%{taskid}"][@task_id_value].map["sql_duration"]).to eq(0)
 
       @update_filter.filter(update_event("taskid" => @task_id_value, "duration" => 2))
+      expect(aggregate_maps["%{taskid}"][@task_id_value].map["sql_duration"]).to eq(2)
 
       end_event = end_event("taskid" => @task_id_value)
       @end_filter.filter(end_event)
 
       expect(aggregate_maps["%{taskid}"]).to be_empty
-      expect(end_event["sql_duration"]).to eq(2)
+      expect(end_event.get("sql_duration")).to eq(2)
     end
 
   end
@@ -127,7 +129,7 @@ describe LogStash::Filters::Aggregate do
       start_event = start_event("taskid" => "id124")
       @start_filter.filter(start_event)
 
-      expect(start_event["tags"]).to eq(["_aggregateexception"])
+      expect(start_event.get("tags")).to eq(["_aggregateexception"])
     end
   end
 
@@ -179,10 +181,10 @@ describe LogStash::Filters::Aggregate do
       entries = @end_filter.flush()
       expect(aggregate_maps["%{taskid}"]).to be_empty
       expect(entries.size).to eq(1)
-      expect(entries[0]['my_id']).to eq("id_123") # task id
-      expect(entries[0]["sql_duration"]).to eq(0) # Aggregation map
-      expect(entries[0]['test']).to eq("testValue") # Timeout code
-      expect(entries[0]['tags']).to eq(["tag1", "tag2"]) # Timeout tags
+      expect(entries[0].get("my_id")).to eq("id_123") # task id
+      expect(entries[0].get("sql_duration")).to eq(0) # Aggregation map
+      expect(entries[0].get("test")).to eq("testValue") # Timeout code
+      expect(entries[0].get("tags")).to eq(["tag1", "tag2"]) # Timeout tags
     end
   end
 
@@ -234,30 +236,30 @@ describe LogStash::Filters::Aggregate do
   describe "when push_previous_map_as_event option is activated on another filter with same task_id pattern" do
     it "should throw a LogStash::ConfigurationError" do
       expect {
-        setup_filter({"code" => "map['taskid'] = event['taskid']", "push_previous_map_as_event" => true})
+        setup_filter({"code" => "map['taskid'] = event.get('taskid')", "push_previous_map_as_event" => true})
       }.to raise_error(LogStash::ConfigurationError)
     end
   end
 
   describe "when a new task id is detected, " do
     it "should push previous map as new event" do
-      push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "map['ppm_id'] = event['ppm_id']", "push_previous_map_as_event" => true, "timeout" => 5 })
+      push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "map['ppm_id'] = event.get('ppm_id')", "push_previous_map_as_event" => true, "timeout" => 5 })
       push_filter.filter(event({"ppm_id" => "1"})) { |yield_event| fail "task 1 shouldn't have yield event" }
-      push_filter.filter(event({"ppm_id" => "2"})) { |yield_event| expect(yield_event["ppm_id"]).to eq("1") }
+      push_filter.filter(event({"ppm_id" => "2"})) { |yield_event| expect(yield_event.get("ppm_id")).to eq("1") }
       expect(aggregate_maps["%{ppm_id}"].size).to eq(1)
     end
   end
 
   describe "when timeout happens, " do
     it "flush method should return last map as new event" do
-      push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "map['ppm_id'] = event['ppm_id']", "push_previous_map_as_event" => true, "timeout" => 1, "timeout_code" => "event['test'] = 'testValue'" })
+      push_filter = setup_filter({ "task_id" => "%{ppm_id}", "code" => "map['ppm_id'] = event.get('ppm_id')", "push_previous_map_as_event" => true, "timeout" => 1, "timeout_code" => "event.set('test', 'testValue')" })
       push_filter.filter(event({"ppm_id" => "1"}))
       sleep(2)
       events_to_flush = push_filter.flush()
       expect(events_to_flush).not_to be_nil
       expect(events_to_flush.size).to eq(1)
-      expect(events_to_flush[0]["ppm_id"]).to eq("1")
-      expect(events_to_flush[0]['test']).to eq("testValue")
+      expect(events_to_flush[0].get("ppm_id")).to eq("1")
+      expect(events_to_flush[0].get('test')).to eq("testValue")
       expect(aggregate_maps["%{ppm_id}"].size).to eq(0)
     end
   end
data/spec/filters/aggregate_spec_helper.rb CHANGED
@@ -2,8 +2,6 @@
 require "logstash/filters/aggregate"
 
 def event(data = {})
-  data["message"] ||= "Log message"
-  data["@timestamp"] ||= Time.now
   LogStash::Event.new(data)
 end
 
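Dropping the helper's two defaults is another Logstash 5 adjustment: the event constructor now populates `@timestamp` itself (and no longer takes a plain Ruby `Time` for it), and the specs above read it back through `event.timestamp.time` instead of `event['@timestamp']`. A sketch of what the slimmed-down helper relies on, assuming a Logstash 5 environment:

```ruby
require "logstash/event"

def event(data = {})
  LogStash::Event.new(data)
end

e = event("ppm_id" => "1")
e.get("ppm_id")   # => "1"
e.timestamp       # => a LogStash::Timestamp, set automatically at creation
e.timestamp.time  # => the underlying Ruby Time, as compared in the specs
```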
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: logstash-filter-aggregate
 version: !ruby/object:Gem::Version
-  version: 2.4.0
+  version: 2.5.0
 platform: ruby
 authors:
 - Elastic
@@ -9,26 +9,32 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-10-15 00:00:00.000000000 Z
+date: 2016-10-30 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
+    - - ">="
       - !ruby/object:Gem::Version
-        version: '1.0'
+        version: '1.60'
+    - - "<="
+      - !ruby/object:Gem::Version
+        version: '2.99'
   name: logstash-core-plugin-api
   prerelease: false
   type: :runtime
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '1.60'
+    - - "<="
       - !ruby/object:Gem::Version
-        version: '1.0'
+        version: '2.99'
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
+    - - ">="
       - !ruby/object:Gem::Version
         version: '0'
   name: logstash-devutils
@@ -36,7 +42,7 @@ dependencies:
   type: :development
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
+    - - ">="
       - !ruby/object:Gem::Version
         version: '0'
 description: This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program
@@ -50,6 +56,7 @@ files:
 - CONTRIBUTORS
 - Gemfile
 - LICENSE
+- NOTICE.txt
 - README.md
 - lib/logstash/filters/aggregate.rb
 - logstash-filter-aggregate.gemspec